'''
0. A question for everyone: does Python support constants? Many forum members will answer
no, but in fact Python's built-in namespace does contain a small set of constants, such as
the familiar True, False and None; Python simply offers no direct way to define your own.
The task here is to create a const module whose job is to make Python support constants.
If that still sounds puzzling, let's look at an example.
test.py is our test code, with the following content:

# The const module is the one this exercise asks us to write.
# It is used to give Python constant semantics.
import const

const.NAME = "FishC"
print(const.NAME)

try:
    # Try to modify the constant
    const.NAME = "FishC.com"
except TypeError as Err:
    print(Err)

try:
    # Constant names must be uppercase
    const.name = "FishC"
except TypeError as Err:
    print(Err)

The output of running it is:
>>>
FishC
Constants cannot be changed!
Constant names must be in uppercase!

What exactly did we do in the const module to give it this "magic"?
Follow 小甲鱼's hints step by step and you will see:
Hint 1: we need a Const class.
Hint 2: override the magic method of Const that defines what happens when an instance
attribute is assigned.
Hint 3: check whether that attribute already exists.
Hint 4: check whether the attribute name is all uppercase.
Hint 5: observant readers may have noticed that once the const module is imported we use it
like an object (const.NAME = "FishC"). Can a module really be an object?
Indeed it can: in Python, everything is an object.
The following trick binds your module to an instance of class A.
sys.modules is a dictionary that holds every module imported since the interpreter started;
the keys are module names and the values are the module objects.

import sys
sys.modules[__name__] = A()
'''
class Const:
    def __setattr__(self, name, value):
        # Raise TypeError so the test code's except blocks catch and print the message.
        if hasattr(self, name):
            raise TypeError('Constants cannot be changed!')
        elif not name.isupper():
            raise TypeError('Constant names must be in uppercase!')
        else:
            super().__setattr__(name, value)

import sys
sys.modules[__name__] = Const()
|
from Utils.menu import Resto

# Menu item number -> price in dollars
items = {
    1: 3.50,
    2: 2.50,
    3: 4.00,
    4: 3.5,
    5: 1.75,
    6: 1.50,
    7: 2.25,
    8: 3.75,
    9: 1.25
}

def total(order):
    # Each character of the order string is a single-digit item number.
    item = [int(n) for n in order]
    totals = 0
    for unit in item:
        totals += items.get(unit, 0)  # unknown item numbers add nothing
    return totals

cont = int(input("enter 0 for exit 1 for continue"))
while cont != 0:
    if cont == 1:
        print(Resto.MENU)
        order = input("enter your order: ")
        if len(order) <= 0:
            print("")
        print(f"your total bill is ${total(order)}")
    cont = int(input("enter 0 for exit 1 for continue"))
print("Thanks!!!")
|
__licence__ = 'MIT'
__author__ = 'kuyaki'
__credits__ = ['kuyaki']
__maintainer__ = 'kuyaki'
__date__ = '2021/03/23'
from collections import defaultdict
from typing import Dict, Optional, Set, List, Iterable
import networkx
from program_slicing.graph.parse import parse, Lang
from program_slicing.graph.cdg import ControlDependenceGraph
from program_slicing.graph.cfg import ControlFlowGraph
from program_slicing.graph.ddg import DataDependenceGraph
from program_slicing.graph.pdg import ProgramDependenceGraph
from program_slicing.graph.point import Point
from program_slicing.graph.statement import Statement, StatementType
from program_slicing.graph.basic_block import BasicBlock
from program_slicing.graph import convert
class ProgramGraphsManager:
def __init__(self, source_code: str = None, lang: Lang = None) -> None:
self.__cdg: Optional[ControlDependenceGraph] = None
self.__cfg: Optional[ControlFlowGraph] = None
self.__ddg: Optional[DataDependenceGraph] = None
self.__pdg: Optional[ProgramDependenceGraph] = None
self.__basic_blocks: Optional[Dict[Statement, BasicBlock]] = None
self.__dom_blocks: Optional[Dict[BasicBlock, Set[BasicBlock]]] = None
self.__reach_blocks: Optional[Dict[BasicBlock, Set[BasicBlock]]] = None
self.__scope_dependency: Optional[Dict[Statement, Statement]] = None
self.__scope_dependency_backward: Optional[Dict[Statement, Set[Statement]]] = None
self.__function_dependency: Optional[Dict[Statement, Statement]] = None
self.__statement_line_numbers: Optional[Dict[Statement, Set[int]]] = None
self.__general_statements: Optional[Set[Statement]] = None
self.__sorted_statements: Optional[List[Statement]] = None
if source_code is not None and lang is not None:
self.__build_cdg = lambda: parse.control_dependence_graph(source_code, lang)
self.__build_cfg = lambda: convert.cdg.to_cfg(self.control_dependence_graph)
self.__build_ddg = lambda: convert.cdg.to_ddg(self.control_dependence_graph)
self.__build_pdg = lambda: convert.cdg.to_pdg(self.control_dependence_graph)
else:
self.__build_cdg = lambda: ControlDependenceGraph()
self.__build_cfg = lambda: ControlFlowGraph()
self.__build_ddg = lambda: DataDependenceGraph()
self.__build_pdg = lambda: ProgramDependenceGraph()
@classmethod
def from_source_code(cls, source_code: str, lang: Lang) -> 'ProgramGraphsManager':
"""
Build all the graphs by a given source code string and a language description.
:param source_code: string with the source code.
:param lang: the source code Lang.
:return: Program Graphs Manager.
"""
return cls(source_code, lang)
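    # A minimal usage sketch (hedged): the concrete Lang member used below is an assumption
    # about program_slicing.graph.parse.Lang, not something confirmed by this module.
    #   manager = ProgramGraphsManager.from_source_code(source_code, Lang.JAVA)
    #   cdg = manager.control_dependence_graph
    #   block = manager.get_basic_block(next(iter(manager.sorted_statements)))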
@classmethod
def from_control_dependence_graph(cls, graph: ControlDependenceGraph) -> 'ProgramGraphsManager':
"""
Build all the graphs by a given Control Dependence Graph.
:param graph: Control Dependence Graph.
:return: Program Graphs Manager.
"""
result = cls()
result.__build_cdg = lambda: graph
result.__build_cfg = lambda: convert.cdg.to_cfg(result.control_dependence_graph)
result.__build_ddg = lambda: convert.cdg.to_ddg(result.control_dependence_graph)
result.__build_pdg = lambda: convert.cdg.to_pdg(result.control_dependence_graph)
return result
@classmethod
def from_control_flow_graph(cls, graph: ControlFlowGraph) -> 'ProgramGraphsManager':
"""
Build all the graphs by a given Control Flow Graph.
:param graph: Control Flow Graph.
:return: Program Graphs Manager.
"""
result = cls()
result.__build_cdg = lambda: convert.cfg.to_cdg(result.control_flow_graph)
result.__build_cfg = lambda: graph
result.__build_ddg = lambda: convert.cfg.to_ddg(result.control_flow_graph)
result.__build_pdg = lambda: convert.cfg.to_pdg(result.control_flow_graph)
return result
@classmethod
def from_data_dependence_graph(cls, graph: DataDependenceGraph) -> 'ProgramGraphsManager':
"""
Build all the graphs by a given Data Dependence Graph.
:param graph: Data Dependence Graph.
:return: Program Graphs Manager.
"""
result = cls()
result.__build_cdg = lambda: convert.ddg.to_cdg(result.data_dependence_graph)
result.__build_cfg = lambda: convert.ddg.to_cfg(result.data_dependence_graph)
result.__build_ddg = lambda: graph
result.__build_pdg = lambda: convert.ddg.to_pdg(result.data_dependence_graph)
return result
@classmethod
def from_program_dependence_graph(cls, graph: ProgramDependenceGraph) -> 'ProgramGraphsManager':
"""
Build all the graphs by a given Program Dependence Graph.
:param graph: Program Dependence Graph.
:return: Program Graphs Manager.
"""
result = cls()
result.__build_cdg = lambda: convert.pdg.to_cdg(result.program_dependence_graph)
result.__build_cfg = lambda: convert.pdg.to_cfg(result.program_dependence_graph)
result.__build_ddg = lambda: convert.pdg.to_ddg(result.program_dependence_graph)
result.__build_pdg = lambda: graph
return result
@property
def control_dependence_graph(self) -> ControlDependenceGraph:
"""
Structure that represents Control Dependence Graph (inherited from networkx.DiGraph) with corresponding methods.
:return: Control Dependence Graph.
"""
if self.__cdg is None:
self.__cdg = self.__build_cdg()
return self.__cdg
@property
def control_flow_graph(self) -> ControlFlowGraph:
"""
Structure that represents Control Flow Graph (inherited from networkx.DiGraph) with corresponding methods.
:return: Control Flow Graph.
"""
if self.__cfg is None:
self.__cfg = self.__build_cfg()
return self.__cfg
@property
def data_dependence_graph(self) -> DataDependenceGraph:
"""
Structure that represents Data Dependence Graph (inherited from networkx.DiGraph) with corresponding methods.
:return: Data Dependence Graph.
"""
if self.__ddg is None:
self.__ddg = self.__build_ddg()
return self.__ddg
@property
def program_dependence_graph(self) -> ProgramDependenceGraph:
"""
Structure that represents Program Dependence Graph (inherited from networkx.DiGraph) with corresponding methods.
:return: Program Dependence Graph.
"""
if self.__pdg is None:
self.__pdg = self.__build_pdg()
return self.__pdg
@property
def sorted_statements(self) -> List[Statement]:
"""
        Statements are sorted first by increasing start_point, then by decreasing end_point.
:return: sorted list of all Statements.
"""
if self.__sorted_statements is None:
self.__sorted_statements = self.__build_sorted_statements()
return self.__sorted_statements
@property
def general_statements(self) -> Set[Statement]:
"""
        A Statement is 'general' if it is not contained in any Statement other than a
        SCOPE, BRANCH, LOOP, FUNCTION or EXIT Statement.
:return: set of general Statements.
"""
if self.__general_statements is None:
self.__general_statements = self.__build_general_statements()
return self.__general_statements
@property
def scope_statements(self) -> Iterable[Statement]:
"""
        A Statement is a 'scope' Statement if its type is SCOPE, BRANCH, LOOP or FUNCTION.
:return: set of scope Statements.
"""
if self.__scope_dependency_backward is None:
self.__scope_dependency_backward = self.__build_statements_in_scope()
return self.__scope_dependency_backward.keys()
def get_basic_block(self, statement: Statement) -> Optional[BasicBlock]:
"""
Basic Block - structure that represents Control Flow Graph nodes.
:return: Basic Block that contains the given Statement.
"""
if self.__basic_blocks is None:
self.__basic_blocks = self.__build_basic_blocks()
return self.__basic_blocks.get(statement, None)
def get_boundary_blocks(self, block: BasicBlock) -> Set[BasicBlock]:
"""
        Get the set of Basic Blocks whose intersection of dominated and reach blocks contains the given block.
        :param block: Basic Block for which the boundary blocks should be obtained.
:return: set of boundary Basic Blocks.
"""
boundary_blocks = set()
for basic_block in self.control_flow_graph:
if block in self.get_dominated_blocks(basic_block).intersection(self.get_reach_blocks(basic_block)):
boundary_blocks.add(basic_block)
return boundary_blocks
def get_boundary_blocks_for_statement(self, statement: Statement) -> Set[BasicBlock]:
"""
        Get a set of boundary blocks for the Basic Block in which the given Statement is placed.
        :param statement: Statement for which the boundary blocks should be obtained.
:return: set of boundary Basic Blocks.
"""
block = self.get_basic_block(statement)
return self.get_boundary_blocks(block)
def get_dominated_blocks(self, block: BasicBlock) -> Set[BasicBlock]:
"""
Get a set of Basic Blocks which are reachable in Control Dependence Graph from the parent of the given block.
        :param block: Basic Block for which the dominated blocks should be obtained.
:return: set of dominated Basic Blocks.
"""
if self.__dom_blocks is None:
self.__dom_blocks = {}
if block in self.__dom_blocks:
return self.__dom_blocks[block]
result = {block}
root = block.root
if root is None:
return result
predecessors = [predecessor for predecessor in self.control_dependence_graph.predecessors(root)]
if len(predecessors) == 0:
predecessors = [root]
for root in predecessors:
for statement in networkx.algorithms.bfs_tree(self.control_dependence_graph, root):
if statement == root:
continue
current_block = self.get_basic_block(statement)
if current_block is not None:
result.add(current_block)
self.__dom_blocks[block] = result
return result
def get_reach_blocks(self, block: BasicBlock) -> Set[BasicBlock]:
"""
        Get a set of Basic Blocks which are reachable in the Control Flow Graph from the given block (including itself).
        :param block: Basic Block for which the reach blocks should be obtained.
:return: set of reach Basic Blocks.
"""
if self.__reach_blocks is None:
self.__reach_blocks = {}
return self.__build_reach_blocks(block)
def get_statement_line_numbers(self, statement: Statement) -> Set[int]:
"""
Get a set of line numbers in which the given Statement is placed.
        :param statement: Statement for which the line numbers should be obtained.
:return: set of line numbers (integers).
"""
if self.__statement_line_numbers is None:
self.__statement_line_numbers = {}
if statement in self.__statement_line_numbers:
return self.__statement_line_numbers[statement]
inner_statements = self.get_statements_in_scope(statement)
if inner_statements:
result = set()
if statement.statement_type in {StatementType.SCOPE, StatementType.BRANCH, StatementType.LOOP}:
result.add(statement.start_point.line_number)
if statement.statement_type == StatementType.SCOPE:
result.add(statement.end_point.line_number)
for inner_statement in inner_statements:
result.update(self.get_statement_line_numbers(inner_statement))
self.__statement_line_numbers[statement] = result
else:
result = {
number
for number in range(statement.start_point.line_number, statement.end_point.line_number + 1)
}
self.__statement_line_numbers[statement] = result
return result
def get_function_statement(self, statement: Statement) -> Optional[Statement]:
"""
Get the minimal FUNCTION Statement in which the given Statement is placed.
        :param statement: Statement for which the FUNCTION statement should be obtained.
:return: FUNCTION Statement or None if not found.
"""
if self.__function_dependency is None:
self.__function_dependency = self.__build_function_dependency()
return self.__function_dependency.get(statement, None)
def get_function_statement_by_range(self, start_point: Point, end_point: Point) -> Optional[Statement]:
"""
Get the minimal FUNCTION Statement in which the given range is placed.
:param start_point: start Point of the given range.
:param end_point: end Point of the given range.
:return: FUNCTION Statement or None if not found.
"""
statements = self.sorted_statements
start_statement_idx = self.__bisect_range_left(start_point, end_point)
if start_statement_idx >= len(statements):
return None
return self.get_function_statement(statements[start_statement_idx])
def get_scope_statement(self, statement: Statement) -> Optional[Statement]:
"""
Get the minimal SCOPE, BRANCH, LOOP or FUNCTION Statement in which the given Statement is placed.
        :param statement: Statement for which the scope statement should be obtained.
:return: SCOPE, BRANCH, LOOP or FUNCTION Statement (or None if not found).
"""
if self.__scope_dependency is None:
self.__scope_dependency = self.control_dependence_graph.scope_dependency
return self.__scope_dependency.get(statement, None)
def get_statements_in_scope(self, scope: Statement) -> Set[Statement]:
"""
Get all the Statements in the given scope Statement.
        :param scope: Statement for which contained Statements should be obtained.
:return: set of Statements contained in the given Statement,
set will be empty if the given Statement is not SCOPE, BRANCH, LOOP or FUNCTION.
"""
if self.__scope_dependency_backward is None:
self.__scope_dependency_backward = self.__build_statements_in_scope()
return self.__scope_dependency_backward.get(scope, set())
def get_statements_in_range(self, start_point: Point = None, end_point: Point = None) -> Set[Statement]:
"""
Get all the Statements in the given range.
:param start_point: start Point of the given range.
:param end_point: end Point of the given range.
:return: set of Statements contained in the given range.
"""
statements = self.sorted_statements
start_statement_idx = 0 if start_point is None else self.__bisect_range_left(start_point, end_point)
end_statement_idx = len(statements) if end_point is None else self.__bisect_range_right(end_point, end_point)
return set(
statements[idx]
for idx in range(start_statement_idx, end_statement_idx)
if (start_point is None or start_point <= statements[idx].start_point) and
(end_point is None or end_point >= statements[idx].end_point)
)
def get_exit_statements(self, statements: Set[Statement]) -> Set[Statement]:
"""
Get Statements that are Flow Dependence children of the given statements but not one of them.
        :param statements: set of Statements for which exit Statements should be obtained.
:return: set of exit Statements (may have not only EXIT type).
"""
start_point = min(statement.start_point for statement in statements)
end_point = max(statement.end_point for statement in statements)
exit_statements = set()
flow_statements = set()
for statement in statements:
if statement not in self.control_dependence_graph.control_flow:
continue
flow_statements.update(self.control_dependence_graph.control_flow[statement])
visited = set()
while flow_statements:
level = set()
for flow_statement in flow_statements:
if flow_statement in statements:
continue
if flow_statement.start_point < start_point or flow_statement.end_point > end_point:
exit_statements.add(flow_statement)
elif flow_statement.statement_type == StatementType.EXIT:
exit_statements.add(flow_statement)
elif flow_statement not in visited and flow_statement in self.control_dependence_graph.control_flow:
level.update(self.control_dependence_graph.control_flow[flow_statement])
visited.add(flow_statement)
flow_statements = level
return exit_statements
def get_affecting_statements(self, statements: Set[Statement]) -> Set[Statement]:
"""
        Get Statements from the given set that affect, via Data Dependence, some Statement not from the given set.
        :param statements: set of Statements for which affecting Statements should be obtained.
:return: set of affecting Statements (may have VARIABLE or ASSIGNMENT type).
"""
assignment_statements = [
statement for statement in statements
if
statement.statement_type == StatementType.ASSIGNMENT or
statement.statement_type == StatementType.VARIABLE
]
arg_statements_by_arg_name = self.__get_arg_statements_by_arg_name(statements)
affecting_statements = set()
for assignment_statement in assignment_statements:
if assignment_statement not in self.data_dependence_graph:
continue
for affected_statement in self.data_dependence_graph.successors(assignment_statement):
if affected_statement not in statements or \
affected_statement.end_point <= assignment_statement.end_point and \
affected_statement in arg_statements_by_arg_name.get(assignment_statement.name, set()):
affecting_statements.add(assignment_statement)
break
return affecting_statements
def get_changed_variables_statements(self, statements: Iterable[Statement]) -> Set[Statement]:
"""
Get VARIABLE Statements that represent variables changed in the given set of Statements.
        :param statements: set of Statements for which changed variables should be obtained.
:return: set of changed variables (Statements with VARIABLE type).
"""
changed_variables = set()
ddg = self.data_dependence_graph
for statement in statements:
if statement not in ddg:
continue
if statement.statement_type == StatementType.VARIABLE:
changed_variables.add(statement)
if statement.statement_type == StatementType.ASSIGNMENT:
if statement not in self.data_dependence_graph:
continue
for ancestor in networkx.ancestors(ddg, statement):
if ancestor.statement_type == StatementType.VARIABLE and ancestor.name == statement.name:
changed_variables.add(ancestor)
return changed_variables
def get_involved_variables_statements(self, statements: Iterable[Statement]) -> Set[Statement]:
"""
Get VARIABLE Statements that represent variables involved (including usage) in the given set of Statements.
        :param statements: set of Statements for which involved variables should be obtained.
:return: set of involved variables (Statements with VARIABLE type).
"""
involved_variables = set()
ddg = self.data_dependence_graph
for statement in statements:
if statement not in ddg:
continue
if statement.statement_type == StatementType.VARIABLE:
involved_variables.add(statement)
continue
for ancestor in networkx.ancestors(ddg, statement):
if ancestor.statement_type == StatementType.VARIABLE and ancestor.name in statement.affected_by:
involved_variables.add(ancestor)
return involved_variables
def __build_basic_blocks(self) -> Dict[Statement, BasicBlock]:
basic_blocks = {}
for block in networkx.traversal.dfs_tree(self.control_flow_graph):
for statement in block:
basic_blocks[statement] = block
return basic_blocks
def __build_function_dependency(self) -> Dict[Statement, Statement]:
function_dependency = {}
for function_statement in sorted(
(s for s in self.control_dependence_graph if s.statement_type == StatementType.FUNCTION),
key=lambda x: (x.start_point, -x.end_point)):
for statement in networkx.traversal.dfs_tree(self.control_dependence_graph, function_statement):
function_dependency[statement] = function_statement
return function_dependency
def __build_sorted_statements(self) -> List[Statement]:
return sorted(self.control_dependence_graph, key=lambda s: (s.start_point, -s.end_point))
def __build_general_statements(self) -> Set[Statement]:
result = set()
for scope in self.scope_statements:
last_statement = None
for statement in sorted(self.get_statements_in_scope(scope), key=lambda s: (s.start_point, -s.end_point)):
if statement.start_point == statement.end_point:
continue
if not last_statement or statement.end_point > last_statement.end_point:
last_statement = statement
result.add(statement)
return result
def __build_reach_blocks(self, block: BasicBlock, visited_blocks: Set[BasicBlock] = None) -> Set[BasicBlock]:
if block in self.__reach_blocks:
return self.__reach_blocks[block]
if visited_blocks is None:
visited_blocks = set()
visited_blocks.add(block)
result = {block}
for child in self.control_flow_graph.successors(block):
if child not in visited_blocks:
result.update(self.__build_reach_blocks(child, visited_blocks))
self.__reach_blocks[block] = result
visited_blocks.remove(block)
return result
def __build_statements_in_scope(self) -> Dict[Statement, Set[Statement]]:
statements_in_scope = defaultdict(set)
for statement in self.control_dependence_graph:
scope = self.get_scope_statement(statement)
if scope is None:
continue
statements_in_scope[scope].add(statement)
return statements_in_scope
def __get_arg_statements_by_arg_name(self, statements: Set[Statement]) -> Dict[str, Set[Statement]]:
arg_statements_by_arg_name = defaultdict(set)
for statement in statements:
if statement in self.data_dependence_graph and \
statement.statement_type != StatementType.ASSIGNMENT and \
statement.statement_type != StatementType.VARIABLE:
for predecessor in self.data_dependence_graph.predecessors(statement):
if predecessor not in statements:
arg_statements_by_arg_name[predecessor.name].add(statement)
return arg_statements_by_arg_name
def __bisect_range_left(self, start_point: Point, end_point: Point) -> int:
searching_range = (start_point, -end_point)
a = self.sorted_statements
lo = 0
hi = len(a)
while lo < hi:
mid = (lo + hi) // 2
if (a[mid].start_point, -a[mid].end_point) < searching_range:
lo = mid + 1
else:
hi = mid
return lo
def __bisect_range_right(self, start_point: Point, end_point: Point) -> int:
searching_range = (start_point, -end_point)
a = self.sorted_statements
lo = 0
hi = len(a)
while lo < hi:
mid = (lo + hi) // 2
if searching_range < (a[mid].start_point, -a[mid].end_point):
hi = mid
else:
lo = mid + 1
return lo
|
'''
Have you got what it takes to be the fuzzbizzard...
A number will be displayed; you have to guess whether, in a FizzBuzz sequence,
the number would be shown normally or as Fizz, Buzz or FizzBuzz!
Enter 0 for normal number
Enter 1 for Fizz
Enter 2 for Buzz
Enter 3 for FizzBuzz
You will have 7 seconds per number
Guess all 10 to become the FUZZBIZZARD
'''
import random
import threading
import _thread as thread
from os import system,name
def clear():
if name == 'nt':
_ = system('cls')
else:
_ = system('clear')
def display_instructions():
clear()
print("Have you got what it takes to be the fuzzbizzard...\n")
print("A number will be displayed you have to guess if in a FizzBuzz the number will be displayed normally or will it show Fizz, Buzz or FizzBuzz! \n")
print("Enter 0 for normall number")
print("Enter 1 for Fizz")
print("Enter 2 for Buzz")
print("Enter 3 for FizzBuzz\n")
print("You will have 7 seconds per number")
print("Guess all 10 to become the FUZZBIZZARD")
print("Press ENTER to play")
_ = input()
def input_with_timeout(timeout=7):
timer = threading.Timer(timeout, thread.interrupt_main)
astring = None
try:
timer.start()
astring = input()
except KeyboardInterrupt:
pass
timer.cancel()
return astring
def game(x):
if x % 15 == 0:
return 3
elif x % 3 == 0:
return 1
elif x % 5 == 0:
return 2
else:
return 0
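# A few worked examples of game(x), matching the fizzbuzz mapping defined below:
#   game(15) -> 3 (FiZzBuZz), game(9) -> 1 (FiZz), game(10) -> 2 (BuZz), game(7) -> 0 (normal)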
fizzbuzz = {0:"Normal", 1:"FiZz!", 2:"BuZz!", 3:"FiZzBuZz!!!"}
points = 0
def check():
clear()
global points
x = random.randint(1,100)
print("The Number is... ",x)
print("Enter your answer in 7 seconds")
answer = input_with_timeout()
if answer != None:
if game(x) == int(answer):
print("Correct")
print(fizzbuzz[game(x)])
points = points + 1
print("Press Enter")
else:
print("Wrong")
print(fizzbuzz[game(x)])
print("Press Enter")
else:
print("Too Slow")
print(fizzbuzz[game(x)])
print("Press Enter")
display_instructions()
for i in range(10):
answer = None
check()
_ = input()
clear()
print("Your Score:",points)
if points == 10:
print("Wow!!! you are a fuzzbizzard!!!")
else:
print("Maybe Next Time")
|
"""Module for GitHub webhook functionality."""
from typing import Optional
import sys
import tempfile
from string import Template
from urllib.parse import urlsplit
from threading import Timer
import github.PullRequest as ghp # type: ignore
from github import Github
import pygit2
from pyramid.interfaces import IResponse # type: ignore
from hublabbot.util import JsonDict
from hublabbot.settings import HubLabBotSettings
class RemotePushCallback(pygit2.RemoteCallbacks):
"""Callback for remote push."""
def __init__(self, gh_repo_path: str, gl_repo_path: str, pr_num: int):
self.gh_repo_path = gh_repo_path
"""Path like {namespace}/{repo name} in GitHub."""
self.gl_repo_path = gl_repo_path
"""Path like {namespace}/{repo name} in GitLab."""
self.pr_num = pr_num
"""Number of PR."""
def push_update_reference(self, refname: bytes, message: Optional[bytes]) -> None:
"""Overrided callback for remote push.
Args:
refname: The name of the reference (on the remote).
message: Rejection message from the remote. If None, the update was accepted.
"""
if message is None:
print(f'GH:{self.gh_repo_path}: PR#{self.pr_num} ({refname.decode("utf-8")})'
+ f' synced to GL:{self.gl_repo_path}.')
else:
print(f'GH:{self.gh_repo_path}: Fail sync PR#{self.pr_num} ({refname.decode("utf-8")})'
+ f' to GL:{self.gl_repo_path} - "{message.decode("utf-8")}"!', file=sys.stderr)
class GithubWebhook:
"""Main class with GitHub functionality."""
def __init__(self, settings: HubLabBotSettings, repo_path: str, is_admin: bool = False):
self.settings = settings
"""`hublabbot.settings.HubLabBotSettings`."""
self.repo_path = repo_path
"""Path like {namespace}/{repo name} in GitHub."""
self.repo_options = self.settings.get_repo_by_github(repo_path)
"""`hublabbot.settings.RepoOptions`."""
self.github = Github(self.settings.gh_token if is_admin else self.settings.gh_bot_token)
"""Github object with bot credentials."""
def _merge_pr(self, pr: ghp.PullRequest) -> None:
assert self.repo_options.gh_auto_merge_pr is not None
if pr.update() is True:
if pr.state == 'closed' or not pr.mergeable:
return
if self.repo_options.gh_auto_merge_pr.required_label_name not in [l.name for l in pr.labels]:
return
status = pr.merge()
if status.merged:
print(f'GH:{self.repo_path}: PR#{pr.number} merged.')
else:
raise RuntimeError(f'GH:{self.repo_path}: PR#{pr.number} fail to merge - "{status.message}"!')
def get_pr_by_sha(self, sha: str) -> Optional[ghp.PullRequest]:
"""Get PR by head commit sha.
Args:
sha: Head commit sha.
Returns:
PullRequest or `None` if not found.
"""
repo = self.github.get_repo(self.repo_path)
pr_list = [p for p in repo.get_pulls() if p.head.sha == sha]
if len(pr_list) == 0:
return None
else:
return pr_list[0]
def get_branch_head(self, branch_name: str) -> str:
"""Get head sha of `branch_name`.
Args:
branch_name: Branch name.
Returns:
Head commit sha.
"""
repo = self.github.get_repo(self.repo_path)
branch = repo.get_branch(branch_name)
head_sha: str = branch.commit.sha
return head_sha
def is_external_pr(self, pr: JsonDict) -> bool:
"""Check PR is external or not.
Args:
pr: JSON from GitHub with PR dict.
Returns:
            `True` if it is external, `False` if not.
"""
pr_repo_path: str = pr['head']['repo']['full_name']
return pr_repo_path != self.repo_path
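    # Example (sketch, with hypothetical repo names): for a PR opened from a fork,
    # pr['head']['repo']['full_name'] might be 'someuser/project' while self.repo_path is
    # 'someorg/project', so is_external_pr returns True; for a same-repo branch PR the two
    # paths match and it returns False.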
def auto_merge_pr(self, sha: str) -> IResponse:
"""Action - auto-merge PR.
Args:
sha: SHA of HEAD commit in PR.
Returns:
            `{'status': 'OK', ...}` if action was successful,<br/>
            `{'status': 'IGNORE', ...}` if action ignored,<br/>
            `{'status': 'ERROR', ...}` if action failed.
"""
if self.repo_options.gh_auto_merge_pr is None:
return {
'status': 'IGNORE',
'note': f'Repo option gh_auto_merge_pr disabled for repo {self.repo_path}.'}
pr = self.get_pr_by_sha(sha)
if pr is None or pr.state == 'closed' or not pr.mergeable:
return {'status': 'IGNORE'}
if pr.user.login not in self.repo_options.gh_auto_merge_pr.authors_white_list:
return {'status': 'IGNORE'}
if self.repo_options.gh_auto_merge_pr.required_label_name not in [l.name for l in pr.labels]:
return {'status': 'IGNORE'}
if self.repo_options.gh_auto_merge_pr.delay > 0:
timer = Timer(self.repo_options.gh_auto_merge_pr.delay, lambda: self._merge_pr(pr))
timer.start()
else:
self._merge_pr(pr)
return {'status': 'OK'}
def show_gitlabci_fail(self, failed_job_sha: str, failed_stage: str, failed_job_url: str,
failed_job_log: str) -> IResponse:
"""Action - post comment with GitLab CI fail-report to PR.
Args:
failed_job_sha: SHA of HEAD commit in PR.
failed_stage: Stage at which the error occurred.
failed_job_url: Fail job URL.
failed_job_log: Fail job log.
Returns:
            `{'status': 'OK', ...}` if action was successful,<br/>
            `{'status': 'IGNORE', ...}` if action ignored,<br/>
            `{'status': 'ERROR', ...}` if action failed.
"""
pr = self.get_pr_by_sha(failed_job_sha)
if pr is None:
return {
'status': 'IGNORE',
'note': f'Commit "{failed_job_sha}" not found in Pull Requests.'}
gitlabci_fail_tmd = (self.settings.assets_path / 'gitlabci_fail.templ.md').read_text()
gitlabci_fail_templ = Template(gitlabci_fail_tmd)
gitlabci_fail_md = gitlabci_fail_templ.substitute(
failed_stage=failed_stage,
failed_job_log=failed_job_log,
failed_job_url=failed_job_url)
pr.create_issue_comment(gitlabci_fail_md)
print(f'GH:{self.repo_path}: Comment with GitLab CI fail-report posted to PR#{pr.number}.')
return {'status': 'OK'}
def sync_pr_to_gitlab(self, pr: JsonDict) -> IResponse:
"""Action - push external PR's branch to GitLab.
Args:
pr: JSON from GitHub with PR dict.
Returns:
            `{'status': 'OK', ...}` if action was successful,<br/>
            `{'status': 'IGNORE', ...}` if action ignored,<br/>
            `{'status': 'ERROR', ...}` if action failed.
"""
if not self.is_external_pr(pr):
return {'status': 'IGNORE'}
with tempfile.TemporaryDirectory() as gitdir:
repo = pygit2.init_repository(gitdir, bare=True)
pr_num = pr['number']
pr_ref = f'refs/heads/pr-{pr_num}'
creds = f'gitlab-ci-token:{self.settings.gl_token}'
gl_host = urlsplit(self.settings.gl_base_url).netloc
repo.remotes.create('github', f'https://github.com/{self.repo_path}.git')
repo.remotes.create('gitlab', f'https://{creds}@{gl_host}/{self.repo_options.gl_repo_path}.git')
repo.remotes['github'].fetch([f'+refs/pull/{pr_num}/head:{pr_ref}'])
repo.remotes['gitlab'].push(
['+' + pr_ref],
RemotePushCallback(self.repo_path, self.repo_options.gl_repo_path, pr_num))
return {'status': 'OK'}
|
import logging
import random
import os
from unittest import TestLoader, TestSuite
import unittest.util
from exchangelib.util import PrettyXmlHandler
class RandomTestSuite(TestSuite):
def __iter__(self):
tests = list(super().__iter__())
random.shuffle(tests)
return iter(tests)
# Execute test classes in random order
TestLoader.suiteClass = RandomTestSuite
# Execute test methods in random order within each test class
TestLoader.sortTestMethodsUsing = lambda _, x, y: random.choice((1, -1))
# Make sure we're also random in multiprocess test runners
random.seed()
# Always show full repr() output for object instances in unittest error messages
unittest.util._MAX_LENGTH = 2000
if os.environ.get('DEBUG', '').lower() in ('1', 'yes', 'true'):
logging.basicConfig(level=logging.DEBUG, handlers=[PrettyXmlHandler()])
else:
logging.basicConfig(level=logging.CRITICAL)
|
'''
Gets the file's name from the user
'''
def get_file_name() -> str:
filename = ''
while filename == '':
filename = input("Please enter the name of the Python file you wish to obfuscate: ")
return filename
'''
Opens the file the user wants and returns a
string of all the contents of that file
'''
def get_file_contents(filename: str) -> str:  # reading a text file yields str lines, so this returns a str
program = ''
with open(filename) as f:
for line in f.readlines():
program += line
return program
if __name__ == '__main__':
filename = get_file_name()
file_contents = get_file_contents(filename)
print(file_contents)
|
import contextlib
import os
import textwrap
from libqtile.backend.wayland.core import Core
from test.helpers import Backend
wlr_env = {
"WLR_BACKENDS": "headless",
"WLR_LIBINPUT_NO_DEVICES": "1",
"WLR_RENDERER_ALLOW_SOFTWARE": "1",
"WLR_RENDERER": "pixman",
"XDG_RUNTIME_DIR": "/tmp",
}
@contextlib.contextmanager
def wayland_environment(outputs):
"""This backend just needs some environmental variables set"""
env = wlr_env.copy()
env["WLR_HEADLESS_OUTPUTS"] = str(outputs)
yield env
class WaylandBackend(Backend):
name = "wayland"
def __init__(self, env, args=()):
self.env = env
self.args = args
self.core = Core
self.manager = None
def create(self):
"""This is used to instantiate the Core"""
os.environ.update(self.env)
return self.core(*self.args)
def configure(self, manager):
"""This backend needs to get WAYLAND_DISPLAY variable."""
success, display = manager.c.eval("self.core.display_name")
assert success
self.env["WAYLAND_DISPLAY"] = display
def fake_click(self, x, y):
"""Click at the specified coordinates"""
# Currently only restacks windows, and does not trigger bindings
self.manager.c.eval(textwrap.dedent(f"""
self.core.warp_pointer({x}, {y})
self.core._focus_by_click()
"""))
def get_all_windows(self):
"""Get a list of all windows in ascending order of Z position"""
success, result = self.manager.c.eval(textwrap.dedent("""
[win.wid for win in self.core.mapped_windows]
"""))
assert success
return eval(result)
|
#!/usr/bin/python
import os
import sys
import signal
import json
import yfinance as yf
import pandas as pd
import math
import argparse
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
g_StocksDB = None
def Load(filename):
if os.path.isfile(filename) is True:
file = open(filename, "r")
data = file.read()
file.close()
return data
return ""
def Save (filename, data):
file = open(filename, "w")
file.write(data)
file.close()
def Append (filename, data):
file = open(filename, "a")
file.write(data)
file.close()
def Get5D(ticker):
'''
Open,High,Low,Close,Volume,Dividends,Stock Splits
'''
hist = []
objtk = yf.Ticker(ticker)
data = objtk.history(period="1mo", interval="5m")
for idx, row in data.iterrows():
hist.append({
"date": "{0}".format(idx),
"open": row['Open'],
"close": row['Close'],
"high": row['High'],
"low": row['Low'],
"vol": row['Volume']
})
return hist
g_exit = False
def signal_handler(signal, frame):
global g_exit
print("Accepted signal from other app")
g_exit = True
def FindBufferMaxMin(buffer):
    pmax = 0
    pmin = 0
    if len(buffer) > 0:
        pmin = buffer[0]
        pmax = buffer[0]  # initialise from the data so all-negative buffers get a correct max
for item in buffer:
if pmax < item:
pmax = item
if pmin > item:
pmin = item
return pmin, pmax
def CreateHistogram(buffer, bin_size):
ret_hist_buffer_y = []
ret_hist_buffer_x = []
freq = 1
try:
if len(buffer) > 0:
# Find min and max for this buffer
pmin, pmax = FindBufferMaxMin(buffer)
# print("pmax:{0}, pmin:{1}, bin_size:{2}".format(pmax,pmin,bin_size))
# Calculate freq
freq = (float(pmax) - float(pmin)) / float(bin_size)
if freq == 0:
return 0, [pmin], [pmax]
# Generate x scale
ret_hist_buffer_x = [(x * freq) + pmin for x in range(0, int(bin_size))]
ret_hist_buffer_y = [0] * int(bin_size)
# Generate y scale
for sample in buffer:
index = int((float(sample) - float(pmin)) / freq)
if index == bin_size:
index = bin_size - 1
#print(index, sample, freq, pmin, pmax)
ret_hist_buffer_y[index] += 1
except Exception as e:
print("Histograme exception {0}".format(e))
return 1, [], []
return 0, ret_hist_buffer_y, ret_hist_buffer_x
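# Worked example (sketch): CreateHistogram([1, 2, 2, 3], 2) finds pmin=1, pmax=3, freq=1.0,
# builds the x scale [1.0, 2.0] and counts [1, 3] (the sample 3 is clamped into the last bin),
# so it returns (0, [1, 3], [1.0, 2.0]).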
def MAVG(buffer, win):
window_size = win
i = 0
moving_averages = []
while i < len(buffer) - window_size + 1:
this_window = buffer[i : i + window_size]
window_average = sum(this_window) / window_size
moving_averages.append(window_average)
i += 1
x_scale = [x for x in range(0, len(moving_averages))]
return moving_averages, x_scale
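# Worked example (sketch): MAVG([1, 2, 3, 4, 5], 3) returns ([2.0, 3.0, 4.0], [0, 1, 2]),
# i.e. the 3-sample moving averages plus a 0-based x scale of the same length.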
def GetIntersetPoints(data):
data_diff = []
roi_list = []
pmin, pmax = FindBufferMaxMin(data)
avg = (pmax + pmin) / 2
diff_arr = np.array(data, dtype=np.double)
data_diff = np.diff(diff_arr, n=1).tolist()
# ------------------------------------------------------------------ DIFF REMOVE POSITIVE SLOPE START
for idx, item in enumerate(data_diff):
if item < 0:
if idx < len(data_diff) - 2:
if data_diff[idx + 1] > 0:
pass
else:
data_diff[idx] = 0
if item > 0:
data_diff[idx] = 0
# ------------------------------------------------------------------ DIFF REMOVE POSITIVE SLOPE END
# ------------------------------------------------------------------ HISTOGRAM START
error, hist_y, hist_x = CreateHistogram(data_diff, len(data_diff))
hist_sum = 0.0
for sample in hist_y:
hist_sum += sample
if hist_sum == 0:
return []
    filter_threshold = 0.0
perc_integral = 0.0
for idx, sample in enumerate(hist_y):
perc_integral += sample
if (perc_integral / hist_sum) > 0.1:
            filter_threshold = hist_x[idx]
break
# ------------------------------------------------------------------ HISTOGRAM END
# ------------------------------------------------------------------ DIFF REMOVE NOISE START
for idx, item in enumerate(data_diff):
        if item > filter_threshold:
data_diff[idx] = 0
# ------------------------------------------------------------------ DIFF REMOVE NOISE END
# ------------------------------------------------------------------ FIND GROUPS START
start_of_peak = 0
item_counter = 0
prev_item = 0
for idx, item in enumerate(data_diff):
if prev_item == 0 and item < 0: # START OF PEAK
start_of_peak = idx
elif prev_item < 0 and item < 0: # MIDDLE OF PEAK
pass
elif prev_item == 0 and item == 0: # NO PEAK
# Count items
item_counter += 1
elif prev_item < 0 and item == 0: # END OF PEAK
            # Check length of items between the previous peak
if item_counter < 10 and len(roi_list) > 0:
roi_list[-1]["end"] = idx
roi_list[-1]["data"] = data_diff[roi_list[-1]["start"]:idx]
else:
roi_list.append({
"start": start_of_peak,
"end": idx,
"data": data_diff[start_of_peak:idx]
})
# Start items counter
item_counter = 0
else:
# ANY OTHER CASE
pass
prev_item = item
    # ------------------------------------------------------------------ FIND GROUPS END
    return roi_list
    # NOTE: the following history-based filtering was unreachable (it sat after the return
    # and referenced an undefined `history` variable); it is preserved here as a comment only.
    # if history == None:
    #     return roi_list[-1]
    # if history["start"] != roi_list[-1]["start"] and history["end"] != roi_list[-1]["end"]:
    #     return roi_list[-1]
    # else:
    #     return None
def CompareRoi(a, b):
pass
def main():
parser = argparse.ArgumentParser(description='Stock DB Creator')
parser.add_argument('--update', action='store_true', help='Update DB')
parser.add_argument('--ticker', action='store', dest='ticker', help='Calculate single ticker')
parser.add_argument('--plot', action='store_true', help='Update DB')
args = parser.parse_args()
if args.ticker is not None:
x_buff = []
y_buff = []
ticker = args.ticker
data = Get5D(ticker)
# Build y scale (close data)
for idx, item in enumerate(data):
y_buff.append(item["close"])
y_buff, x_buff = MAVG(y_buff, 10)
rois = None
rois_map = {}
window_size = 96 # hours
w_start = 0
w_end = window_size
# print(len(y_buff), y_buff)
while len(y_buff) - 1 != w_end:
# print(w_start, w_end, len(y_buff))
rois = GetIntersetPoints(y_buff[w_start:w_end])
if len(rois) > 0:
key = w_start+rois[-1]["end"]
rois_map[key] = 1
w_start += 1
w_end += 1
pmin, pmax = FindBufferMaxMin(y_buff)
avg = (pmax + pmin) / 2
scale_const = (pmax - pmin) / 12
y1 = [pmin] * len(x_buff)
for idx in rois_map:
y1[idx] = pmin + scale_const
print(y1)
fig, (ax1) = plt.subplots(1, 1)
fig.subplots_adjust(hspace=0.5)
ax1.plot(x_buff, y_buff, color='green')
ax1.plot(x_buff, y1, color='red')
plt.show()
if __name__ == "__main__":
main()
|
from contextlib import contextmanager
from datetime import datetime
from time import sleep
from celery import shared_task
from celery.exceptions import Reject
from django.db import connections, transaction
from django.db.models import Prefetch
from django.utils import timezone
from elasticsearch import ElasticsearchException
from elasticsearch.helpers import BulkIndexError
from urllib3.exceptions import ReadTimeoutError
from capapi.documents import CaseDocument
from scripts.helpers import ordered_query_iterator
from capdb.models import *
### HELPERS ###
def run_task_for_volumes(task, volumes=None, last_run_before=None, synchronous=False, **kwargs):
"""
Run the given celery task for the given queryset of volumes, or all volumes if not specified.
If last_run_before is provided as an ISO timestamp, volumes will only be run if volume.task_statuses indicates that
the task has not succeeded after that time.
"""
if volumes is None:
volumes = VolumeMetadata.objects.all()
if last_run_before:
volumes = volumes.exclude(**{
"task_statuses__%s__success" % task.name: True,
"task_statuses__%s__timestamp__gte" % task.name: last_run_before
})
for volume_id in volumes.values_list('pk', flat=True):
task.delay(volume_id, **kwargs)
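# A usage sketch (hedged; the timestamp is illustrative only):
#   run_task_for_volumes(update_in_scope_for_vol, last_run_before='2020-01-01T00:00:00')
# queues the task only for volumes whose task_statuses do not record a success for it at or
# after the given ISO timestamp.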
@contextmanager
def record_task_status_for_volume(task, volume_id):
"""
Context manager to record in volume.task_statuses whether the given task succeeds or fails.
"""
try:
yield
except Exception as e:
volume = VolumeMetadata.objects.get(pk=volume_id)
volume.task_statuses[task.name] = {
'timestamp': timezone.now().isoformat(),
'error': str(e),
}
volume.save()
raise
else:
volume = VolumeMetadata.objects.get(pk=volume_id)
volume.task_statuses[task.name] = {
'timestamp': timezone.now().isoformat(),
'success': True,
}
volume.save()
### TASKS ###
@shared_task(bind=True, acks_late=True) # use acks_late for tasks that can be safely re-run if they fail
def update_in_scope_for_vol(self, volume_id):
"""
Call .update_in_scope() for all cases in given volume.
"""
with record_task_status_for_volume(self, volume_id):
CaseMetadata.objects.filter(volume_id=volume_id).update_in_scope()
@shared_task(bind=True, acks_late=True) # use acks_late for tasks that can be safely re-run if they fail
def update_elasticsearch_for_vol(self, volume_id):
"""
Index all cases for given volume with elasticsearch.
"""
with record_task_status_for_volume(self, volume_id):
# fetch cases
cases = (CaseMetadata.objects
.in_scope()
.filter(volume_id=volume_id)
.select_related('volume', 'reporter', 'court', 'jurisdiction', 'body_cache')
.exclude(body_cache=None))
# attempt to store 10 times, with linearly increasing backoff. this gives time for the bulk queue to be processed
# if necessary (in which case we'll get BulkIndexError with error 429, too many requests).
for i in range(10):
try:
CaseDocument().update(cases)
VolumeMetadata.objects.filter(pk=volume_id).update(last_es_index=timezone.now())
return
except (ElasticsearchException, ReadTimeoutError) as e:
if i == 9:
# If all 10 requests fail, re-add job to the back of the queue
if type(e) == BulkIndexError:
# delete submitted data from BulkIndexError, because otherwise error messages are too large to store
for item in e.args[1]:
for v in item.values():
v['data'] = '[data omitted]'
raise Reject('Bulk indexing of volume %s failed: %s' % (volume_id, e), requeue=True)
sleep(i)
@shared_task(bind=True, acks_late=True) # use acks_late for tasks that can be safely re-run if they fail
def sync_from_initial_metadata_for_vol(self, volume_id, force):
"""
call sync_from_initial_metadata on cases in given volume
"""
with record_task_status_for_volume(self, volume_id):
cases = (CaseMetadata.objects
.filter(volume_id=volume_id)
.select_related('structure', 'initial_metadata', 'volume')
.exclude(initial_metadata=None)
.exclude(structure=None))
for c in cases:
c.sync_from_initial_metadata(force=force)
@shared_task
def sync_case_body_cache_for_vol(volume_id, rerender=True):
"""
call sync_case_body_cache on cases in given volume
"""
volume = VolumeMetadata.objects.get(pk=volume_id)
pages = list(volume.page_structures.all())
blocks_by_id = PageStructure.blocks_by_id(pages)
fonts_by_id = CaseFont.fonts_by_id(blocks_by_id)
labels_by_block_id = PageStructure.labels_by_block_id(pages)
query = volume.case_metadatas\
.select_related('structure', 'body_cache')\
.defer('body_cache__html', 'body_cache__xml', 'body_cache__text', 'body_cache__json')
for case_metadata in query:
case_metadata.sync_case_body_cache(blocks_by_id, fonts_by_id, labels_by_block_id, rerender=rerender)
def create_case_metadata_from_all_vols(update_existing=False):
"""
iterate through all volumes, call celery task for each volume
"""
query = VolumeXML.objects.all()
# if not updating existing, then only launch jobs for volumes with unindexed cases:
if not update_existing:
query = query.filter(case_xmls__metadata_id=None).distinct()
# launch a job for each volume:
for volume_id in query.values_list('pk', flat=True):
create_case_metadata_from_vol.delay(volume_id, update_existing=update_existing)
@shared_task
def create_case_metadata_from_vol(volume_id, update_existing=False):
"""
create or update cases for each volume
"""
case_xmls = CaseXML.objects\
.filter(volume_id=volume_id)\
.select_related('metadata', 'volume__metadata__reporter')\
.defer('orig_xml', 'volume__orig_xml')
if not update_existing:
case_xmls = case_xmls.filter(metadata_id=None)
for case_xml in case_xmls:
case_xml.create_or_update_metadata(update_existing=update_existing)
@shared_task
def update_volume_metadata(volume_xml_id):
VolumeXML.objects.get(pk=volume_xml_id).update_metadata()
@shared_task
def test_slow(i, ram=10, cpu=30):
"""
Allocate `ram` megabytes of ram and run `cpu` million additions.
"""
print("Task %s" % i)
# waste 0-ram MB of RAM
waste_ram = bytearray(2**20 * ram) # noqa
# waste CPU
total = 0
for i in range(cpu * 1000000):
total += i
@shared_task
@transaction.atomic
def fix_md5_column(volume_id):
"""
Our database has xml fields in the casexml and pagexml tables that are missing the <?xml> declaration, and that also don't have the md5 column filled.
Here we update all the xml fields to add the <?xml> declaration and md5 hash.
"""
with connections['capdb'].cursor() as cursor:
new_xml_sql = "E'<?xml version=''1.0'' encoding=''utf-8''?>\n' || orig_xml"
for table in ('capdb_casexml', 'capdb_pagexml'):
print("Volume %s: updating %s" % (volume_id, table))
update_sql = "UPDATE %(table)s SET orig_xml=xmlparse(CONTENT %(new_xml)s), md5=md5(%(new_xml)s) where volume_id = %%s and md5 is null" % {'table':table, 'new_xml':new_xml_sql}
cursor.execute(update_sql, [volume_id])
@shared_task
def get_reporter_count_for_jur(jurisdiction_id):
"""
Count reporters through the years per jurisdiction. Include totals.
"""
if not jurisdiction_id:
print('Must provide jurisdiction id')
return
with connections['capdb'].cursor() as cursor:
cursor.execute("select r.id, r.start_year, r.full_name, r.volume_count from capdb_reporter r join capdb_reporter_jurisdictions j on (r.id = j.reporter_id) where j.jurisdiction_id=%s order by r.start_year;" % jurisdiction_id)
db_results = cursor.fetchall()
results = {
'total': 0,
'years': {},
'firsts': {
'name': '',
'id': ''
}
}
try:
results['firsts']['name'] = db_results[0][2]
results['firsts']['id'] = db_results[0][0]
except IndexError:
pass
for res in db_results:
rep_id, start_year, full_name, volume_count = res
        if start_year in results['years']:
results['years'][start_year] += 1
else:
results['years'][start_year] = 1
results['total'] += 1
results['recorded'] = str(datetime.now())
return results
@shared_task
def get_case_count_for_jur(jurisdiction_id):
if not jurisdiction_id:
print('Must provide jurisdiction id')
return
with connections['capdb'].cursor() as cursor:
cursor.execute("select extract(year from decision_date)::integer as case_year, count(*) from capdb_casemetadata where duplicative=false and jurisdiction_id=%s group by case_year;" % jurisdiction_id)
db_results = cursor.fetchall()
results = {
'total': 0,
'years': {},
'firsts': {
'name_abbreviation': '',
'name': '',
'id': ''
}
}
first_case = CaseMetadata.objects.filter(jurisdiction_id=jurisdiction_id).order_by('decision_date').first()
if first_case:
results['firsts']['name_abbreviation'] = first_case.name_abbreviation
results['firsts']['name'] = first_case.name
results['firsts']['id'] = first_case.id
for res in db_results:
case_year, count = res
results['years'][case_year] = count
results['total'] += count
results['recorded'] = str(datetime.now())
return results
@shared_task
def get_court_count_for_jur(jurisdiction_id):
if not jurisdiction_id:
print("Must provide jurisdiction id")
return
jur = Jurisdiction.objects.get(id=jurisdiction_id)
results = {
'recorded': str(datetime.now()),
'total': jur.courts.count()
}
return results
def create_case_text_for_all_cases(update_existing=False):
"""
Call create_case_text for each volume
"""
run_task_for_volumes(create_case_text, update_existing=update_existing)
@shared_task
def create_case_text(volume_id, update_existing=False):
"""
Create or update cases for each volume
"""
cases = CaseMetadata.objects \
.in_scope() \
.order_by('id') \
.filter(volume_id=volume_id) \
.select_related('case_xml', 'case_text')
if not update_existing:
cases = cases.filter(case_text=None)
for case in ordered_query_iterator(cases):
case.create_or_update_case_text()
def retrieve_images_from_all_cases(update_existing=False):
"""
Call celery task to get images for each volume
"""
run_task_for_volumes(retrieve_images_from_cases, update_existing=update_existing)
@shared_task(bind=True, acks_late=True) # use acks_late for tasks that can be safely re-run if they fail
@transaction.atomic(using='capdb')
def retrieve_images_from_cases(self, volume_id, update_existing=True):
"""
Create or update case images for each volume
"""
with record_task_status_for_volume(self, volume_id):
cases = CaseMetadata.objects.filter(volume_id=volume_id) \
.only('body_cache__html') \
.order_by('id') \
.select_related('body_cache') \
.prefetch_related(Prefetch('caseimages', queryset=CaseImage.objects.only('hash', 'case'))) \
.exclude(body_cache=None) \
.filter(body_cache__html__contains='<img') \
.select_for_update()
if not update_existing:
cases = cases.exclude(caseimages__isnull=False)
for case in cases:
case.retrieve_and_store_images()
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""A script to ensure that our docs are not being utterly neglected."""
import argparse
import os
import sys
IGNORES = {
'pydir': ['tests'],
'pyfile': ['__init__.py'],
'docfile': ['index.rst'],
}
class AddDocIgnores(argparse.Action):
"""Add entries to docfile ignores list."""
def __call__(self, parser, namespace, values, option_string=None):
"""Add entries to docfile ignores list."""
global IGNORES
ignores = values.split(',')
IGNORES['docfile'] += ignores
setattr(namespace, 'doc_ignores', ignores)
class DocParityCheck(object):
"""Ensure proper python module and documentation parity."""
def __init__(self):
self._args = None
@property
def args(self):
"""Parsed command-line arguments."""
if self._args is None:
parser = self._build_parser()
self._args = parser.parse_args()
return self._args
def build_pypackage_basename(self, pytree, base):
"""Build the string representing the parsed package basename.
:param str pytree: The pytree absolute path.
        :param str base: The absolute path of the pytree sub-package for which to determine the
            parsed basename.
:rtype: str
"""
dirname = os.path.dirname(pytree)
parsed_package_name = base.replace(dirname, '').strip('/')
return parsed_package_name
def _build_parser(self):
"""Build the needed command-line parser."""
parser = argparse.ArgumentParser()
parser.add_argument('--pytree',
required=True,
type=self._valid_directory,
help='This is the path, absolute or relative, of the Python package '
'that is to be parsed.')
parser.add_argument('--doctree',
required=True,
type=self._valid_directory,
help='This is the path, absolute or relative, of the documentation '
'package that is to be parsed.')
parser.add_argument('--no-fail',
action='store_true',
help='Using this option will cause this program to return an exit '
'code of 0 even when the given trees do not match.')
parser.add_argument('--doc-ignores',
action=AddDocIgnores,
help='A comma separated list of additional doc files to ignore')
return parser
def build_rst_name_from_pypath(self, parsed_pypath):
"""Build the expected rst file name based on the parsed Python module path.
:param str parsed_pypath: The parsed Python module path from which to build the expected
rst file name.
:rtype: str
"""
expected_rst_name = parsed_pypath.replace('/', '.').replace('.py', '.rst')
return expected_rst_name
def build_pyfile_path_from_docname(self, docfile):
"""Build the expected Python file name based on the given documentation file name.
:param str docfile: The documentation file name from which to build the Python file name.
:rtype: str
"""
name, ext = os.path.splitext(docfile)
expected_py_name = name.replace('.', '/') + '.py'
return expected_py_name
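    # Worked example (sketch, hypothetical names): build_rst_name_from_pypath('mypkg/module.py')
    # yields 'mypkg.module.rst', and build_pyfile_path_from_docname('mypkg.module.rst') maps it
    # back to 'mypkg/module.py'.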
def calculate_tree_differences(self, pytree, doctree):
"""Calculate the differences between the given trees.
:param dict pytree: The dictionary of the parsed Python tree.
:param dict doctree: The dictionary of the parsed documentation tree.
:rtype: tuple
:returns: A two-tuple of sets, where the first is the missing Python files, and the second
is the missing documentation files.
"""
pykeys = set(pytree.keys())
dockeys = set(doctree.keys())
# Calculate the missing documentation files, if any.
missing_doc_keys = pykeys - dockeys
missing_docs = {pytree[pyfile] for pyfile in missing_doc_keys}
# Calculate the missing Python files, if any.
missing_py_keys = dockeys - pykeys
missing_pys = {docfile for docfile in missing_py_keys}
return missing_pys, missing_docs
def compare_trees(self, parsed_pytree, parsed_doctree):
"""Compare the given parsed trees.
        :param dict parsed_pytree: A dictionary representing the parsed Python tree, where each
            key is a parsed Python file and its value is its expected rst file name.
        :param dict parsed_doctree: A dictionary representing the parsed documentation tree,
            where each key is an expected Python file and its value is the parsed rst file name.
"""
if parsed_pytree == parsed_doctree:
return 0
missing_pys, missing_docs = self.calculate_tree_differences(pytree=parsed_pytree,
doctree=parsed_doctree)
self.pprint_tree_differences(missing_pys=missing_pys, missing_docs=missing_docs)
return 0 if self.args.no_fail else 1
def _ignore_docfile(self, filename):
"""Test if a documentation filename should be ignored.
:param str filename: The documentation file name to test.
:rtype: bool
"""
if filename in IGNORES['docfile'] or not filename.endswith('.rst'):
return True
return False
def _ignore_pydir(self, basename):
"""Test if a Python directory should be ignored.
        :param str basename: The directory name to test.
:rtype: bool
"""
if basename in IGNORES['pydir']:
return True
return False
def _ignore_pyfile(self, filename):
"""Test if a Python filename should be ignored.
:param str filename: The Python file name to test.
:rtype: bool
"""
if filename in IGNORES['pyfile'] or not filename.endswith('.py'):
return True
return False
def parse_doc_tree(self, doctree, pypackages):
"""Parse the given documentation tree.
:param str doctree: The absolute path to the documentation tree which is to be parsed.
:param set pypackages: A set of all Python packages found in the pytree.
:rtype: dict
:returns: A dict where each key is the path of an expected Python module and its value is
the parsed rst module name (relative to the documentation tree).
"""
parsed_doctree = {}
for filename in os.listdir(doctree):
if self._ignore_docfile(filename):
continue
expected_pyfile = self.build_pyfile_path_from_docname(filename)
parsed_doctree[expected_pyfile] = filename
pypackages = {name + '.py' for name in pypackages}
return {elem: parsed_doctree[elem] for elem in parsed_doctree if elem not in pypackages}
def parse_py_tree(self, pytree):
"""Parse the given Python package tree.
:param str pytree: The absolute path to the Python tree which is to be parsed.
:returns: A two-tuple. The first element is a dict where each key is the path of a parsed
Python module (relative to the Python tree) and its value is the expected rst module
name. The second element is a set where each element is a Python package or
sub-package.
:rtype: tuple
"""
parsed_pytree = {}
pypackages = set()
for base, dirs, files in os.walk(pytree):
if self._ignore_pydir(os.path.basename(base)):
continue
# TODO(Anthony): If this is being run against a Python 3 package, this needs to be
# adapted to account for namespace packages.
elif '__init__.py' not in files:
continue
package_basename = self.build_pypackage_basename(pytree=pytree, base=base)
pypackages.add(package_basename)
for filename in files:
if self._ignore_pyfile(filename):
continue
parsed_path = os.path.join(package_basename, filename)
parsed_pytree[parsed_path] = self.build_rst_name_from_pypath(parsed_path)
return parsed_pytree, pypackages
def pprint_tree_differences(self, missing_pys, missing_docs):
"""Pprint the missing files of each given set.
:param set missing_pys: The set of missing Python files.
:param set missing_docs: The set of missing documentation files.
:rtype: None
"""
if missing_pys:
print('The following Python files appear to be missing:')
for pyfile in missing_pys:
print(pyfile)
print('\n')
if missing_docs:
print('The following documentation files appear to be missing:')
            for docfile in missing_docs:
                print(docfile)
print('\n')
def _valid_directory(self, path):
"""Ensure that the given path is valid.
:param str path: A valid directory path.
:raises: :py:class:`argparse.ArgumentTypeError`
:returns: An absolute directory path.
"""
abspath = os.path.abspath(path)
if not os.path.isdir(abspath):
raise argparse.ArgumentTypeError('Not a valid directory: {}'.format(abspath))
return abspath
def main(self):
"""Parse package trees and report on any discrepancies."""
args = self.args
parsed_pytree, pypackages = self.parse_py_tree(pytree=args.pytree)
parsed_doctree = self.parse_doc_tree(doctree=args.doctree, pypackages=pypackages)
return self.compare_trees(parsed_pytree=parsed_pytree, parsed_doctree=parsed_doctree)
if __name__ == '__main__':
sys.exit(DocParityCheck().main())
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Convenience functions for `astropy.cosmology`.
"""
from .core import get_current as _get_current
def kpc_comoving_per_arcmin(z, cosmo=None):
""" Separation in transverse comoving kpc corresponding to an
arcminute at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
d : astropy.units.Quantity
The distance in comoving kpc corresponding to an arcmin at each
input redshift.
"""
if cosmo is None:
cosmo = _get_current()
return cosmo.kpc_comoving_per_arcmin(z)
def kpc_proper_per_arcmin(z, cosmo=None):
""" Separation in transverse proper kpc corresponding to an
arcminute at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
d : astropy.units.Quantity
The distance in proper kpc corresponding to an arcmin at each
input redshift.
"""
if cosmo is None:
cosmo = _get_current()
return cosmo.kpc_proper_per_arcmin(z)
def arcsec_per_kpc_comoving(z, cosmo=None):
""" Angular separation in arcsec corresponding to a comoving kpc
at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
theta : astropy.units.Quantity
The angular separation in arcsec corresponding to a comoving kpc
at each input redshift.
"""
if cosmo is None:
cosmo = _get_current()
return cosmo.arcsec_per_kpc_comoving(z)
def arcsec_per_kpc_proper(z, cosmo=None):
""" Angular separation in arcsec corresponding to a proper kpc at
redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
theta : astropy.units.Quantity
The angular separation in arcsec corresponding to a proper kpc
at each input redshift.
"""
if cosmo is None:
cosmo = _get_current()
return cosmo.arcsec_per_kpc_proper(z)
def distmod(z, cosmo=None):
""" Distance modulus at redshift `z`.
The distance modulus is defined as the (apparent magnitude -
absolute magnitude) for an object at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
distmod : astropy.units.Quantity
Distance modulus at each input redshift.
"""
if cosmo is None:
cosmo = _get_current()
return cosmo.distmod(z)
def H(z, cosmo=None):
""" Hubble parameter (km/s/Mpc) at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
H : astropy.units.Quantity
Hubble parameter at each input redshift.
"""
if cosmo is None:
cosmo = _get_current()
return cosmo.H(z)
def scale_factor(z, cosmo=None):
""" Scale factor at redshift `z`.
The scale factor is defined as `a = 1 / (1 + z)`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
scalefac : ndarray, or float if input scalar
Scale factor at each input redshift.
"""
if cosmo is None:
cosmo = _get_current()
return cosmo.scale_factor(z)
def critical_density(z, cosmo=None):
""" Critical density in grams per cubic cm at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
critdens : astropy.units.Quantity
Critical density at each input redshift.
"""
if cosmo is None:
cosmo = _get_current()
return cosmo.critical_density(z)
def lookback_time(z, cosmo=None):
""" Lookback time in Gyr to redshift `z`.
The lookback time is the difference between the age of the
Universe now and the age at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
t : astropy.units.Quantity
Lookback time at each input redshift.
"""
if cosmo is None:
cosmo = _get_current()
return cosmo.lookback_time(z)
def comoving_distance(z, cosmo=None):
""" Comoving distance in Mpc at redshift `z`.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
codist : astropy.units.Quantity
Comoving distance at each input redshift.
"""
if cosmo is None:
cosmo = _get_current()
return cosmo.comoving_distance(z)
def angular_diameter_distance(z, cosmo=None):
""" Angular diameter distance in Mpc at a given redshift.
This gives the proper (sometimes called 'physical') transverse
distance corresponding to an angle of 1 radian for an object at
redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
angdist : astropy.units.Quantity
Angular diameter distance at each input redshift.
"""
if cosmo is None:
cosmo = _get_current()
return cosmo.angular_diameter_distance(z)
def luminosity_distance(z, cosmo=None):
""" Luminosity distance in Mpc at redshift `z`.
This is the distance to use when converting between the bolometric
flux from an object at redshift `z` and its bolometric luminosity.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
lumdist : astropy.units.Quantity
Luminosity distance at each input redshift.
"""
if cosmo is None:
cosmo = _get_current()
return cosmo.luminosity_distance(z)
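def _example_usage():
    """Added usage sketch (not part of the original module): assumes a current
    cosmology has been configured elsewhere (see `astropy.cosmology.core`), so
    these helpers can be called with the default `cosmo=None`.
    """
    print(distmod(0.5))                       # distance modulus at z = 0.5
    print(kpc_proper_per_arcmin([0.5, 1.0]))  # proper kpc per arcmin at two redshifts
    print(scale_factor(1.0))                  # a = 1 / (1 + z) = 0.5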
|
# Python - 3.6.0
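# Added sketch (not part of the original snippet): a minimal kata solution and a
# tiny stand-in for the Codewars `test` helper, assuming neither is preloaded here.
class _TestShim:
    @staticmethod
    def assert_equals(actual, expected):
        assert actual == expected, '{!r} != {!r}'.format(actual, expected)
test = _TestShim()
def areYouPlayingBanjo(name):
    # Names starting with 'r' or 'R' play banjo (inferred from the asserts below).
    suffix = ' plays banjo' if name[0].lower() == 'r' else ' does not play banjo'
    return name + suffix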
test.assert_equals(areYouPlayingBanjo('martin'), 'martin does not play banjo')
test.assert_equals(areYouPlayingBanjo('Rikke'), 'Rikke plays banjo')
|
# Generated by Django 3.1.4 on 2020-12-30 17:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Label',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('folder_name', models.CharField(max_length=512)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='ROI',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('roi_id', models.CharField(max_length=255)),
('width', models.IntegerField()),
('height', models.IntegerField()),
('path', models.CharField(max_length=512)),
],
),
migrations.CreateModel(
name='ImageCollection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('rois', models.ManyToManyField(related_name='rois', to='core.ROI')),
],
),
migrations.CreateModel(
name='Annotation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('label', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='annotations', to='core.label')),
('roi', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='annotations', to='core.roi')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='annotations', to=settings.AUTH_USER_MODEL)),
],
),
]
|
import argparse
import os
import random
import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from data import cfg_mnet, cfg_re50
from layers.functions.prior_box import PriorBox
from models.retinaface import RetinaFace
from utils.box_utils import decode
from utils.nms.py_cpu_nms import py_cpu_nms
from utils.timer import Timer
parser = argparse.ArgumentParser(description="Retinaface")
parser.add_argument(
"-m",
"--trained_model",
type=str,
help="Trained state_dict file path to open",
)
parser.add_argument(
"--network", default="resnet50", help="Backbone network mobile0.25 or resnet50"
)
parser.add_argument(
"--save_folder",
default="input/detection-results-all",
type=str,
help="Dir to save results",
)
parser.add_argument("--cpu", action="store_true", default=False, help="Use cpu inference")
parser.add_argument("--dataset", default="kouzhao", type=str, help="dataset")
parser.add_argument("--type", default="test", type=str, help="test or val")
parser.add_argument(
"--confidence_threshold", default=0.1, type=float, help="confidence_threshold"
)
parser.add_argument("--top_k", default=5000, type=int, help="top_k")
parser.add_argument("--nms_threshold", default=0.4, type=float, help="nms_threshold")
parser.add_argument("--keep_top_k", default=750, type=int, help="keep_top_k")
parser.add_argument(
"-s",
"--save_image",
action="store_true",
default=False,
help="show detection results",
)
parser.add_argument("--vis_thres", default=0, type=float, help="visualization_threshold")
args = parser.parse_args()
def check_keys(model, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(model.state_dict().keys())
used_pretrained_keys = model_keys & ckpt_keys
unused_pretrained_keys = ckpt_keys - model_keys
missing_keys = model_keys - ckpt_keys
print("Missing keys:{}".format(len(missing_keys)))
print("Unused checkpoint keys:{}".format(len(unused_pretrained_keys)))
print("Used keys:{}".format(len(used_pretrained_keys)))
assert len(used_pretrained_keys) > 0, "load NONE from pretrained checkpoint"
return True
def remove_prefix(state_dict, prefix):
""" Old style model is stored with all names of parameters sharing common prefix 'module.' """
print("remove prefix '{}'".format(prefix))
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
def load_model(model, pretrained_path, load_to_cpu):
print("Loading pretrained model from {}".format(pretrained_path))
if load_to_cpu:
pretrained_dict = torch.load(
pretrained_path, map_location=lambda storage, loc: storage
)
else:
device = torch.cuda.current_device()
pretrained_dict = torch.load(
pretrained_path, map_location=lambda storage, loc: storage.cuda(device)
)
if "state_dict" in pretrained_dict.keys():
pretrained_dict = remove_prefix(pretrained_dict["state_dict"], "module.")
else:
pretrained_dict = remove_prefix(pretrained_dict, "module.")
check_keys(model, pretrained_dict)
model.load_state_dict(pretrained_dict, strict=False)
return model
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
"""
    description: Plots one bounding box on image img;
        this function comes from the YOLOv5 project.
    param:
        x: a box like [x1,y1,x2,y2]
        img: an opencv image object
color: color to draw rectangle, such as (0,255,0)
label: str
line_thickness: int
return:
no return
"""
tl = (
line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
) # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(
img,
label,
(c1[0], c1[1] - 2),
0,
tl / 3,
[225, 255, 255],
thickness=tf,
lineType=cv2.LINE_AA,
)
def preprocess(img, input_h=960, input_w=960):
h, w, _ = img.shape
# Calculate width and height and paddings
r_w = input_w / w
r_h = input_h / h
if r_h > r_w:
tw = input_w
th = int(r_w * h)
tx1 = tx2 = 0
ty1 = int((input_h - th) / 2)
ty2 = input_h - th - ty1
else:
tw = int(r_h * w)
th = input_h
tx1 = int((input_w - tw) / 2)
tx2 = input_w - tw - tx1
ty1 = ty2 = 0
# Resize the image with long side while maintaining ratio
img = cv2.resize(img, (tw, th), interpolation=cv2.INTER_LINEAR)
# Pad the short side with (128,128,128)
    img = cv2.copyMakeBorder(
        img, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, value=(128, 128, 128)
    )
img = img.astype(np.float32)
return img
def postprocess(dets, origin_h, origin_w, input_h=960, input_w=960):
r_w = input_w / origin_w
r_h = input_h / origin_h
if r_h > r_w:
dets[:, 1] -= (input_h - r_w * origin_h) / 2
dets[:, 3] -= (input_h - r_w * origin_h) / 2
dets[:, :4] /= r_w
else:
dets[:, 0] -= (input_w - r_h * origin_w) / 2
dets[:, 2] -= (input_w - r_h * origin_w) / 2
dets[:, :4] /= r_h
return dets
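# Added sanity check (not part of the original script): shows how preprocess()
# letterboxing and postprocess() coordinate mapping round-trip for a
# hypothetical 100x200 (h x w) image resized into the default 960x960 input.
def _letterbox_roundtrip_example():
    origin_h, origin_w = 100, 200
    r_w = 960 / origin_w                        # width is the side scaled to fill
    ty1 = int((960 - int(r_w * origin_h)) / 2)  # top padding added by preprocess()
    dets = np.array([[0.0, ty1, 960.0, 960.0 - ty1, 0.99]], dtype=np.float32)
    mapped = postprocess(dets, origin_h, origin_w)
    # mapped[0, :4] is now [0, 0, 200, 100], i.e. original-image pixel coordinates
    return mapped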
if __name__ == "__main__":
torch.set_grad_enabled(False)
cfg = None
if args.network == "mobile0.25":
cfg = cfg_mnet
elif args.network == "resnet50":
cfg = cfg_re50
# net and model
net = RetinaFace(cfg=cfg, phase="test")
net = load_model(net, args.trained_model, args.cpu)
net.eval()
print("Finished loading model!")
cudnn.benchmark = True
device = torch.device("cpu" if args.cpu else "cuda")
net = net.to(device)
# save file
if not os.path.exists(args.save_folder):
os.makedirs(args.save_folder)
# testing dataset
test_dataset = []
with open(
os.path.join("data", args.dataset, args.type, "labels.txt"), "r", encoding="utf-8"
) as f:
for line in f:
test_dataset.append(
os.path.join(
"data", args.dataset, args.type, "images", line.split("\t")[0]
)
)
num_images = len(test_dataset)
# testing scale
resize = 1
for i, image_path in enumerate(test_dataset):
save_name = os.path.splitext(os.path.basename(image_path))[0] + ".txt"
with open(os.path.join(args.save_folder, save_name), "w", encoding="utf-8") as f:
img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)
origin_h, origin_w, _ = img_raw.shape
img = preprocess(img_raw)
im_height, im_width, _ = img.shape
scale = torch.Tensor([im_width, im_height, im_width, im_height])
img -= (104, 117, 123)
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).unsqueeze(0)
img = img.to(device)
scale = scale.to(device)
loc, conf = net(img) # forward pass
priorbox = PriorBox(cfg, image_size=(im_height, im_width))
priors = priorbox.forward()
priors = priors.to(device)
prior_data = priors.data
boxes = decode(loc.data.squeeze(0), prior_data, cfg["variance"])
boxes = boxes * scale
boxes = boxes.cpu().numpy()
conf = conf.squeeze(0).data.cpu().numpy()
face_scores = conf[:, 1]
mask_scores = conf[:, 2]
scores = np.where(face_scores > mask_scores, face_scores, mask_scores)
face_inds = np.where(face_scores > mask_scores, 1, 0)
# ignore low scores
inds = np.where(scores > args.confidence_threshold)[0]
boxes = boxes[inds]
scores = scores[inds]
face_inds = face_inds[inds]
# keep top-K before NMS
# order = scores.argsort()[::-1][:args.top_k]
order = scores.argsort()[::-1]
boxes = boxes[order]
scores = scores[order]
face_inds = face_inds[order]
# do NMS
dets = np.hstack((boxes, scores[:, np.newaxis])).astype(
np.float32, copy=False
)
dets = postprocess(dets, origin_h, origin_w)
keep = py_cpu_nms(dets, args.nms_threshold)
dets = dets[keep, :]
face_inds = face_inds[keep]
print("im_detect: {:d}/{:d}".format(i + 1, num_images))
for j, b in enumerate(dets):
if face_inds[j] > 0:
f.write(
"face "
+ str(b[4])
+ " "
+ str(int(b[0]))
+ " "
+ str(int(b[1]))
+ " "
+ str(int(b[2]))
+ " "
+ str(int(b[3]))
+ "\n"
)
else:
f.write(
"mask "
+ str(b[4])
+ " "
+ str(int(b[0]))
+ " "
+ str(int(b[1]))
+ " "
+ str(int(b[2]))
+ " "
+ str(int(b[3]))
+ "\n"
)
# show image
if args.save_image:
for j, b in enumerate(dets):
if b[4] < args.vis_thres:
continue
if face_inds[j] > 0:
# face
text = "face {:.2f}".format(b[4])
plot_one_box(b, img_raw, (0, 0, 255), text)
else:
# mask
text = "mask {:.2f}".format(b[4])
plot_one_box(b, img_raw, (255, 0, 0), text)
# save image
os.makedirs("./results/", exist_ok=True)
name = os.path.join("./results", str(i) + ".jpg")
cv2.imwrite(name, img_raw)
|
from rest_framework.permissions import BasePermission
from acls.business.fields import AclLevelField
from acls.business.permissions import has_object_acl
class CanManageSecretPermission(BasePermission):
def has_object_permission(self, request, view, obj):
if view.action == 'retrieve' or view.action == 'list':
return has_object_acl(request.user, obj.card,
AclLevelField.LEVEL_READ)
else:
# check permission to card
return has_object_acl(request.user, obj.card,
AclLevelField.LEVEL_WRITE)
|
from django.core.mail import EmailMultiAlternatives
from django.test import TestCase
from django.test import override_settings
from ai_django_core.mail.backends.whitelist_smtp import WhitelistEmailBackend
@override_settings(EMAIL_BACKEND='ai_django_core.mail.backends.whitelist_smtp.WhitelistEmailBackend',
EMAIL_BACKEND_DOMAIN_WHITELIST=['valid.domain'],
EMAIL_BACKEND_REDIRECT_ADDRESS='%s@testuser.valid.domain')
class MailBackendWhitelistBackendTest(TestCase):
def test_whitify_mail_addresses_replace(self):
email_1 = 'albertus.magnus@example.com'
email_2 = 'thomas_von_aquin@example.com'
processed_list = WhitelistEmailBackend.whitify_mail_addresses(mail_address_list=[email_1, email_2])
self.assertEqual(len(processed_list), 2)
self.assertEqual(processed_list[0], 'albertus.magnus_example.com@testuser.valid.domain')
self.assertEqual(processed_list[1], 'thomas_von_aquin_example.com@testuser.valid.domain')
def test_whitify_mail_addresses_whitelisted_domain(self):
email = 'platon@valid.domain'
processed_list = WhitelistEmailBackend.whitify_mail_addresses(mail_address_list=[email])
self.assertEqual(len(processed_list), 1)
self.assertEqual(processed_list[0], email)
@override_settings(EMAIL_BACKEND_REDIRECT_ADDRESS='')
def test_whitify_mail_addresses_no_redirect_configured(self):
email = 'sokrates@example.com'
processed_list = WhitelistEmailBackend.whitify_mail_addresses(mail_address_list=[email])
self.assertEqual(len(processed_list), 0)
def test_process_recipients_regular(self):
mail = EmailMultiAlternatives('Test subject', 'Here is the message.', 'from@example.com', ['to@example.com'],
connection=None)
backend = WhitelistEmailBackend()
message_list = backend._process_recipients([mail])
self.assertEqual(len(message_list), 1)
self.assertEqual(message_list[0].to, ['to_example.com@testuser.valid.domain'])
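# Added illustration (not the library's implementation): the per-address mapping
# that the tests above assert, written as a standalone helper and assuming the
# whitelist / redirect settings configured in the @override_settings decorator.
def _assumed_whitify(address, whitelist=('valid.domain',), redirect='%s@testuser.valid.domain'):
    if not redirect:
        return None  # no redirect configured -> the address is dropped entirely
    if address.rsplit('@', 1)[-1] in whitelist:
        return address  # whitelisted domains pass through unchanged
    return redirect % address.replace('@', '_')  # everything else is rewritten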
|
import json
import os
import sys
import time
import msvcrt
from pick import pick
from termcolor import cprint, colored
from extractor import Extractor
path_to_config = "./config.json"
def extract_data():
config = get_config(path_to_config)
extractor = Extractor(config, restart)
raw_data = extractor.get_file()
extracted_data = extractor.extract_data(raw_data)
extractor.save_file(extracted_data)
def title_map(option):
try:
title = option["title"]
    except (KeyError, TypeError):
        cprint("Invalid configuration", "red")
        title = "Invalid configuration"
    return title
def get_config(path_to_config):
if os.path.exists(path_to_config):
with open(path_to_config) as f:
config = json.load(f)
else:
config = []
with open(path_to_config, 'w+') as f:
f.write(json.dumps(config))
if len(config) > 0:
prompt = 'Please choose data to extract:'
(option, _) = pick(config, prompt, indicator="=>",
options_map_func=title_map)
return option
else:
restart(colored('No configurations available!', "red"))
def run_tool():
prompt = 'What would you like to do?'
(option, _) = pick(["Extract Data", "Exit"], prompt, indicator="=>")
if option == "Exit":
sys.exit(colored("Bye!", "green"))
if option == "Extract Data":
extract_data()
def restart(status):
print(status)
print("Press any key to continue...")
msvcrt.getch()
run_tool()
if __name__ == "__main__":
run_tool()
|
from zenoh_service.core.zenoh_native import ZenohNative
import numpy as np
import time
from datetime import datetime
import logging
###
L = logging.getLogger(__name__)
###
class ZenohNativeGet(ZenohNative):
def __init__(self, _listener=None, _mode="peer", _selector=None, _peer=None, _session_type=None,
type_image=False, tagged_image=False):
super().__init__(_listener=_listener, _mode=_mode, _selector=_selector, _peer=_peer, _session_type=_session_type)
self.type_image = type_image
self.tagged_image = tagged_image
def get(self, count=False):
"""
selector: The selection of resources to get.
"""
t0_get = time.time()
if count:
print(">>>> count: {}".format(count))
total = len(self.workspace.get(self.selector))
print("Total data: {}".format(total))
else:
for data in self.workspace.get(self.selector):
print("Data path: {}".format(data.path))
# print(' {} : {} (encoding: {} , timestamp: {})'.format(
# data.path, data.value.get_content(), data.value.encoding_descr(), data.timestamp))
                if self.type_image and self.tagged_image:
                    # data is an encoded image tagged with extra information;
                    self._extract_tagged_img(data.value.get_content())
                elif self.type_image and not self.tagged_image:
                    # data is a plain encoded image;
                    print(" --- type: {}; {}; {}".format(
                        type(data.value.get_content()[0]),
                        data.value.get_content()[0],
                        data.value.get_content()[1]
                    ))
                    # self._extract_normal_img(data.value.get_content())
else:
print("Data: {}".format(data.value.get_content()))
t1_get = (time.time() - t0_get) * 1000
L.warning(('\n[%s] Latency get data from Zenoh (%.3f ms) \n' % (datetime.now().strftime("%H:%M:%S"), t1_get)))
def _extract_normal_img(self, img_data):
t0_decoding = time.time()
deserialized_bytes = np.frombuffer(img_data, dtype=np.int8)
t1_decoding = (time.time() - t0_decoding) * 1000
L.warning(
('\n[%s] Latency load ONLY numpy image (%.3f ms) \n' % (datetime.now().strftime("%H:%M:%S"), t1_decoding)))
t0_decoding = time.time()
deserialized_img = np.reshape(deserialized_bytes, newshape=(1080, 1920, 3))
t1_decoding = (time.time() - t0_decoding) * 1000
L.warning(
('\n[%s] Latency reformat image (%.3f ms) \n' % (datetime.now().strftime("%H:%M:%S"), t1_decoding)))
def _extract_tagged_img(self, img_data):
t0_decoding = time.time()
deserialized_bytes = np.frombuffer(img_data, dtype=[('id', 'U10'),
('store_enabled', '?'),
('timestamp', 'f'),
('image', [('pixel', 'i')], (1, 6220800))
])
t1_decoding = (time.time() - t0_decoding) * 1000
L.warning(
('\n[%s] Latency deserialized_bytes (%.3f ms) \n' % (datetime.now().strftime("%H:%M:%S"), t1_decoding)))
t0_decoding = time.time()
img_original = deserialized_bytes["image"]["pixel"][0].reshape(1080, 1920, 3)
print(">>> img_original SHAPE:", img_original.shape)
t1_decoding = (time.time() - t0_decoding) * 1000
L.warning(
('\n[%s] Latency reformat image (%.3f ms) \n' % (datetime.now().strftime("%H:%M:%S"), t1_decoding)))
"""
Usage example
---------------
# configure input parameters
selector = "/demo/example/**"
type_image = True
tagged_image = True
z_svc = ZenohNativeGet(
_selector=selector, _session_type="GET",
type_image=type_image, tagged_image=tagged_image
)
z_svc.init_connection()
z_svc.get()
# closing Zenoh subscription & session
z_svc.close_connection()
"""
|
# RUN: %PYTHON %s 2>&1 | FileCheck %s
# This file contains simple test cases that combine various codegen options.
from mlir.sandbox.experts import *
from mlir.sandbox.harness import *
from mlir.sandbox.transforms import *
from .definitions import *
fun_name = 'padded_conv1d_nwc_wcf_main'
op_name = 'linalg.conv_1d_nwc_wcf'
################################################################################
### Compilation strategies.
################################################################################
all_names = ["Conv1DExpert"]
all_experts = [
Tile(fun_name=fun_name,
op_name=op_name,
# N W C KW F
tile_sizes=[1, 8, 32, 1, 8])\
.then(LoweringOnlyExpert(fun_name=fun_name, op_name=op_name))
]
################################################################################
### Problem instantiation
################################################################################
keys = ['N', 'W', 'C', 'KW', 'F', 'WpadL', 'WpadR', 'stride', 'dilation']
# CHECK-NOT: FAILURE
def main():
# Specify default configuration and parse command line.
args = test_argparser(
"padded conv 1d benchmark",
default_n_iters=1,
# N W C KW F WpadL WpadR stride dilation
default_problem_sizes_list=[
[8, 16, 32, 3, 64, 0, 1, 1, 1], \
],
default_expert_list=all_names,
default_dynamic_at_compile_time_list=[],
default_spec_list=[])
test_harness(lambda sizes, t: Padded_Conv1d_NWC_WCF_Problem(
WpadL=sizes['WpadL'],
WpadR=sizes['WpadR'],
stride=sizes['stride'],
dilation=sizes['dilation']), [[np.float32] * 3],
test_sizes(keys, args.problem_sizes_list),
test_experts(all_experts, all_names, args.expert_list),
n_iters=args.n_iters,
function_name=fun_name,
dump_data_to_file=args.dump_data)
if __name__ == '__main__':
main()
|
import asyncio
import collections
import unittest
from unittest import mock
from aiohttp.multidict import CIMultiDict
from aiohttp.web import Request
from aiohttp.protocol import RawRequestMessage, HttpVersion11
from aiohttp import web
class TestHTTPExceptions(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.payload = mock.Mock()
self.transport = mock.Mock()
self.reader = mock.Mock()
self.writer = mock.Mock()
self.writer.drain.return_value = ()
self.buf = b''
self.writer.write.side_effect = self.append
def tearDown(self):
self.loop.close()
def append(self, data):
self.buf += data
def make_request(self, method='GET', path='/', headers=CIMultiDict()):
self.app = mock.Mock()
message = RawRequestMessage(method, path, HttpVersion11, headers,
False, False)
req = Request(self.app, message, self.payload,
self.transport, self.reader, self.writer)
return req
def test_all_http_exceptions_exported(self):
self.assertIn('HTTPException', web.__all__)
for name in dir(web):
if name.startswith('_'):
continue
obj = getattr(web, name)
if isinstance(obj, type) and issubclass(obj, web.HTTPException):
self.assertIn(name, web.__all__)
def test_HTTPOk(self):
req = self.make_request()
resp = web.HTTPOk()
self.loop.run_until_complete(resp.prepare(req))
self.loop.run_until_complete(resp.write_eof())
txt = self.buf.decode('utf8')
self.assertRegex(txt, ('HTTP/1.1 200 OK\r\n'
'CONTENT-TYPE: text/plain; charset=utf-8\r\n'
'CONTENT-LENGTH: 7\r\n'
'CONNECTION: keep-alive\r\n'
'DATE: .+\r\n'
'SERVER: .+\r\n\r\n'
'200: OK'))
def test_terminal_classes_has_status_code(self):
terminals = set()
for name in dir(web):
obj = getattr(web, name)
if isinstance(obj, type) and issubclass(obj, web.HTTPException):
terminals.add(obj)
dup = frozenset(terminals)
for cls1 in dup:
for cls2 in dup:
if cls1 in cls2.__bases__:
terminals.discard(cls1)
for cls in terminals:
self.assertIsNotNone(cls.status_code, cls)
codes = collections.Counter(cls.status_code for cls in terminals)
self.assertNotIn(None, codes)
self.assertEqual(1, codes.most_common(1)[0][1])
def test_HTTPFound(self):
req = self.make_request()
resp = web.HTTPFound(location='/redirect')
self.assertEqual('/redirect', resp.location)
self.assertEqual('/redirect', resp.headers['location'])
self.loop.run_until_complete(resp.prepare(req))
self.loop.run_until_complete(resp.write_eof())
txt = self.buf.decode('utf8')
self.assertRegex(txt, ('HTTP/1.1 302 Found\r\n'
'CONTENT-TYPE: text/plain; charset=utf-8\r\n'
'CONTENT-LENGTH: 10\r\n'
'LOCATION: /redirect\r\n'
'CONNECTION: keep-alive\r\n'
'DATE: .+\r\n'
'SERVER: .+\r\n\r\n'
'302: Found'))
def test_HTTPFound_empty_location(self):
with self.assertRaises(ValueError):
web.HTTPFound(location='')
with self.assertRaises(ValueError):
web.HTTPFound(location=None)
def test_HTTPMethodNotAllowed(self):
req = self.make_request()
resp = web.HTTPMethodNotAllowed('get', ['POST', 'PUT'])
self.assertEqual('GET', resp.method)
self.assertEqual(['POST', 'PUT'], resp.allowed_methods)
self.assertEqual('POST,PUT', resp.headers['allow'])
self.loop.run_until_complete(resp.prepare(req))
self.loop.run_until_complete(resp.write_eof())
txt = self.buf.decode('utf8')
self.assertRegex(txt, ('HTTP/1.1 405 Method Not Allowed\r\n'
'CONTENT-TYPE: text/plain; charset=utf-8\r\n'
'CONTENT-LENGTH: 23\r\n'
'ALLOW: POST,PUT\r\n'
'CONNECTION: keep-alive\r\n'
'DATE: .+\r\n'
'SERVER: .+\r\n\r\n'
'405: Method Not Allowed'))
def test_override_body_with_text(self):
resp = web.HTTPNotFound(text="Page not found")
self.assertEqual(404, resp.status)
self.assertEqual("Page not found".encode('utf-8'), resp.body)
self.assertEqual("Page not found", resp.text)
self.assertEqual("text/plain", resp.content_type)
self.assertEqual("utf-8", resp.charset)
def test_override_body_with_binary(self):
txt = "<html><body>Page not found</body></html>"
resp = web.HTTPNotFound(body=txt.encode('utf-8'),
content_type="text/html")
self.assertEqual(404, resp.status)
self.assertEqual(txt.encode('utf-8'), resp.body)
self.assertEqual(txt, resp.text)
self.assertEqual("text/html", resp.content_type)
self.assertIsNone(resp.charset)
def test_default_body(self):
resp = web.HTTPOk()
self.assertEqual(b'200: OK', resp.body)
def test_empty_body_204(self):
resp = web.HTTPNoContent()
self.assertIsNone(resp.body)
    def test_empty_body_205(self):
        resp = web.HTTPResetContent()
        self.assertIsNone(resp.body)
    def test_empty_body_304(self):
        resp = web.HTTPNotModified()
        self.assertIsNone(resp.body)
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Saves updated Pfam-A seed 34.0 TFRecords with UniprotKB context sequences.
The new TFRecords will be identical to the original, except for the following:
+ A new 'full_sequence' field will be included, containing the context sequence
from UniprotKB.
+ The fields 'start' and 'end' will be overwritten whenever the values reported
by Pfam-A seed do not match with those computed here (occurs in 11 entries).
+ Pfam-A seed entries that occur multiple times within their context (repeats)
are no longer included (549 entries).
+ Pfam-A seed entries not found within their context (sequence mismatches) are
no longer included (1 entry).
"""
import collections
import json
import os
import time
from typing import Any, Callable, List, Mapping, MutableMapping, Sequence, Union
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from dedal import vocabulary
from dedal.data import specs
flags.DEFINE_string(
'out_dir',
None,
'TFrecords for <split> to be stored in <out_dir>/<task>_with_ctx/<split>.')
flags.DEFINE_string(
'data_dir', None,
'<data_dir>/<task> contains output of create_splits.py.')
flags.DEFINE_string(
'full_sequences_file',
None,
'Path to input csv file produced by the script `save_full_sequences.py`.')
flags.DEFINE_string(
'pfam34_metadata',
None,
'Path to input json file produced by the script `save_to_disk.py`.')
flags.DEFINE_string(
'task',
'iid_ood_clans',
'Task for which to generate TFRecords.')
flags.DEFINE_list(
'splits',
['train', 'iid_validation', 'ood_validation', 'iid_test', 'ood_test'],
'Data split for which to generate TFRecords.')
flags.DEFINE_integer(
'num_shards_train', 30,
'Number of shards for TFRecords in the train split.')
flags.DEFINE_integer(
'num_shards_eval', 5,
'Number of shards for TFRecords in eval splits, i.e. anything not train.')
flags.mark_flag_as_required('out_dir')
flags.mark_flag_as_required('data_dir')
flags.mark_flag_as_required('full_sequences_file')
flags.mark_flag_as_required('pfam34_metadata')
FLAGS = flags.FLAGS
# Preprocessed Pfam-A seed 34.0 config.
INT_KEYS = ('seq_key', 'fam_key', 'cla_key', 'seq_len', 'start', 'end')
CI_KEY = 'ci'
STR_KEYS = ('id', 'ac', 'ss')
# Type aliases
PfamExample = MutableMapping[str, Union[np.ndarray, int, str]]
PfamSplit = List[PfamExample]
UniprotKBContext = Mapping[str, Union[Sequence[int], bool, str]]
def load_pfam_metadata():
"""Loads Pfam-A seed metadata with vocabulary and additional key info."""
with tf.io.gfile.GFile(FLAGS.pfam34_metadata, 'r') as f:
metadata = json.load(f)
metadata['vocab'] = vocabulary.Vocabulary(**metadata['vocab'])
return metadata
def load_pfam_examples(pid_ths):
"""Loads Pfam-A seed TFRecords w/o UniprotKB context for each split."""
logging.info('Loading original Pfam-A seed examples...')
examples_from_split = collections.defaultdict(list)
ci_keys = tuple(f'{CI_KEY}_{pid}' for pid in pid_ths)
ds_loader = specs.make_pfam34_loader(root_dir=FLAGS.data_dir,
sub_dir='',
extra_keys=INT_KEYS + ci_keys+ STR_KEYS,
task=FLAGS.task)
for split in FLAGS.splits:
for ex in ds_loader.load(split).prefetch(tf.data.AUTOTUNE):
ex = tf.nest.map_structure(lambda t: t.numpy(), ex)
for key in STR_KEYS:
ex[key] = ex[key].decode('utf-8')
# Multiple Pfam entries can share the same UniprotKB ID, but are different
# subsequences of that protein.
ex['id'] = f"{ex['id']}/{ex['start']}-{ex['end']}"
examples_from_split[split].append(ex)
logging.info('Found %d Pfam-A seed examples for split %s.',
len(examples_from_split[split]), split)
return examples_from_split
def load_uniprotkb_context():
"""Loads UniprotKB sequences containing Pfam-A seed (sub)sequences."""
logging.info('Loading UniprotKB context from %s...',
FLAGS.full_sequences_file)
context_from_id = {}
with tf.io.gfile.GFile(FLAGS.full_sequences_file, 'r') as f:
_ = f.readline() # Discards CSV header.
for line in f:
pfam_id, uniprot_starts_str, full_sequence = line.strip().split(',')
uniprot_starts = [int(s) if s else -1
for s in uniprot_starts_str.split(';')]
context_from_id[pfam_id] = {'starts': uniprot_starts,
'has_repeats': len(uniprot_starts) > 1,
'mismatched': -1 in uniprot_starts,
'full_sequence': full_sequence}
logging.info('Found %d UniprotKB context entries.', len(context_from_id))
return context_from_id
def add_uniprotkb_context_to_pfam_examples(
pfam_examples_from_split,
uniprotkb_context_from_id,
vocab):
"""Cross-references UniprotKB context sequences with Pfam-A seed examples."""
updated_pfam_examples_from_split = collections.defaultdict(list)
for split, pfam_split in pfam_examples_from_split.items():
removed_pfam_ids = []
for ex in pfam_split:
pfam_id = ex['id']
# Ensures all Pfam-A seed entries were cross-referenced against UniprotKB.
if pfam_id not in uniprotkb_context_from_id:
raise ValueError(
f'Pfam entry {pfam_id} not present in {FLAGS.full_sequences_file}.')
uniprotkb_context = uniprotkb_context_from_id[pfam_id]
# Skips any Pfam entries that occur more than once in their UniprotKB
# context (ambiguous ground-truth alignment) or whose sequences did not
# perfectly match a subsequence in the UniprotKB context.
if uniprotkb_context['has_repeats'] or uniprotkb_context['mismatched']:
removed_pfam_ids.append(pfam_id)
continue
updated_pfam_examples_from_split[split].append(ex)
# Encodes UniprotKB context sequence using the same vocabulary as
# `ex['sequence']`.
ex['full_sequence'] = vocab.encode(uniprotkb_context['full_sequence'])
# Overrides start / end positions for rare cases in which Pfam-A seed
# reports values inconsistent with UniprotKB context.
if ex['start'] != uniprotkb_context['starts'][0]:
logging.info('Overriding start / end for Pfam entry %s.', pfam_id)
ex['start'] = uniprotkb_context['starts'][0]
ex['end'] = ex['start'] + ex['seq_len'] - 1
logging.info(
'%d Pfam entries in %s split removed for repeats / mismatches: %s.',
len(removed_pfam_ids), split, removed_pfam_ids)
return updated_pfam_examples_from_split
def make_serialize_example_fn(pid_ths):
"""Returns fn for serialization of Pfam-A seed examples."""
ci_keys = tuple(f'{CI_KEY}_{pid}' for pid in pid_ths)
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def str_feature(value, encoding='ascii'):
value = bytes(value, encoding=encoding)
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def serialize_example_fn(ex):
"""Serializes the Pfam-A seed example for TFRecord storage."""
feature = {}
for key in INT_KEYS + ci_keys:
feature[key] = int64_feature([ex[key]])
for key in STR_KEYS:
feature[key] = str_feature(ex[key])
feature['seq'] = int64_feature(ex['sequence'])
feature['full_seq'] = int64_feature(ex['full_sequence'])
proto = tf.train.Example(features=tf.train.Features(feature=feature))
return proto.SerializeToString()
return serialize_example_fn
def split_to_disk(
out_dir,
num_shards,
pfam_split,
serialize_example_fn):
"""Saves new Pfam-A seed 34.0 TFRecords with extra 'full_sequence' field."""
def gen():
for ex in pfam_split:
yield serialize_example_fn(ex)
def reduce_func(key, ds):
filename = tf.strings.join(
[out_dir, '/', tf.strings.as_string(key), '.tfrecords'])
writer = tf.data.experimental.TFRecordWriter(filename)
writer.write(ds.map(lambda _, x: x))
return tf.data.Dataset.from_tensors(filename)
ds = tf.data.Dataset.from_generator(
gen, output_signature=tf.TensorSpec(shape=(), dtype=tf.string))
ds = ds.enumerate()
ds = ds.group_by_window(
lambda i, _: i % num_shards, reduce_func, tf.int64.max)
_ = list(ds)
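# Added sketch (not part of the original pipeline): how the records written by
# `serialize_example_fn` above could be parsed back, assuming the same keys
# (INT_KEYS, the per-pid ci keys, STR_KEYS) plus 'seq' and 'full_seq' stored as
# variable-length int64 lists.
def make_parse_example_fn(pid_ths):
  """Returns fn that parses serialized Pfam-A seed examples (added sketch)."""
  ci_keys = tuple(f'{CI_KEY}_{pid}' for pid in pid_ths)
  features = {key: tf.io.FixedLenFeature([], tf.int64)
              for key in INT_KEYS + ci_keys}
  features.update({key: tf.io.FixedLenFeature([], tf.string)
                   for key in STR_KEYS})
  features.update({key: tf.io.VarLenFeature(tf.int64)
                   for key in ('seq', 'full_seq')})
  def parse_example_fn(serialized):
    ex = tf.io.parse_single_example(serialized, features)
    ex['seq'] = tf.sparse.to_dense(ex['seq'])
    ex['full_seq'] = tf.sparse.to_dense(ex['full_seq'])
    return ex
  return parse_example_fn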
def main(_):
start = time.time()
metadata = load_pfam_metadata()
pfam_examples_from_split = load_pfam_examples(metadata['pid_ths'])
uniprotkb_context_from_id = load_uniprotkb_context()
updated_pfam_examples_from_split = add_uniprotkb_context_to_pfam_examples(
pfam_examples_from_split,
uniprotkb_context_from_id,
metadata['vocab'])
serialize_example_fn = make_serialize_example_fn(metadata['pid_ths'])
for split, pfam_split in updated_pfam_examples_from_split.items():
logging.info('Saving updated TFRecords for split %s...', split)
out_dir = os.path.join(FLAGS.out_dir, f'{FLAGS.task}_with_ctx', split)
tf.io.gfile.makedirs(out_dir)
num_shards = (FLAGS.num_shards_train if split == 'train'
else FLAGS.num_shards_eval)
split_to_disk(out_dir, num_shards, pfam_split, serialize_example_fn)
runtime = time.time() - start
logging.info('Total time elapsed: %.3f seconds.', runtime)
if __name__ == '__main__':
app.run(main)
|
# Copyright (C) 2017 MongoDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Transform idl.syntax trees from the parser into well-defined idl.ast trees."""
from __future__ import absolute_import, print_function, unicode_literals
import re
from typing import Union
from . import ast
from . import bson
from . import errors
from . import syntax
def _validate_single_bson_type(ctxt, idl_type, syntax_type):
# type: (errors.ParserContext, Union[syntax.Type, ast.Field], unicode) -> bool
"""Validate bson serialization type is correct for a type."""
bson_type = idl_type.bson_serialization_type[0]
# Any is only valid if it is the only bson type specified
if bson_type == "any":
return True
if not bson.is_valid_bson_type(bson_type):
ctxt.add_bad_bson_type_error(idl_type, syntax_type, idl_type.name, bson_type)
return False
    # Validate bindata_subtype
if bson_type == "bindata":
subtype = idl_type.bindata_subtype
if subtype is None:
subtype = "<unknown>"
if not bson.is_valid_bindata_subtype(subtype):
ctxt.add_bad_bson_bindata_subtype_value_error(idl_type, syntax_type, idl_type.name,
subtype)
elif idl_type.bindata_subtype is not None:
ctxt.add_bad_bson_bindata_subtype_error(idl_type, syntax_type, idl_type.name, bson_type)
return True
def _validate_bson_types_list(ctxt, idl_type, syntax_type):
# type: (errors.ParserContext, Union[syntax.Type, ast.Field], unicode) -> bool
"""Validate bson serialization type(s) is correct for a type."""
bson_types = idl_type.bson_serialization_type
if len(bson_types) == 1:
return _validate_single_bson_type(ctxt, idl_type, syntax_type)
for bson_type in bson_types:
if bson_type == "any":
ctxt.add_bad_any_type_use_error(idl_type, syntax_type, idl_type.name)
return False
if not bson.is_valid_bson_type(bson_type):
ctxt.add_bad_bson_type_error(idl_type, syntax_type, idl_type.name, bson_type)
return False
        # V1 restriction: cannot mix bindata into list of types
if bson_type == "bindata":
ctxt.add_bad_bson_type_error(idl_type, syntax_type, idl_type.name, bson_type)
return False
# Cannot mix non-scalar types into the list of types
if not bson.is_scalar_bson_type(bson_type):
ctxt.add_bad_bson_scalar_type_error(idl_type, syntax_type, idl_type.name, bson_type)
return False
return True
def _validate_type(ctxt, idl_type):
# type: (errors.ParserContext, syntax.Type) -> None
"""Validate each type is correct."""
# Validate naming restrictions
if idl_type.name.startswith("array<"):
ctxt.add_array_not_valid_error(idl_type, "type", idl_type.name)
_validate_type_properties(ctxt, idl_type, 'type')
def _validate_cpp_type(ctxt, idl_type, syntax_type):
# type: (errors.ParserContext, Union[syntax.Type, ast.Field], unicode) -> None
"""Validate the cpp_type is correct."""
# Validate cpp_type
# Do not allow StringData, use std::string instead.
if "StringData" in idl_type.cpp_type:
ctxt.add_no_string_data_error(idl_type, syntax_type, idl_type.name)
# We do not support C++ char and float types for style reasons
if idl_type.cpp_type in ['char', 'wchar_t', 'char16_t', 'char32_t', 'float']:
ctxt.add_bad_cpp_numeric_type_use_error(idl_type, syntax_type, idl_type.name,
idl_type.cpp_type)
# We do not support C++ builtin integer for style reasons
for numeric_word in ['signed', "unsigned", "int", "long", "short"]:
if re.search(r'\b%s\b' % (numeric_word), idl_type.cpp_type):
ctxt.add_bad_cpp_numeric_type_use_error(idl_type, syntax_type, idl_type.name,
idl_type.cpp_type)
# Return early so we only throw one error for types like "signed short int"
return
# Check for std fixed integer types which are allowed
if idl_type.cpp_type in ["std::int32_t", "std::int64_t", "std::uint32_t", "std::uint64_t"]:
return
# Check for std fixed integer types which are not allowed. These are not allowed even if they
# have the "std::" prefix.
for std_numeric_type in [
"int8_t", "int16_t", "int32_t", "int64_t", "uint8_t", "uint16_t", "uint32_t", "uint64_t"
]:
if std_numeric_type in idl_type.cpp_type:
ctxt.add_bad_cpp_numeric_type_use_error(idl_type, syntax_type, idl_type.name,
idl_type.cpp_type)
return
def _validate_type_properties(ctxt, idl_type, syntax_type):
# type: (errors.ParserContext, Union[syntax.Type, ast.Field], unicode) -> None
"""Validate each type or field is correct."""
# Validate bson type restrictions
if not _validate_bson_types_list(ctxt, idl_type, syntax_type):
return
if len(idl_type.bson_serialization_type) == 1:
bson_type = idl_type.bson_serialization_type[0]
if bson_type == "any":
            # For any, a deserializer is required but the user can try to get away with the default
# serialization for their C++ type.
if idl_type.deserializer is None:
ctxt.add_missing_ast_required_field_error(idl_type, syntax_type, idl_type.name,
"deserializer")
elif bson_type == "string":
# Strings support custom serialization unlike other non-object scalar types
if idl_type.deserializer is None:
ctxt.add_missing_ast_required_field_error(idl_type, syntax_type, idl_type.name,
"deserializer")
elif not bson_type in ["object"]:
if idl_type.deserializer is None:
ctxt.add_missing_ast_required_field_error(idl_type, syntax_type, idl_type.name,
"deserializer")
if idl_type.deserializer is not None and "BSONElement" not in idl_type.deserializer:
ctxt.add_not_custom_scalar_serialization_not_supported_error(
idl_type, syntax_type, idl_type.name, bson_type)
if idl_type.serializer is not None:
ctxt.add_not_custom_scalar_serialization_not_supported_error(
idl_type, syntax_type, idl_type.name, bson_type)
else:
# Now, this is a list of scalar types
if idl_type.deserializer is None:
ctxt.add_missing_ast_required_field_error(idl_type, syntax_type, idl_type.name,
"deserializer")
_validate_cpp_type(ctxt, idl_type, syntax_type)
def _validate_types(ctxt, parsed_spec):
# type: (errors.ParserContext, syntax.IDLSpec) -> None
"""Validate all types are correct."""
for idl_type in parsed_spec.symbols.types:
_validate_type(ctxt, idl_type)
def _bind_struct(ctxt, parsed_spec, struct):
# type: (errors.ParserContext, syntax.IDLSpec, syntax.Struct) -> ast.Struct
"""
Bind a struct.
- Validating a struct and fields.
- Create the idl.ast version from the idl.syntax tree.
"""
ast_struct = ast.Struct(struct.file_name, struct.line, struct.column)
ast_struct.name = struct.name
ast_struct.description = struct.description
ast_struct.strict = struct.strict
# Validate naming restrictions
if ast_struct.name.startswith("array<"):
ctxt.add_array_not_valid_error(ast_struct, "struct", ast_struct.name)
for field in struct.fields:
ast_field = _bind_field(ctxt, parsed_spec, field)
if ast_field:
ast_struct.fields.append(ast_field)
return ast_struct
def _validate_ignored_field(ctxt, field):
# type: (errors.ParserContext, syntax.Field) -> None
"""Validate that for ignored fields, no other properties are set."""
if field.optional:
ctxt.add_ignored_field_must_be_empty_error(field, field.name, "optional")
if field.default is not None:
ctxt.add_ignored_field_must_be_empty_error(field, field.name, "default")
def _validate_field_of_type_struct(ctxt, field):
# type: (errors.ParserContext, syntax.Field) -> None
"""Validate that for fields with a type of struct, no other properties are set."""
if field.default is not None:
ctxt.add_ignored_field_must_be_empty_error(field, field.name, "default")
def _bind_field(ctxt, parsed_spec, field):
# type: (errors.ParserContext, syntax.IDLSpec, syntax.Field) -> ast.Field
"""
Bind a field from the idl.syntax tree.
- Create the idl.ast version from the idl.syntax tree.
- Validate the resulting type is correct.
"""
ast_field = ast.Field(field.file_name, field.line, field.column)
ast_field.name = field.name
ast_field.description = field.description
ast_field.optional = field.optional
ast_field.cpp_name = field.name
if field.cpp_name:
ast_field.cpp_name = field.cpp_name
# Validate naming restrictions
if ast_field.name.startswith("array<"):
ctxt.add_array_not_valid_error(ast_field, "field", ast_field.name)
if field.ignore:
ast_field.ignore = field.ignore
_validate_ignored_field(ctxt, field)
return ast_field
(struct, idltype) = parsed_spec.symbols.resolve_field_type(ctxt, field)
if not struct and not idltype:
return None
# If the field type is an array, mark the AST version as such.
if syntax.parse_array_type(field.type):
ast_field.array = True
if field.default or (idltype and idltype.default):
ctxt.add_array_no_default(field, field.name)
# Copy over only the needed information if this a struct or a type
if struct:
ast_field.struct_type = struct.name
ast_field.bson_serialization_type = ["object"]
_validate_field_of_type_struct(ctxt, field)
else:
# Produce the union of type information for the type and this field.
# Copy over the type fields first
ast_field.cpp_type = idltype.cpp_type
ast_field.bson_serialization_type = idltype.bson_serialization_type
ast_field.bindata_subtype = idltype.bindata_subtype
ast_field.serializer = idltype.serializer
ast_field.deserializer = idltype.deserializer
ast_field.default = idltype.default
if field.default:
ast_field.default = field.default
# Validate merged type
_validate_type_properties(ctxt, ast_field, "field")
return ast_field
def _bind_globals(parsed_spec):
# type: (syntax.IDLSpec) -> ast.Global
"""Bind the globals object from the idl.syntax tree into the idl.ast tree by doing a deep copy."""
if parsed_spec.globals:
ast_global = ast.Global(parsed_spec.globals.file_name, parsed_spec.globals.line,
parsed_spec.globals.column)
ast_global.cpp_namespace = parsed_spec.globals.cpp_namespace
ast_global.cpp_includes = parsed_spec.globals.cpp_includes
else:
ast_global = ast.Global("<implicit>", 0, 0)
        # If no namespace has been set, default it to "mongo"
ast_global.cpp_namespace = "mongo"
return ast_global
def bind(parsed_spec):
# type: (syntax.IDLSpec) -> ast.IDLBoundSpec
"""Read an idl.syntax, create an idl.ast tree, and validate the final IDL Specification."""
ctxt = errors.ParserContext("unknown", errors.ParserErrorCollection())
bound_spec = ast.IDLAST()
bound_spec.globals = _bind_globals(parsed_spec)
_validate_types(ctxt, parsed_spec)
for struct in parsed_spec.symbols.structs:
if not struct.imported:
bound_spec.structs.append(_bind_struct(ctxt, parsed_spec, struct))
if ctxt.errors.has_errors():
return ast.IDLBoundSpec(None, ctxt.errors)
else:
return ast.IDLBoundSpec(bound_spec, None)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 16:20:45 2019
@author: Sturla
"""
import numpy as np
import matplotlib.pyplot as plt
days = 59  # Days between Jan 1 and Mar 1
lambd = 1.5  # The rate parameter of the Poisson process
def sim_poisson():
'''Simulates the poisson process'''
process_time_steps = []
value = 0
for i in range(days):
value += np.random.poisson(lambd)
process_time_steps.append(value)
return np.array(process_time_steps)
#Simulate the poisson process 10 times
for i in range(10):
plt.plot(sim_poisson())
plt.title("10 realizations of the Poisson Process", fontdict={'fontname': 'Times New Roman', 'fontsize': 21}, y=1.03)
plt.ylabel('Number of claims')
plt.xlabel('Time (days)')
plt.ylim(0)
plt.xlim(0)
plt.show()
def count_large():
'''
Counts how many times the process reaches above 100 in 1000 iterations,
    and returns the fraction of iterations in which this happens.
'''
count = 0
for i in range(1000):
sim = sim_poisson()
sim_max = sim[-1]
if sim_max > 100:
count += 1
return float(count)/1000
print("The precentage of iterations larger than 100 is: " + str(count_large()))
def expected_claim_and_var():
    '''Returns the estimated expected value and variance of the total claim amount'''
realization = []
for i in range(1000):
time_steps = sim_poisson()
last_time_step = time_steps[-1]
Z = 0
for j in range(last_time_step):
C = np.random.exponential(1/10)
Z += C
realization.append(Z)
return np.average(realization), np.var(realization)
expected_claims_value, estimated_variance = expected_claim_and_var()
print("The expected value of the claims is: " + str(expected_claims_value))
print("The estimated variance of the claims is: " + str(estimated_variance))
|
# Generated by Django 2.2.8 on 2019-12-14 06:50
from django.db import migrations
from ontask import models
def change_action_types(apps, schema_editor):
"""Change the action type for some of the actions."""
Old_Action = apps.get_model('ontask', 'Action')
for action in Old_Action.objects.all():
if action.action_type == 'send_list':
action.action_type = models.Action.EMAIL_LIST
action.save()
continue
if action.action_type == 'send_list_json':
action.action_type = models.Action.JSON_LIST
action.save()
def change_log_types(apps, schema_editor):
"""Change the log type for some of the logs"""
Old_Log = apps.get_model('ontask', 'Log')
for log_item in Old_Log.objects.all():
if log_item.name == 'action_run_list':
log_item.name = models.Log.ACTION_RUN_JSON_LIST
log_item.save()
continue
if log_item.name == 'action_runjson':
log_item.name = models.Log.ACTION_RUN_PERSONALIZED_JSON
log_item.save()
continue
if log_item.name == 'action_run_send_list':
log_item.name = models.Log.ACTION_RUN_EMAIL_LIST
log_item.save()
class Migration(migrations.Migration):
dependencies = [
('ontask', '0015_auto_20191214_1720'),
]
operations = [
migrations.RunPython(code=change_action_types),
migrations.RunPython(code=change_log_types),
]
|
#!/usr/bin/env python3
from wsgiref.simple_server import make_server
def page(content, *args):
yield b'<html><head><title>wsgi_example.py</title></head><body>'
yield (content % args).encode('utf-8')
yield b'</body></html>'
def application(environ, start_response):
if environ['PATH_INFO'] == '/':
response = "<p>This is my web page built with python wsgi</p>"
start_response('200 OK', [('Content-Type', 'text/html; charset=utf-8')])
return page(response)
elif environ['PATH_INFO'] == '/operation':
print('environ["QUERY_STRING"]:',environ["QUERY_STRING"])
params = environ["QUERY_STRING"].split("&")
print('Parameters ',params)
operator1 = params[0].split("=")[1]
print('Operator 1:',operator1)
operator2 = params[1].split("=")[1]
print('Operator 2:',operator2)
operation = params[2].split("=")[1]
print('Operation:',operation)
result = str(eval(operator1+operation+operator2))
print('Result:',result)
response = "<p>The operation result is %s</p>" %result
start_response('200 OK', [('Content-Type', 'text/html; charset=utf-8')])
return page(response)
else:
response = "<p>This URL is not valid</p>"
start_response('404 Not Found', [('Content-Type', 'text/html; charset=utf-8')])
return page(response)
if __name__ == '__main__':
print('Listening on localhost:8080')
server = make_server('localhost', 8080, application)
server.serve_forever()
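# Added usage sketch (not part of the original script): querying the /operation
# endpoint of the server above. Note the query string is used verbatim (no URL
# decoding), so the operator should be a character that is legal as-is in a URL.
def example_operation_request(host='localhost', port=8080):
    from urllib.request import urlopen
    url = 'http://%s:%d/operation?operator1=2&operator2=3&operation=*' % (host, port)
    return urlopen(url).read().decode('utf-8')  # contains "The operation result is 6"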
|
load("@rules_foreign_cc//tools/build_defs/shell_toolchain/toolchains:toolchain_mappings.bzl", "ToolchainMapping")
ADD_TOOLCHAIN_MAPPINGS = [
ToolchainMapping(
exec_compatible_with = [
"@rules_foreign_cc_toolchains_examples//:fancy_constraint_value",
],
file = "@rules_foreign_cc_toolchains_examples//:fancy_platform_commands.bzl",
),
]
|
import json, subprocess
from .. pyaz_utils import get_cli_name, get_params
def create(resource_group, name, publisher_email, publisher_name, sku_name=None, sku_capacity=None, virtual_network=None, enable_managed_identity=None, enable_client_certificate=None, location=None, tags=None, no_wait=None):
params = get_params(locals())
command = "az apim create " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def show(resource_group, name):
params = get_params(locals())
command = "az apim show " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def list(resource_group=None):
params = get_params(locals())
command = "az apim list " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def delete(resource_group, name, yes=None, no_wait=None):
params = get_params(locals())
command = "az apim delete " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def update(resource_group, name, publisher_email=None, sku_name=None, sku_capacity=None, virtual_network=None, publisher_name=None, enable_managed_identity=None, enable_client_certificate=None, tags=None, set=None, add=None, remove=None, force_string=None, no_wait=None):
params = get_params(locals())
command = "az apim update " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def check_name(name):
params = get_params(locals())
command = "az apim check-name " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def backup(resource_group, name, backup_name, storage_account_name, storage_account_container, storage_account_key, no_wait=None):
params = get_params(locals())
command = "az apim backup " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def restore(resource_group, name, backup_name, storage_account_name, storage_account_container, storage_account_key, no_wait=None):
params = get_params(locals())
command = "az apim restore " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        print(stdout)
        return json.loads(stdout)
    else:
        print(stderr)
        raise Exception(stderr)
def apply_network_updates(resource_group, name, location=None, no_wait=None):
params = get_params(locals())
command = "az apim apply-network-updates " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        print(stdout)
        return json.loads(stdout)
    else:
        print(stderr)
        raise Exception(stderr)
def wait(resource_group, name, timeout=None, interval=None, deleted=None, created=None, updated=None, exists=None, custom=None):
params = get_params(locals())
command = "az apim wait " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        print(stdout)
        return json.loads(stdout)
    else:
        print(stderr)
        raise Exception(stderr)
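# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# wrappers). Each wrapper shells out to the Azure CLI and returns the parsed
# JSON on success, so callers are expected to use them roughly as below.
# `get_params` is assumed to turn the non-None locals into `--flag value`
# pairs, and the resource names are placeholders.
#
#   availability = check_name(name="example-apim")
#   print(availability)
#   service = create(
#       resource_group="example-rg",
#       name="example-apim",
#       publisher_email="admin@example.com",
#       publisher_name="Example Corp",
#   )
#   print(service.get("provisioningState"))
# ---------------------------------------------------------------------------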
|
import sys
from .antlr import EOF, CommonToken as Tok, TokenStream, TokenStreamException
import struct
from . import ExcelFormulaParser
from re import compile as recompile, match, LOCALE, UNICODE, IGNORECASE, VERBOSE
int_const_pattern = r"\d+\b"
flt_const_pattern = r"""
(?:
(?: \d* \. \d+ ) # .1 .12 .123 etc 9.1 etc 98.1 etc
|
(?: \d+ \. ) # 1. 12. 123. etc
)
# followed by optional exponent part
(?: [Ee] [+-]? \d+ ) ?
"""
str_const_pattern = r'"(?:[^"]|"")*"'
#range2d_pattern = recompile(r"\$?[A-I]?[A-Z]\$?\d+:\$?[A-I]?[A-Z]\$?\d+"
ref2d_r1c1_pattern = r"[Rr]0*[1-9][0-9]*[Cc]0*[1-9][0-9]*"
ref2d_pattern = r"\$?[A-I]?[A-Z]\$?0*[1-9][0-9]*"
true_pattern = r"TRUE\b"
false_pattern = r"FALSE\b"
if_pattern = r"IF\b"
choose_pattern = r"CHOOSE\b"
name_pattern = r"\w[\.\w]*"
quotename_pattern = r"'(?:[^']|'')*'" #### It's essential that this bracket be non-grouping.
ne_pattern = r"<>"
ge_pattern = r">="
le_pattern = r"<="
pattern_type_tuples = (
(flt_const_pattern, ExcelFormulaParser.NUM_CONST),
(int_const_pattern, ExcelFormulaParser.INT_CONST),
(str_const_pattern, ExcelFormulaParser.STR_CONST),
# (range2d_pattern , ExcelFormulaParser.RANGE2D),
(ref2d_r1c1_pattern, ExcelFormulaParser.REF2D_R1C1),
(ref2d_pattern , ExcelFormulaParser.REF2D),
(true_pattern , ExcelFormulaParser.TRUE_CONST),
(false_pattern , ExcelFormulaParser.FALSE_CONST),
(if_pattern , ExcelFormulaParser.FUNC_IF),
(choose_pattern , ExcelFormulaParser.FUNC_CHOOSE),
(name_pattern , ExcelFormulaParser.NAME),
(quotename_pattern, ExcelFormulaParser.QUOTENAME),
(ne_pattern, ExcelFormulaParser.NE),
(ge_pattern, ExcelFormulaParser.GE),
(le_pattern, ExcelFormulaParser.LE),
)
_re = recompile(
'(' + ')|('.join([i[0] for i in pattern_type_tuples]) + ')',
    VERBOSE + IGNORECASE)  # re.LOCALE cannot be combined with str patterns on Python 3
_toktype = [None] + [i[1] for i in pattern_type_tuples]
# need dummy at start because re.MatchObject.lastindex counts from 1
single_char_lookup = {
'=': ExcelFormulaParser.EQ,
'<': ExcelFormulaParser.LT,
'>': ExcelFormulaParser.GT,
'+': ExcelFormulaParser.ADD,
'-': ExcelFormulaParser.SUB,
'*': ExcelFormulaParser.MUL,
'/': ExcelFormulaParser.DIV,
':': ExcelFormulaParser.COLON,
';': ExcelFormulaParser.SEMICOLON,
',': ExcelFormulaParser.COMMA,
'(': ExcelFormulaParser.LP,
')': ExcelFormulaParser.RP,
'&': ExcelFormulaParser.CONCAT,
'%': ExcelFormulaParser.PERCENT,
'^': ExcelFormulaParser.POWER,
'!': ExcelFormulaParser.BANG,
}
class Lexer(TokenStream):
def __init__(self, text):
self._text = text[:]
self._pos = 0
self._line = 0
def isEOF(self):
return len(self._text) <= self._pos
def curr_ch(self):
return self._text[self._pos]
def next_ch(self, n = 1):
self._pos += n
def is_whitespace(self):
return self.curr_ch() in " \t\n\r\f\v"
def match_pattern(self):
m = _re.match(self._text, self._pos)
if not m:
return None
self._pos = m.end(0)
return Tok(type = _toktype[m.lastindex], text = m.group(0), col = m.start(0) + 1)
def nextToken(self):
# skip whitespace
while not self.isEOF() and self.is_whitespace():
self.next_ch()
if self.isEOF():
return Tok(type = EOF)
# first, try to match token with 2 or more chars
t = self.match_pattern()
if t:
return t
# second, we want 1-char tokens
te = self.curr_ch()
try:
ty = single_char_lookup[te]
except KeyError:
raise TokenStreamException(
"Unexpected char %r in column %u." % (self.curr_ch(), self._pos))
self.next_ch()
return Tok(type=ty, text=te, col=self._pos)
if __name__ == '__main__':
try:
for t in Lexer(""" 1.23 456 "abcd" R2C2 a1 iv65536 true false if choose a_name 'qname' <> >= <= """):
print(t)
except TokenStreamException as e:
print("error:", e)
|
# 202101 - Daniel Meier
import uuid
import requests
import json
import argparse
import logging
import ipaddress
import os
from datetime import datetime
#################################################################################################
# set args #
#################################################################################################
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--outfile', help='Target file name (ie. proxy.pac')
args = parser.parse_args()
#################################################################################################
params={}
pacdata={}
if not args.outfile:
filename=str(datetime.now()).replace(' ','-').replace('.','_')+'.pac'
else:
filename=args.outfile
def fun_setup():
try:
with open('params.json', 'r') as paramsfile:
json.load(paramsfile)
except:
print("parameter file not found or invalid, please define! Downloading template, please adjust!")
with open("params.json", "w") as paramsfile:
tmpl_url = 'https://raw.githubusercontent.com/leinadred/Py-PACBuilder/main/params.json_tmpl'
r = requests.get(tmpl_url, verify=False)
with open("params.json_tmpl","w") as file:
file.write(r.content)
raise ValueError("Downloaded file 'params.json_tmpl' - now you can set your preferencecs.")
else:
print("file found, using it!")
with open("params.json","r") as paramsjson:
params = json.load(paramsjson)
#print(dir(params))
#print(params)
if params["general"]["v_debug"]=='True':
logging.basicConfig(level=logging.DEBUG)
logging.debug("################## Starting - With extended Logging ##################")
logging.debug("################## Setup Done, downloading Feeds ##################")
return params
def fun_downloadfeeds():
params = fun_setup()
n=1
for feeds in params["feeds"]:
print("Feed to work on: "+str(params["feeds"][feeds]["feed_name"])+"...")
logging.debug("################## Downloading from "+str(params["feeds"][feeds]["feed_url"])+" ##################")
#feedcontent = str("feedcontent_"+str(n))
if params["feeds"][feeds]["feed_uuid"]=="True":
feedcontent = requests.get(params["feeds"][feeds]["feed_url"]+params["feeds"][feeds]["feed_guid"], verify=False)
else:
feedcontent = requests.get(params["feeds"][feeds]["feed_url"], verify=False)
feedname=str(params["feeds"][feeds]["feed_name"])
pacdata[feedname]={}
try:
            feedcontent.raise_for_status()
except:
print("Download failed or file could not be parsed! (is it json formatted?)")
raise(SystemExit)
else:
#pacdata[feedname]['ServiceArea']={}
n+=1
fun_extractfromfeed(feedname,feedcontent,params,feeds)
fun_pacbuilding(pacdata,params,filename)
with open (filename,'a+') as file:
file.write('else return "{0}";\n'.format(params['actions'][params['default']['act_todo']]))
file.write(" }")
def fun_extractfromfeed(feedname,feedcontent,params,feeds):
cnt_url=1
cnt_ip4=1
cnt_ip6=1
if params['feeds'][feeds]['feed_format']=='json':
for feedcont_entry in feedcontent.json():
pacdata[feedname]['action']={}
pacdata[feedname]['action']=params["feeds"][feeds]["pac_action"]["act_todo"]
pacdata[feedname][feedcont_entry['serviceArea']]={}
pacdata[feedname][feedcont_entry['serviceArea']]['urls']={}
pacdata[feedname][feedcont_entry['serviceArea']]['ips']={}
pacdata[feedname][feedcont_entry['serviceArea']]['ips']['v4']={}
pacdata[feedname][feedcont_entry['serviceArea']]['ips']['v6']={}
for feedcont_entry in feedcontent.json():
if feedcont_entry['required']==False and not params["feeds"][feeds]['feed_option'] == 'all':
print("Not Required: "+str(pacdata[feedname][feedcont_entry['serviceArea']]))
else:
for fe_key in feedcont_entry.keys():
if fe_key == 'urls':
pacdata[feedname][feedcont_entry['serviceArea']]['urls'][cnt_url]={}
for entry in feedcont_entry['urls']:
pacdata[feedname][feedcont_entry['serviceArea']]['urls'][cnt_url]=entry
cnt_url=cnt_url+1
if fe_key == 'ips':
#pacdata[feedname]['ips'][cnt_ip]={}
for entry in feedcont_entry['ips']:
try:
ipaddress.IPv4Network(entry)
except:
try:
ipaddress.IPv6Network(entry)
except:
print("something weird with address "+entry)
else:
pacdata[feedname][feedcont_entry['serviceArea']]['ips']['v6'][cnt_ip6]=entry
cnt_ip6=cnt_ip6+1
else:
pacdata[feedname][feedcont_entry['serviceArea']]['ips']['v4'][cnt_ip4]=entry
cnt_ip4=cnt_ip4+1
elif params['feeds'][feeds]['feed_format']=='wsa-csv':
cnt_site=1
cnt_ip4=1
cnt_ip6=1
feedcont_entry='from-wsa-csv'
pacdata[feedname]['action']={}
pacdata[feedname]['action']=params["feeds"][feeds]["pac_action"]["act_todo"]
pacdata[feedname][feedcont_entry]={}
pacdata[feedname][feedcont_entry]['ips']={}
pacdata[feedname][feedcont_entry]['urls']={}
pacdata[feedname][feedcont_entry]['ips']['v4']={}
pacdata[feedname][feedcont_entry]['ips']['v6']={}
pacdata[feedname][feedcont_entry]['action']=params['feeds'][feeds]['pac_action']['act_todo']
for destination in feedcontent.text.splitlines():
if destination.split(',')[0][0]=='.':
pacdata[feedname][feedcont_entry]['urls'][cnt_site]=destination.split(',')[0].replace('.','*.',1)
else:
pacdata[feedname][feedcont_entry]['urls'][cnt_site]=destination.split(',')[0]
cnt_site+=1
pass
elif params['feeds'][feeds]['feed_format']=='csv':
cnt_site=1
cnt_ip4=1
cnt_ip6=1
feedcont_entry='csv'
pacdata[feedname]['action']={}
pacdata[feedname]['action']=params["feeds"][feeds]["pac_action"]["act_todo"]
pacdata[feedname][feedcont_entry]={}
pacdata[feedname][feedcont_entry]['urls']={}
pacdata[feedname][feedcont_entry]['ips']={}
pacdata[feedname][feedcont_entry]['ips']['v4']={}
pacdata[feedname][feedcont_entry]['ips']['v6']={}
pacdata[feedname][feedcont_entry]['action']=params['feeds'][feeds]['pac_action']['act_todo']
for destination in feedcontent.text.split(','):
if destination=='':print('found empty value, ignoring')
else:
try:
ipaddress.ip_network(destination)
except:
#domainname
if destination[0]=='.':
pacdata[feedname][feedcont_entry]['urls'][cnt_site]=destination.replace('.','*.',1)
else:
pacdata[feedname][feedcont_entry]['urls'][cnt_site]=destination
cnt_site+=1
else:
try:
ipaddress.IPv4Network(destination)
except:
try:
ipaddress.IPv6Network(destination)
except:
print("something is wrong with {0}".format(destination))
else:
                            pacdata[feedname][feedcont_entry]['ips']['v6'][cnt_ip6]=destination
                            cnt_ip6+=1
                    else:
                        pacdata[feedname][feedcont_entry]['ips']['v4'][cnt_ip4]=destination
                        cnt_ip4+=1
    elif params['feeds'][feeds]['feed_format']=='plain':
pass # FUTURE Build, Plain text or other formats
return pacdata
def fun_pacbuilding(pacdata,params,filename):
#filename=str(datetime.now()).replace(' ','-').replace('.','_')+'.pac'
try:
with open (filename,'r'):
print('file exists')
except:
with open(filename, 'w') as file:
            pass  # opening with 'w' is enough to create the empty file
else:
filename=str(datetime.now()).replace(' ','-').replace('.','_')+'_1.pac'
print('generating new file')
with open (filename,'w') as file:
            pass  # opening with 'w' is enough to create the empty file
print('pacfile all is written to: '+filename)
with open (filename,'a+') as file:
file.write("function FindProxyForURL(url, host) {\n")
#DEBUG
#print(pacdata)
line=''
pacline={}
isfirst=True
for feedname in list(pacdata.keys()): #Feeds
if line =='' and isfirst==True:
line+='if ('
isfirst=False
else:
line+='else if('
for feedservice in list(pacdata[feedname].keys()): #Parts of Feeds, like "Skype","Exchange"
if not feedservice == 'action':
for feeditemtype in pacdata[feedname][feedservice]: # type - URLs/IP
if feeditemtype=='urls':
for feeditem in pacdata[feedname][feedservice][feeditemtype]:
if line =='' and isfirst==True:
line+='if ('
isfirst=False
pacline={}
pacline['targetline']={}
elif line =='':
line+='else if('
pacline['targetline']={}
if pacdata[feedname][feedservice][feeditemtype][feeditem].startswith('*.'):
line='if (shExpMatch(host, "{0}") ||\n'.format(pacdata[feedname][feedservice][feeditemtype][feeditem])
else:
line='if (dnsDomainIs(host, "{0}") ||\n'.format(pacdata[feedname][feedservice][feeditemtype][feeditem])
else:
if pacdata[feedname][feedservice][feeditemtype][feeditem].startswith('*.'):
if feeditem==1:
line+='shExpMatch(host, "{0}") ||\n'.format(pacdata[feedname][feedservice][feeditemtype][feeditem])
else:
line+='\tshExpMatch(host, "{0}") ||\n'.format(pacdata[feedname][feedservice][feeditemtype][feeditem])
else:
line+='\tdnsDomainIs(host, "{0}") ||\n'.format(pacdata[feedname][feedservice][feeditemtype][feeditem])
if feeditemtype=='ips':
for ipver in pacdata[feedname][feedservice][feeditemtype]:
for feeditem in pacdata[feedname][feedservice][feeditemtype][ipver]:
if line =='' and isfirst==True:
line+='if ('
pacline={}
pacline['targetline']={}
elif line =='':
line+='else if (isInNet(host, "{0}", "{1}") || isInNet(dnsResolve(host), "{0}", "{1}") ||\n'.format(str(ipaddress.ip_network(pacdata[feedname][feedservice][feeditemtype][ipver][feeditem]).network_address), str(ipaddress.ip_network(pacdata[feedname][feedservice][feeditemtype][ipver][feeditem]).netmask))
else:
line+='\tisInNet(host, "{0}", "{1}") || isInNet(dnsResolve(host), "{0}", "{1}") ||\n'.format(str(ipaddress.ip_network(pacdata[feedname][feedservice][feeditemtype][ipver][feeditem]).network_address), str(ipaddress.ip_network(pacdata[feedname][feedservice][feeditemtype][ipver][feeditem]).netmask))
line=line[:-3]+')\n\t\t\treturn "{0}";\n'.format(params['actions'][pacdata[feedname]['action']])
pacline['targetline']=line
with open (filename,'a+') as file:
file.write(pacline['targetline'])
line=''
pacline.clear()
for content in list(params['static'].keys()):
if content.startswith('domains_'):
for domainitem in params['static'][content]['destdomains']['names'].split(','):
if line =='' and isfirst==True:
line+='if ('
isfirst=False
elif line =='':
line+='else if('
if domainitem.startswith('*.'):
line+='shExpMatch(host, "{0}") ||\n'.format(domainitem).replace(' ','')
else:
line+='dnsDomainIs(host, "{0}") ||\n'.format(domainitem).replace(' ','')
else:
if domainitem.startswith('*.'):
line+='\tshExpMatch(host, "{0}") ||\n'.format(domainitem).replace(' ','')
else:
line+='\tdnsDomainIs(host, "{0}") ||\n'.format(domainitem).replace(' ','')
line=line[:-3]+')\n\t\t\treturn "{0}";\n'.format(params['actions'][params['static'][content]['pac_action']['act_todo']])
pacline['targetline']=line
with open (filename,'a+') as file:
file.write(pacline['targetline'])
pacline.clear()
line=''
elif content.startswith('ipscopes_'):
for ipver in params['static'][content]:
if not ipver =='pac_action':
for ipitem in params['static'][content][ipver]['networks'].split(','):
if not ipitem=="":
if line =='' and isfirst==True:
line+='if ('
isfirst=False
elif line =='':
line+='else if('
try:
str(ipaddress.ip_network(str([ipitem]).replace('[','').replace(']','').replace('\'','')))
except:
print("ERROR with network "+ipitem)
raise(SystemExit)
line+='isInNet(host, "{0}", "{1}") || isInNet(dnsResolve(host), "{0}", "{1}") ||\n'.format(str(ipaddress.ip_network(str([ipitem]).replace('[','').replace(']','').replace('\'','')).network_address), str(ipaddress.ip_network(str([ipitem]).replace('[','').replace(']','').replace('\'','')).netmask))
else:
line+='\tisInNet(host, "{0}", "{1}") || isInNet(dnsResolve(host), "{0}", "{1}") ||\n'.format(str(ipaddress.ip_network(str([ipitem]).replace('[','').replace(']','').replace('\'','')).network_address), str(ipaddress.ip_network(str([ipitem]).replace('[','').replace(']','').replace('\'','')).netmask))
line=line[:-3]+')\n\t\t\treturn "{0}";\n'.format(params['actions'][params['static'][content]['pac_action']['act_todo']])
pacline['targetline']=line
with open (filename,'a+') as file:
file.write(pacline['targetline'])
pacline.clear()
line=''
if __name__ == "__main__":
fun_downloadfeeds()
|
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
import discord
from discord.ext import commands
from discord_slash import cog_ext
from discord_slash import SlashCommand
from discord_slash import SlashContext
from decimal import Decimal
import time
import user_db
import config
import utility
class Claim(commands.Cog):
def __init__(self, bot):
if not hasattr(bot, "slash"):
bot.slash = SlashCommand(bot, override_type=True, auto_register=True, auto_delete=True)
self.bot = bot
self.bot.slash.get_cog_commands(self)
def cog_unload(self):
self.bot.slash.remove_cog_commands(self)
async def _make_claim(self,ctx):
client = AuthServiceProxy(f'http://{config.rpc_user}:{config.rpc_password}@{config.ip}:{config.rpc_port}')
claim = user_db.can_claim(ctx.author.id)
if claim[0]:
if client.getbalance(config.faucet_wallet, config.confirm) > config.faucet:
user_db.update_claim(ctx.author.id)
client.move(config.faucet_wallet, str(ctx.author.id), float(config.faucet))
embed = await utility.make_embed(ctx,self.bot, title=":tada: Congratulation :tada:", color=0x4b8b3b)
embed.add_field(name=f'You got {config.faucet} {config.currency}', value=f'Your balance is now {utility.moneyfmt(client.getbalance(str(ctx.author.id), config.confirm))} {config.currency}')
return embed
else:
return await utility.make_embed(ctx,self.bot, title="Not enough funds", color=0xd0312d)
else:
to_wait = (claim[1] + config.faucet_time) - int(time.time())
            return await utility.make_embed(ctx,self.bot, title=f'You have to wait {int(to_wait / 3600):02}:{int((to_wait % 3600) / 60):02}:{int(to_wait % 60):02}', color=0xd0312d)
@commands.command()
async def claim(self,ctx):
embed = await self._make_claim(ctx)
await ctx.channel.send(embed=embed)
@cog_ext.cog_slash(name="claim", description=f'Claim some Free {config.currency}', guild_ids=config.guilds)
async def claim_slash(self, ctx: SlashContext):
ctx.author = await self.bot.fetch_user(ctx.author)
embed = await self._make_claim(ctx)
await ctx.send(embeds=[embed])
def setup(bot):
bot.add_cog(Claim(bot))
|
from django.core.management.base import BaseCommand
from shost import setup
class Command(BaseCommand):
help = 'Setup the app'
def handle(self, *args, **options):
self.stdout.write('Starting setup...')
setup.setup()
self.stdout.write(self.style.SUCCESS('Setup successful!'))
|
from unittest.mock import MagicMock, PropertyMock
import pytest
from aioddd import (
BadRequestError,
BaseError,
ConflictError,
NotFoundError,
UnauthorizedError,
UnknownError,
)
from aioddd.testing import sanitize_objects
from orjson import loads
from pydantic import BaseModel, ValidationError
from pydantic.error_wrappers import ErrorWrapper
from starlette.datastructures import URL
from starlette.exceptions import HTTPException
from project.apps.api.middleware.exception_handler import exception_handler
@pytest.mark.parametrize(
'code,status,err',
[
('unknown', 500, Exception()),
('unauthorized', 401, HTTPException(401)),
('not_found', 404, HTTPException(404)),
('invalid_request_validation', 400, ValidationError([ErrorWrapper(Exception(), '')], BaseModel)),
('code', 500, BaseError.create()),
('unauthorized', 401, UnauthorizedError.create()),
('not_found', 404, NotFoundError.create()),
('conflict', 409, ConflictError.create()),
('bad_request', 400, BadRequestError.create()),
('unknown', 500, UnknownError.create()),
],
)
def test_exception_handler(code: str, status: int, err: Exception) -> None:
mock_request = MagicMock()
type(mock_request).url = PropertyMock(return_value=URL('/'))
res = exception_handler(mock_request, err, False)
expected = {'code': code, 'status': str(status)}
assert res.status_code == status
actual = loads(res.body)
assert 'errors' in actual and len(actual['errors']) == 1
actual = sanitize_objects(expected, actual['errors'][0])
assert actual == expected, f'"Actual: {actual}, Expected: {expected}'
|
#
# Copyright (c) 2019-2020 Mike's Pub, see https://github.com/mikespub-org
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
import json
import os.path
# from btfs import sessions
# from btfs.auth import AuthorizedUser
from .model import Chunk, Dir, File, Path
PAGE_SIZE = 10
KNOWN_MODELS = {
"Path": Path,
"Dir": Dir,
"File": File,
"Chunk": Chunk,
# "AuthorizedUser": AuthorizedUser,
# "AuthSession": sessions.AuthSession,
}
LIST_CONFIG = {}
config_file = os.path.join(os.path.dirname(__file__), "config.json")
# with open(config_file, "w") as fp:
# json.dump(LIST_CONFIG, fp, indent=2)
with open(config_file) as fp:
LIST_CONFIG = json.load(fp)
for kind in LIST_CONFIG:
truncate_list = LIST_CONFIG[kind].get("truncate", [])
truncate_list.extend(LIST_CONFIG[kind].get("pickled", []))
truncate_list.extend(LIST_CONFIG[kind].get("image", []))
if len(truncate_list) > 0:
LIST_CONFIG[kind]["truncate_list"] = truncate_list
def get_list_config(kind, what, default=[]):
# if "/" in kind:
# kind = kind.split("/")[-1]
if kind in LIST_CONFIG:
return LIST_CONFIG[kind].get(what, default)
return default
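# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration): `get_list_config` is a plain
# lookup into config.json with a fallback, so unknown kinds or options simply
# return the supplied default. "File" and the option names shown are
# assumptions about what config.json may contain.
#
#   truncated = get_list_config("File", "truncate_list", default=[])
#   pickled = get_list_config("File", "pickled", default=[])
#   print("File fields truncated in listings:", truncated)
#   print("File fields stored pickled:", pickled)
# ---------------------------------------------------------------------------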
|
# import modules
from math import radians, cos, sin, asin, sqrt
import urllib.request
import urllib
import json
import xml.etree.ElementTree as ET
from operator import itemgetter
import requests
# create class with station coordinates/status, and methods to
# find nearest bikes and docks
class LoadStations:
""" class loads stations with geo coordinates
"""
def __init__(self, latitude, longitude):
self.latitude = latitude
self.longitude = longitude
# converts latitude/longitude into country/city name using
# Google Maps API
url = "http://maps.googleapis.com/maps/api/geocode/json?"
url += "latlng=%s,%s&sensor=false" % (latitude, longitude)
v = urllib.request.urlopen(url).read()
j = json.loads(v)
try:
components = j['results'][0]['address_components']
except:
components = []
country = town = None
for c in components:
if "country" in c['types']:
country = c['long_name']
if "locality" in c['types']:
town = c['long_name']
# query for station locations and status if country/city is
# United States/Washington area
if country == 'United States' and town in ['Washington','Arlington','Alexandria']:
xml_path = 'https://www.capitalbikeshare.com/data/stations/' \
'bikeStations.xml'
tree = ET.parse(urllib.request.urlopen(xml_path))
root = tree.getroot()
station_location = dict()
station_status = dict()
for child in root:
station_location[child[1].text] = \
{'Latitude': float(child[4].text),
'Longitude': float(child[5].text)}
station_status[child[1].text] = [child[12].text, child[13].text]
self.station_location = station_location
self.station_status = station_status
# query for station locations and status if country/city is
# United States/New York
elif country == 'United States' and town in 'New York':
station_location = dict()
station_status = dict()
url = 'https://www.citibikenyc.com/stations/json'
v = urllib.request.urlopen(url).read()
j = json.loads(v)
for i in j['stationBeanList']:
station_location[i['stationName']] = \
{'Latitude': i['latitude'],
'Longitude': i['longitude']}
station_status[i['stationName']] = \
[i['availableBikes'], i['availableDocks']]
self.station_location = station_location
self.station_status = station_status
# query for station locations and status if country/city is
# United States/Chicago
elif country == 'United States' and town in 'Chicago':
station_location = dict()
station_status = dict()
url = 'https://www.divvybikes.com/stations/json'
v = urllib.request.urlopen(url).read()
j = json.loads(v)
for i in j['stationBeanList']:
station_location[i['stationName']] = \
{'Latitude': i['latitude'], 'Longitude': i['longitude']}
station_status[i['stationName']] = \
[i['availableBikes'], i['availableDocks']]
self.station_location = station_location
self.station_status = station_status
# search for station locations and status if
# country/city is Canada/Toronto
elif country == 'Canada' and town in 'Toronto':
station_location = dict()
station_status = dict()
url = 'http://www.bikesharetoronto.com/stations/json'
v = urllib.request.urlopen(url).read()
j = json.loads(v)
for i in j['stationBeanList']:
station_location[i['stationName']] = \
{'Latitude': i['latitude'], 'Longitude': i['longitude']}
station_status[i['stationName']] = \
[i['availableBikes'], i['availableDocks']]
self.station_location = station_location
self.station_status = station_status
# query for station locations and status if
# country/city is United States/San Francisco Bay area
elif country == 'United States' \
and town in ['San Francisco','Palo Alto', 'Redwood City', 'Mountain View', 'San Jose']:
station_location = dict()
station_status = dict()
url = 'http://www.bayareabikeshare.com/stations/json'
v = urllib.request.urlopen(url).read()
j = json.loads(v)
for i in j['stationBeanList']:
station_location[i['stationName']] = \
{'Latitude': i['latitude'], 'Longitude': i['longitude']}
station_status[i['stationName']] = \
[i['availableBikes'], i['availableDocks']]
self.station_location = station_location
self.station_status = station_status
elif country == 'United States' and town in 'Philadelphia':
station_location = dict()
station_status = dict()
url = 'https://www.rideindego.com/stations/json/'
class MyOpener(urllib.request.FancyURLopener):
version = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; ' \
'rv:1.8.1.11)Gecko/20071127 Firefox/2.0.0.11'
myopener = MyOpener()
v = myopener.open(url)
j = json.load(v)
for i in j['features']:
station_location[i['properties']['name']] = \
{'Latitude': float(i['geometry']['coordinates'][1]),
'Longitude': float(i['geometry']['coordinates'][0])}
station_status[i['properties']['name']] = \
[i['properties']['bikesAvailable'],
i['properties']['docksAvailable']]
self.station_location = station_location
self.station_status = station_status
# query for station locations and status if
# country/city is Canada/Montreal
elif country == 'Canada' and town in 'Montréal':
xml_path = 'http://montreal.bixi.com/data/bikeStations.xml'
tree = ET.parse(urllib.request.urlopen(xml_path))
root = tree.getroot()
station_location = dict()
station_status = dict()
for child in root:
station_location[child[1].text] = \
{'Latitude': float(child[4].text),
                     'Longitude': float(child[5].text)}
station_status[child[1].text] = \
[child[12].text, child[13].text]
self.station_location = station_location
self.station_status = station_status
# query for station locations and status if
# country/city is United Kingdom/London
elif country == 'United Kingdom' and town in 'London':
xml_path = 'https://tfl.gov.uk/tfl/syndication/feeds/cycle-hire/livecyclehireupdates.xml'
root = ET.fromstring(requests.get(xml_path).content)
station_location = dict()
station_status = dict()
for child in root:
station_location[child[1].text] = \
{'Latitude': float(child[3].text),
'Longitude': float(child[4].text)}
station_status[child[1].text] = [child[10].text, child[11].text]
self.station_location = station_location
self.station_status = station_status
# query for station locations and status if
# country/city is United States/Boston
elif country == 'United States' and town in 'Boston':
xml_path = 'http://www.thehubway.com/data/stations/bikeStations.xml'
tree = ET.parse(urllib.request.urlopen(xml_path))
root = tree.getroot()
station_location = dict()
station_status = dict()
for child in root:
station_location[child[1].text] = \
{'Latitude': float(child[4].text),
'Longitude': float(child[5].text)}
station_status[child[1].text] = [child[12].text, child[13].text]
self.station_location = station_location
self.station_status = station_status
# query for station locations and status if
# country/city is United States/Minneapolis
elif country == 'United States' and town in 'Minneapolis':
xml_path = 'https://secure.niceridemn.org/data2/bikeStations.xml'
tree = ET.parse(urllib.request.urlopen(xml_path))
root = tree.getroot()
station_location = dict()
station_status = dict()
for child in root:
station_location[child[1].text] = \
{'Latitude': float(child[5].text),
'Longitude': float(child[6].text)}
station_status[child[1].text] = [child[13].text, child[14].text]
self.station_location = station_location
self.station_status = station_status
def distance(self, lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = 6367 * c
return round(km * 0.621371, 2)
def find_closest_bike(self):
"""
        Finds the closest stations with available bikes based on the passed
        coordinates and returns a sorted list of the five closest stations
"""
closest_station = []
station_availability = self.station_status
for station in self.station_location.keys():
if int(station_availability[station][0]) >= 2:
closest_station.append(
[station,
self.distance(float(self.longitude),
float(self.latitude),
self.station_location[station]['Longitude'],
self.station_location[station]['Latitude']),
station_availability[station][0],
self.station_location[station]['Latitude'],
self.station_location[station]['Longitude']])
else:
continue
return sorted(closest_station, key=itemgetter(1))[0:5]
def find_closest_dock(self):
"""
        Finds the closest stations with available docks based on the passed
        coordinates and returns a sorted list of the five closest stations
"""
closest_station = []
station_availability = self.station_status
for station in self.station_location.keys():
if int(station_availability[station][1]) >= 2:
closest_station.append(
[station,
self.distance(float(self.longitude),
float(self.latitude),
self.station_location[station]['Longitude'],
self.station_location[station]['Latitude']),
station_availability[station][1],
self.station_location[station]['Latitude'],
self.station_location[station]['Longitude']])
else:
continue
return sorted(closest_station, key=itemgetter(1))[0:5]
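# ---------------------------------------------------------------------------
# Hedged worked example (added for illustration): `distance` never touches
# `self`, so it can be exercised without constructing a LoadStations instance
# (which would call the geocoding and bike-share APIs). The coordinates below
# are rough values for the White House and the Lincoln Memorial.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # distance(lon1, lat1, lon2, lat2) returns miles rounded to two decimals
    miles = LoadStations.distance(None, -77.0365, 38.8977, -77.0502, 38.8893)
    print("Approximate distance in miles:", miles)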
|
import numpy as np
def extract_snippets(data,*,times,snippet_size):
M = data.shape[1]
T = snippet_size
L = len(times)
Tmid = int(np.floor(T / 2))
snippets = np.zeros((L, T, M),dtype='float32')
for j in range(L):
t1 = times[j] - Tmid
t2 = t1+snippet_size
snippets[j, :, :] = data[t1:t2, :]
return snippets
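# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration): `data` is expected to be a
# (num_timepoints, num_channels) array and `times` the snippet-center sample
# indices; each returned snippet is (snippet_size, num_channels). Centers too
# close to either end of the recording are assumed to be excluded by the
# caller, since the slice would otherwise be shorter than snippet_size.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    data = rng.standard_normal((1000, 4)).astype('float32')  # 1000 samples, 4 channels
    snippets = extract_snippets(data, times=[100, 250, 900], snippet_size=50)
    print(snippets.shape)  # -> (3, 50, 4)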
|
# Copyright (c) 2017-2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from asyncio import ensure_future, gather, get_event_loop, sleep, wait_for
from concurrent.futures import ThreadPoolExecutor
from datetime import timedelta
from typing import Awaitable, Callable, Dict, Optional, TypeVar
from . import PackageService
from ... import LOG
from ...damlast.daml_lf_1 import Package, PackageRef
from ...damlast.errors import PackageNotFoundError
from ...damlast.lookup import STAR, MultiPackageLookup, PackageExceptionTracker
from ...damlast.parse import parse_archive
from ...damlast.pkgfile import Dar
from ...prim import DazlError
__all__ = ["PackageLoader", "DEFAULT_TIMEOUT"]
T = TypeVar("T")
# mypy insists on having a type annotation here, or it will complain about not being able to
# determine the type of this field in pkgloader_aio_compat.py
DEFAULT_TIMEOUT: timedelta = timedelta(seconds=30)
class PackageLoader:
"""
Loader for packages from a remote PackageService.
This class handles retries and backoffs, and avoids having more than one request in flight for
the same package ID. It is intended to be shared by all local clients that may need package
information.
"""
_allow_deprecated_identifiers = False
def __init__(
self,
package_lookup: "MultiPackageLookup",
conn: "Optional[PackageService]" = None,
timeout: "Optional[timedelta]" = DEFAULT_TIMEOUT,
executor: "Optional[ThreadPoolExecutor]" = None,
):
self._package_lookup = package_lookup
self._conn = conn
self._timeout = timeout or DEFAULT_TIMEOUT
self._futures = dict() # type: Dict[PackageRef, Awaitable[Package]]
self._executor = executor or ThreadPoolExecutor(3)
def set_connection(self, conn: "Optional[PackageService]"):
self._conn = conn
async def do_with_retry(self, fn: "Callable[[], T]") -> "T":
"""
Perform a synchronous action that assumes the existence of one or more packages. In the
event the function raises :class:`PackageNotFoundError` or a wildcarded
:class:`NameNotFoundError`, the required package/type is fetched and the operation retried.
If, after a retry, an expected package or type could not be found, the exception is
re-raised to the caller.
:param fn: A function to invoke.
:return: The result of that function.
"""
guard = PackageExceptionTracker(
allow_deprecated_identifiers=self._allow_deprecated_identifiers
)
while True:
pkg_ref = guard.pop_package()
while pkg_ref is not None:
await self.load(pkg_ref)
pkg_ref = guard.pop_package()
with guard:
return fn()
async def preload(self, *contents: "Dar") -> None:
"""
Populate a :class:`PackageCache` with types from DARs.
:param contents:
One or more DARs to load into a local package cache.
"""
async def load(self, ref: "PackageRef") -> "Optional[Package]":
"""
Load a package ID from the remote server. If the package has additional dependencies, they
are also loaded.
:param ref: One or more :class:`PackageRef`s.
:raises: PackageNotFoundError if the package could not be resolved
"""
if ref == STAR:
return await self.load_all()
# If the package has already been loaded, then skip all the expensive I/O stuff
try:
return self._package_lookup.package(ref)
except PackageNotFoundError:
pass
# If we already have a request in-flight, simply return that same Future to our caller;
# do not try to schedule a new request
fut = self._futures.get(ref)
if fut is None:
fut = ensure_future(self._load(ref))
self._futures[ref] = fut
package = await fut
_ = self._futures.pop(ref, None)
return package
async def _load(self, package_id: "PackageRef") -> "Package":
LOG.info("Loading package: %s", package_id)
loop = get_event_loop()
conn = self._conn
if conn is None:
raise DazlError("a connection is not configured")
archive_bytes = await wait_for(
self.__fetch_package_bytes(conn, package_id), timeout=self._timeout.total_seconds()
)
LOG.info("Loaded for package: %s, %d bytes", package_id, len(archive_bytes))
archive = await loop.run_in_executor(
self._executor, lambda: parse_archive(package_id, archive_bytes)
)
self._package_lookup.add_archive(archive)
return archive.package
@staticmethod
async def __fetch_package_bytes(conn: "PackageService", package_id: "PackageRef") -> bytes:
sleep_interval = 1
while True:
# noinspection PyBroadException
try:
return await conn.get_package(package_id)
except Exception:
# We tried fetching the package but got an error. Retry, backing off to waiting as
# much as 30 seconds between each attempt.
await sleep(sleep_interval)
sleep_interval = min(sleep_interval * 2, 30)
LOG.exception("Failed to fetch package; this will be retried.")
async def load_all(self) -> None:
"""
Load all packages from the remote server.
"""
conn = self._conn
if conn is None:
raise DazlError("a connection is not configured")
package_ids = set(await conn.list_package_ids())
package_ids -= self._package_lookup.package_ids()
if package_ids:
await gather(*(self.load(package_id) for package_id in package_ids))
return None
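# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). It assumes a MultiPackageLookup can be constructed empty and that
# `conn` is some object implementing PackageService; `ref` is a placeholder
# PackageRef. Only `lookup.package(...)` is used here, mirroring the call
# already made in `load()` above.
#
#   lookup = MultiPackageLookup()
#   loader = PackageLoader(lookup, conn=conn)
#
#   async def resolve(ref: PackageRef) -> Package:
#       # do_with_retry fetches any missing package and retries the lookup
#       return await loader.do_with_retry(lambda: lookup.package(ref))
# ---------------------------------------------------------------------------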
|
"""
Definição de Escala de Tripulação de Transporte Coletivo Utilizando Algoritmo Genético
Daniel Rodrigues Acosta
Universidade Federal do Rio Grande do Sul
Junho/2019
Variáveis Globais
"""
#import classPopulacao as pp
import datetime as dtm
import pandas as pd
import os
import pickle as pk
# Folder paths
inputPopFolder = '25'
linha = '165'
folder = "C:\\Users\\"+ os.getlogin() +"\\Google Drive\\TCC Daniel Acosta\\GitHub\\genetic-algorithm-crew\\Programa\\"
inputViags = folder + "v_input_"+linha+".csv"
# Macro control
alg = 10000 # number of algorithm iterations (used until another stopping criterion is defined)
modo_inicio =1 # 0 = start from scratch  1 = read from the binary file
modo_fim = 1 # 0 = run until igl=alg  1 = until there are nCompl complete solutions  2 = until complete and then try a few more
modo_salva = 1 # 0 = do not save to pickle  1 = save to pickle
nCompl = 4 # number of complete solutions required for the algorithm to stop
carregaPais = 0 # whether the parents of the crossovers should be added to C
tryCompl = 10000
gotCompl = 0
# Cost weights
alfa = 1.5
delta = 0.5
tau = 1.5
gama = 3.5
probMaiorCusto = 0.08
# Populations and probabilities
fs = 0.3 # selection factor ()
pm = 0.1 # mutation probability
probAlm = 0.5
probMutNeg = 0.9
algMutNeg = 1
na = 10 # number of solutions in population A
nb = 5 # number of solutions in population B
nCruz = 1 # number of child solutions added in each crossover
fatorTop = 0.3 # number of solutions that survive the final deterministic selection
fatorRol = 0.3 # number of solutions that survive the final roulette selection
fatorMinServ = 0.3 # minimum service size in number of trips
fatorDelServ = 0.1 # how many good services should be deleted
minViagAlm = 3
jornGlob = dtm.timedelta(hours = 7.5) # fixed shift duration assumed at the start
almGlob = dtm.timedelta(hours = 0.5) # fixed meal-break duration
intervPontaGlob = dtm.timedelta(hours=0.5)
minInicAlm = dtm.timedelta(hours=1.5)
maxFimAlm = dtm.timedelta(hours=1)
# Inputs and outputs
if not os.path.exists(folder + "output\\"): os.mkdir(folder + "output\\")
if not os.path.exists(folder+"output\\img"): os.mkdir(folder+"output\\img")
def inpop(nomepop):
    pkfile = open(folder+'output\\'+inputPopFolder+'\\pop_'+nomepop+'.txt', mode='br')
    pop = pk.load(pkfile)
    pkfile.close()
    return pop
def outpop(pop, nome, outputPopFolder):
pasta = folder+'output\\'+outputPopFolder
if not os.path.exists(pasta): os.mkdir(pasta)
nomefile = pasta+'\\pop_'+nome+'.txt'
if os.path.exists(nomefile): os.remove(nomefile)
pkfile = open(nomefile, mode='bw')
pk.dump(pop,pkfile)
pkfile.close()
def ler_tempo(primeira,ultima): # gather attributes from all runs into a single file
execs = open(folder+"tempo_execs.txt",'w')
execs.write("exec;horario inicio;iAlg;horario fim;delta horario")
for ex in range(primeira,ultima+1):
atrib = open(folder+"output\\"+str(ex)+"\\atributos.txt", 'r')
execs.write("\n"+str(ex)+atrib.readline())
atrib.close()
execs.close()
# Counters
if modo_inicio==0:
    idsolGlob = 0 # global solution counter, for the IDSOL identifier
    #popCompl = pp.Populacao(nCompl, 'f') # Population to store Complete Solutions
popCompl = inpop('f')
elif modo_inicio==1:
idsolGlob = inpop('id')
    popCompl = inpop('f') # Population to store Complete Solutions - inherited from the pickle
    #popCompl = pp.Populacao(nCompl, 'f') # Population to store Complete Solutions - not inherited
#popQuase = pp.Populacao(10, 'q')
igl = 0
custosIguais = 0
solCompl = 0
algStart = 0 # Initialization time
### READ THE INPUT FILE ############
dfv = pd.read_csv(inputViags, sep=';', index_col=0)
for i in range(0,len(dfv)): # Awkward juggling to put the dates into datetime objects according to the weekday, Saturday or Sunday timetable
if i==0:
iniciodiai = 0
iniciodiaf = 0
else:
if dfv.iloc[i,4]!=dfv.iloc[i-1,4] or dfv.iloc[i,5]!=dfv.iloc[i-1,5] or dfv.iloc[i,1]!=dfv.iloc[i-1,1]:
iniciodiai = 0
iniciodiaf = 0
print(i, "| Começou nova tabela. |tab ", dfv['tab'][i],"|ti ", dfv['ti'][i],"|tf",dfv['tf'][i])
else:
if int(dfv.iloc[i,2][0:2])<dfv.iloc[i-1,2].hour:
iniciodiai = 1
print(i,"| Na mesma tabela, o dia virou em hi")
if int(dfv.iloc[i,3][0:2])<dfv.iloc[i-1,3].hour:
iniciodiaf = 1
print(i, "| Na mesma tabela, o dia virou em hf")
    if dfv.iloc[i,1]==1: #tab 1 = Monday to Friday - June 7, 2019
diai = 7 + iniciodiai
diaf = 7 + iniciodiaf
    elif dfv.iloc[i,1]==2: #tab 2 = Saturday - June 8, 2019
diai = 8 + iniciodiai
diaf = 8 + iniciodiaf
    elif dfv.iloc[i,1]==3: #tab 3 = Sunday - June 9, 2019
diai = 9 + iniciodiai
diaf = 9 + iniciodiaf
else:
print("Erro! A coluna TAB deve ser 1, 2 ou 3")
dfv.iloc[i,2]=dtm.datetime(2019,6,diai,int(dfv.iloc[i,2][0:2]),int(dfv.iloc[i,2][3:5]),0)
dfv.iloc[i,3]=dtm.datetime(2019,6,diaf,int(dfv.iloc[i,3][0:2]),int(dfv.iloc[i,3][3:5]),0)
dfv.iloc[i,6]=dfv.iloc[i,3]-dfv.iloc[i,2]
### PEAK-HOUR WEIGHT ###########
start = min(dfv['hi'])
end = max(dfv['hf'])
qtdVList = []
i = start
while i < end:
qtdV = len([j for j in range(0,len(dfv)) if dfv.iloc[j,2] <= i and i <= dfv.iloc[j,3]])
qtdVList.append(qtdV)
i = i + dtm.timedelta(minutes=5)
maxq = max(qtdVList)
qtdVList = [(maxq-x)/maxq for x in qtdVList]
for j in range(0,len(dfv)):
k = 0
i = start
while i < dfv.iloc[j,2]:
i = i + dtm.timedelta(minutes=5)
k = k + 1
dfv.iloc[j,7] = qtdVList[k]
dfv.drop(columns="tab", inplace=True) # excluir a coluna tab, porque aparentemente não vai mais ser necessário.
vdict = dfv.to_dict()
nc = round(fs*na*(len(vdict['hi'])+na+nb)) # number of solutions in population C
dursViags = list(vdict['dur'].values())
durMediaViags = sum(dursViags, dtm.timedelta(0))/len(dursViags)
hmus = almGlob + dtm.timedelta(hours=1) # 2h of breaks (30min / 1h / 30min) - estimate of the minimum slack achievable in a service
viagsPorServ = (jornGlob-hmus) / durMediaViags
meioTab = min(vdict['hi'].values()) + (max(vdict['hf'].values())-min(vdict['hi'].values()))/2
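# ---------------------------------------------------------------------------
# Illustrative note (added for clarity, not part of the original script): the
# peak-hour weight above normalizes the trip count per 5-minute slot into the
# range [0, 1], where busier slots get a weight closer to 0. For example, with
# counts [2, 8, 4] and maxq = 8:
#
#   >>> counts = [2, 8, 4]
#   >>> maxq = max(counts)
#   >>> [(maxq - x) / maxq for x in counts]
#   [0.75, 0.0, 0.5]
# ---------------------------------------------------------------------------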
|
"""Unit tests for the Axe report generated by axe-selenium-python accessibility collector."""
import json
from collector_utilities.functions import md5_hash
from .base import AxeSeleniumPythonTestCase
class AxeSeleniumPythonAccessibilityTest(AxeSeleniumPythonTestCase):
"""Unit tests for the axe-selenium-python collector for accessibility violations."""
METRIC_TYPE = "accessibility"
def setUp(self):
"""Extend to set up test data."""
super().setUp()
self.tested_url = "https://tested_url"
self.json = dict(
url=self.tested_url,
violations=[
dict(
id="aria-input-field-name",
description="description1",
helpUrl="https://help1",
tags=["cat.color", "wcag2aa", "wcag143"],
nodes=[dict(impact="serious", html="html1")],
),
dict(
id="aria-hidden-focus",
description="description2",
helpUrl="https://help2",
nodes=[dict(impact="moderate", html="html2")],
),
],
)
self.expected_entities = [
{
"description": "description1",
"element": "html1",
"help": "https://help1",
"impact": "serious",
"page": self.tested_url,
"url": self.tested_url,
"violation_type": "aria-input-field-name",
"tags": "cat.color, wcag143, wcag2aa",
},
{
"description": "description2",
"element": "html2",
"help": "https://help2",
"impact": "moderate",
"page": self.tested_url,
"url": self.tested_url,
"violation_type": "aria-hidden-focus",
"tags": "",
},
]
for entity in self.expected_entities:
entity["key"] = md5_hash(",".join(str(value) for key, value in entity.items() if key != "tags"))
async def test_nr_of_issues(self):
"""Test that the number of issues is returned."""
response = await self.collect(get_request_json_return_value=self.json)
self.assert_measurement(response, value="2", entities=self.expected_entities)
async def test_no_issues(self):
"""Test zero issues."""
self.json["violations"] = []
response = await self.collect(get_request_json_return_value=self.json)
self.assert_measurement(response, value="0", entities=[])
async def test_filter_by_impact(self):
"""Test that violations can be filtered by impact level."""
self.set_source_parameter("impact", ["serious", "critical"])
response = await self.collect(get_request_json_return_value=self.json)
self.assert_measurement(response, value="1")
async def test_filter_by_tag_include(self):
"""Test that violations can be filtered by tag."""
self.set_source_parameter("tags_to_include", ["wcag2aa"])
response = await self.collect(get_request_json_return_value=self.json)
self.assert_measurement(response, value="1", entities=[self.expected_entities[0]])
async def test_filter_by_tag_ignore(self):
"""Test that violations can be filtered by tag."""
self.set_source_parameter("tags_to_ignore", ["wcag2aa"])
response = await self.collect(get_request_json_return_value=self.json)
self.assert_measurement(response, value="1", entities=[self.expected_entities[1]])
async def test_zipped_json(self):
"""Test that a zip archive with JSON files is processed correctly."""
self.set_source_parameter("url", "axe.zip")
zipfile = self.zipped_report(*[(f"axe{index}.json", json.dumps(self.json)) for index in range(2)])
response = await self.collect(get_request_content=zipfile)
self.assert_measurement(response, value="4", entities=self.expected_entities + self.expected_entities)
|
from .const import *
from .docs import Docs
from .settings import Settings
|
from collections import defaultdict
import pytest
from names import group_names_by_country, data
# another output to test with
data2 = """last_name,first_name,country_code
Poxton,Sydney,CZ
Kynman,Bryant,NL
Mockler,Leese,AF
Gillicuddy,Raffaello,IR
Renyard,Carlo,CO
Beadham,Evonne,CZ
Tunstall,Allissa,IR
Kamenar,Augy,IR
Insko,Ave,NL
Pigney,Gavrielle,ID"""
@pytest.fixture
def grouping1():
return group_names_by_country(data)
@pytest.fixture
def grouping2():
return group_names_by_country(data2)
def test_return_type(grouping1, grouping2):
assert type(grouping1) == defaultdict
assert type(grouping2) == defaultdict
def test_return_dict_len(grouping1, grouping2):
assert len(grouping1) == 7
assert len(grouping2) == 6
@pytest.mark.parametrize('key, expected', [
('BR', ['Alphonso Harrold']),
('CN', ['Davie Halbard', 'Ines Parrett', 'Margo Apdell']),
('ID', ['Husain Watsham', 'Sula Wasielewski']),
('PL', ['Kermit Braunle']),
('RU', ['Deerdre Tomblings']),
('SE', ['Luke Brenston']),
('TD', ['Rudolph Jeffry']),
])
def test_grouping1_return(grouping1, key, expected):
assert sorted(grouping1[key]) == expected
@pytest.mark.parametrize('key, expected', [
('AF', ['Leese Mockler']),
('CO', ['Carlo Renyard']),
('CZ', ['Evonne Beadham', 'Sydney Poxton']),
('ID', ['Gavrielle Pigney']),
('IR', ['Allissa Tunstall', 'Augy Kamenar', 'Raffaello Gillicuddy']),
('NL', ['Ave Insko', 'Bryant Kynman']),
])
def test_grouping2_return(grouping2, key, expected):
assert sorted(grouping2[key]) == expected
|
'''Meta command which collects test coverage data from TestRail into a YAML file'''
import yaml
from pathlib import Path
from foliant.meta_commands.base import BaseMetaCommand
from foliant.meta.generate import load_meta
from foliant.preprocessors.testrail import Preprocessor as TestRail
class MetaCommand(BaseMetaCommand):
config_section = 'testcoverage'
defaults = {'filename': 'test_data.yml'}
meta_fields = ['functionality', 'test_case_ids']
def _get_testrail(self):
options = {}
for p in self.config.get('preprocessors', []):
if isinstance(p, dict):
if 'testrail' in p:
options = p['testrail']
if not options:
            raise RuntimeError('Error: to use this command add testrail to '
                               'preprocessors list in foliant.yml and set up '
                               'host, login and password.')
return TestRail(self.context,
self.logger,
False,
False,
options)
def collect_data(self) -> list:
result = []
testrail = self._get_testrail()
for section in self.meta.iter_sections():
if section.data.get('functionality', False):
data = {'title': section.title}
test_cases = []
for case_id in section.data.get('test_case_ids', []):
test_cases.append(testrail._get_case_data(case_id))
data['test_cases'] = test_cases
result.append(data)
return result
def run(self, config_file_name='foliant.yml', project_path=Path('.')):
self.logger.debug('Meta command collect_test_data started')
self.meta = load_meta(self.config['chapters'],
self.project_path / self.config['src_dir'])
test_data = self.collect_data()
with open(self.options['filename'], 'w') as f:
yaml.dump({'data': test_data},
f,
default_flow_style=False,
allow_unicode=True,
sort_keys=False)
self.logger.debug('Meta command collect_test_data finished')
|
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core package."""
from tink.proto import tink_pb2
from tink.core import _crypto_format
from tink.core import _key_manager
from tink.core import _primitive_set
from tink.core import _primitive_wrapper
from tink.core import _registry
from tink.core import _tink_error
KeyManager = _key_manager.KeyManager
PrivateKeyManager = _key_manager.PrivateKeyManager
KeyManagerCcToPyWrapper = _key_manager.KeyManagerCcToPyWrapper
PrivateKeyManagerCcToPyWrapper = _key_manager.PrivateKeyManagerCcToPyWrapper
Registry = _registry.Registry
TinkError = _tink_error.TinkError
use_tink_errors = _tink_error.use_tink_errors
new_primitive_set = _primitive_set.new_primitive_set
PrimitiveSet = _primitive_set.PrimitiveSet
PrimitiveWrapper = _primitive_wrapper.PrimitiveWrapper
crypto_format = _crypto_format
class KeyAccess:
"""Base class for access tokens for Tink Keys."""
pass
|
""" Exceptions for ArgTyper """
from typing import Optional, Text, Callable, TYPE_CHECKING
if TYPE_CHECKING:
from .base import ArgParser
class ArgTyperException(Exception):
""" Thrown on ArgTyper problems """
...
class ArgTyperArgumentException(ArgTyperException):
""" Thrown when something is wrong with a provided Arguement() """
def __init__(self, func: Callable, argument: str, message: str):
self.func = func
self.argument = argument
super().__init__(
f"Error while parsing Argument {argument} for function {func.__qualname__}: {message}"
)
class ArgParserException(Exception):
"""Thrown on Parser errors
Args:
parser: The ArgParser instance that raised this errors
message: Optional error message
"""
def __init__(self, parser: "ArgParser", message: Optional[Text]):
self.parser = parser
self.message = message
super().__init__(f"{self.parser.prog}: {message}")
class ArgParserExitException(Exception):
"""Thrown when the ArgumentParser would have called sys.exit(<code>)
Args:
parser: The ArgParser instance that raised this errors
status: Status/Exit code of the ArgParser
message: Optional error message
"""
def __init__(self, parser: "ArgParser", status: int, message: Optional[Text]):
self.parser = parser
self.status = status
self.message = message
super().__init__(f"{self.parser.prog} exited with status {status} ({message})")
|
import numpy as np
from joblib import Parallel, delayed
import sklearn.neighbors as neighbors
class KernelDensity(object):
''' Kernel Density for anomaly detection
'''
def __init__(self, kernel: str = 'gaussian', bandwidth: float = 0.2) -> None:
super().__init__()
self.bandwidth = bandwidth
self.kernel = kernel
@staticmethod
def __calc_batch__(input_numpy: np.ndarray, bandwidth: float, kernel: str) -> np.ndarray:
kde = neighbors.KernelDensity(bandwidth=bandwidth, kernel=kernel)
kde = kde.fit(input_numpy)
rtn = kde.score_samples(input_numpy)
rtn = np.exp(rtn)
return np.reshape(rtn, newshape=(1, -1))
def __call__(self, input: np.ndarray) -> np.ndarray:
''' from memory key to memory value
Args:
input (np.ndarray): the input data points, [batch_size, memory_size]
'''
batch_size = input.shape[0]
input_numpy = np.reshape(input, newshape=(batch_size, -1, 1))
#rtns = Parallel(n_jobs=1)(
# delayed(KernelDensity.__calc_batch__)(input_numpy[idx, :, :], self.bandwidth, self.kernel)
# for idx in range(batch_size)
#)
        rtns = [KernelDensity.__calc_batch__(input_numpy[idx, :, :], self.bandwidth, self.kernel) for idx in range(batch_size)]
outputs = np.concatenate(rtns, axis=0)
return outputs
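# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration): each row of the input is
# treated as an independent set of one-dimensional samples, and the returned
# array holds the estimated density at every one of those samples. Shapes and
# the random data below are only illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    memory_keys = rng.standard_normal((4, 16))  # [batch_size=4, memory_size=16]
    kde = KernelDensity(kernel='gaussian', bandwidth=0.2)
    densities = kde(memory_keys)
    print(densities.shape)  # -> (4, 16)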
|
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
# Copyright (c) 2020.
#
# @author Mike Hartl <mike_hartl@gmx.de>
# @copyright 2020 Mike Hartl
# @license http://opensource.org/licenses/gpl-license.php GNU Public License
# @version 0.0.1
class Exemplar:
def inspect(self, content):
del content[0]
for row in content:
columns = list(row)
value = columns[24]
if (int(value) > 70):
raise Exception("Fehler falsche Exemplar Groesse {}".format(value))
|
from __future__ import print_function
import signal
import copy
import sys
import time
from random import randint
class AlarmException(Exception):
pass
|
"""
Serializers convert data types to serialized data (e.g. JSON) and back again.
Copyright (C) 2020 Nicholas H.Tollervey.
"Commons Clause" License Condition v1.0:
The Software is provided to you by the Licensor under the License, as defined
below, subject to the following condition.
Without limiting other conditions in the License, the grant of rights under the
License will not include, and the License does not grant to you, the right to
Sell the Software.
For purposes of the foregoing, "Sell" means practicing any or all of the rights
granted to you under the License to provide to third parties, for a fee or
other consideration (including without limitation fees for hosting or
consulting/support services related to the Software), a product or service
whose value derives, entirely or substantially, from the functionality of the
Software. Any license notice or attribution required by the License must also
include this Commons Clause License Condition notice.
MIT License:
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import re
from rest_framework.exceptions import ValidationError # type: ignore
from rest_framework import serializers # type: ignore
from rest_framework.utils import html # type: ignore
from datastore import models
# from datastore import logic
class TagPathField(serializers.Field):
"""
Tag paths are serialized as strings (consisting of a namespace and tag name
separated by a slash "/"). An incoming tagpath is checked for correctness.
"""
default_error_messages = {
"incorrect_type": (
"Incorrect type. Expected a string, but got {input_type}"
),
"incorrect_format": (
"Incorrect format. Expected `namespace_name/tag_name`."
),
}
def to_representation(self, value: str) -> str:
"""
Pass through the outgoing string value.
"""
return value
def to_internal_value(self, data: str) -> str:
"""
Ensure the incoming data is a string and of the expected
"namespace/tag" format. Raise a ValidationError exception if not the
case.
"""
if not isinstance(data, str):
self.fail("incorrect_type", input_type=type(data).__name__)
if not re.match(r"[-\w]+/[-\w]+", data):
self.fail("incorrect_format")
return data
class TagPathListField(serializers.ListField):
"""
Represents a list of TagPathFields.
"""
child = TagPathField()
allow_empty = False
class TagValueDictField(serializers.DictField):
"""
Represents a dictionary where the keys must be valid TagPathFields and the
values arbitrary values (whose type and range are checked by the serializer
rather than this field).
"""
initial = {}
default_error_messages = {
"not_a_dict": (
'Expected a dictionary of items but got type "{input_type}".'
),
"empty": "This dictionary may not be empty.",
}
def get_value(self, dictionary):
"""
Override the default field access in order to support dictionaries in
HTML forms.
"""
if html.is_html_input(dictionary):
return html.parse_html_dict(dictionary, prefix=self.field_name)
return dictionary.get(self.field_name, serializers.empty)
def to_internal_value(self, data):
"""
Ensure incoming data is a dictionary and run validation on entries.
"""
if html.is_html_input(data):
data = html.parse_html_dict(data)
if not isinstance(data, dict):
self.fail("not_a_dict", input_type=type(data).__name__)
if not self.allow_empty and len(data) == 0:
self.fail("empty")
return self.run_child_validation(data)
def to_representation(self, value):
"""
Pass through the outgoing dictionary value.
"""
return value
def run_child_validation(self, data):
"""
Ensure the dictionary keys are valid TagPathFields. Otherwise raise a
ValidationError exception.
"""
result = {}
errors = {}
for key, value in data.items():
key_field = TagPathField()
try:
tag_path = key_field.run_validation(key)
result[tag_path] = value
except ValidationError as e:
errors[key] = e.detail
if errors:
raise ValidationError(errors)
return result
class TagPathList(serializers.Serializer):
"""
Manages how lists of tag paths are serialized.
"""
tag_paths = TagPathListField(
required=True,
label="Tag paths",
help_text=("A list of tag-paths to use with the referenced object."),
)
class RetrieveQuery(serializers.Serializer):
"""
Manages how queries for retrieving values on matching objects are
serialized.
"""
select = TagPathListField(
required=True,
label="Select",
help_text=(
"A list of tag-paths for values to retrieve from matching objects."
),
)
where = serializers.CharField(
required=True,
label="Where",
help_text="Criteria for matching objects expressed as BFQL.",
style={"base_template": "textarea.html"},
)
class UpdateQuery(serializers.Serializer):
"""
Manages how queries for updating values on matching objects are serialized.
"""
update = TagValueDictField(
required=True,
label="Update",
help_text=(
"A dictionary of tag-paths and values "
"to annotate onto matching objects."
),
)
where = serializers.CharField(
required=True,
label="Where",
help_text="Criteria for matching objects expressed as BFQL,",
style={"base_template": "textarea.html"},
)
class DeleteQuery(serializers.Serializer):
"""
Manages how queries for deleting values from matching objects are
serialized.
"""
delete = TagPathListField(
required=True,
label="Delete",
help_text=(
"A list of tag-paths for values to delete from matching objects."
),
)
where = serializers.CharField(
required=True,
label="Where",
help_text="Criteria for matching objects expressed as BFQL.",
style={"base_template": "textarea.html"},
)
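# Example request bodies these query serializers accept (illustrative only; the tag
# paths and the BFQL expression are made up, not taken from the project):
#   RetrieveQuery: {"select": ["animals/height"], "where": "animals/height > 1"}
#   UpdateQuery:   {"update": {"animals/height": 2.0}, "where": "animals/height > 1"}
#   DeleteQuery:   {"delete": ["animals/height"], "where": "animals/height > 1"}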
class UserSerializer(serializers.ModelSerializer):
"""
Manages how users are serialized to the outside world.
"""
username = serializers.SlugField(read_only=True)
email = serializers.EmailField(read_only=True)
is_admin = serializers.BooleanField(source="is_superuser", read_only=True)
last_login = serializers.DateTimeField(read_only=True)
class Meta:
model = models.User
fields = [
"username",
"email",
"is_admin",
"last_login",
]
class UserRoleSerializer(serializers.ModelSerializer):
"""
Manages how users are serialized when being specified for roles.
"""
username = serializers.SlugField(required=True)
avatar = serializers.CharField(required=True)
class Meta:
model = models.User
fields = [
"username",
"avatar",
]
class NamespaceCreateSerializer(serializers.ModelSerializer):
"""
Manages Namespace creation data.
"""
name = serializers.SlugField(required=True)
description = serializers.CharField(
required=True, style={"base_template": "textarea.html"}
)
admins = UserRoleSerializer(many=True)
class Meta:
model = models.Namespace
fields = ["name", "description", "admins"]
class NamespaceUpdateSerializer(serializers.ModelSerializer):
"""
Manages data to update a namespace.
"""
description = serializers.CharField(
required=True, style={"base_template": "textarea.html"}
)
admins = UserRoleSerializer(many=True)
class Meta:
model = models.Namespace
fields = ["description", "admins"]
class TagSerializer(serializers.ModelSerializer):
"""
Manages how Tag data comes in/out of the API.
"""
name = serializers.SlugField(read_only=True)
description = serializers.CharField(
required=True, style={"base_template": "textarea.html"}
)
type_of = serializers.ChoiceField(models.VALID_DATA_TYPES, read_only=True)
private = serializers.BooleanField()
users = UserRoleSerializer(many=True)
readers = UserRoleSerializer(many=True)
class Meta:
model = models.Tag
fields = [
"name",
"description",
"type_of",
"private",
"users",
"readers",
]
class NamespaceDetailSerializer(serializers.ModelSerializer):
"""
Manages outgoing Namespace detail data.
"""
name = serializers.SlugField()
description = serializers.CharField()
admins = UserRoleSerializer(many=True)
tag_set = TagSerializer(many=True)
class Meta:
model = models.Namespace
fields = ["name", "description", "admins", "tag_set"]
class StringValueSerializer(serializers.ModelSerializer):
"""
Manages the serialization of string values annotated onto objects via a
tag.
"""
value = serializers.CharField(
required=True, style={"base_template": "textarea.html"}
)
class Meta:
model = models.StringValue
fields = [
"value",
]
class BooleanValueSerializer(serializers.ModelSerializer):
"""
Manages the serialization of boolean values annotated onto objects via a
tag.
"""
value = serializers.BooleanField()
class Meta:
model = models.BooleanValue
fields = [
"value",
]
class IntegerValueSerializer(serializers.ModelSerializer):
"""
Manages the serialization of integer values annotated onto objects via a
tag.
"""
value = serializers.IntegerField()
class Meta:
model = models.IntegerValue
fields = [
"value",
]
class FloatValueSerializer(serializers.ModelSerializer):
"""
Manages the serialization of float values annotated onto objects via a
tag.
"""
value = serializers.FloatField()
class Meta:
model = models.FloatValue
fields = [
"value",
]
class DateTimeSerializer(serializers.ModelSerializer):
"""
Manages the serialization of datetime values annotated onto objects via a
tag.
"""
value = serializers.DateTimeField()
class Meta:
model = models.DateTimeValue
fields = [
"value",
]
class DurationSerializer(serializers.ModelSerializer):
"""
Manages the serialization of duration values annotated onto objects via a
tag.
"""
value = serializers.DurationField()
class Meta:
model = models.DurationValue
fields = [
"value",
]
class BinarySerializer(serializers.ModelSerializer):
"""
Manages the serialization of binary values annotated onto objects via a
tag.
"""
value = serializers.FileField()
mime = serializers.CharField(required=True, max_length=256)
class Meta:
model = models.BinaryValue
fields = [
"value",
"mime",
]
class PointerSerializer(serializers.ModelSerializer):
"""
Manages the serialization of URL values annotated onto objects via a
tag.
"""
value = serializers.URLField(max_length=512)
class Meta:
model = models.PointerValue
fields = [
"value",
]
|
# -*- coding: utf-8 -*-
def morse_to_text(morse):
"""Convert morse code to text.
Args:
morse (str): Morse code.
Returns:
str: Return a text.
"""
CODE = {
'A': '.-', 'B': '-...', 'C': '-.-.',
'D': '-..', 'E': '.', 'F': '..-.',
'G': '--.', 'H': '....', 'I': '..',
'J': '.---', 'K': '-.-', 'L': '.-..',
'M': '--', 'N': '-.', 'O': '---',
'P': '.--.', 'Q': '--.-', 'R': '.-.',
'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-',
'Y': '-.--', 'Z': '--..',
'0': '-----', '1': '.----', '2': '..---',
'3': '...--', '4': '....-', '5': '.....',
'6': '-....', '7': '--...', '8': '---..',
'9': '----.',
',': '--..--', '.': '.-.-.-', ':': '---...', ';': '-.-.-.',
'?': '..--..', '=': '-...-', "'": '.----.', '/': '-..-.',
'!': '-.-.--', '-': '-....-', '_': '..--.-', '(': '-.--.',
        ')': '-.--.-', '$': '...-..-', '&': '.-...', '@': '.--.-.',
' ': '/'
}
    # Build the reverse lookup table once instead of rebuilding it for every code.
    DECODE = {code: char for char, code in CODE.items()}
    morse = morse.strip()
    msg = ''
    for code in morse.split(' '):
        msg += DECODE[code]
    return msg
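# Example usage (illustrative): letters are separated by single spaces, words by " / ".
#   print(morse_to_text(".... . .-.. .-.. --- / .-- --- .-. .-.. -.."))  # HELLO WORLD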
|
import zoo
def makeTheZoo():
return zoo.Zoo("Bill's Wonderland Zoo", getLocations())
def getLocations():
locations = {}
locations.update(one())
locations.update(two())
locations.update(three())
locations.update(four())
locations.update(five())
locations.update(six())
locations.update(seven())
locations.update(eight())
locations.update(nine())
return locations
def one():
id = 1
name = 'Entrance'
animal = None
neighbors = {}
neighbors['s'] = 9
neighbors['sw'] = 2
neighbors['se'] = 8
allowExit = True
return {id : zoo.Location(id, name, animal, neighbors, allowExit)}
def two():
id = 2
name = 'Africa Live'
animal = zoo.Animal('Lion', 'roar', 'whatever it wants', 'dry, arid land')
neighbors = {}
neighbors['ne'] = 1
neighbors['se'] = 9
neighbors['s'] = 3
allowExit = False
return {id : zoo.Location(id, name, animal, neighbors, allowExit)}
def three():
id = 3
name = 'Antarctica'
animal = zoo.Animal('Penguin', 'silence', 'fish', 'cold air with lots of water')
neighbors = {}
neighbors['n'] = 2
neighbors['e'] = 9
neighbors['s'] = 4
allowExit = False
return {id : zoo.Location(id, name, animal, neighbors, allowExit)}
def four():
id = 4
name = 'Bird House'
animal = zoo.Animal('Maquaw', 'squawk, squawk', 'nuts, berries, insects', 'clean crisp air')
neighbors = {}
neighbors['n'] = 3
neighbors['e'] = 5
neighbors['ne'] = 9
allowExit = False
return {id : zoo.Location(id, name, animal, neighbors, allowExit)}
def five():
id = 5
name = 'Elbonia'
animal = zoo.Animal('Hippophant', 'Moo', 'humans, unicorns', 'filthy water, with bugs in it')
neighbors = {}
neighbors['n'] = 9
neighbors['e'] = 4
neighbors['w'] = 6
allowExit = False
return {id : zoo.Location(id, name, animal, neighbors, allowExit)}
def six():
id = 6
name = 'Enchanted Wood'
animal = zoo.Animal('Unicorn', 'Giggle Snort', 'magic seeds, magic grass, imaginary leaves', 'forest full of people who will believe anything')
neighbors = {}
neighbors['n'] = 7
neighbors['w'] = 5
neighbors['nw'] = 9
allowExit = False
return {id : zoo.Location(id, name, animal, neighbors, allowExit)}
def seven():
id = 7
name = 'Deep Sea'
animal = zoo.Animal('Sea Serpent', 'ssssssss', 'you', 'salt water, preferably 1000 meters deep')
neighbors = {}
neighbors['s'] = 6
neighbors['n'] = 8
neighbors['w'] = 9
allowExit = False
return {id : zoo.Location(id, name, animal, neighbors, allowExit)}
def eight():
id = 8
name = 'Cave Land'
animal = zoo.Animal('Dragon', 'roaring growl', 'farm animals, humans, other dragons', 'dark caves, with water, and one entrance')
neighbors = {}
neighbors['nw'] = 1
neighbors['sw'] = 9
neighbors['s'] = 7
allowExit = False
return {id : zoo.Location(id, name, animal, neighbors, allowExit)}
def nine():
id = 9
name = 'Aquarium'
animal = zoo.Animal('Seal', 'barking', 'fish and penguins', 'water with some dry land')
neighbors = {}
neighbors['n'] = 1
neighbors['nw'] = 2
neighbors['ne'] = 8
neighbors['w'] = 3
neighbors['e'] = 7
neighbors['sw'] = 4
neighbors['se'] = 6
neighbors['s'] = 5
allowExit = False
return {id : zoo.Location(id, name, animal, neighbors, allowExit)}
|
"""initial migration
Revision ID: 2ddfd99e0bb4
Revises:
Create Date: 2021-01-25 22:42:56.598121
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "2ddfd99e0bb4"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"users",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("email", sa.TEXT(), nullable=False),
sa.Column("created_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("updated_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("deleted_at", postgresql.TIMESTAMP(), nullable=True),
sa.PrimaryKeyConstraint("id", name=op.f("pk_users")),
sa.UniqueConstraint("email", name=op.f("uq_users_email")),
)
op.create_table(
"credentials",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("username", sa.TEXT(), nullable=False),
sa.Column("password", postgresql.BYTEA(), nullable=False),
sa.Column("last_seen", postgresql.TIMESTAMP(), nullable=True),
sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("created_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("updated_at", postgresql.TIMESTAMP(), nullable=True),
sa.ForeignKeyConstraint(
["user_id"], ["users.id"], name=op.f("fk_credentials_user_id_users")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_credentials")),
sa.UniqueConstraint("username", name=op.f("uq_credentials_username")),
)
op.create_table(
"event_statuses",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("name", sa.TEXT(), nullable=False),
sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("created_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("updated_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("deleted_at", postgresql.TIMESTAMP(), nullable=True),
sa.ForeignKeyConstraint(
["user_id"], ["users.id"], name=op.f("fk_event_statuses_user_id_users")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_event_statuses")),
)
op.create_table(
"event_types",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("name", sa.TEXT(), nullable=False),
sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("created_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("updated_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("deleted_at", postgresql.TIMESTAMP(), nullable=True),
sa.ForeignKeyConstraint(
["user_id"], ["users.id"], name=op.f("fk_event_types_user_id_users")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_event_types")),
)
op.create_table(
"hive_conditions",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("name", sa.TEXT(), nullable=False),
sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("created_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("updated_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("deleted_at", postgresql.TIMESTAMP(), nullable=True),
sa.ForeignKeyConstraint(
["user_id"], ["users.id"], name=op.f("fk_hive_conditions_user_id_users")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_hive_conditions")),
)
op.create_table(
"honey_types",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("name", sa.TEXT(), nullable=True),
sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("created_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("updated_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("deleted_at", postgresql.TIMESTAMP(), nullable=True),
sa.ForeignKeyConstraint(
["user_id"], ["users.id"], name=op.f("fk_honey_types_user_id_users")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_honey_types")),
)
op.create_table(
"owners",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("name", sa.TEXT(), nullable=False),
sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("created_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("updated_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("deleted_at", postgresql.TIMESTAMP(), nullable=True),
sa.ForeignKeyConstraint(
["user_id"], ["users.id"], name=op.f("fk_owners_user_id_users")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_owners")),
)
op.create_table(
"swarm_health_statuses",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("name", sa.TEXT(), nullable=True),
sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("created_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("updated_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("deleted_at", postgresql.TIMESTAMP(), nullable=True),
sa.ForeignKeyConstraint(
["user_id"],
["users.id"],
name=op.f("fk_swarm_health_statuses_user_id_users"),
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_swarm_health_statuses")),
)
op.create_table(
"apiaries",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("name", sa.TEXT(), nullable=False),
sa.Column("location", sa.TEXT(), nullable=False),
sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("honey_type_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("created_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("updated_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("deleted_at", postgresql.TIMESTAMP(), nullable=True),
sa.ForeignKeyConstraint(
["honey_type_id"],
["honey_types.id"],
name=op.f("fk_apiaries_honey_type_id_honey_types"),
),
sa.ForeignKeyConstraint(
["user_id"], ["users.id"], name=op.f("fk_apiaries_user_id_users")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_apiaries")),
)
op.create_table(
"swarms",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("health_status_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("created_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("updated_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("deleted_at", postgresql.TIMESTAMP(), nullable=True),
sa.ForeignKeyConstraint(
["health_status_id"],
["swarm_health_statuses.id"],
name=op.f("fk_swarms_health_status_id_swarm_health_statuses"),
),
sa.ForeignKeyConstraint(
["user_id"], ["users.id"], name=op.f("fk_swarms_user_id_users")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_swarms")),
)
op.create_table(
"hives",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("name", sa.TEXT(), nullable=False),
sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("condition_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("owner_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("swarm_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("apiary_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("created_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("updated_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("deleted_at", postgresql.TIMESTAMP(), nullable=True),
sa.ForeignKeyConstraint(
["apiary_id"], ["apiaries.id"], name=op.f("fk_hives_apiary_id_apiaries")
),
sa.ForeignKeyConstraint(
["condition_id"],
["hive_conditions.id"],
name=op.f("fk_hives_condition_id_hive_conditions"),
),
sa.ForeignKeyConstraint(
["owner_id"], ["owners.id"], name=op.f("fk_hives_owner_id_owners")
),
sa.ForeignKeyConstraint(
["swarm_id"], ["swarms.id"], name=op.f("fk_hives_swarm_id_swarms")
),
sa.ForeignKeyConstraint(
["user_id"], ["users.id"], name=op.f("fk_hives_user_id_users")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_hives")),
)
op.create_table(
"events",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("title", sa.TEXT(), nullable=False),
sa.Column("description", sa.TEXT(), nullable=True),
sa.Column("due_date", postgresql.TIMESTAMP(), nullable=False),
sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("type_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("status_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("hive_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("created_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("updated_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("deleted_at", postgresql.TIMESTAMP(), nullable=True),
sa.ForeignKeyConstraint(
["hive_id"], ["hives.id"], name=op.f("fk_events_hive_id_hives")
),
sa.ForeignKeyConstraint(
["status_id"],
["event_statuses.id"],
name=op.f("fk_events_status_id_event_statuses"),
),
sa.ForeignKeyConstraint(
["type_id"], ["event_types.id"], name=op.f("fk_events_type_id_event_types")
),
sa.ForeignKeyConstraint(
["user_id"], ["users.id"], name=op.f("fk_events_user_id_users")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_events")),
)
op.create_table(
"comments",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("comment", sa.TEXT(), nullable=False),
sa.Column("type", sa.TEXT(), nullable=False),
sa.Column("date", postgresql.TIMESTAMP(), nullable=False),
sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("swarm_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("hive_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("event_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("created_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("updated_at", postgresql.TIMESTAMP(), nullable=True),
sa.Column("deleted_at", postgresql.TIMESTAMP(), nullable=True),
sa.ForeignKeyConstraint(
["event_id"], ["events.id"], name=op.f("fk_comments_event_id_events")
),
sa.ForeignKeyConstraint(
["hive_id"], ["hives.id"], name=op.f("fk_comments_hive_id_hives")
),
sa.ForeignKeyConstraint(
["swarm_id"], ["swarms.id"], name=op.f("fk_comments_swarm_id_swarms")
),
sa.ForeignKeyConstraint(
["user_id"], ["users.id"], name=op.f("fk_comments_user_id_users")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_comments")),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("comments")
op.drop_table("events")
op.drop_table("hives")
op.drop_table("swarms")
op.drop_table("apiaries")
op.drop_table("swarm_health_statuses")
op.drop_table("owners")
op.drop_table("honey_types")
op.drop_table("hive_conditions")
op.drop_table("event_types")
op.drop_table("event_statuses")
op.drop_table("credentials")
op.drop_table("users")
op.drop_table("comment_types")
# ### end Alembic commands ###
|
# -*- coding: utf-8 -*-
"""
Leetcode - Binary search introduction
https://leetcode.com/problems/binary-search
Binary search solution
Created on Sun Oct 21 16:21:01 2018
@author: Arthur Dysart
"""
# REQUIRED MODULES
import sys
# FUNCTION DEFINITIONS
class Solution(object):
def search(self, a, x):
"""
Determines index of target "x" in sorted array "a".
        :type a: List[int]
        :type x: int
:rtype: int
"""
if len(a) == 0:
return -1
else:
l = 0
r = len(a) - 1
# Executes binary search
while l <= r:
m = l + (r - l) // 2
# Determines if target found
if a[m] == x:
return m
                # Determines if right-half has target
elif a[m] < x:
l = m + 1
                # Determines if left-half has target
else:
r = m - 1
# Target not found
return -1
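# Quick illustrative checks (kept as comments so the stdin-driven entry point below
# stays the only runtime behaviour):
#   Solution().search([-1, 0, 3, 5, 9, 12], 9) -> 4
#   Solution().search([-1, 0, 3, 5, 9, 12], 2) -> -1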
def stdin(sys_stdin):
"""
Imports standard input.
"""
inputs = [x.strip("[]\n").split(",") for x in sys_stdin]
a = [int(x) for x in inputs[0]]
x = int(inputs[1][0])
return a, x
# MAIN MODULE
if __name__ == "__main__":
a, x = stdin(sys.stdin)
s = Solution()
i = s.search(a, x)
print(i)
|
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from tws_game.lobby import Lobby
from tws_game.tetris import Tetris
from pprint import pprint
import json
class ClientConnection(WebSocketServerProtocol):
def onOpen(self):
self.factory.register(self)
def onMessage(self, payload, is_binary):
if not is_binary:
self.factory.command(self, payload)
def connectionLost(self, reason):
WebSocketServerProtocol.connectionLost(self, reason)
self.factory.unregister(self)
class TwsServerFactory(WebSocketServerFactory):
AVAILABLE_GAMES = {
'TETRIS': Tetris
}
def __init__(self, url):
WebSocketServerFactory.__init__(self, url)
self.clients = []
self.lobbies = {}
self._lobby_id_counter = 0
""" Client registration
"""
def register(self, client):
if client not in self.clients:
print "registered client {}".format(client.peer)
client.lobby = None
self.clients.append(client)
def unregister(self, client):
if client in self.clients:
print "unregistered client {}".format(client.peer)
if client.lobby:
client.lobby.remove_client(client)
self.clients.remove(client)
""" Message encode/decode functionality
"""
def encode_message(self, command, data):
return '|'.join([command, json.dumps(data)])
def decode_message(self, message):
command, data = message.split('|', 1)
data = json.loads(data)
return command, data
""" Basic game commands
"""
def create_lobby(self, client, data):
        if 'game' not in data or data['game'] not in self.AVAILABLE_GAMES:
raise Exception('Game unavailable')
lobby = Lobby(self.AVAILABLE_GAMES[data['game']])
lobby.set_factory(self)
self._lobby_id_counter += 1
lobby.name = 'Lobby %s' % self._lobby_id_counter
self.lobbies[lobby.id] = lobby
self.send_command(client, 'create_lobby', {
'id': lobby.id
})
def join_lobby(self, client, data):
if client.lobby:
client.lobby.remove_client(client)
if data['id'] in self.lobbies and client not in self.lobbies[data['id']].clients:
self.lobbies[data['id']].add_client(client)
self.send_command(client, 'join_lobby', True)
def list_lobbies(self, client, data):
lobbies = []
for id in self.lobbies:
lobby = self.lobbies[id]
lobbies.append({
'id': lobby.id,
'name': lobby.name,
'clients': len(lobby.clients)
})
self.send_command(client, 'list_lobbies', lobbies)
return lobbies
def leave_lobby(self, client, data):
pass
def start_game(self, client, data):
client.lobby.start_game(client)
def set_nickname(self, client, data):
print "Setting nickname"
pprint(data)
""" Communication methods
"""
def send_command(self, client, command, data):
msg = self.encode_message(command, data)
client.sendMessage(
msg
)
def command(self, client, msg):
command, data = self.decode_message(msg)
commands = {
'create_lobby': self.create_lobby,
'join_lobby': self.join_lobby,
'list_lobbies': self.list_lobbies,
'leave_lobby': self.leave_lobby,
'start_game': self.start_game,
'set_nickname': self.set_nickname
}
if command in commands:
print "Executing command %s" % (command,)
commands[command](client, data)
else:
if client.lobby.game:
client.lobby.game.handle_command(client, command, data)
|
"""provides functions to kill the script by raising SystemExit"""
import sys
from typing import List, NoReturn
def assert_empty(blocked_actions: List[str]) -> None:
"""used with validate_perms, which returns list of denied AWS actions"""
if blocked_actions:
err("IAM user missing following permission(s):",
*sorted(list(set(blocked_actions))))
def err(*halt_messages: str) -> NoReturn:
"""prepend "Error: " to first halt message, then halt"""
halt_msg_list = list(halt_messages)
halt_msg_list[0] = f"Error: {halt_messages[0]}"
stop(*halt_msg_list)
def stop(*halt_messages: str) -> NoReturn:
"""halts the script by raising SystemExit"""
if halt_messages:
print("")
print("\n".join(halt_messages), file=sys.stderr, flush=True)
sys.exit(1)
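# Illustrative usage (not part of the original module):
#   assert_empty(["s3:PutObject", "s3:GetObject"]) -> prints "Error: IAM user missing
#       following permission(s):" plus the sorted, de-duplicated actions, then exits 1
#   err("something went wrong") -> prints "Error: something went wrong" to stderr, exits 1
#   stop("done")                -> prints "done" to stderr, exits 1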
|
numbers = [10, 25, 54, 86, 89, 11, 33, 22]
new_numbers = list(filter(lambda x: x % 2 == 0, numbers))
print(new_numbers)
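# The same selection written as a list comprehension (equivalent, illustrative):
even_numbers = [x for x in numbers if x % 2 == 0]
print(even_numbers)  # [10, 54, 86, 22]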
|
import re
from .deep_getattr import deep_getattr
from connexion.resolver import Resolver
class RestyResolverEx(Resolver):
"""
Resolves endpoint functions using REST semantics (unless overridden by specifying operationId)
"""
def __init__(self, default_module_name, collection_endpoint_name='search', **kwargs):
"""
:param default_module_name: Default module name for operations
:type default_module_name: str
"""
Resolver.__init__(self, **kwargs)
self.default_module_name = default_module_name
self.collection_endpoint_name = collection_endpoint_name
def resolve_operation_id(self, operation):
"""
Resolves the operationId using REST semantics unless explicitly configured in the spec
:type operation: connexion.operation.Operation
"""
if operation.operation.get('operationId'):
return Resolver.resolve_operation_id(self, operation)
return self.resolve_operation_id_using_rest_semantics(operation)
def resolve_operation_id_using_rest_semantics(self, operation):
"""
Resolves the operationId using REST semantics
:type operation: connexion.operation.Operation
"""
path_match = re.search(
            r'^/?(?P<resource_name>([\w\-](?<!/))*)(?P<trailing_slash>/*)(?P<extended_path>.*)$', operation.path
)
def get_controller_name():
x_router_controller = operation.operation.get('x-swagger-router-controller')
name = self.default_module_name
resource_name = path_match.group('resource_name')
if x_router_controller:
name = x_router_controller
elif resource_name:
resource_controller_name = resource_name.replace('-', '_')
name += '.' + resource_controller_name
return name
def get_function_name():
method = operation.method
is_collection_endpoint = \
method.lower() == 'get' \
and path_match.group('resource_name') \
and not path_match.group('extended_path')
return self.collection_endpoint_name if is_collection_endpoint else method.lower()
return '{}.{}'.format(get_controller_name(), get_function_name())
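# Illustrative resolution (made-up paths, assuming default_module_name == "api" and the
# default collection_endpoint_name == "search"):
#   GET  /pets       -> "api.pets.search"
#   GET  /pets/{id}  -> "api.pets.get"
#   POST /pets       -> "api.pets.post"
#   GET  /pet-toys   -> "api.pet_toys.search"   (dashes become underscores)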
class ModuleResolver(RestyResolverEx):
def __init__(self, module, collection_endpoint_name='search'):
"""
:param module: Default module name for operations
:type module: any
"""
RestyResolverEx.__init__(
self,
default_module_name=module.__name__,
collection_endpoint_name=collection_endpoint_name,
function_resolver=self.get_function_from_name,
)
self.module = module
def get_function_from_name(self, function_name):
"""
Tries to get function by fully qualified name (e.g. "mymodule.myobj.myfunc")
:type function_name: str
"""
if function_name is None:
raise ValueError("Empty function name")
function_name_parts = function_name.split('.')
assert function_name_parts[0] == self.module.__name__
return deep_getattr(self.module, '.'.join(function_name_parts[1:]))
|
# Variadic parameters
def calc(*numbers):  # the arguments are collected into a tuple automatically
    s = 0
    for n in numbers:
        s = s + n * n
    return s
# This version takes its input as a single list or tuple, without the star mark
def calc_list(numbers):
    s = 0
    for n in numbers:
        s = s + n * n
    return s
# Default parameters
def defunc(x, n=2):  # if n is not given, it defaults to 2
    s = 0
    while n > 0:
        n -= 1
        s = s + x * x
    return s
# Functions of the form f(*args, **kwargs) accept all kinds of arguments
print(calc(1, 2, 3, 4))         # pass the numbers directly, not wrapped in a list or tuple
print(calc_list([1, 2, 3, 4]))  # pass a list (or tuple) instead
print(defunc(3, 4))
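# A minimal sketch (not in the original snippet) of the f(*args, **kwargs) form
# mentioned above: it accepts any mix of positional and keyword arguments.
def describe(*args, **kwargs):
    # args collects positional arguments as a tuple, kwargs keyword arguments as a dict
    return "positional: {}, keyword: {}".format(args, kwargs)
print(describe(1, 2, colour="red"))  # positional: (1, 2), keyword: {'colour': 'red'}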
|
import pyx ### vector graphics
import cmath
from file_io import parse_data_file, read_from_pickle, output_to_pickle
from taut import isosig_to_tri_angle
from veering import veering_triangulation
from continent import continent
from boundary_triangulation import boundary_triangulation, tet_face
# def pre_draw_transformation( z, ladder_holonomy ):
# return z/ladder_holonomy
def draw_path(canv, path_C, draw_options, fill = False, closed = False):
p = pyx.path.path( pyx.path.moveto(path_C[0].real, path_C[0].imag) )
for coord in path_C[1:]: ### this is how path drawing works...
p.append( pyx.path.lineto(coord.real, coord.imag) )
if closed:
p.closepath()
if fill:
canv.fill(p, draw_options)
else:
canv.stroke(p, draw_options)
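# Illustrative call (hypothetical coordinates, not from the original code): stroke an
# open polyline on a fresh pyx canvas.
#   canv = pyx.canvas.canvas()
#   draw_path(canv, [0 + 0j, 1 + 0j, 1 + 1j], [pyx.style.linewidth(0.02)])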
def dividers_to_lightning_curves_spiky(dividers):
# print(('dividers', len(dividers[0]), len(dividers[1])))
### now find lightning curve from the edges of the divider
out = []
for i in range(2): ## upper and lower
curves = []
for divider in dividers[i]:
if len(divider) <= 2:
### add nothing
continue
a, b = divider[:2]
x = a.shared_vertex(b)
lightning_curve = [x]
for c in divider[2:]:
y = b.shared_vertex(c)
if x == y:
a, b = a, c
else:
lightning_curve.append(y)
a, b = b, c
x = y
curves.append(lightning_curve)
out.append(curves)
return out
def special_vertex_in_and_out(divider_list, v, v_inds):
### v_inds are indices of edges in divider_list that are incident to v
### answers how we get to v and back out when we are doing the non-spiky lightning curves
if len(v_inds) % 2 == 1:
# print 'odd'
ind = (v_inds[0] + v_inds[-1])//2
return ( [v.pos.complex()], ind, ind ) ### last midpoint is at ind, then we do the extra point in the list, then start midpoints up again with ind
else:
# print 'even'
ind = (v_inds[0] + v_inds[-1] + 1)//2
mid_step = (divider_list[ind-1].midpoint() + divider_list[ind].midpoint()) * 0.5
return ( [mid_step, v.pos.complex(), mid_step], ind - 1, ind ) ### more complicated since we have an extra point on the midpoints curve
def lightning_curve_from_dividers(dividers, a, b, special_vertices = [], spiky = True):
#### give it only upper dividers, or only lower dividers
#### result is oriented not in terms of order of a and b, but in order of the dividers
curve = []
for divider_list in dividers:
### check if both s and e are endpoints of this divider list
a_inds = []
b_inds = []
for i, div in enumerate(divider_list):
if a in div.vertices:
a_inds.append(i)
if b in div.vertices:
b_inds.append(i)
if len(a_inds) > 0 and len(b_inds) > 0: # we found the correct divider_list
divider_list = divider_list[:] ### make a copy
a_to_b = a_inds[0] < b_inds[0]
if spiky:
if a_to_b:
divider_list = divider_list[a_inds[-1]:b_inds[0]]
else:
divider_list = divider_list[b_inds[-1]:a_inds[0]]
p, q = divider_list[:2]
x = p.shared_vertex(q)
lightning_curve = [x.pos.complex()]
for r in divider_list[2:]:
y = q.shared_vertex(r)
if x == y:
p, q = p, r
else:
lightning_curve.append(y.pos.complex())
p, q = q, r
x = y
if a_to_b:
lightning_curve = [a.pos.complex()] + lightning_curve + [b.pos.complex()]
else:
lightning_curve = [b.pos.complex()] + lightning_curve + [a.pos.complex()]
return lightning_curve
else: ### have to hit a, b, and any special vertices along the way
### find all special vertices along the path from a to b (or b to a), go in and out of each in turn...
if a_to_b:
s_inds, e_inds = a_inds, b_inds
s, e = a, b
else:
s_inds, e_inds = b_inds, a_inds
s, e = b, a
special_verts = special_vertices[:]
if a in special_vertices:
special_verts.remove(a)
if b in special_vertices:
special_verts.remove(b)
visited_special_vertices = [s]
visited_special_vertex_inds = [s_inds]
for j in range(s_inds[-1] + 1, e_inds[0]):
edge = divider_list[j]
p, q = edge.vertices
v = None
if p in special_vertices:
v = p
elif q in special_vertices:
v = q
                    if v is not None:
if v == visited_special_vertices[-1]:
visited_special_vertex_inds[-1].append(j)
else:
visited_special_vertices.append(v)
visited_special_vertex_inds.append([j])
visited_special_vertices.append(e)
visited_special_vertex_inds.append(e_inds)
all_extra_steps = []
all_in_indices = []
all_out_indices = []
for v, v_inds in zip(visited_special_vertices, visited_special_vertex_inds):
extra_steps, in_ind, out_ind = special_vertex_in_and_out(divider_list, v, v_inds)
all_extra_steps.append(extra_steps)
all_in_indices.append(in_ind)
all_out_indices.append(out_ind)
lightning_curve = []
for j in range(len(all_extra_steps) - 1):
lightning_curve.extend(all_extra_steps[j])
lightning_curve.extend([edge.midpoint() for edge in divider_list[all_out_indices[j] : all_in_indices[j+1] + 1]])
lightning_curve.extend(all_extra_steps[-1])
if len(s_inds) % 2 == 0:
lightning_curve = lightning_curve[1:]
if len(e_inds) % 2 == 0:
lightning_curve = lightning_curve[:-1]
return lightning_curve
### function to get lightning curve to the left or right of a ladderpole edge. look for tet_faces you need near the edge. Calculate any needed offsets then and there.
def explore_side(vt, tet, face, pivot_vert, leading_vert, trailing_vert, ladderpole_colour):
tet_faces_pivots = []
while True:
tet_faces_pivots.append( (tet_face(vt, tet.index(), face), pivot_vert) )
if vt.get_edge_between_verts_colour(tet.index(), (pivot_vert, leading_vert)) != ladderpole_colour:
break
gluing = tet.adjacentGluing(trailing_vert)
tet = tet.adjacentTetrahedron(trailing_vert)
face = gluing[face]
pivot_vert = gluing[pivot_vert]
leading_vert, trailing_vert = gluing[trailing_vert], gluing[leading_vert]
tet_faces_pivots.reverse() ## seems to play nice with continent coast order
return tet_faces_pivots
def lightning_curve_for_ladder_unit(dividers, lu, offset):
non_inf_verts = [0,1,2,3]
non_inf_verts.remove(lu.face)
edge_colours = []
for i in range(3):
edge_colours.append( lu.ladder.vt.get_edge_between_verts_colour(lu.tet_num, (non_inf_verts[(i+1)%3], non_inf_verts[(i+2)%3]) ) )
for i in range(3):
if edge_colours[i] == edge_colours[(i+1)%3]:
odd_one_out = (i+2)%3
s, e = lu.con_verts[non_inf_verts[(odd_one_out + 1)%3]], lu.con_verts[non_inf_verts[(odd_one_out + 2)%3]]
T = lu.ladder.torus_triangulation
ladder_parity = T.ladder_list.index(lu.ladder) % 2
lightning_curve = lightning_curve_from_dividers(dividers[ladder_parity], s, e, special_vertices = [], spiky = False) ### no need for extra ladderpole vertices - there are none between these two
lightning_curve = [c + offset for c in lightning_curve]
return lightning_curve
# lightning_curve_scaled = [ T.drawing_scale * (c + offset) for c in lightning_curve ]
# draw_path(T.canv, lightning_curve_scaled, [pyx.style.linewidth(ct_lw), pyx.style.linejoin.round, lightning_colours[ladder_parity%2]])
def lightning_curves_around_ladder_unit_ladder_pole_edge(dividers, lu, T):
### describe a ladderpole edge by a neighbouring ladder unit. Draw lightning curves around it
vt = T.vt
tet_num, face = lu.tet_num, lu.face
if lu.is_on_left():
opp_vert = lu.right_vertices[0]
ladderpole_verts = lu.left_vertices
else:
opp_vert = lu.left_vertices[0]
ladderpole_verts = lu.right_vertices
ladderpole_verts_complex = [lu.verts_pos[i].complex() for i in ladderpole_verts]
ladderpole_colour = vt.get_edge_between_verts_colour(tet_num, ladderpole_verts)
if (ladderpole_colour == "red" and lu.is_on_left()) or (ladderpole_colour == "blue" and lu.is_on_right()):
pivot_vert, trailing_vert = ladderpole_verts
else:
trailing_vert, pivot_vert = ladderpole_verts
ladderpole_verts_complex.reverse()
leading_vert = opp_vert
tet = T.vt.tri.tetrahedron(tet_num)
gluing = tet.adjacentGluing(opp_vert)
nb_tet = tet.adjacentTetrahedron(opp_vert)
nb_face = gluing[face]
nb_pivot_vert, nb_trailing_vert = gluing[trailing_vert], gluing[pivot_vert] ### swap
nb_leading_vert = gluing[leading_vert]
### if the edge is blue, rotate up to the right to find relevant tet_faces to its right, and down to the left.
### vice versa for red ladderpole edge
our_side_tet_faces_pivots = explore_side(vt, tet, face, pivot_vert, leading_vert, trailing_vert, ladderpole_colour)
nb_side_tet_faces_pivots = explore_side(vt, nb_tet, nb_face, nb_pivot_vert, nb_leading_vert, nb_trailing_vert, ladderpole_colour)
### for each of these tet_faces, use the edge for the corresponding ladder unit in T, translated as necessary using the offset
### from either end of the ladderpole edge.
if (ladderpole_colour == "red" and lu.is_on_left()) or (ladderpole_colour == "blue" and lu.is_on_right()):
all_tet_faces_pivots = [our_side_tet_faces_pivots, nb_side_tet_faces_pivots]
else:
all_tet_faces_pivots = [nb_side_tet_faces_pivots, our_side_tet_faces_pivots]
out = []
for k in range(2):
full_crv = []
for j, (tf, pivot_vert) in enumerate(all_tet_faces_pivots[k]):
lu = T.bt.get_ladder_unit_from_tet_face(tf)
offset = ladderpole_verts_complex[k] - lu.verts_pos[pivot_vert].complex()
crv = lightning_curve_for_ladder_unit(dividers, lu, offset)
if j > 0:
crv = crv[1:]
full_crv.extend(crv)
out.append(full_crv)
return out
def uniquify_list(dup_list, subtract = [], epsilon = 0.001):
unique_list = []
for z in dup_list:
keep = True
for w in unique_list + subtract:
            if abs(z - w) < epsilon:
keep = False
break
if keep:
unique_list.append(z)
return unique_list
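# Illustrative behaviour (assuming the default epsilon of 0.001): points closer than
# epsilon to an already-kept point, or to a point in `subtract`, are dropped.
#   uniquify_list([0 + 0j, 0.0005 + 0j, 1 + 0j])    -> [0j, (1+0j)]
#   uniquify_list([1 + 0j], subtract=[1.0001 + 0j]) -> []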
def replace_with_continent_vertices(v_list, con, epsilon = 0.001):
for i, w in enumerate(v_list):
for v in con.boundary_triangulation_vertices:
# print(type(v), type(w))
if abs(v.pos.complex() - w) < epsilon:
v_list[i] = v
break
def assign_continent_vertices_to_tet_faces(T, con):
for L in T.ladder_list:
for lu in L.ladder_unit_list:
lu.con_verts = [v.complex() for v in lu.verts_pos]
replace_with_continent_vertices(lu.con_verts, con)
def draw_continent( veering_isosig, tet_shapes, max_num_tetrahedra, max_length, output_filename = None, draw_args = None, build_type = None ):
draw_CT_curve, draw_lightning_curve, draw_jordan_curve = draw_args['draw_CT_curve'], draw_args['draw_lightning_curve'], draw_args['draw_jordan_curve']
draw_dividers, draw_landscapes, draw_box_for_cohom_frac = draw_args['draw_dividers'], draw_args['draw_landscapes'], draw_args['draw_box_for_cohom_frac']
draw_alignment_dots, draw_desired_vertices, expand_fund_dom = draw_args['draw_alignment_dots'], draw_args['draw_desired_vertices'], draw_args['expand_fund_dom']
tri, angle = isosig_to_tri_angle(veering_isosig)
vt = veering_triangulation(tri, angle, tet_shapes = tet_shapes)
B = boundary_triangulation(vt)
B.generate_canvases(args = draw_args)
out_data = []
for i,T in enumerate(B.torus_triangulation_list):
print(T.sideways_index_shift)
print(T.sideways_once_holonomy)
print(T.sideways_holonomy)
print(T.ladder_holonomy)
# print(('cusp', i))
### make initial_tet_face be in the lower left of the fundamental domain
# initial_tet_face = T.ladder_list[0].ladder_unit_list[0]
### make initial_tet_face be in the middle of the fundamental domain
num_ladders = len(T.ladder_list)
L = T.ladder_list[int(num_ladders/2 - 1)] ## -1 because we split the last ladder between the left and right
num_ladder_units = len(L.ladder_unit_list)
initial_tet_face = L.ladder_unit_list[int(num_ladder_units/2)]
# print(('initial_tet_face', initial_tet_face))
# print(('origin_in_C', initial_tet_face.origin_in_C))
# print(('verts_pos', initial_tet_face.verts_pos))
### want to draw a box which will contain a fund dom, will be what we render as a cohom frac
ladderpoles_vertices = T.left_ladder_pole_vertices() ### everything is on left of the ladders...
if expand_fund_dom:
left_ladder = T.ladder_list[0]
right_ladder = T.ladder_list[-1]
left_ladder_nbd = []
for lu in left_ladder.ladder_unit_list:
if lu.is_on_left():
# print(lu, lu.left_vertices[0])
left_ladder_nbd.extend( lu.vert_positions_around_corner( lu.left_vertices[0] ) )
last = left_ladder.ladder_unit_list[-1]
# print('extra', last, last.left_vertices[-1])
left_ladder_nbd.extend( last.vert_positions_around_corner( last.left_vertices[-1] ) )
# print('right')
right_ladder_nbd = []
for lu in right_ladder.ladder_unit_list:
if lu.is_on_left():
# print(lu, lu.left_vertices[0])
right_ladder_nbd.extend( lu.vert_positions_around_corner( lu.left_vertices[0] ) )
last = right_ladder.ladder_unit_list[-1]
# print('extra', last, last.left_vertices[-1])
right_ladder_nbd.extend( last.vert_positions_around_corner( last.left_vertices[-1] ) )
left_ladder_nbd = uniquify_list(left_ladder_nbd)
right_ladder_nbd = uniquify_list(right_ladder_nbd)
bottom_nbd = []
top_nbd = []
for i, L in enumerate(T.ladder_list):
if i%2 == 0:
lu_bottom = L.ladder_unit_list[0]
lu_top = L.ladder_unit_list[-1]
bottom_nbd.extend( lu_bottom.vert_positions_around_corner(lu_bottom.left_vertices[0]) )
bottom_nbd.extend( lu_bottom.vert_positions_around_corner(lu_bottom.right_vertices[0]) )
top_nbd.extend( lu_top.vert_positions_around_corner(lu_top.left_vertices[-1]) )
top_nbd.extend( lu_top.vert_positions_around_corner(lu_top.right_vertices[-1]) )
bottom_nbd = uniquify_list(bottom_nbd)
top_nbd = uniquify_list(top_nbd)
all_ladderpole_vertices = [v for L in ladderpoles_vertices for v in L]
left_ladder_nbd = uniquify_list(left_ladder_nbd, subtract = all_ladderpole_vertices)
right_ladder_nbd = uniquify_list(right_ladder_nbd, subtract = all_ladderpole_vertices + left_ladder_nbd) ### right and left can overlap
bottom_nbd = uniquify_list(bottom_nbd, subtract = all_ladderpole_vertices + left_ladder_nbd + right_ladder_nbd) ### top and bottom cannot
top_nbd = uniquify_list(top_nbd, subtract = all_ladderpole_vertices + left_ladder_nbd + right_ladder_nbd)
desired_vertices = all_ladderpole_vertices + left_ladder_nbd + right_ladder_nbd + bottom_nbd + top_nbd
desired_vertices = uniquify_list(desired_vertices) ## just in case...
else:
desired_vertices = [v for L in ladderpoles_vertices for v in L]
con = continent( vt, initial_tet_face, desired_vertices = desired_vertices )
con.build_fundamental_domain() ## expand the continent until we have all vertices of the boundary triangulation fundamental domain
# print(('unfound desired_vertices', con.desired_vertices))
# assert con.desired_vertices == [] ### found all the desired vertices
if con.desired_vertices != []:
print(veering_isosig, 'did not find all torus triangulation vertices')
return False
# now replace ladderpoles_vertices with the continent's corresponding vertices
for ladderpole_vertices in ladderpoles_vertices:
replace_with_continent_vertices(ladderpole_vertices, con)
if expand_fund_dom:
replace_with_continent_vertices(left_ladder_nbd, con)
replace_with_continent_vertices(right_ladder_nbd, con)
replace_with_continent_vertices(bottom_nbd, con)
replace_with_continent_vertices(top_nbd, con)
nbd = left_ladder_nbd + bottom_nbd + top_nbd + right_ladder_nbd
nbd.sort(key = lambda v: con.coast.index(v))
### the following is the list with correctly replaced vertices
all_ladderpole_vertices = [v for L in ladderpoles_vertices for v in L] ### dont need this to be bigger when doing "expand_fund_dom"
ladderpole_descendant_segments = []
if expand_fund_dom:
### we must sort the extra vertices of nbd into the correct ladderpoles... just do by colour of the edge connecting to infty
ladderpole_is_red = nbd[0].edge_between(con.infinity).is_red
segment_start = con.coast.index(nbd[0])
for i,v in enumerate(nbd):
if v.edge_between(con.infinity).is_red != ladderpole_is_red:
ladderpole_descendant_segments.append( [segment_start, con.coast.index(nbd[i-1])] )
segment_start = con.coast.index(nbd[i])
ladderpole_is_red = not ladderpole_is_red
ladderpole_descendant_segments.append( [segment_start, con.coast.index(nbd[-1])] )
else:
for ladderpole_vertices in ladderpoles_vertices:
segment = [con.coast.index(ladderpole_vertices[0]), con.coast.index(ladderpole_vertices[-1])]
segment.sort()
ladderpole_descendant_segments.append( segment )
con.mark_ladderpole_descendants(ladderpole_descendant_segments)
# print 'important verts', important_vertices
# for v in important_vertices:
# z = T.drawing_scale * v.pos.complex()
# pyx_fill_col = pyx.deco.filled([pyx.color.rgb.black])
# T.canv.fill(pyx.path.circle(z.real, z.imag, 0.02), [pyx_fill_col])
hit_max_tetrahedra = False ### default assumption is that we had enough tetrahedra to get the max_length we want.
# print(build_type)
if build_type == 'build_naive':
con.build_naive(max_num_tetrahedra = max_num_tetrahedra)
elif build_type == 'build_on_coast':
hit_max_tetrahedra = con.build_on_coast(max_length = max_length, max_num_tetrahedra = max_num_tetrahedra)
elif build_type == 'build_make_long_descendant_edges_internal':
hit_max_tetrahedra = con.build_make_long_descendant_edges_internal(max_length = max_length, max_num_tetrahedra = max_num_tetrahedra)
elif build_type == 'build_explore_prongs':
hit_max_tetrahedra = con.build_explore_prongs(max_length = max_length, max_num_tetrahedra = max_num_tetrahedra)
elif build_type == 'build_long_and_mid':
hit_max_tetrahedra = con.build_long_and_mid(max_length = max_length, max_num_tetrahedra = max_num_tetrahedra)
if hit_max_tetrahedra:
output_filename = output_filename[:-4] + '_hitmax.pdf'
#######
# eq = con.segment_between( ladderpoles_vertices[0][0], ladderpoles_vertices[0][1] ) ## segment under one edge of ladderpole
# eq = con.segment_between( ladderpoles_vertices[0][0], ladderpoles_vertices[0][-1] ) ## 0th ladderpole
grad = pyx.color.gradient.Hue
# colours = {"blue":pyx.color.rgb.blue, "red":pyx.color.rgb.red}
colours = {"blue":pyx.color.rgb(0,0,0.5), "red":pyx.color.rgb(0.5,0,0)}
ct_lw = draw_args['ct_lw']
draw_options = [pyx.style.linewidth(ct_lw), pyx.style.linejoin.round, pyx.deco.colorgradient(grad, steps = 256)] ## this may get overwritten with colour information for the ladder
assign_continent_vertices_to_tet_faces(T, con)
dividers = con.lightning_dividers([]) ## only lightning curves for infinity
layer1 = T.canv.layer("layer1")
layer2 = T.canv.layer("layer2", below = "layer1")
if draw_jordan_curve:
jordan_colours = [pyx.color.rgb(0,0,0), pyx.color.rgb(1,1,1)] ### black, white
for k, L in enumerate(T.ladder_list):
if k%2 == 0:
for lu in L.ladder_unit_list:
if lu.is_on_left():
s, e = lu.con_verts[lu.left_vertices[0]], lu.con_verts[lu.left_vertices[1]]
else:
s, e = lu.con_verts[lu.right_vertices[0]], lu.con_verts[lu.right_vertices[1]]
CT_ladderpole = con.segment_between(s, e)
curves = lightning_curves_around_ladder_unit_ladder_pole_edge(dividers, lu, T)
if CT_ladderpole[0] != e:
CT_ladderpole.reverse()
assert CT_ladderpole[0] == e
assert CT_ladderpole[-1] == s
CT_ladderpole_pos = [v.pos.complex() for v in CT_ladderpole ]
assert abs(CT_ladderpole_pos[0] - curves[0][-1]) < 0.0001
assert abs(CT_ladderpole_pos[-1] - curves[0][0]) < 0.0001
loop0 = curves[0] + CT_ladderpole_pos[1:-1]
path_C = [ T.drawing_scale * c for c in loop0 ]
draw_path(layer1, path_C, [jordan_colours[0]], fill = True)
assert abs(CT_ladderpole_pos[0] - curves[1][0]) < 0.0001
assert abs(CT_ladderpole_pos[-1] - curves[1][-1]) < 0.0001
CT_ladderpole_pos.reverse()
loop1 = curves[1] + CT_ladderpole_pos[1:-1]
path_C = [ T.drawing_scale * c for c in loop1 ]
draw_path(layer1, path_C, [jordan_colours[1]], fill = True)
# for k, ladderpole in enumerate(ladderpoles_vertices):
# for j in range(len(ladderpole) - 1):
# s, e = ladderpole[j], ladderpole[j+1]
# CT_ladderpole = con.segment_between(s, e)
# if CT_ladderpole[0] != s:
# CT_ladderpole.reverse()
# assert CT_ladderpole[0] == s
# assert CT_ladderpole[-1] == e
# for i in range(2):
# lightning_curve = lightning_curve_from_dividers(dividers[i], s, e, special_vertices = all_ladderpole_vertices, spiky = False)
# CT_ladderpole_pos = [v.pos.complex() for v in CT_ladderpole ]
# if CT_ladderpole_pos[0] == lightning_curve[0]:
# lightning_curve.reverse()
# assert CT_ladderpole_pos[0] == lightning_curve[-1] and CT_ladderpole_pos[-1] == lightning_curve[0]
# lightning_curve = lightning_curve[1:-1]
# loop = CT_ladderpole_pos + lightning_curve
# path_C = [ T.drawing_scale * c for c in loop ]
# draw_path(layer1, path_C, [jordan_colours[i]], fill = True)
# if k == 0:
# L = T.ladder_list[0]
# offset = L.left_ladderpole_index_to_ladder_unit(j).gluing_offset
# path_C = [ T.drawing_scale * (c + offset) for c in loop ]
# draw_path(layer1, path_C, [jordan_colours[i]], fill = True)
box = T.canv.bbox()
layer2.fill(box.enlarged(1.0).rect(), [pyx.color.rgb(0.5,0.5,0.5)])
# lightning_colours = [pyx.color.rgb(0,0.5,0), pyx.color.rgb(0.5,0,0.5)] ### green, purple
# lightning_colours = [pyx.color.rgb(1,0,0), pyx.color.rgb(1,0,0)] ### red, red
lightning_colours = [pyx.color.rgb(0,0,0), pyx.color.rgb(0,0,0)] ### black, black
if draw_dividers:
for k in range(2):
for divider_list in dividers[k]:
for edge in divider_list:
edgeC = [T.drawing_scale * v.pos.complex() for v in edge.vertices]
draw_path(layer1, edgeC, [lightning_colours[k], pyx.style.linewidth(0.005)])
##### draw the Cannon-Thurston curve
if draw_CT_curve:
if draw_args['only_draw_ladderpoles']:
for j, ladderpole_vertices in enumerate(ladderpoles_vertices):
# if j%2 == 0:
# col = colours["red"]
# else:
# col = colours["blue"]
# draw_options = [pyx.style.linewidth(ct_lw), col]
for i in range(len(ladderpole_vertices) - 1): # fenceposts
path = con.segment_between(ladderpole_vertices[i], ladderpole_vertices[i+1])
for v in path[:-1]:
assert v.is_ladderpole_descendant()
path_C = [ T.drawing_scale * v.pos.complex() for v in path ]
draw_path(T.canv, path_C, draw_options)
else:
path = con.coast
path.remove(con.infinity)
path_C = [ T.drawing_scale * v.pos.complex() for v in path ]
draw_path(T.canv, path_C, draw_options)
##############
# adj_verts, adj_edges = con.vertices_and_edges_adjacent_to_infinity()
### continent drawing the boundary triangulation (we don't generally do this because we have boundary_triangulation to do this)
# lines for triangles meeting infinity
# for endpoints, veering_colour in adj_edges:
# z, w = [T.drawing_scale * v.pos.complex() for v in endpoints]
# pyx_stroke_col = pyx.deco.stroked([colours[veering_colour]])
# T.canv.stroke(pyx.path.line( z.real, z.imag, w.real, w.imag), [pyx.style.linewidth(lw * 5), pyx_stroke_col] )
# # dots for edges from infinity
# for v,veering_colour in adj_verts:
# z = v.pos.complex()
# pyx_fill_col = pyx.deco.filled([colours[veering_colour]])
# T.canv.fill(pyx.path.circle(z.real, z.imag, 0.02), [pyx_fill_col])
### continent drawing the left_ladder_pole_vertices
# T = B.torus_triangulation_list[0]
# for L in T.ladder_list:
# for v in L.left_ladder_pole_vertices():
# v *= T.drawing_scale
# if L.is_even:
# col = colours["blue"]
# else:
# col = colours["red"]
# T.canv.stroke(pyx.path.circle(v.real, v.imag, 0.15), [pyx.style.linewidth(lw * 3), col])
#### draw upper and lower landscapes for the continent
if draw_landscapes:
lower_colours = {True: pyx.color.rgb(0.5,0.3,0), False: pyx.color.rgb(0,0.3,0.5)}
upper_colours = {True: pyx.color.rgb(0.9,0.3,0), False: pyx.color.rgb(0,0.3,0.9)}
landscape_edges = con.boundary_landscape_edges()
colours = [lower_colours, upper_colours]
for i in range(2):
# i = 1
for e in landscape_edges[i]:
col = colours[i][e.is_red]
u, v = e.vertices
if u == con.infinity or v == con.infinity:
if u == con.infinity:
z = T.drawing_scale * v.pos.complex()
else:
z = T.drawing_scale * u.pos.complex()
T.canv.fill(pyx.path.circle(z.real, z.imag, 0.05), [col])
else:
draw_path(T.canv, [T.drawing_scale * u.pos.complex(), T.drawing_scale * v.pos.complex()], [pyx.style.linewidth(0.5 * ct_lw), col])
#### draw lightning curves
lightning_colours = [pyx.color.rgb(0,0.5,0), pyx.color.rgb(0.5,0,0.5)] ### green, purple
# lightning_colours = [pyx.color.rgb(1,0,0), pyx.color.rgb(1,0,0)] ### red, red
# lightning_colours = [pyx.color.rgb(0,0,0), pyx.color.rgb(0,0,0)] ### black, black
if draw_lightning_curve:
dividers = con.lightning_dividers([]) ## only lightning curves for infinity
# dividers = con.lightning_dividers(all_ladderpole_vertices) ## add in more lightning curves
# k = 0
for k, L in enumerate(T.ladder_list):
for lu in L.ladder_unit_list:
if k%2 == 0:
curves = lightning_curves_around_ladder_unit_ladder_pole_edge(dividers, lu, T)
for j, crv in enumerate(curves):
lightning_curve_scaled = [ T.drawing_scale * c for c in crv ]
draw_path(T.canv, lightning_curve_scaled, [pyx.style.linewidth(ct_lw), pyx.style.linejoin.round, lightning_colours[j]])
# non_inf_verts = [0,1,2,3]
# non_inf_verts.remove(lu.face)
# edge_colours = []
# for i in range(3):
# edge_colours.append( T.vt.get_edge_between_verts_colour(lu.tet_num, (non_inf_verts[(i+1)%3], non_inf_verts[(i+2)%3]) ) )
# for i in range(3):
# if edge_colours[i] == edge_colours[(i+1)%3]:
# odd_one_out = (i+2)%3
# s, e = lu.con_verts[non_inf_verts[(odd_one_out + 1)%3]], lu.con_verts[non_inf_verts[(odd_one_out + 2)%3]]
# lightning_curve = lightning_curve_from_dividers(dividers[k%2], s, e, special_vertices = all_ladderpole_vertices, spiky = False)
# lightning_curve_scaled = [ T.drawing_scale * c for c in lightning_curve ]
# draw_path(T.canv, lightning_curve_scaled, [pyx.style.linewidth(ct_lw), pyx.style.linejoin.round, lightning_colours[k%2]])
# if k == len(T.ladder_list) - 1:
# lightning_curve_shifted = [c + lu.gluing_offset for c in lightning_curve]
# lightning_curve_shifted_scaled = [ T.drawing_scale * c for c in lightning_curve_shifted ]
# draw_path(T.canv, lightning_curve_shifted_scaled, [pyx.style.linewidth(ct_lw), pyx.style.linejoin.round, lightning_colours[k%2]])
# for k, ladderpole in enumerate(ladderpoles_vertices):
# for j in range(len(ladderpole) - 1):
# s, e = ladderpole[j], ladderpole[j+1]
# for i in range(2):
# lightning_curve = lightning_curve_from_dividers(dividers[i], s, e, special_vertices = all_ladderpole_vertices, spiky = False)
# lightning_curve_scaled = [ T.drawing_scale * c for c in lightning_curve ]
# draw_path(T.canv, lightning_curve_scaled, [pyx.style.linewidth(ct_lw), pyx.style.linejoin.round, lightning_colours[i]])
# if k == 0:
# L = T.ladder_list[0]
# offset = L.left_ladderpole_index_to_ladder_unit(j).gluing_offset
# lightning_curve_scaled_shifted = [ T.drawing_scale * (c + offset) for c in lightning_curve ]
# draw_path(T.canv, lightning_curve_scaled_shifted, [pyx.style.linewidth(ct_lw), pyx.style.linejoin.round, pyx.color.rgb(0,1,1)])
# for i in range(2):
# for crv in lightning_curves[i]:
# ## remove any infinities
# crv = [c for c in crv if c != con.infinity]
# # if draw_args['only_draw_ladderpoles']:
# # ladderpole_indices = []
# # for j, c in enumerate(crv):
# # if c in all_ladderpole_vertices:
# # ladderpole_indices.append(j)
# # if len(ladderpole_indices) <= 1:
# # crv = []
# # else:
# # crv = crv[ladderpole_indices[0]:ladderpole_indices[-1] + 1]
# crv = [ T.drawing_scale * c.pos.complex() for c in crv ]
# if len(crv) > 0:
# draw_path(T.canv, crv, [pyx.style.linewidth(ct_lw), pyx.style.linejoin.round, lightning_colours[i]])
# # ## trim to ladder poles
# # ladderpole_vertex_indices = []
# # for i, v in enumerate(crv):
# # if v in all_ladderpole_vertices:
# # ladderpole_vertex_indices.append(i)
# # if len(ladderpole_vertex_indices) > 0:
# # crv = crv[ladderpole_vertex_indices[0]: ladderpole_vertex_indices[-1] + 1]
# # # for e in crv:
# # # pts = [T.drawing_scale * v.pos.complex() for v in e.vertices]
# # # draw_path(T.canv, pts, [pyx.style.linewidth(ct_lw)])
# # crv = [ T.drawing_scale * c.pos.complex() for c in crv ]
# # draw_path(T.canv, crv, [pyx.style.linewidth(ct_lw), pyx.style.linejoin.round])
if draw_box_for_cohom_frac:
box = T.canv.bbox()
diam = pyx.unit.tocm( max([box.right() - box.left(), box.top() - box.bottom()]) )
### pyx stores lengths in a weird format, we have to convert to cm to get a float out
box_center = complex(pyx.unit.tocm( 0.5*(box.right() + box.left()) ), pyx.unit.tocm( 0.5*(box.top() + box.bottom()) ))
box_right_midpoint = box_center + complex(0.5*diam, 0)
# T.canv.stroke(box.rect())
T.canv.stroke(pyx.path.rect(box_center.real - 0.5*diam, box_center.imag - 0.5*diam, diam, diam)) # lower left corner coords, width, height
inf_vert = initial_tet_face.face
zero_vert = inf_vert - ((inf_vert%2)*2 - 1) ## swaps 0 with 1, 2 with 3
one_vert = inf_vert - 2*(((inf_vert//2) % 2)*2 - 1) ## swaps 0 with 2, 1 with 3
zero_vert_pos = T.drawing_scale * initial_tet_face.verts_pos[zero_vert].complex()
one_vert_pos = T.drawing_scale * initial_tet_face.verts_pos[one_vert].complex()
half_pos = 0.5 * (zero_vert_pos + one_vert_pos)
### we must rotate, translate, and scale the cohom fractal picture to fit in the box
if draw_alignment_dots:
T.canv.fill(pyx.path.circle(zero_vert_pos.real, zero_vert_pos.imag, 0.2))
T.canv.fill(pyx.path.circle(half_pos.real, half_pos.imag, 0.15))
T.canv.fill(pyx.path.circle(box_center.real, box_center.imag, 0.1))
T.canv.fill(pyx.path.circle(box_right_midpoint.real, box_right_midpoint.imag, 0.1))
### need to send zero_vert_pos to box_center, and half_pos to box_right_midpoint
# print veering_isosig
out_data.append(veering_isosig)
# print 'tet face', initial_tet_face
out_data.append((initial_tet_face.tet_num, initial_tet_face.face))
picture_unit = one_vert_pos - zero_vert_pos
translation = (box_center - zero_vert_pos)/picture_unit ## need to write this in coord system of one_vert_pos and zero_vert_pos
# print 'translation', [translation.real, translation.imag]
out_data.append((translation.real, translation.imag))
### first rotate and scale, then do parabolicBy2DVector(v)
complex_scale = (box_right_midpoint - box_center)/(half_pos - zero_vert_pos)
# print 'scale, rotate', cmath.polar(complex_scale)
out_data.append(cmath.polar(complex_scale))
# pic_center = zero_vert_pos
# print pyx.unit.tocm(box.right())
# print type(pyx.unit.tocm(box.right()))
# rad = pyx.unit.tocm( max([box.right() - pic_center.real, pic_center.real - box.left(), box.top() - pic_center.imag, pic_center.imag - box.bottom()]) )
### pyx stores lengths in a weird format, we have to convert to cm to get a float out
# T.canv.stroke(pyx.path.rect(pic_center.real - rad, pic_center.imag - rad, 2*rad, 2*rad)) # lower left corner coords, width, height
# right_midpoint = pic_center + complex(rad,0)
# complex_scale = (right_midpoint - pic_center) / (half_pos - pic_center)
# print "complex_scale", complex_scale
### to do:
### 1. remove bits of lightning curve beyond the fund domain DONE
### 2. how to get the data into the cohom frac code DONE
## circles around found vertices
if draw_desired_vertices:
for pos in desired_vertices:
# pos = v.pos.complex()
pos *= T.drawing_scale
T.canv.stroke(pyx.path.circle(pos.real, pos.imag, 0.3), [pyx.style.linewidth(0.1), pyx.color.rgb.green])
out_canvas = pyx.canvas.canvas()
height_offset = 0.0
canvases = [T.canv for T in B.torus_triangulation_list]
for i,c in enumerate(canvases):
out_canvas.insert(c, attrs=[pyx.trafo.translate(-c.bbox().left(), height_offset - c.bbox().bottom())])
height_offset += c.bbox().height() + 0.05 ### add a tiny bit to stop crashes due to line width
out_canvas.writePDFfile(output_filename)
return out_data
def draw_cannon_thurston_from_veering_isosigs_file(veering_isosigs_filename, output_dirname, max_num_tetrahedra = 500, max_length = 0.1, interval_to_draw = None, draw_args = None, build_type = None):
veering_isosigs_list = parse_data_file(veering_isosigs_filename)
if interval_to_draw != None:
to_draw = veering_isosigs_list[interval_to_draw[0]:interval_to_draw[1]]
else:
to_draw = veering_isosigs_list
shapes_data = read_from_pickle('Data/veering_shapes.pkl')
for veering_isosig in to_draw:
print(veering_isosig)
tet_shapes = shapes_data[veering_isosig]
filename = output_dirname + '/' + veering_isosig + '_' + str(max_num_tetrahedra) + '_' + str(max_length) + '_' + build_type + '.pdf'
try:
draw_continent( veering_isosig, tet_shapes, max_num_tetrahedra, max_length, output_filename = filename, draw_args = draw_args, build_type = build_type )
except:
print('failed to draw ' + veering_isosig)
def draw_jigsaw_from_veering_isosigs_list(veering_isosigs_list, output_dirname, jigsaw_data_out_filename = "jigsaw_data.pkl", max_num_tetrahedra = 2000000, max_length = 0.2, draw_args = None):
build_type = 'build_long_and_mid'
shapes_data = read_from_pickle('Data/shapes_jig_no_symm.pkl')
data_for_cohom_fracs = {}
for i, veering_isosig in enumerate(veering_isosigs_list):
# print(veering_isosig)
if i%25 == 0:
print(i)
tet_shapes = shapes_data[veering_isosig]
# print 'tet_shapes', tet_shapes
filename = output_dirname + '/' + veering_isosig + '_' + str(max_num_tetrahedra) + '_' + str(max_length) + '_' + build_type + '.pdf'
expand_fund_dom = True
out = draw_continent( veering_isosig, tet_shapes, max_num_tetrahedra, max_length, output_filename = filename, draw_args = draw_args, build_type = build_type )
if out != False:
data_for_cohom_fracs[out[0]] = out[1:]
output_to_pickle(data_for_cohom_fracs, jigsaw_data_out_filename)
def draw_jigsaw_from_veering_isosigs_file(veering_isosigs_filename, output_dirname, jigsaw_data_out_filename = "jigsaw_data.pkl", max_num_tetrahedra = 2000000, max_length = 0.2, interval_to_draw = None, draw_args = None):
veering_isosigs_list = parse_data_file(veering_isosigs_filename)
if interval_to_draw != None:
to_draw = veering_isosigs_list[interval_to_draw[0]:interval_to_draw[1]]
else:
to_draw = veering_isosigs_list
draw_jigsaw_from_veering_isosigs_list(to_draw, output_dirname, jigsaw_data_out_filename = jigsaw_data_out_filename, max_num_tetrahedra = max_num_tetrahedra, max_length = max_length, draw_args = draw_args)
if __name__ == '__main__':
# draw_args = {'draw_boundary_triangulation':True, 'draw_labels': False, 'only_draw_ladderpoles': True, 'ct_lw': 0.002, 'global_drawing_scale': 4, 'style': 'geometric', 'draw_triangles_near_poles': True, 'ct_depth': -1} #ct_depth is the old way to try to build ct maps
# draw_args = {'draw_boundary_triangulation':True, 'draw_labels': True, 'only_draw_ladderpoles': False, 'ct_lw': 0.02, 'global_drawing_scale': 4, 'style': 'geometric', 'draw_triangles_near_poles': False, 'ct_depth': -1} #ct_depth is the old way to try to build ct maps
draw_args = {'draw_boundary_triangulation':False, 'draw_labels': False, 'only_draw_ladderpoles': True, 'ct_lw': 0.02, 'global_drawing_scale': 4, 'style': 'geometric', 'draw_triangles_near_poles': True, 'ct_depth': -1} #ct_depth is the old way to try to build ct maps
# max_num_tetrahedra = 40
# max_num_tetrahedra = 5000
# max_num_tetrahedra = 50000
# max_num_tetrahedra = 100000
# max_num_tetrahedra = 400000
max_num_tetrahedra = 2000000
# max_length = 0.4
# max_length = 0.3
# max_length = 0.2
# max_length = 0.15
# max_length = 0.14
max_length = 0.1
# max_length = 0.07
# max_length = 0.06
# max_length = 0.05
# max_length = 0.02
# max_length = 0.01
draw_args['ct_lw'] = 0.2 * max_length
# build_type = 'build_naive'
# build_type = 'build_on_coast'
# build_type = 'build_make_long_descendant_edges_internal'
# build_type = 'build_explore_prongs'
build_type = 'build_long_and_mid'
# veering_isosig = 'cPcbbbiht_12'
# # # # veering_isosig = 'cPcbbbdxm_10'
# veering_isosig = 'dLQacccjsnk_200'
# veering_isosig = 'eLMkbcddddedde_2100'
# veering_isosig = 'eLAkaccddjsnak_2001'
# veering_isosig = 'gLAMPbbcdeffdhwqqqj_210202'
# veering_isosig = 'gLLAQbecdfffhhnkqnc_120012'
# veering_isosig = 'iLLLAQccdffgfhhhqgdatgqdm_21012210' ## no symmetry - helps us spot errors
# veering_isosig = 'iLLPwQcccdfehghhhggaahhbg_20102211'
# # veering_isosig = 'jLAwwAQbcbdfghihihhwhnaaxrn_211211021' ## first non geometric
# # veering_isosig = 'nLLwMLPMMkbeefeihjkjlmlmhhaaaektxnaqrs_0111000011220' ### quite big negative shape
# veering_isosig = 'qLvPvvMQQLQkccgkgjkmlknpooppoqjaajqqhhqqaqxhhh_0222110112222211'
# veering_isosig = 'fLLQcbeddeehhnkhh_21112'
# veering_isosig = 'eLAkbbcdddhwqj_2102'
# # # veering_isosig = 'mLvLLLQQQbegikhjiilkllhiardrnnkxeif_120000112222'
# veering_isosig = 'mLvLLMMQQcehfhjlklkjlktilbbjumhtfai_011220220111'
# veering_isosig = 'mLLvLQLQQbeffjglhlkjklxxxjsfqjhhoqo_102210101022'
veering_isosig ='kLLLAPPkcdgfehhjijjhfhaqiphffj_2010222001'
# veering_isosig = 'jLLLLQQbegfhihihihhqakkkkoo_120011211'
# veering_isosig = 'dLQbccchhsj_122'
# veering_isosig = 'mvLALLMQQecfgjkkjiljllccaxvvwkfekix_100001122112'
# veering_isosig = 'mvLALPMPQecfggjgikllklccaxxvcfaqdmo_100001122100'
# veering_isosig = 'nLLLLzAPQkbefgjkjimlllmmxxqhxqubxtivwb_1022101110220'
# shapes_data = read_from_pickle('Data/veering_shapes_up_to_twelve_tetrahedra.pkl')
# shapes_data = read_from_pickle('Data/veering_shapes.pkl')
shapes_data = read_from_pickle('Data/shapes_jig_no_symm.pkl')
tet_shapes = shapes_data[veering_isosig]
# # print tet_shapes
# filename = 'Images/Cannon-Thurston/' + veering_isosig + '_' + str(max_num_tetrahedra) + '_' + str(max_length) + '_' + build_type + '.pdf'
filename = 'Images/Jigsaw/' + veering_isosig + '_' + str(max_num_tetrahedra) + '_' + str(max_length) + '_' + build_type + '.pdf'
# # # draw_continent( veering_isosig, tet_shapes, max_num_tetrahedra, draw_CT_curve = True, draw_lightning_curve = False, draw_landscapes = False, max_length = max_length, output_filename = filename, draw_args = draw_args, build_type = build_type )
# # draw_continent( veering_isosig, tet_shapes, max_num_tetrahedra, draw_CT_curve = True, draw_lightning_curve = True, draw_landscapes = False, draw_box_for_cohom_frac = True, max_length = max_length, output_filename = filename, draw_args = draw_args, build_type = build_type )
# # draw_continent( veering_isosig, tet_shapes, max_num_tetrahedra, draw_CT_curve = False, draw_lightning_curve = True, draw_landscapes = True, draw_box_for_cohom_frac = False, max_length = max_length, output_filename = filename, draw_args = draw_args, build_type = build_type, more = more )
draw_args['draw_CT_curve'] = False
draw_args['draw_lightning_curve'] = True
draw_args['draw_jordan_curve'] = False
draw_args['draw_dividers'] = True
draw_args['draw_landscapes'] = False
draw_args['draw_box_for_cohom_frac'] = False
draw_args['draw_alignment_dots'] = False
draw_args['draw_desired_vertices'] = False
draw_args['expand_fund_dom'] = True
draw_continent( veering_isosig, tet_shapes, max_num_tetrahedra, max_length, output_filename = filename, draw_args = draw_args, build_type = build_type )
## draw many:
start_num = 500
end_num = 502
# draw_cannon_thurston_from_veering_isosigs_file('Data/veering_census.txt', 'Images/Jordan_curve', max_num_tetrahedra = max_num_tetrahedra, max_length = max_length, interval_to_draw = (start_num, end_num), draw_args = draw_args, build_type = build_type)
### jigsaws
# draw_jigsaw_from_veering_isosigs_file('Data/sigs_for_jigs_no_symm.txt', 'Images/Jigsaw', jigsaw_data_out_filename = "jigsaw_data_no_symm.pkl", max_num_tetrahedra = max_num_tetrahedra, max_length = max_length, draw_args = draw_args, interval_to_draw = (start_num, end_num)) # all up through n's is 876. The 281th has trouble developing
# veering_isosigs_list = ['kLLLAPPkcdgfehhjijjhfhaqiphffj_2010222001', 'mvLALLMQQecfgjkkjiljllccaxvvwkfekix_100001122112', 'mvLALPMPQecfggjgikllklccaxxvcfaqdmo_100001122100']
# draw_jigsaw_from_veering_isosigs_list(veering_isosigs_list, 'Images/Jigsaw', jigsaw_data_out_filename = "jigsaw_data.pkl", max_num_tetrahedra = 2000000, max_length = max_length, draw_args = draw_args)
|
from tkinter import *
def doNothing():
print "okay okay I won't"
root = Tk()
optionsbar = Menu(root)
root.config(menu = optionsbar)
fileMenu = Menu(optionsbar, tearoff = 0 )
optionsbar.add_cascade(label = "File", menu = fileMenu)
fileMenu.add_command(label = "New Project", command = doNothing)
fileMenu.add_command(label = "New", command = doNothing)
fileMenu.add_separator()
fileMenu.add_command(label = "Exit", command = doNothing)
editMenu = Menu(optionsbar, tearoff = 0)
optionsbar.add_cascade(label = "Edit", menu = editMenu)
editMenu.add_command(label = "Redo", command = doNothing)
root.mainloop()
|
#!/usr/bin/python
arr = [2,3,4,3,5,4,6,4,6,9,10,9,2,8,7,8,10,7]
dict1={}
for elem in arr:
dict1.setdefault(elem, 0)
dict1[elem] = dict1.get(elem) + 1
for a in dict1.keys():
if dict1.get(a) == 1:
        print(a)
|
# Generated by Django 4.0 on 2021-12-08 14:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project_first_app', '0004_user_alter_owner_first_name'),
]
operations = [
migrations.AlterField(
model_name='owner',
name='first_name',
field=models.CharField(max_length=30),
),
migrations.DeleteModel(
name='User',
),
]
|
'''
This python file implements a wrapper for surface matching (PPF) algorithm in Halcon software
'''
import time
import numpy as np
from scipy.spatial.transform import Rotation as R
'''
Please refer https://pypi.org/project/mvtec-halcon/ for setting up Halcon/Python Interface.
More documentations can be found here: https://www.mvtec.com/products/halcon/documentation
Note that the Halcon software of the corresponding version must be installed
'''
import halcon as ha
class PPFModel():
def __init__(
self, object_model,
ModelSamplingDist = 0.03,
ModelInvertNormals = 'true',
UseViewBased = 'false',
) -> None:
'''
@params
object_model: str or np.ndarray. The path to the ply model file or the array storing model points
XYZ coordinates of the model points should be in milimeter
others: all other arguments are to be used in create_surface_model() in halcon
Refer https://www.mvtec.com/doc/halcon/2105/en/create_surface_model.html for details
'''
# Create the Object3DModel
if type(object_model) is str:
Object3DModel, StatusModel = ha.read_object_model_3d(object_model, 'm', [], [])
elif type(object_model) is np.ndarray:
Object3DModel = ha.gen_object_model_3d_from_points(object_model[:, 0].tolist(), object_model[:, 1].tolist(), object_model[:, 2].tolist())
Object3DModel = ha.surface_normals_object_model_3d(Object3DModel, 'mls', [], [])
else:
raise Exception("Unknown type of object_model:", type(object_model))
self.Object3DModel = Object3DModel
self.ObjectSurfaceModel = ha.create_surface_model(
self.Object3DModel, ModelSamplingDist,
['model_invert_normals', 'train_view_based'],
[ModelInvertNormals, UseViewBased],
)
self.UseViewBased = UseViewBased
def find_surface_model(
self, scene_pc,
MaxOverlapDistRel=0,
NumResult=100,
SceneNormalComputation='mls',
SparsePoseRefinement = 'true',
DensePoseRefinement = 'true',
RefPtRate = 1,
SceneSamplingDist = 0.03,
):
'''
@params
scene_pc: np.ndarray of shape (N, 6), the scene point cloud, in milimeter
others: all other arguments are to be used in find_surface_model() in halcon
Refer https://www.mvtec.com/doc/halcon/2105/en/find_surface_model.html for details
@return
poses_ppf: np.ndarray of shape (NumResult, 4, 4), the estimated poses, in milimeter
scores_ppf: list of length NumResult, the score of each pose given by PPF algorithm
time_ppf: float, the time used by find_surface_model()
'''
Scene3DModel = ha.gen_object_model_3d_from_points(scene_pc[:, 0].tolist(), scene_pc[:, 1].tolist(), scene_pc[:, 2].tolist())
t1 = time.time()
Pose, Score, SurfaceMatchingResultID = ha.find_surface_model(
self.ObjectSurfaceModel, Scene3DModel, SceneSamplingDist, RefPtRate, 0, 'true',
['num_matches', 'use_view_based', 'max_overlap_dist_rel', 'scene_normal_computation', 'sparse_pose_refinement', 'dense_pose_refinement'], \
[NumResult, self.UseViewBased, MaxOverlapDistRel, SceneNormalComputation, SparsePoseRefinement, DensePoseRefinement],
)
t2 = time.time()
poses_raw = np.asarray(Pose).reshape((NumResult, 7))
poses_rot = R.from_euler("XYZ", poses_raw[:, 3:6], degrees=True)
poses_rotmat = poses_rot.as_matrix()
poses_ppf = np.zeros((NumResult, 4, 4))
poses_ppf[:, :3, :3] = poses_rotmat
poses_ppf[:, :3, 3] = poses_raw[:, :3]
poses_ppf[:, 3, 3] = 1
scores_ppf = Score
time_ppf = t2 - t1
return poses_ppf, scores_ppf, time_ppf
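
# Usage sketch (illustrative; not part of the original wrapper). It shows the expected call
# sequence: build a PPFModel from model points, then match a scene cloud. The random point
# counts and NumResult below are assumptions chosen only to demonstrate the API; real model
# and scene clouds (XYZ in millimeters) should be used instead.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    model_points = rng.uniform(-50.0, 50.0, size=(2000, 3))   # model XYZ, millimeters
    scene_points = rng.uniform(-50.0, 50.0, size=(5000, 6))   # scene XYZ (+ extra columns)
    ppf = PPFModel(model_points, ModelSamplingDist=0.03)
    poses, scores, elapsed = ppf.find_surface_model(scene_points, NumResult=10)
    print(poses.shape, len(scores), elapsed)                  # (10, 4, 4) poses, scores, seconds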
|
import math
a = [] # user array
c = []
one = int(1)
zero = int(0)
def perfect_square(x):
return int(math.sqrt(x))**2 == x
def sort_perfect_squares(arr, n):
    # Sort only the perfect squares, keeping them at their original positions
    # among the untouched non-square elements.
    a1 = []  # indices of perfect squares
    a2 = []  # perfect square values
    for i in range(n):
        if perfect_square(arr[i]):
            a1.append(i)
            a2.append(arr[i])
    a2.sort()
    # print(a1)
    # print(a2)
    for i in range(n):
        if not perfect_square(arr[i]):
            c.append(arr[i])
    # print(c)
    n1 = len(a2)
    for i in range(n1):
        c.insert(a1[i], a2[i])
    # print(c)
n = int(input())
a = list(map(int, input().strip().split()))[:n]
# print(a)
sort_perfect_squares(a, n)
# print(perfect_square(21))
print(*c, sep=" ")
|
#
# This script is licensed as public domain.
#
# http://docs.python.org/2/library/struct.html
from xml.etree import ElementTree as ET
from xml.dom import minidom
import os,shutil
import struct
import array
import logging
import bpy
import re
from queue import Queue
from threading import current_thread,main_thread
from math import degrees
from mathutils import Vector
import traceback
from .addon_jsonnodetree import JSONNodetree
# # -----------------------------------------
# # Check if json-nodetree-addon is available
# # -----------------------------------------
# def IsJsonNodeAddonAvailable():
# #jsonNodetreeAvailable = False
# #log = logging.getLogger("ExportLogger")
# jsonNodetreeAvailable = "addon_jsonnodetree" in bpy.context.preferences.addons.keys()
# return jsonNodetreeAvailable
# # -------------------------------------------
# # Check if blender-connect-addon is available
# # -------------------------------------------
# def IsBConnectAddonAvailable():
# bconnectAvailable = "addon_blender_connect" in bpy.context.preferences.addons.keys()
# return bconnectAvailable
# BCONNECT_AVAILABLE = IsBConnectAddonAvailable()
# if BCONNECT_AVAILABLE:
# import addon_blender_connect
# from addon_blender_connect.BConnectNetwork import Publish,StartNetwork,NetworkRunning,AddListener,GetSessionId
log = logging.getLogger("ExportLogger")
def enum(**enums):
return type('Enum', (), enums)
PathType = enum(
ROOT = "ROOT-",
MODELS = "MODE-",
ANIMATIONS = "ANIM-",
TRIGGERS = "TRIG-",
MATERIALS = "MATE-",
TECHNIQUES = "TECH-",
TEXTURES = "TEXT-",
MATLIST = "MATL-",
OBJECTS = "OBJE-",
SCENES = "SCEN-")
# Options for file utils
class FOptions:
def __init__(self):
self.useSubDirs = True
self.fileOverwrite = False
self.paths = {}
self.exts = {
PathType.MODELS : "mdl",
PathType.ANIMATIONS : "ani",
PathType.TRIGGERS : "xml",
PathType.MATERIALS : "xml",
PathType.TECHNIQUES : "xml",
PathType.TEXTURES : "png",
PathType.MATLIST : "txt",
PathType.OBJECTS : "xml",
PathType.SCENES : "xml"
}
self.preserveExtTemp = False
#--------------------
# Errors container
#--------------------
class ErrorsMem:
def __init__(self):
self.errors = {}
self.seconds = []
def Get(self, name, defaultValue = None):
try:
return self.errors[name]
except KeyError:
if defaultValue is not None:
self.errors[name] = defaultValue
return defaultValue
def Delete(self, name):
if name in self.errors:
del self.errors[name]
def Cleanup(self):
emptyList = []
for name in self.errors.keys():
try:
if not self.errors[name]:
emptyList.append(name)
except TypeError:
pass
for name in emptyList:
del self.errors[name]
def Names(self):
return self.errors.keys()
def Second(self, index):
try:
return self.seconds[index]
except IndexError:
return None
def SecondIndex(self, second):
try:
return self.seconds.index(second)
except ValueError:
index = len(self.seconds)
self.seconds.append(second)
return index
def Clear(self):
self.errors.clear()
self.seconds.clear()
#--------------------
# File utilities
#--------------------
# Get a file path for the object 'name' in a folder of type 'pathType'
def GetFilepath(pathType, name, fOptions):
# Get absolute root path
rootPath = bpy.path.abspath(fOptions.paths[PathType.ROOT])
# Remove unnecessary separators and up-level references
rootPath = os.path.normpath(rootPath)
# Append the relative path to get the full path
fullPath = rootPath
if fOptions.useSubDirs:
fullPath = os.path.join(fullPath, fOptions.paths[pathType])
# Compose filename, remove invalid characters
    filename = re.sub(r'[^\w_.)( -]', '_', name)
if type(filename) is list or type(filename) is tuple:
filename = os.path.sep.join(filename)
# Add extension to the filename, if present we can preserve the extension
ext = fOptions.exts[pathType]
if ext and (not fOptions.preserveExtTemp or os.path.extsep not in filename):
filename += os.path.extsep + ext
#filename = bpy.path.ensure_ext(filename, ".mdl")
fOptions.preserveExtTemp = False
# Replace all characters besides A-Z, a-z, 0-9 with '_'
#filename = bpy.path.clean_name(filename)
# Compose the full file path
fileFullPath = os.path.join(fullPath, filename)
# Get the Urho path (relative to root)
fileUrhoPath = os.path.relpath(fileFullPath, rootPath)
fileUrhoPath = fileUrhoPath.replace(os.path.sep, '/')
# Return full file path and relative file path
return (fileFullPath, fileUrhoPath)
# Check if 'filepath' is valid
def CheckFilepath(fileFullPaths, fOptions):
fileFullPath = fileFullPaths
if type(fileFullPaths) is tuple:
fileFullPath = fileFullPaths[0]
# Create the full path if missing
fullPath = os.path.dirname(fileFullPath)
if not os.path.isdir(fullPath):
try:
os.makedirs(fullPath)
log.info( "Created path {:s}".format(fullPath) )
except Exception as e:
log.error("Cannot create path {:s} {:s}".format(fullPath, e))
if os.path.exists(fileFullPath) and not fOptions.fileOverwrite:
log.error( "File already exists {:s}".format(fileFullPath) )
return False
return True
#--------------------
# XML formatters
#--------------------
def FloatToString(value):
return "{:g}".format(value)
def Vector3ToString(vector):
return "{:g} {:g} {:g}".format(vector[0], vector[1], vector[2])
def Vector4ToString(vector):
return "{:g} {:g} {:g} {:g}".format(vector[0], vector[1], vector[2], vector[3])
def XmlToPrettyString(elem):
rough = ET.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough)
pretty = reparsed.toprettyxml(indent="\t")
i = pretty.rfind("?>")
if i >= 0:
pretty = pretty[i+2:]
return pretty.strip()
#--------------------
# XML writers
#--------------------
def ensure_dir(file_path):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
def WriteStringFile(stringContent, filepath, fOptions):
try:
ensure_dir(filepath)
file = open(filepath, "w")
except Exception as e:
log.error("Cannot open file {:s} {:s}".format(filepath, e))
return
try:
file.write(stringContent)
except Exception as e:
log.error("Cannot write to file {:s} {:s}".format(filepath, e))
file.close()
# Write XML to a text file
def WriteXmlFile(xmlContent, filepath, fOptions):
WriteStringFile(XmlToPrettyString(xmlContent),filepath,fOptions)
#--------------------
# Binary writers
#--------------------
class BinaryFileWriter:
# We try to write the file with a single API call to avoid
# the Editor crashing while reading a not completed file.
# We set the buffer to 1Mb (if unspecified is 64Kb, and it is
# 8Kb with multiple file.write calls)
# Constructor.
def __init__(self):
self.filename = None
self.buffer = None
# Open file stream.
def open(self, filename):
self.filename = filename
self.buffer = array.array('B')
return True
def close(self):
try:
file = open(self.filename, "wb", 1024 * 1024)
except Exception as e:
log.error("Cannot open file {:s} {:s}".format(self.filename, e))
return
try:
self.buffer.tofile(file)
except Exception as e:
log.error("Cannot write to file {:s} {:s}".format(self.filename, e))
file.close()
# Writes an ASCII string without terminator
def writeAsciiStr(self, v):
# Non ASCII to '_'
v = re.sub(r'[^\x00-\x7f]', '_', v)
self.buffer.extend(bytes(v, "ascii", errors="ignore"))
# Writes a 32 bits unsigned int
def writeUInt(self, v):
self.buffer.extend(struct.pack("<I", v))
# Writes a 16 bits unsigned int
def writeUShort(self, v):
self.buffer.extend(struct.pack("<H", v))
# Writes one 8 bits unsigned byte
def writeUByte(self, v):
self.buffer.extend(struct.pack("<B", v))
# Writes four 32 bits floats .w .x .y .z
def writeQuaternion(self, v):
self.buffer.extend(struct.pack("<4f", v.w, v.x, v.y, v.z))
# Writes three 32 bits floats .x .y .z
def writeVector3(self, v):
self.buffer.extend(struct.pack("<3f", v.x, v.y, v.z))
# Writes a 32 bits float
def writeFloat(self, v):
self.buffer.extend(struct.pack("<f", v))
# --------------------------
# Hash - Function (like StringHash in Urho3D)
# --------------------------
def SDBMHash(key):
hash = 0
for i in range(len(key)):
hash = ord(key[i]) + (hash << 6) + (hash << 16) - hash
return (hash & 0xFFFFFFFF)
def CalcNodeHash(id):
return SDBMHash(id) % 10000000
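# Note: SDBMHash is deterministic, so a given name always maps to the same 32-bit value
# across exports, and CalcNodeHash folds that value into the range [0, 10**7).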
def getLodSetWithID(id,returnIdx=False):
cnt=0
for lodset in bpy.data.worlds[0].lodsets:
if lodset.lodset_id == id: # good that I'm so consistent with my name *#%&
if returnIdx:
return cnt
else:
return lodset
cnt=cnt+1
#print("COULD NOT FIND LODSET WITH ID:%s"%id)
return None
def getObjectWithID(id):
if id==-1:
return None
for obj in bpy.data.objects:
if obj.ID == id:
return obj
return None
# ---------------
# execution queue
# ---------------
class ExecutionQueue:
def __init__(self):
self.queue = Queue()
def queue_action(self,action):
#print("added queue function(THREAD:%s)" % current_thread().getName())
self.queue.put(action)
#print("..done..")
## execute immediately if called from main-thread, otherwise queue it
def execute_or_queue_action(self,action):
if current_thread() is main_thread():
#print("immediate call")
action()
else:
#print("queued:%s"%current_thread().getName())
self.queue_action(action)
def has_actions(self):
        return not self.queue.empty()
def flush_actions(self):
#print("TRY TO FLUSH EXECUTION ACTIONS: empty?: %s" % self.queue.empty())
while not self.queue.empty():
#print("DO EXECUTION FUNCTION")
# get queued-action...
action = self.queue.get()
# ...and execute it
try:
action()
except ReferenceError:
print("!!Referror!! %s" % str(action));
except Exception:
print("Listener error for ")
print(traceback.format_exc())
execution_queue = ExecutionQueue()
# ----------------
# conversion utils
# ----------------
def vec2dict(vec,convToDeg=False):
result={}
try:
if not convToDeg:
result["x"]=vec.x
result["y"]=vec.y
result["z"]=vec.z
result["w"]=vec.w
else:
result["x"]=degrees(vec.x)
result["y"]=degrees(vec.y)
result["z"]=degrees(vec.z)
result["w"]=degrees(vec.w)
except:
pass
return result
def matrix2dict(matrix,convToDeg=False):
resultmatrix=[]
for vector in matrix:
resultmatrix.append(vec2dict(vector,convToDeg))
return resultmatrix
class PingData:
ping_check_running = False
ping_runtime_timer = 0
ping_runtime_interval = 0.5
ping_count = 0
ping_auto_timer = 0
FOUND_RUNTIME = False
def found_blender_runtime():
global FOUND_RUNTIME
return FOUND_RUNTIME
def set_found_blender_runtime(found=True):
global FOUND_RUNTIME
FOUND_RUNTIME=found
def PingForRuntime():
#print("PPIINNGG for Runtime")
if PingData.ping_check_running:
return
PingData.ping_auto_timer = 10
PingData.ping_check_running = True
#print("Setted:%s" % PingData.ping_check_running)
PingData.ping_runtime_timer = 0
PingData.ping_runtime_interval = 2
PingData.ping_count = 0
set_found_blender_runtime(False)
def copy_file(from_filepath,to_folder,createFolderIfNotPresent=True):
if createFolderIfNotPresent:
from pathlib import Path
Path(to_folder).mkdir(parents=True, exist_ok=True)
shutil.copy(bpy.path.abspath(from_filepath), to_folder)
def PrepareSceneHeaderFile(scene=None):
# store object-data
object_data={}
def get_or_create_objdata(obj):
if obj in object_data:
return object_data[obj]
obj_data={
"name" : obj.name
}
object_data[obj]=obj_data
return obj_data
if not scene:
scene = bpy.context.scene
scene_name = scene.name
all_objects={}
result={}
scenedata=result[scene_name]={}
scene_data = scenedata['scene']={
"name" : scene_name,
"path" : "Scenes/%s.xml" % scene_name
}
objects = scenedata["all_obj"]={}
empties = scenedata["empties"]={}
collections = scenedata["collections"]={}
tags = scenedata["tags"]={}
lights = scenedata["lights"]={}
cameras = scenedata["cameras"]={}
meshobj = scenedata["mesh_objects"]={}
# build data-structure
for obj in scene.objects:
obj_data = get_or_create_objdata(obj)
obj_name = obj.name
        obj_name = re.sub(r'[^\w_.)( -]', '_', obj_name).replace('.', '_')
objects[obj_name]=obj_data
all_objects[obj]=obj_data
if obj.type=="MESH":
meshobj[obj_name]=obj_data
elif obj.type=="LIGHT":
lights[obj_name]=obj_data
elif obj.type=="CAMERA":
cameras[obj_name]=obj_data
elif obj.type=="EMPTY":
empties[obj_name]=obj_data
else:
print("obj-type:%s not categorized" % obj.type)
for col in obj.users_collection:
collection_name = col.name
if collection_name not in collections:
collections[collection_name]={}
collections[collection_name][obj_name]=obj_data
for userdata in obj.user_data:
if userdata.key=="tag":
tag = userdata.value
if tag not in tags:
tags[tag]={}
tags[tag][obj_name]=obj_data
return (result,all_objects)
def PrepareGlobalHeader():
result={}
animations = result["animations"]={}
scenes = result["scenes"]={}
objects = result["objects"]={}
sounds = result["sounds"]={}
particles = result["particles"]={}
models = result["models"]={}
textures = result["textures"]={}
materials = result["materials"]={}
fonts = result["fonts"]={}
postprocess = result["postprocess"]={}
textures["all"]={}
def PrepareDefault(globalDataName,bucket):
try:
for elem in JSONNodetree.globalData[globalDataName]:
res_path = elem["name"]
name = bpy.path.basename(res_path)
name_normalized = re.sub('[_.)( -]|@', '_', name)
if name_normalized[0].isdigit():
name_normalized = "_"+name_normalized
data = {
"name" : os.path.splitext(name)[0],
"path" : res_path
}
bucket[name_normalized]=data
        except Exception:
            print("could not read %s" % globalDataName)
try:
for texture in JSONNodetree.globalData["textures"]:
tex_res_path = texture["name"]
tex_name = bpy.path.basename(tex_res_path)
tex_name_normalized = re.sub('[_.)( -]|@', '_', tex_name)
folder = os.path.dirname(tex_res_path)
data = {
#"name" : os.path.splitext(tex_name)[0],
"path" : tex_res_path
}
textures["all"][tex_name_normalized]=data
current_dict=textures
skip=True # skip first
for f in folder.split('/'):
if skip:
skip=False
continue
if f not in current_dict:
current_dict[f]={}
current_dict = current_dict[f]
if current_dict!=textures:
current_dict[tex_name_normalized]=data
except:
print("could not read textures")
PrepareDefault("animations",animations)
PrepareDefault("scenes",scenes)
PrepareDefault("objects",objects)
PrepareDefault("particles",particles)
PrepareDefault("sounds",sounds)
PrepareDefault("models",models)
PrepareDefault("materials",materials)
PrepareDefault("postprocess",postprocess)
PrepareDefault("fonts",fonts)
return result
def WriteSceneHeaderFile(topic,input,output_path):
def _WriteSceneHeader(input):
current_text=""
for key in input:
value=input[key]
if isinstance(value,dict):
                namespace_name = re.sub(r'[_.)( -]', '_', key)
current_text+="namespace %s {\n%s\n}\n" % (namespace_name,_WriteSceneHeader(value))
elif isinstance(value,int):
current_text+="int %s=%s;\n" % (key,value)
elif isinstance(value,float):
current_text+="float %s=%sf;\n" % (key,value)
elif isinstance(value,str):
current_text+='const char* %s="%s";\n' % (key,value)
else:
print("unsupported type for %s[%s]:%s" % (key,value,type(value)))
return current_text
text="""
#pragma once
namespace res {
namespace %s {
""" % topic
text += _WriteSceneHeader(input)
text+="}}"
print(text)
WriteStringFile(text,output_path,None)
def WriteSceneHeaderFileDotNet(topic,input,output_path):
def _WriteSceneHeader(input):
current_text=""
for key in input:
value=input[key]
if isinstance(value,dict):
                namespace_name = re.sub(r'[_.)( -]', '_', key)
if namespace_name[0].isdigit():
namespace_name="_"+namespace_name
current_text+="public partial class %s {\n%s\n}\n" % (namespace_name,_WriteSceneHeader(value))
elif isinstance(value,int):
current_text+="public const int %s=%s;\n" % (key,value)
elif isinstance(value,float):
current_text+="public const float %s=%sf;\n" % (key,value)
elif isinstance(value,str):
current_text+='public const string %s="%s";\n' % (key,value)
else:
print("unsupported type for %s[%s]:%s" % (key,value,type(value)))
return current_text
text="""
namespace Assets {
namespace %s {
""" % topic
text += _WriteSceneHeader(input)
text+="}}"
print(text)
WriteStringFile(text,output_path,None)
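
# Illustrative sketch of the path helpers above (assumes Blender's Python, since this module
# imports bpy; the '//export' root and 'MyMesh' name are made-up examples):
#
#   fOptions = FOptions()
#   fOptions.paths[PathType.ROOT] = '//export'
#   fOptions.paths[PathType.MODELS] = 'Models'
#   fileFullPath, fileUrhoPath = GetFilepath(PathType.MODELS, 'MyMesh', fOptions)
#   # fileUrhoPath == 'Models/MyMesh.mdl'; fileFullPath is the absolute path under the root
#   if CheckFilepath(fileFullPath, fOptions):
#       WriteStringFile('...', fileFullPath, fOptions)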
|
from django.test import TestCase
from safedelete.utils import get_deleted_or_not_deleted_filters_dictionary
from safedelete import utils
try:
from unittest.mock import patch
except ImportError:
from mock import patch # for python 2 supporting
class TestFiltersDictionary(TestCase):
@patch.object(utils, 'USE_BOOLEAN_FIELD', False)
@patch.object(utils, 'FIELD_NAME', 'deleted')
def test_get_deleted_with_datetime_field(self):
filters = get_deleted_or_not_deleted_filters_dictionary(get_deleted=True)
self.assertEqual(filters, {'deleted__isnull': False})
@patch.object(utils, 'USE_BOOLEAN_FIELD', False)
@patch.object(utils, 'FIELD_NAME', 'deleted')
def test_get_not_deleted_with_datetime_field(self):
filters = get_deleted_or_not_deleted_filters_dictionary(get_deleted=False)
self.assertEqual(filters, {'deleted__isnull': True})
@patch.object(utils, 'USE_BOOLEAN_FIELD', True)
@patch.object(utils, 'BOOLEAN_FIELD_NAME', 'is_deleted')
def test_get_deleted_with_boolean_field(self):
filters = get_deleted_or_not_deleted_filters_dictionary(get_deleted=True)
self.assertEqual(filters, {'is_deleted': True})
@patch.object(utils, 'USE_BOOLEAN_FIELD', True)
@patch.object(utils, 'BOOLEAN_FIELD_NAME', 'is_deleted')
def test_get_not_deleted_with_boolean_field(self):
filters = get_deleted_or_not_deleted_filters_dictionary(get_deleted=False)
self.assertEqual(filters, {'is_deleted': False})
|
import asyncio
from nats import NATS, Msg
async def main():
nc = NATS()
# It is very likely that the demo server will see traffic from clients other than yours.
# To avoid this, start your own locally and modify the example to use it.
# await nc.connect("nats://127.0.0.1:4222")
await nc.connect("nats://demo.nats.io:4222")
async def message_handler(msg: Msg) -> None:
subject = msg.subject
reply = msg.reply
data = msg.data.decode()
print(
"Received a message on '{subject} {reply}': {data}".format(
subject=subject, reply=reply, data=data
)
)
# "*" matches any token, at any level of the subject.
await nc.subscribe("foo.*.baz", cb=message_handler)
await nc.subscribe("foo.bar.*", cb=message_handler)
# ">" matches any length of the tail of a subject, and can only be the last token
# E.g. 'foo.>' will match 'foo.bar', 'foo.bar.baz', 'foo.foo.bar.bax.22'
await nc.subscribe("foo.>", cb=message_handler)
# Matches all of the above.
await nc.publish("foo.bar.baz", b'Hello World')
# Gracefully close the connection.
await nc.drain()
if __name__ == '__main__':
asyncio.run(main())
|
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class LoadDimensionOperator(BaseOperator):
ui_color = '#80BD9E'
@apply_defaults
def __init__(self,
redshift_conn_id="",
table="",
sql="",
update_strategy="", # append, overwrite
*args, **kwargs):
super(LoadDimensionOperator, self).__init__(*args, **kwargs)
self.redshift_conn_id = redshift_conn_id
self.table = table
        self.sql = sql
        self.update_strategy = update_strategy
def execute(self, context):
redshift = PostgresHook(self.redshift_conn_id)
        if self.update_strategy == "overwrite":
            # Update with truncate-first strategy (only for dimension tables)
            query = 'TRUNCATE {}; INSERT INTO {} ({})'.format(self.table, self.table, self.sql)
        elif self.update_strategy == "append":
            query = 'INSERT INTO {} ({})'.format(self.table, self.sql)
        else:
            raise ValueError('Unsupported update_strategy: {}'.format(self.update_strategy))
redshift.run(query)
self.log.info('Success')
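
# Usage sketch (illustrative; the DAG, task id, and SQL below are assumptions, not values
# taken from this project):
#
#   load_user_dim = LoadDimensionOperator(
#       task_id='load_user_dim_table',
#       dag=dag,
#       redshift_conn_id='redshift',
#       table='users',
#       sql='SELECT DISTINCT userid, first_name, last_name FROM staging_events',
#       update_strategy='overwrite',
#   )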
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def countWeight(X, y):
w = np.sum(((X - X.mean()) * (y - y.mean()))) / np.sum((X - X.mean())**2)
return w
def polynomialUni(X, w, degrees):
    # Accumulate the polynomial terms w*X**0 + w*X**1 + ... + w*X**degrees
    y = 0
    for degree in range(degrees + 1):
        y += w * (X ** degree)
    return y
# Importing the dataset
df = pd.read_csv("trainset1-15.csv", header=None)
X = df.iloc[:, 0:-1].values
y = df.iloc[:, -1].values
# Find w and a predicted value
w = countWeight(X, y)
# Predict the value
y_predict = polynomialUni(X, w, 3)
# Visualizing
plt.scatter(X, y, color="yellowgreen", label="Data")
plt.plot(X, y_predict, label="Model")
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.show()
from sklearn.metrics import mean_squared_error
print(mean_squared_error(y, y_predict))
|
import sys
import github.overfl0.stack_overflow_import.stackoverflow as stackoverflow
# Workaround: if the module name starts with 'github.[...]' the github import
# hook will fire for each subpackage
# To prevent that, we rename the module to look like a top level module
stackoverflow.__name__ = 'stackoverflow' # Was: 'github. [...] .stackoverflow'
sys.modules['stackoverflow'] = stackoverflow
from stackoverflow import quicksort
args = [1, 3, 2, 5, 4]
print('Sorting array:', args)
print('Result:', quicksort.quick_sort(None, args))
|
# MIT License
#
# Copyright (c) 2020 Christopher Friedt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''The HanoiState Class
The HanoiState Class encapsulates information for 1 instance of the classic
Towers of Hanoi puzzle.
https://en.wikipedia.org/wiki/Tower_of_Hanoi
The optimal solution to the Towers of Hanoi requires 2^N - 1 moves. Thus, the
time complexity is O(2^N). The memory requirements are O(N), where N is the
number of discs.
Currently a maximum of 64 discs are 'supported'. I say that loosely because
it could take an extremely long amount of time for a single computer to solve
the problem when N is 64.
The way that this object keeps track of the state of the 3 towers is by encoding
each disc as a bit in an unsigned integer. Specifically, disc N is represented by
bit N - 1 of any given tower.
Towers are numbered 0 to 2.
This class itself does not have methods, so it's more like a data aggregate.
To operate on a HanoiState instance, use the Hanoi Class.
'''
from threading import Lock
counterLock = Lock()
counter = 0
class HanoiState(object):
def __init__(self, numberOfDiscs=4, source=0, target=2):
'''Initialize a HanoiState object
Upon initialization, numberOfDiscs discs are placed on
source, other towers are empty, and the number of
moves is set to 0.
:param numberOfDiscs: the number of discs in the game
:param source: the tower from which discs should be moved
:param target: the tower to which discs should be moved
'''
global counter
# check arguments
if numberOfDiscs <= 0 or numberOfDiscs > 64:
raise ValueError(
'numberOfDiscs {} is invalid'.format(numberOfDiscs))
if source < 0 or source > 2:
raise ValueError('source {} is invalid'.format(source))
if target < 0 or target > 2:
raise ValueError('target {} is invalid'.format(target))
if source == target:
raise ValueError('source may not equal target')
counterLock.acquire()
self.id = counter
counter += 1
counterLock.release()
self.numberOfDiscs = numberOfDiscs
self.tower = [0, 0, 0]
self.tower[source] = (1 << numberOfDiscs) - 1
self.source = source
self.target = target
self.numberOfMoves = 0
def to_json(self):
s = ''
s += '{'
s += '"sessionId": {}'.format(self.id) + ', '
s += '"numberOfDiscs": {}'.format(self.numberOfDiscs) + ', '
s += '"fromTower": {}'.format(self.source) + ', '
s += '"toTower": {}'.format(self.target) + ', '
s += '"numberOfMoves": {}'.format(self.numberOfMoves) + ', '
s += '"towers": {}'.format(self.tower)
s += '}'
return s
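
# Small demonstration (illustrative; not part of the original class). It shows the bit
# encoding described in the module docstring: with 4 discs on tower 0, that tower holds
# the value 0b1111 == 15 and the other towers are empty.
if __name__ == '__main__':
    state = HanoiState(numberOfDiscs=4, source=0, target=2)
    assert state.tower == [15, 0, 0]   # disc N is bit N - 1 of its tower
    print(state.to_json())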
|
from flask import Flask, jsonify, request
from flask_cors import CORS
# configuration
DEBUG = True
# instantiate the app
app = Flask(__name__)
app.config.from_object(__name__)
app.config['JSON_SORT_KEYS'] = False #Used to not sort json objects by keys (jsonify) -
# enable CORS
CORS(app, resources={r'/*': {'origins': '*'}})
from route import *
if __name__ == '__main__':
app.run()
|
def julday(month, day, year, hour=12, minute=0, second=0):
'''
NAME:
julday
PURPOSE:
Calculate the Julian Date Number for a given month, day, and year.
Can also take in hours, minutes, and seconds.
INPUTS:
month: Number of the month of the year (1 = jan, ..., 12 = dec)
day: Number of the day of the month.
year: Number of the desired year. Year parameters must be valid
            values from the civil calendar. Years B.C.E. are represented
as negative integers. Years in the common era are represented
as positive integers. In particular, note that there is no year
0 in the civil calendar. 1 B.C.E. (-1) is followed by 1 C.E. (1).
hour: Number of the hour of the day.
minute: Number of the minute of the hour.
second: Number of the second of the minute.
OUTPUTS:
Julian Day Number (which begins at noon) of the specified calendar date is
returned in double precision format.
SIDE EFFECTS:
None.
NOTES:
If a time is not given, the routine defaults to noon.
Adopted from julday.pro; JULDAY is a standard routine in IDL.
This is roughly equivalent to the IDL routine, with slightly
more precision in results.
MODIFICATION HISTORY:
2009-01-06 0.1 Christopher Campo, UCF Initial version
ccampo@gmail.com
'''
import numpy as np
# catches a wrong date input
if month > 12 or month < 1 or day > 31 or day < 1:
raise ValueError('Error: Date does not exist. Check the input...')
# Gregorian to Julian conversion formulae; wikipedia
a = np.floor((14-month)/12.)
y = year + 4800 - a
m = month + (12*a) - 3
jdn = day + np.floor(((153*m) + 2)/5.) + 365*y + np.floor(y/4.)\
- np.floor(y/100.) + np.floor(y/400.) - 32045
jd = jdn + ((hour-12)/24.) + (minute/1440.) + (second/86400.)
return jd
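
# Quick check (illustrative): noon on 2000 January 1 is the standard epoch J2000.0,
# whose Julian Date is 2451545.0.
if __name__ == '__main__':
    assert julday(1, 1, 2000) == 2451545.0
    print(julday(1, 1, 2000, hour=18))   # 2451545.25, a quarter of a day later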
|
## DranoTheCat's Pink and Blue fireworks
##
## Tilt badge left to make lights move to the left
## Tilt badge right to make lights move to the right
## Boop to change speed -- meh, this effect sucked so disabled
##
from random import randrange
import math
import dcfurs
import badge
import utime
### TESTS FOR PC
#from os import system
# Each rainbowLight object keeps track of its own settings
class rainbowLight:
def __init__(self, x, y, r, l):
self.x = x
self.y = y
self.r = r
self.l = l
if randrange(0, 2) >= 1:
self.cr = randrange(200, 255)
self.cg = randrange(0, 64)
self.cb = randrange(200, 255)
else:
self.cr = randrange(0, 64)
self.cg = randrange(200, 255)
self.cb = randrange(200, 255)
self.state = 'appear'
class fireworks:
maxRadius = 2 # Maximum radius for lights
maxCycles = 12 # Maximum cycles lights will stay alive
newDropPct = 90 # Percent chance each cycle for a new droplet to be created
lights = []
def __init__(self):
self.interval=75
self.ivals = [75, 150, 250, 500, 50]
self.cval = 0
self.rows=18
self.columns=7
self.initGrid()
self.createLight()
# def boop(self):
# self.cval += 1
# if self.cval >= len(self.ivals):
# self.cval = 0
# self.interval = self.ivals[self.cval]
def checkButtons(self):
(tx, ty, tz) = badge.imu.filtered_xyz()
if ty < -40: # Tilt Right
for light in self.lights:
if light.x < self.rows:
light.x += 1
else:
light.x = self.rows - 1
elif ty > 40: # Tilt Left
for light in self.lights:
if light.x > 1:
light.x -= 1
else:
light.x = 0
# This method displays a quasi-grid for testing on a CLI
def bogusDisplay(self):
print("------------------------------------------------------------")
for i in range(self.rows):
for j in range(self.columns):
a = hex(self.getGrid(i, j))
print(a, " ", end="")
print("")
print("============================================================")
def createLight(self):
x = randrange(0, self.rows)
y = randrange(0, self.columns)
r = randrange(1, self.maxRadius)
l = randrange(3, self.maxCycles)
self.lights.append(rainbowLight(x, y, r, l))
def render(self, light):
# Appearance effect:
# Draw circle at radius at min brightness
# Then decrement radius, increase brightness, and draw the new circle, until done
# ThrobA effect:
# Draw final circle at r+1 at min brightness
# Find remainder brightness steps for rest of radius; ignore bottom brightness
# Draw circle at radius r/3 + 1 at next brightness level, then r/3 + n until min brightness
# Draw circle at radius r/3 at max brightness
# throbB effect: same as appearance
# Vanish effect:
# 1 - apperance
# 2 - apperance but r -= 1
# 3 - apperance but r -= 2
# ...
#print("ID {} | State: {}".format(light.id, light.state))
if light.state == 'appear':
radius = light.r
while (radius > 0):
self.drawLight(light, radius, (light.r - radius + 1) / light.r)
radius -= 3
light.state = 'throbA'
elif light.state == 'throbA':
radius = light.r
minBright = 1 / light.r
self.drawLight(light, radius, minBright)
radius -= 3
while (radius > 0):
bright = (light.r - radius + 1) / light.r
if bright > 1:
bright = 1
if radius < light.r / 4:
bright = 1
self.drawLight(light, radius, bright)
radius -= 3
light.state = 'throbB'
elif light.state == 'throbB':
radius = light.r + 1
while (radius > 0):
self.drawLight(light, radius, (light.r - radius + 2) / light.r)
radius -= 3
c = self.mkColor(255, 255, 255)
self.setGrid(light.x, light.y, c)
light.state = 'throbA'
elif light.state == 'vanish':
#print("ID: {} | vanish! {}".format(light.id, light.r))
radius = light.r
while (radius > 0):
self.drawLight(light, radius, (light.r - radius + 1) / (light.r + 0 - light.l))
radius -= 3
light.r -= 1
light.l = 1
light.state = 'throbB'
def zfill(self, s, width):
if len(s) < width:
return ("0" * (width - len(s))) + s
else:
return s
def mkColor(self, r, g, b):
br = self.zfill(bin(r)[2:], 8)
bg = self.zfill(bin(g)[2:], 8)
bb = self.zfill(bin(b)[2:], 8)
# print("Color Code: {}, {}, {}".format(br, bg, bb))
return int(br + bg + bb, 2)
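    # e.g. mkColor(255, 0, 0) == 0xFF0000: the packed 24-bit RGB value stored in the grid
    # and later handed to dcfurs.set_pix_rgb in draw().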
def drawLight(self, light, radius, brightness):
#print("drawLight brightness: {}".format(brightness))
# start = utime.ticks_ms()
if brightness < 0.75 and brightness >= 0.35:
brightness = 0.35
elif brightness < 0.25 and brightness >= 0.1:
brightness = 0.1
cr = int(light.cr * brightness + 0.5)
cg = int(light.cg * brightness + 0.5)
cb = int(light.cb * brightness + 0.5)
rr = radius * radius
for x in range(light.x - radius, light.x + 1):
ax = light.x - x
axax = ax * ax
for y in range(light.y - radius, light.y + 1):
# Are we within the radius?
ay = light.y - y
ayay = ay * ay
extra = 0
if radius > 1:
extra = 1
if axax + ayay <= rr + extra:
c = self.mkColor(cr, cg, cb)
self.setGrid(x, y, c)
self.setGrid(light.x + ax, y, c)
self.setGrid(light.x + ax, light.y + ay, c)
self.setGrid(x, light.y + ay, c)
# end = utime.ticks_ms()
# t = end - start
# print("drawLight of radius {} Done in {}".format(radius, t))
def draw(self):
# start = utime.ticks_ms()
# print("start")
self.checkButtons()
self.update()
# dcfurs.clear()
for i in range(self.rows):
for j in range(self.columns):
dcfurs.set_pix_rgb(i, j, self.getGrid(i, j))
# self.bogusDisplay()
if randrange(0, 100) < self.newDropPct:
self.createLight()
# end = utime.ticks_ms()
# t = end - start
# print("done in {}".format(t))
def initGrid(self):
self.grid = [[0 for col in range(self.columns)] for row in range(self.rows)]
return self.grid
def fixColumns(self, j):
if j < 0:
j = 0
elif j >= self.columns:
j = self.columns - 1
return j
def fixRows(self, i):
if i < 0:
i = self.rows + i
elif i >= self.rows:
i = 0 + i % self.rows
return i
def setGrid(self, i, j, value):
i = self.fixRows(i)
j = self.fixColumns(j)
# If we already have a value, let's try to mix the colors...
# if self.getGrid(i, j) > 0:
# ec = self.getGrid(i, j)
# cr = format(ec, '08b')[0:3]
# cg = format(ec, '08b')[3:6]
# cb = format(ec, '08b')[6:8]
# dr = int(cr, 2)
# dg = int(cg, 2)
# db = int(cb, 2)
# value = value
# xr = format(value, '08b')[0:3]
# xg = format(value, '08b')[3:6]
# xb = format(value, '08b')[6:8]
# yr = int(xr, 2)
# yg = int(xg, 2)
# yb = int(xb, 2)
# nr = math.ceil((dr + yr) / 2)
# ng = math.ceil((dg + yg) / 2)
# nb = math.ceil((db + yb) / 2)
# nv = self.mkColor(nr, ng, nb)
# self.grid[i][j] = nv
# else:
self.grid[i][j] = value
def getGrid(self, i, j):
i = self.fixRows(i)
j = self.fixColumns(j)
return self.grid[i][j]
def update(self):
self.initGrid()
if len(self.lights) < 1:
self.createLight()
for light in self.lights:
light.l -= 1
if light.l < 1:
light.state = 'vanish'
if light.r < 1:
self.lights.remove(light)
else:
self.render(light)
|
"""
stoclust.visualization
Contains functions for visualizing data and clusters.
Functions
---------
heatmap(mat,show_x=None,show_y=None,xlabels=None,ylabels=None,layout=None,**kwargs):
Generates a heatmap of a given matrix:
that is, displays the matrix as a table of colored blocks
such that the colors correspond to matrix values.
scatter3D(x,y,z,agg=None,layout=None,show_items=None,**kwargs):
Generates a 3-dimensional scatter plot
of given coordinate vectors; optionally plots
them on separate traces based on an aggregation.
scatter2D(x,y,agg=None,layout=None,show_items=None,**kwargs):
Generates a 2-dimensional scatter plot
of given coordinate vectors; optionally plots
them on separate traces based on an aggregation.
bars(mat,show_x=None,show_y=None,xlabels=None,ylabels=None,layout=None,**kwargs):
Generates a stacked bar plot of a given array of vectors;
the rows index the horizontally separate bars
and the columns index the stack heights.
dendrogram(hier,line=None,layout=None,show_progress=False,**kwargs):
Generates a dendrogram of a hierarchical clustering scheme
in a Plotly Figure. Uses Plotly Shapes to draw
the dendrogram and a scatter plot to
highlight clusters at their branching points.
"""
import numpy as _np
import pandas as _pd
import plotly.graph_objects as _go
from stoclust.Aggregation import Aggregation as _Aggregation
from tqdm import tqdm as _tqdm
def heatmap(mat,show_x=None,show_y=None,xlabels=None,ylabels=None,layout=None,**kwargs):
"""
Generates a heatmap of a given matrix: that is, displays the matrix as a table of colored blocks such that the colors correspond to matrix values.
Arguments
---------
mat : The matrix whose values are being visualized in a heatmap.
Keyword Arguments
-----------------
show_x : An array of the column indices which are to be shown, in the order they should be shown.
show_y : An array of the row indices which are to be shown, in the order they should be shown.
xlabels : An array or group of how the columns should be labeled on the plot.
ylabels : An array or group of how the rows should be labeled on the plot.
layout : A dictionary for updating values for the Plotly Figure layout.
**kwargs : Keyword arguments for the Plotly Heatmap trace.
Output
------
fig : A Plotly Figure containing the heatmap.
"""
if show_x is None:
show_x = _np.arange(mat.shape[1])
if show_y is None:
show_y = _np.arange(mat.shape[0])
if xlabels is None:
xlabels = _np.arange(mat.shape[1])
if ylabels is None:
ylabels = _np.arange(mat.shape[0])
fig = _go.Figure(data=_go.Heatmap(
z=mat[show_y][:,show_x],**kwargs))
fig.update_layout(
xaxis = dict(
tickmode = 'array',
tickvals = _np.arange(len(show_x)),
ticktext = xlabels[show_x]
),
yaxis = dict(
tickmode = 'array',
tickvals = _np.arange(len(show_y)),
ticktext = ylabels[show_y]
),
margin=dict(l=100, r=100, t=20, b=20),
)
if layout is not None:
fig.update_layout(**layout)
return fig
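
# Example sketch for heatmap (illustrative; the random matrix and labels are assumptions):
#
#   import numpy as np
#   fig = heatmap(np.random.rand(3, 4),
#                 xlabels=np.array(['a', 'b', 'c', 'd']),
#                 ylabels=np.array(['x', 'y', 'z']),
#                 layout={'title': 'Demo heatmap'})
#   fig.show()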
def scatter3D(x,y,z,agg=None,layout=None,show_items=None,**kwargs):
"""
Generates a 3-dimensional scatter plot of given coordinate vectors; optionally plots them on separate traces based on an aggregation.
Arguments
---------
x : The x-coordinates of the data points.
y : The y-coordinates of the data points.
z : The z-coordinates of the data points.
Keyword Arguments
-----------------
agg : An Aggregation of the indices of x, y and z.
show_items : A one-dimensional array of which indices of x, y and z are to be shown.
layout : A dictionary for updating values for the Plotly Figure layout.
**kwargs : Keyword arguments for the Plotly Scatter3d trace.
If an attribute is given as a single string or float, will be applied to all data points.
If as an array of length x.shape[0], will be applied separately to each data point.
If an an array of length agg.clusters.__len__(), will be applied separately to each cluster.
Output
------
fig : A Plotly Figure containing the scatter plot.
"""
if agg is None:
agg = _Aggregation(_pd.Index(_np.arange(x.shape[0])),
_pd.Index(_np.array([0])),
{0:_np.arange(x.shape[0])})
specific_keywords = [{} for i in range(agg.clusters.__len__())]
for k,v in kwargs.items():
if hasattr(v, '__len__') and not(isinstance(v,str)):
if len(v)==len(agg.clusters):
for i in range(agg.clusters.__len__()):
specific_keywords[i][k] = v[i]
elif len(v)==len(agg.items):
for i in range(agg.clusters.__len__()):
specific_keywords[i][k] = v[agg._aggregations[i]]
else:
for i in range(agg.clusters.__len__()):
specific_keywords[i][k] = v
if kwargs.get('name',None) is None:
for i in range(agg.clusters.__len__()):
specific_keywords[i]['name'] = str(agg.clusters[i])
fig = _go.Figure(data=[_go.Scatter3d(x=x[agg._aggregations[i]],
y=y[agg._aggregations[i]],
z=z[agg._aggregations[i]],
**(specific_keywords[i]))
for i in range(agg.clusters.__len__())])
if layout is not None:
fig.update_layout(**layout)
return fig
def scatter2D(x,y,agg=None,layout=None,show_items=None,**kwargs):
"""
Generates a 2-dimensional scatter plot of given coordinate vectors; optionally plots them on separate traces based on an aggregation.
Arguments
---------
x : The x-coordinates of the data points.
y : The y-coordinates of the data points.
Keyword Arguments
-----------------
agg : An Aggregation of the indices of x and y.
show_items : A one-dimensional array of which indices of x and y are to be shown.
layout : A dictionary for updating values for the Plotly Figure layout.
**kwargs : Keyword arguments for the Plotly Scatter trace.
If an attribute is given as a single string or float, will be applied to all data points.
If as an array of length x.shape[0], will be applied separately to each data point.
If an an array of length agg.clusters.__len__(), will be applied separately to each cluster.
Output
------
fig : A Plotly Figure containing the scatter plot.
"""
if agg is None:
agg = _Aggregation(_pd.Index(_np.arange(x.shape[0])),
_pd.Index(_np.array([0])),
{0:_np.arange(x.shape[0])})
specific_keywords = [{} for i in range(agg.clusters.__len__())]
for k,v in kwargs.items():
if hasattr(v, '__len__') and not(isinstance(v,str)):
if len(v)==len(agg.clusters):
for i in range(agg.clusters.__len__()):
specific_keywords[i][k] = v[i]
elif len(v)==len(agg.items):
for i in range(agg.clusters.__len__()):
specific_keywords[i][k] = v[agg._aggregations[i]]
else:
for i in range(agg.clusters.__len__()):
specific_keywords[i][k] = v
if kwargs.get('name',None) is None:
for i in range(agg.clusters.__len__()):
specific_keywords[i]['name'] = str(agg.clusters[i])
fig = _go.Figure(data=[_go.Scatter(x=x[agg._aggregations[i]],
y=y[agg._aggregations[i]],
**(specific_keywords[i]))
for i in range(agg.clusters.__len__())])
if layout is not None:
fig.update_layout(**layout)
return fig
def bars(mat,show_x=None,show_y=None,xlabels=None,ylabels=None,layout=None,**kwargs):
"""
Generates a stacked bar plot of a given array of vectors; the rows index the horizontally separate bars and the columns index the stack heights.
Arguments
---------
mat : The matrix whose values are being visualized in a stacked bar plot.
Keyword Arguments
-----------------
show_x : An array of the row indices (horizontally separate bars) which are to be shown, in the order they should be shown.
show_y : An array of the column indices (stacked bars) which are to be shown, in the order they should be shown.
xlabels : An array or group of how the rows should be labeled on the plot.
ylabels : An array or group of how the columns should be labeled on the plot.
layout : A dictionary for updating values for the Plotly Figure layout.
**kwargs : Keyword arguments for the Plotly Bar trace.
If an attribute is given as a single string or float, will be applied to all bars.
If as an array of length mat.shape[1], will be applied separately to each layer of the stack.
Output
------
fig : A Plotly Figure containing the stacked bars.
"""
if show_x is None:
show_x = _np.arange(mat.shape[0])
if show_y is None:
show_y = _np.arange(mat.shape[1])
if xlabels is None:
xlabels = _np.arange(mat.shape[0]).astype(str)
if ylabels is None:
ylabels = _np.arange(mat.shape[1]).astype(str)
specific_keywords = [{} for i in range(mat.shape[1])]
for k,v in kwargs.items():
if hasattr(v, '__len__') and not(isinstance(v,str)):
if isinstance(v,_np.ndarray):
                if len(v.shape) == 2:
                    for i in range(mat.shape[1]):
                        # one column of the 2D array per stacked layer
                        specific_keywords[i][k] = v[:, i]
else:
for i in range(mat.shape[1]):
specific_keywords[i][k] = v[i]
else:
for i in range(mat.shape[1]):
specific_keywords[i][k] = v[i]
else:
for i in range(mat.shape[1]):
specific_keywords[i][k] = v
if kwargs.get('width',None) is None:
for i in range(mat.shape[1]):
specific_keywords[i]['width'] = 1
fig = _go.Figure(data=[
_go.Bar(name=ylabels[o], x=xlabels, y=mat[show_x,o], **specific_keywords[o]) for o in show_y
])
fig.update_layout(barmode='stack',
xaxis = dict(
tickmode = 'array',
tickvals = _np.arange(len(show_x)),
ticktext = (xlabels)[show_x]
),)
if layout is not None:
fig.update_layout(**layout)
return fig
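# Hedged usage sketch (added, not part of the original module): a 3x2 matrix drawn with `bars`
# gives three horizontally separate bars, each stacked from two layers. It assumes numpy is
# available as `_np`, as it is elsewhere in this module; `_bars_demo` is a hypothetical helper.
def _bars_demo():
    mat = _np.array([[1.0, 2.0],
                     [3.0, 1.5],
                     [2.5, 0.5]])
    return bars(mat,
                xlabels=_np.array(["a", "b", "c"]),
                ylabels=_np.array(["low", "high"]))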
def dendrogram(hier,line=None,layout=None,show_progress=False,**kwargs):
"""
Generates a dendrogram of a hierarchical clustering scheme in a Plotly Figure. Uses Plotly Shapes to draw the dendrogram and a scatter plot to highlight clusters at their branching points.
Arguments
---------
hier : A Hierarchy which is to be plotted as a Dendrogram.
Keyword Arguments
-----------------
line : A dict for formatting Plotly shape lines.
        If an attribute is given as a single string or float, it will be applied to all lines.
        If given as an array of length len(hier.clusters), it will be applied separately to the lines immediately beneath each cluster.
layout : A dictionary for updating values for the Plotly Figure layout.
show_progress : Boolean; whether to show a tqdm progress bar as the dendrogram is generated.
**kwargs : Keyword arguments for the Plotly Scatter trace.
        If an attribute is given as a single string or float, it will be applied to all branch points.
        If given as an array of length len(hier.clusters), it will be applied separately to each cluster's branch point.
Output
------
fig : A Plotly Figure containing the dendrogram.
"""
groups = hier.cluster_groups()
x_items = _np.zeros([hier.items.__len__()])
s_max = _np.max(hier._scales)
top_agg = hier.at_scale(s_max)
x_base = 0
x_in_superset = []
for c in range(top_agg.clusters.__len__()):
grp = top_agg._aggregations[c]
n = len(grp)
x_items[grp] = _np.arange(n)+x_base
x_base += n
x_in_superset = x_in_superset + list(top_agg._aggregations[c])
x_in_superset = _np.array(x_in_superset)
x_clusters = _np.zeros([hier.clusters.__len__()])
y_clusters = _np.zeros([hier.clusters.__len__()])
fig = _go.Figure()
lineinfo = [{} for c in range(hier.clusters.__len__())]
if line is None:
for c in range(hier.clusters.__len__()):
lineinfo[c]=dict(
color="RoyalBlue",
width=3)
else:
for k,v in line.items():
if hasattr(v, '__len__') and not(isinstance(v,str)):
for c in range(hier.clusters.__len__()):
lineinfo[c][k] = v[c]
else:
for c in range(hier.clusters.__len__()):
lineinfo[c][k] = v
if show_progress:
clust_iter = _tqdm(range(hier.clusters.__len__()))
else:
clust_iter = range(hier.clusters.__len__())
for c in clust_iter:
x_clusters[c] = _np.average(x_items[hier.items.get_indexer(groups[hier.clusters[c]])])
y_clusters[c] = hier._scales[c]
if len(hier._children[c])>0:
xmin = _np.min(x_clusters[hier._children[c]])
xmax = _np.max(x_clusters[hier._children[c]])
fig.add_shape(
# Line Horizontal
dict(
type="line",
x0=xmin,
y0=y_clusters[c],
x1=xmax,
y1=y_clusters[c],
line=lineinfo[c]
))
for k in hier._children[c]:
fig.add_shape(
# Line Vertical
dict(
type="line",
x0=x_clusters[k],
y0=y_clusters[k],
x1=x_clusters[k],
y1=y_clusters[c],
line=lineinfo[c]
))
    # Pop these from kwargs first so they are not passed to the Scatter trace twice below.
    customdata = kwargs.pop('customdata', None)
    if customdata is None:
        customdata = hier.clusters.to_numpy()
    hovertemplate = kwargs.pop('hovertemplate', None)
    if hovertemplate is None:
        hovertemplate = '<b>ID</b>: %{customdata} <br><b>Scale</b>: %{y} '
fig.add_trace(_go.Scatter(x=x_clusters,y=y_clusters,
mode='markers',
customdata=customdata,
hovertemplate = hovertemplate,**kwargs))
fig.update_layout(
title = kwargs.get('title','Dendrogram'),
margin=dict(l=20, r=20, t=30, b=10),
xaxis_title=kwargs.get('x_axis_label','Items'),
yaxis_title=kwargs.get('y_axis_label','Scale'),
xaxis = dict(
tickmode = 'array',
tickvals = _np.arange(hier.items.__len__()),
ticktext = hier.items[x_in_superset]
))
fig.update_shapes(layer='below')
fig.update_xaxes(showgrid=False,zeroline=False)
fig.update_yaxes(showgrid=False,zeroline=False)
if layout is not None:
fig.update_layout(layout)
return fig
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2020, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
from socket import AF_INET, AF_INET6, SOCK_STREAM
from pytest import fixture, raises
from py2neo.wiring import Wire, Address, WireError
class FakeSocket(object):
def __init__(self, in_packets=(), out_packets=()):
self._in_packets = deque(in_packets)
self._in_buffer = bytearray()
self._out_packets = out_packets
self._closed = False
def settimeout(self, value):
pass
def recv(self, n_bytes, flags=None):
while not self._in_buffer:
try:
data = self._in_packets.popleft()
except IndexError:
return b""
else:
self._in_buffer.extend(data)
value, self._in_buffer = self._in_buffer[:n_bytes], self._in_buffer[n_bytes:]
return value
def send(self, b, flags=None):
if self._closed:
raise OSError("Socket closed")
self._out_packets.append(bytes(b))
return len(b)
def sendall(self, b, flags=None):
if self._closed:
raise OSError("Socket closed")
self._out_packets.append(bytes(b))
def close(self):
self._closed = True
@fixture
def fake_reader():
def reader(packets):
s = FakeSocket(packets)
return Wire(s)
return reader
@fixture
def fake_writer():
def writer(into):
s = FakeSocket(out_packets=into)
return Wire(s)
return writer
class MockSocket(object):
fail_on_connect = False
fail_on_recv = False
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
self.__family = family
self.__type = type
self.__proto = proto
self.__fileno = fileno
self.__timeout = None
self.__peer = None
self.on_connect = None
def settimeout(self, value):
self.__timeout = value
def setsockopt(self, level, optname, value, optlen=None):
pass
def connect(self, address):
if self.fail_on_connect:
raise OSError("Connection refused to %r" % (address,))
else:
self.__peer = address
def getpeername(self):
return self.__peer
def recv(self, bufsize, flags=None):
if self.fail_on_recv:
raise OSError("Connection broken")
else:
raise NotImplementedError
@fixture
def mock_socket(monkeypatch):
monkeypatch.setattr("socket.socket", MockSocket)
return MockSocket
def test_wire_open_simple(mock_socket):
wire = Wire.open(("localhost", 7687))
assert wire.remote_address == ("localhost", 7687)
def test_wire_open_with_keep_alive(mock_socket):
wire = Wire.open(("localhost", 7687), keep_alive=True)
assert wire.remote_address == ("localhost", 7687)
def test_wire_open_with_connect_error(mock_socket):
mock_socket.fail_on_connect = True
try:
with raises(WireError):
_ = Wire.open(("localhost", 7687))
finally:
mock_socket.fail_on_connect = False
def test_wire_read_with_recv_error(mock_socket):
mock_socket.fail_on_recv = True
try:
wire = Wire.open(("localhost", 7687))
with raises(WireError):
_ = wire.read(1)
finally:
mock_socket.fail_on_recv = False
def test_byte_reader_read_when_enough_available(fake_reader):
reader = fake_reader([b"hello, world"])
data = reader.read(12)
assert data == b"hello, world"
def test_byte_reader_read_when_extra_available(fake_reader):
reader = fake_reader([b"hello, world"])
data = reader.read(5)
assert data == b"hello"
def test_byte_reader_read_when_multiple_packets_available(fake_reader):
reader = fake_reader([b"hello, world"])
data = reader.read(12)
assert data == b"hello, world"
def test_byte_reader_read_when_not_enough_available(fake_reader):
reader = fake_reader([b"hello"])
with raises(OSError):
_ = reader.read(12)
def test_byte_writer_write_once(fake_writer):
into = []
writer = fake_writer(into)
writer.write(b"hello, world")
writer.send()
assert into == [b"hello, world"]
def test_byte_writer_write_twice(fake_writer):
into = []
writer = fake_writer(into)
writer.write(b"hello,")
writer.write(b" world")
writer.send()
assert into == [b"hello, world"]
def test_byte_writer_close(fake_writer):
into = []
writer = fake_writer(into)
writer.close()
with raises(OSError):
assert writer.send()
def test_address_parse_ipv4():
parsed = Address.parse("127.0.0.1:7687")
assert parsed.family == AF_INET
assert parsed.host == "127.0.0.1"
assert parsed.port_number == 7687
def test_address_parse_ipv6():
parsed = Address.parse("[::1]:7687")
assert parsed.family == AF_INET6
assert parsed.host == "::1"
assert parsed.port_number == 7687
|
from . import agent_collection
from . import entity
from . import identity
from . import model
from . import time
from . import world
|
"""
Import as:
import helpers.hparquet as hparque
"""
import logging
import os
from typing import Any, List, Optional
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import helpers.hdbg as hdbg
import helpers.hintrospection as hintros
import helpers.hio as hio
import helpers.htimer as htimer
_LOG = logging.getLogger(__name__)
def to_parquet(
df: pd.DataFrame,
file_name: str,
*,
log_level: int = logging.DEBUG,
) -> None:
"""
Save a dataframe as Parquet.
"""
hdbg.dassert_isinstance(df, pd.DataFrame)
hdbg.dassert_isinstance(file_name, str)
hdbg.dassert_file_extension(file_name, ["pq", "parquet"])
#
hio.create_enclosing_dir(file_name, incremental=True)
_LOG.debug("df.shape=%s", str(df.shape))
mem = df.memory_usage().sum()
_LOG.debug("df.memory_usage=%s", hintros.format_size(mem))
# Save data.
with htimer.TimedScope(logging.DEBUG, "To parquet '%s'" % file_name) as ts:
table = pa.Table.from_pandas(df)
pq.write_table(table, file_name)
# Report stats.
file_size = hintros.format_size(os.path.getsize(file_name))
_LOG.log(
log_level,
"Saved '%s' (size=%s, time=%.1fs)",
file_name,
file_size,
ts.elapsed_time,
)
# TODO(gp): What's the difference with read_pq? Maybe we use pandas there,
# while here we use PQ directly with Dataset.
def from_parquet(
file_name: str,
columns: Optional[List[str]] = None,
filters: Optional[List[Any]] = None,
*,
log_level: int = logging.DEBUG,
) -> pd.DataFrame:
"""
Load a dataframe from a Parquet file.
"""
hdbg.dassert_isinstance(file_name, str)
hdbg.dassert_file_extension(file_name, ["pq", "parquet"])
# Load data.
with htimer.TimedScope(logging.DEBUG, "From parquet '%s'" % file_name) as ts:
filesystem = None
dataset = pq.ParquetDataset(
file_name,
filesystem=filesystem,
filters=filters,
use_legacy_dataset=False,
)
        # To also read the index, we need to use `read_pandas()` instead of `read_table()`.
# See https://arrow.apache.org/docs/python/parquet.html#reading-and-writing-single-files.
table = dataset.read_pandas(columns=columns)
df = table.to_pandas()
# Report stats.
file_size = hintros.format_size(os.path.getsize(file_name))
_LOG.log(
log_level,
"Loaded '%s' (size=%s, time=%.1fs)",
file_name,
file_size,
ts.elapsed_time,
)
# Report stats about the df.
_LOG.debug("df.shape=%s", str(df.shape))
mem = df.memory_usage().sum()
_LOG.debug("df.memory_usage=%s", hintros.format_size(mem))
return df
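# Hedged usage sketch (added, not part of the original module): round-trip a small dataframe
# through a Parquet file. The "/tmp/hparquet_demo.pq" path is only an illustration.
if __name__ == "__main__":
    df_demo = pd.DataFrame({"a": [1, 2, 3]})
    to_parquet(df_demo, "/tmp/hparquet_demo.pq")
    df_roundtrip = from_parquet("/tmp/hparquet_demo.pq")
    assert df_demo.equals(df_roundtrip)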
|
from . import scapy_scan, nmap_scan, banner_scan, vulnerability_analysis_menu
__all__ = ["scapy_scan", "nmap_scan", "banner_scan", "vulnerability_analysis_menu"]
|
# Generated by Django 2.0.3 on 2019-06-10 09:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('site', '0024_career'),
]
operations = [
migrations.AlterField(
model_name='career',
name='title',
field=models.CharField(max_length=200),
),
]
|
import board
import busio
import digitalio
import time
import circuitpython_hmac as hmac
import binascii
from adafruit_wiznet5k.adafruit_wiznet5k import *
import adafruit_wiznet5k.adafruit_wiznet5k_socket as socket
from adafruit_wiznet5k.adafruit_wiznet5k_ntp import NTP #NTP library
import adafruit_wiznet5k.adafruit_wiznet5k_dns as dns #DNS library
import adafruit_minimqtt.adafruit_minimqtt as MQTT
def set_password(cal_time):
#set the password
print(cal_time)
decoded_data = binascii.a2b_base64("CCCCCCCCCCCC") #C = device key
#other variables settings
et = str(cal_time)
method = "md5"
res = "products/AAAAAAA/devices/BBBBB" #A = Product ID, B = Device name
version = "2018-10-31"
    #create the signature for the password
secret = decoded_data
msg = et + "\n" + method+ "\n" + res+ "\n" + version
en_msg = msg.encode()
key = hmac.new(secret, msg=en_msg, digestmod="md5").digest()
encoded_data = binascii.b2a_base64(key).strip()
encoded_data = encoded_data.decode("UTF-8")
new_res = res.replace("/","%2F")
    #URL-encode the signature so it can be embedded in the password string
    new_encoded_data = encoded_data
    if new_encoded_data.find("=") >= 0:
        new_encoded_data = new_encoded_data.replace("=", "%3D")
    if new_encoded_data.find("+") >= 0:
        new_encoded_data = new_encoded_data.replace("+", "%2B")
    if new_encoded_data.find("/") >= 0:
        new_encoded_data = new_encoded_data.replace("/", "%2F")
#combine the whole password
login_msg = "version=" + version+ "&res=" + new_res+ "&et=" + et+ "&method=" + method+ "&sign="+ new_encoded_data
return login_msg
### MQTT Setup ###
# MQTT Topic
# Use this topic if you'd like to connect to a standard MQTT broker
# pubs
Monitoring_pub = "$sys/AAAAAAA/BBBBBBBB/thing/property/post" # Subscribe channel for sending information to the platform
Control_pub = "$sys/AAAAAAA/BBBBBBBB/thing/property/set_reply" # Subscribe channel for returning message after received data from platform
#subs
Monitoring_sub = "$sys/AAAAAAA/BBBBBBBB/thing/property/post/reply" #Checking did the platform received message from the device
Control_sub = "$sys/AAAAAAA/BBBBBBBB/thing/property/set" #Receiving data from the platform
#MQTT send msg format
def Monitor_message_setup (cal_time):
cal_time_ms = (cal_time- 28800) *1000
cal_time_ms = str(cal_time_ms)
Monitoring_msg = '{"id": "123","version": "1.0","params": {"Power": {"value": "500","time": '+cal_time_ms+'},"temp": {"value": 20,"time": '+cal_time_ms+'}}}'
return Monitoring_msg
### Message Code ###
# Define callback methods which are called when events occur
# pylint: disable=unused-argument, redefined-outer-name
def message(client, topic, message):
    # Method called when a client's subscribed feed has a new value.
    # print("New message on topic {0}: {1}".format(topic, message))
    # subs and pubs used by this function (same as the ones defined above)
Monitoring_sub = "$sys/AAAAAAA/BBBBBBBB/thing/property/post/reply"
Control_sub = "$sys/AAAAAAA/BBBBBBBB/thing/property/set"
Control_pub = "$sys/AAAAAAA/BBBBBBBB/thing/property/set_reply"
counter = 0
value = 0
if (topic == Monitoring_sub and message.find('"code":200') >= 0 ):
print("New message on topic {0} : {1}".format(topic, message))
if topic == Control_sub:
print("Received message on topic {0} : {1}".format(topic, message))
s_msg = message.split('"id":"')
while value != 1:
if s_msg[1].find('{0}"'.format(counter)) == 0:
print("found value = {0}".format(counter))
value = 1
counter += 1
control_msg = '{"id":"'+str(counter-1)+'","code":200,"msg":"success"}'
print(control_msg)
mqtt_client.publish(Control_pub, control_msg)
#SPI0
SPI0_SCK = board.GP18
SPI0_TX = board.GP19
SPI0_RX = board.GP16
SPI0_CSn = board.GP17
#Reset
W5x00_RSTn = board.GP20
print("Wiznet5k MQTT Test (DHCP)")
# Setup your network configuration below
# random MAC, later should change this value on your vendor ID
MY_MAC = (0x00, 0x01, 0x02, 0x03, 0x04, 0x05)
IP_ADDRESS = (192, 168, 1, 100)
SUBNET_MASK = (255, 255, 255, 0)
GATEWAY_ADDRESS = (192, 168, 1, 1)
DNS_SERVER = (8, 8, 8, 8)
led = digitalio.DigitalInOut(board.GP25)
led.direction = digitalio.Direction.OUTPUT
ethernetRst = digitalio.DigitalInOut(W5x00_RSTn)
ethernetRst.direction = digitalio.Direction.OUTPUT
# For Adafruit Ethernet FeatherWing
cs = digitalio.DigitalInOut(SPI0_CSn)
# For Particle Ethernet FeatherWing
# cs = digitalio.DigitalInOut(board.D5)
spi_bus = busio.SPI(SPI0_SCK, MOSI=SPI0_TX, MISO=SPI0_RX)
# Reset W5500 first
ethernetRst.value = False
time.sleep(1)
ethernetRst.value = True
# # Initialize ethernet interface without DHCP
# eth = WIZNET5K(spi_bus, cs, is_dhcp=False, mac=MY_MAC, debug=False)
# # Set network configuration
# eth.ifconfig = (IP_ADDRESS, SUBNET_MASK, GATEWAY_ADDRESS, DNS_SERVER)
# Initialize ethernet interface with DHCP
eth = WIZNET5K(spi_bus, cs, is_dhcp=True, mac=MY_MAC, debug=False)
print("Chip Version:", eth.chip)
print("MAC Address:", [hex(i) for i in eth.mac_address])
print("My IP address is:", eth.pretty_ip(eth.ip_address))
ntpserver_ip = eth.pretty_ip(eth.get_host_by_name("time.google.com"))
print("NTP : %s" % ntpserver_ip) #DNS Domain
ntp = NTP(iface = eth, ntp_address =ntpserver_ip ,utc=8)
cal_time = ntp.get_time()
cal_time = time.mktime(cal_time) + 300000
login_msg = set_password(cal_time)
# Set up a MQTT Client
# NOTE: We'll need to connect insecurely for ethernet configurations.
mqtt_client = MQTT.MQTT(
broker="218.201.45.7", #MQTT server IP address
port=1883,
username="AAAAAA", #username = product id
password=login_msg, #created by the function -> set_password
client_id="BBBBBB", # client id = device name
is_ssl=False,
socket_pool=None,
ssl_context=None,
keep_alive=60,
)
# Initialize MQTT interface with the ethernet interface
MQTT.set_socket(socket, eth)
# Setup the callback methods above
mqtt_client.on_message = message
# Connect the client to the MQTT broker.
print("Connecting to Broker...")
mqtt_client.connect()
#MQTT Subscriber Run
while True:
mqtt_client.loop()
print("connected")
#send a new message
mqtt_client.subscribe(Monitoring_sub)
mqtt_client.subscribe(Control_sub)
cal_time = ntp.get_time()
Monitoring_msg = Monitor_message_setup(time.mktime(cal_time))
print("Message to topic {0} : {1}".format(Monitoring_pub, Monitoring_msg))
mqtt_client.publish(Monitoring_pub, Monitoring_msg)
time.sleep(2)
#Disconnected
print("Disconnecting from %s" % mqtt_client.broker)
|
"""
Defines the Job Ledger
necessary for jobs being
serviced only once
"""
from collections import deque
__author__: str = "Splice Machine, Inc."
__copyright__: str = "Copyright 2019, Splice Machine Inc. All Rights Reserved"
__credits__: list = ["Amrit Baveja"]
__license__: str = "Proprietary"
__version__: str = "2.0"
__maintainer__: str = "Amrit Baveja"
__email__: str = "abaveja@splicemachine.com"
class JobLedger:
"""
A data structure that fills up
to a maximum size with elements
and then starts deleting oldest elements
to maintain this size.
*We use this to provide a buffer for jobs
that are taking a while (especially if
Splice Machine DB is running slow). We
do not want to EVER service the same job
from multiple threads (as it is probable
to produce a write-write conflict). Thus,
if a Job hasn't been updated to RUNNING
status by the thread in time for the next
poll, the Job ledger will make sure that it
isn't serviced again. Obviously, this task
could be done with a list, but this data
structure is better-- it doesn't require
searching through hundreds of jobs to find
if one has been serviced yet. We use a deque
(implemented as a linked list) because it
has efficient deleting at head: O(1)*
"""
def __init__(self, max_size: int) -> None:
"""
:param max_size: (int) the maximum size of the
list
"""
self.max_size: int = max_size
self.current_size: int = 0 # so we don't need to
# keep finding length to improve performance
self._linked_list: deque = deque()
def record(self, job_id: int) -> None:
"""
Record a new job_id in the job ledger,
maintaining the maximum size
:param job_id: (int) the job id to
record in the list
"""
if self.current_size >= self.max_size:
self._linked_list.popleft()
self.current_size -= 1
self._linked_list.append(job_id)
self.current_size += 1
def __contains__(self, job_id: int) -> bool:
"""
Return whether or not the Job Ledger
contains a given job_id
:param job_id: (int) the job id to check
for the existence of
:return: (bool) whether or not the job ledger
contains the specified job id
"""
return job_id in self._linked_list
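# Hedged usage sketch (added, not part of the original module): a ledger of size 2 forgets
# the oldest job id once a third one is recorded, so only the two most recent ids remain.
if __name__ == "__main__":
    ledger = JobLedger(max_size=2)
    ledger.record(101)
    ledger.record(102)
    ledger.record(103)  # evicts 101, keeping the ledger at its maximum size
    assert 101 not in ledger
    assert 102 in ledger and 103 in ledger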
|
import time
from turtle import *
from random import randint
speed(30)
up()
goto(-140, 140)
for i in range(16):
write(i, align='center') # text
right(90)
fd(10)
down()
fd(150)
up()
bk(160)
left(90)
fd(20)
finish_line = xcor() - 20
print('finish line :', finish_line)
tutle_color = ['red', 'blue', 'green', 'gold']
tutle_list = []
for i in range(len(tutle_color)):
    t = Turtle()  # create a turtle racer
    t.color(tutle_color[i])  # set the racer's color
    t.shape('turtle')  # turtle shape
    t.up()  # pen up
t.goto(-160, 140 - 30 * (i + 1))
t.down()
tutle_list.append(t)
def start_game():
while(True):
for t in tutle_list:
dist = randint(5, 10)
t.fd(dist)
if t.xcor() >= finish_line:
return t
my_game = start_game()
for i in range(1, 10):
my_game.shapesize(i, i)
time.sleep(0.1)
for i in range(18 * 3):
my_game.right(20)
goto(0, 0)
color_name = str(my_game.color()[0])
write('Congratulation ' + color_name, align='center', font=('Arial', 20, 'normal'))
|
from keras.layers.core import Lambda
from keras import backend as K
class RecursiveLoopLayer(Lambda):
"""Class representing RecursiveLoop for keras"""
def __init__(self, maxlend, rnn_size, activation_rnn_size, maxlenh, **kwargs):
super().__init__(self.recursive_loop, **kwargs)
self.rnn_size = rnn_size
self.activation_rnn_size = activation_rnn_size
self.maxlend = maxlend
self.maxlenh = maxlenh
self.supports_masking = True
def compute_mask(self, inputs, mask=None):
return mask[:, self.maxlend:]
def compute_output_shape(self, input_shape):
nb_samples = input_shape[0]
n = 2*(self.rnn_size - self.activation_rnn_size)
return (nb_samples, self.maxlenh, n)
@staticmethod
def recursive_loop(X, mask, n, maxlend, maxlenh):
desc, head = X[:, :maxlend, :], X[:, maxlend:, :]
head_activations, head_words = head[:, :, :n], head[:, :, n:]
desc_activations, desc_words = desc[:, :, :n], desc[:, :, n:]
activation_energies = K.batch_dot(
head_activations, desc_activations, axes=(2, 2))
activation_energies = activation_energies + -1e20 * \
K.expand_dims(1. - K.cast(mask[:, :maxlend], 'float32'), 1)
activation_energies = K.reshape(activation_energies, (-1, maxlend))
activation_weights = K.softmax(activation_energies)
activation_weights = K.reshape(
activation_weights, (-1, maxlenh, maxlend))
desc_avg_word = K.batch_dot(
activation_weights, desc_words, axes=(2, 1))
return K.concatenate((desc_avg_word, head_words))
|
"""
Python 3.6
PyTorch 0.4
"""
import logging
import itertools
import torch
import torch.nn.functional as F
import torch.optim as optim
import utils
from Models.AbstractModel import AbstractModel, Models
class InfoGAN(AbstractModel):
""" InfoGAN
InfoGAN: Interpretable Representation Learning by Information Maximizing Generative Adversarial Nets
"""
# ================ init part ================
def __init__(self, lambda_, **kwargs): # parameters
super().__init__(**kwargs)
self.lambda_ = lambda_
self.model_name = f'InfoGAN_{lambda_}'
self.code_dim = max(self.hidden_dim // 16, 2)
self.init_net_arch()
self.init_optimizer()
logging.debug(f'{self.model_name} initialized.')
def init_net_arch(self, specified_net_arch = None):
        models = Models[self.dataset] if specified_net_arch is None else specified_net_arch
self.net_arch = models.net_arch
self.D = models.Discriminator_InfoGAN(self.hidden_dim, self.code_dim)
self.G = models.Generator(self.hidden_dim, self.tanh)
self.name_model_dict = { 'Discriminator':self.D, 'Generator':self.G }
self.init_net_component(**self.name_model_dict)
def init_optimizer(self):
""" initialize optimizer """
beta1, beta2 = 0.5, 0.99
self.G_optimizer = optim.Adam(self.G.parameters(), lr=self.lr, betas=(beta1, beta2), weight_decay = 0)
self.D_optimizer = optim.Adam(self.D.parameters(), lr=self.lr, betas=(beta1, beta2), weight_decay = 0)
self.info_optimizer = optim.Adam(itertools.chain(self.G.parameters(), self.D.parameters()), lr=self.lr, betas=(beta1, beta2))
utils.print_line()
logging.debug('Use ADAM optimizers for D and G.')
def encode(self, X):
""" encode and decode the samples X
with the encoder and the decoder of the model.
"""
self.D.eval()
X = X.to(self.device)
with torch.no_grad():
_, regressed_code = self.D( X )
return regressed_code.cpu()
# ================ training part ================
def stepTraining(self, batch_x):
this_batch_size = len(batch_x)
batch_x = batch_x.to(self.device)
batch_ones = torch.ones(this_batch_size).to(self.device)
batch_zeros = torch.zeros(this_batch_size).to(self.device)
self.D.train()
with torch.enable_grad():
# ================================== train D ==================================
r""" E_{x \sim P_r} \log D(x) """
D_real, _ = self.D(batch_x)
# loss combines a Sigmoid layer and the BCE loss
D_real_loss = F.binary_cross_entropy_with_logits( D_real, batch_ones)
r""" E_{x \sim P_g} \log ( 1- D(x) ) """
self.G.eval()
z = self.get_noise(this_batch_size)
x_fake = self.G(z).detach()
D_fake, _ = self.D(x_fake)
D_fake_loss = F.binary_cross_entropy_with_logits( D_fake, batch_zeros )
D_loss = D_real_loss + D_fake_loss
self.D_optimizer.zero_grad()
D_loss.backward()
self.D_optimizer.step()
# ================================== train G ==================================
r""" E_{z \sim P_z} \log D( G(z) ) + \lambda E_{c \sim P(c), x \sim G(z,c)}[log Q(c|x)] """
self.G.train()
z = self.get_noise(this_batch_size)
c = z[:, :self.code_dim]
x_fake = self.G(z)
D_fake, regressed_code = self.D(x_fake)
G_loss = F.binary_cross_entropy_with_logits( D_fake, batch_ones )
Info_loss = F.mse_loss(regressed_code, c)
G_Info_loss = G_loss + self.lambda_ * Info_loss
self.info_optimizer.zero_grad()
G_Info_loss.backward()
self.info_optimizer.step()
loss_dict = {'D_real_loss': D_real_loss.item(),
'D_fake_loss': D_fake_loss.item(),
'D_loss': D_loss.item(),
'G_loss': G_loss.item(),
'Info_loss': Info_loss.item(),
}
return loss_dict
|
import httpx
import json
import time
import asyncio
import sys
import os
PROJECT = "bench-functions"
FUNCTION = "place-test"
HTTP_ENDPOINT_HOST = "https://us-central1-{}.cloudfunctions.net/{}"
RESULT_FILE_NAME = os.path.join("results", "{}_size={}.json")
async def async_request(client, orch_name, size):
url = HTTP_ENDPOINT_HOST.format(PROJECT, FUNCTION)
result_file_name = RESULT_FILE_NAME.format(orch_name, size)
start = time.time()
res = await client.get(url)
query_ts = time.time()
try:
body = res.json()
except Exception:
body = res.text
start_ms = start * 1000
end_ms = query_ts * 1000
output = {
# Client times:
'pystart_ms': start_ms,
'pyend_ms': end_ms,
'pyduration_ms': end_ms - start_ms,
# In-func data
'func_data': body
}
with open(result_file_name, 'a') as rf:
rf.write('\n{},'.format(json.dumps(output, indent=4)))
async def main():
client = httpx.AsyncClient(timeout=None,
pool_limits=httpx.PoolLimits(soft_limit=10,
hard_limit=1100))
orch_name = sys.argv[1]
num_itr = int(sys.argv[2])
result_file_name = RESULT_FILE_NAME.format(orch_name, num_itr)
with open(result_file_name, 'w') as rf:
rf.write("")
async_jobs = []
for _ in range(num_itr):
async_jobs.append(async_request(client, orch_name, num_itr))
await asyncio.gather(*async_jobs) # async
if __name__ == "__main__":
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main())
finally:
if not loop.is_closed():
loop.close()
|
# Tests on main client object
import TestHelperSuperClass
import EllucianEthosPythonClient
import base64
import json
import TestingHelper
import queue
class helpers(TestHelperSuperClass.testClassWithHelpers):
pass
@TestHelperSuperClass.wipd
class test_MainClient_poller(helpers):
def test_notAbleToStartPollerTwiceWithoutClose(self):
pollerQueue = queue.Queue()
mockResponse={}
self.ethosClient.mock.registerNextResponse(
reqFnName="get",
url="/consume?limit=20",
data=None,
status_code=200,
contentBytes=base64.b64encode(json.dumps(mockResponse).encode()),
contentHeaders={ "x-remaining": "0"},
ignoreData=True
)
self.ethosClient.startChangeNotificationPollerThread(
loginSession=None,
frequency=60, #number of seconds between fetches
        pageLimit=20, #number of change notifications to get per request
        maxRequests=4, #maximum number of requests to use in each fetch
pollerQueue=pollerQueue
)
with self.assertRaises(EllucianEthosPythonClient.CanNotStartChangeNotificationPollerTwiceException) as context:
self.ethosClient.startChangeNotificationPollerThread(
loginSession=None,
frequency=60, #number of seconds between fetches
        pageLimit=20, #number of change notifications to get per request
        maxRequests=4, #maximum number of requests to use in each fetch
pollerQueue=pollerQueue
)
self.ethosClient.close()
def test_startThenStopPoller(self):
pollerQueue = queue.Queue()
mockResponse={}
self.ethosClient.mock.registerNextResponse(
reqFnName="get",
url="/consume?limit=20",
data=None,
status_code=200,
contentBytes=base64.b64encode(json.dumps(mockResponse).encode()),
contentHeaders={ "x-remaining": "0"},
ignoreData=True
)
self.ethosClient.startChangeNotificationPollerThread(
loginSession=None,
frequency=60, #number of seconds between fetches
        pageLimit=20, #number of change notifications to get per request
        maxRequests=4, #maximum number of requests to use in each fetch
pollerQueue=pollerQueue
)
self.ethosClient.close()
def test_startThenStopPollerFunctionMode(self):
def processSingleMessage(apiClient, messageid, changeNotification):
# in a real example this part would write to file or update a db
print("received ", changeNotification.operation, changeNotification.resourceName, changeNotification.resourceID)
return True
mockResponse={}
self.ethosClient.mock.registerNextResponse(
reqFnName="get",
url="/consume?limit=20&lastProcessedID=123",
data=None,
status_code=200,
contentBytes=base64.b64encode(json.dumps(mockResponse).encode()),
contentHeaders={ "x-remaining": "0"},
ignoreData=True
)
self.ethosClient.startChangeNotificationPollerThreadInFunctionMode(
loginSession=None,
frequency=60, #number of seconds between fetches
        pageLimit=20, #number of change notifications to get per request
        maxRequests=4, #maximum number of requests to use in each fetch
lastProcessedID="123",
messageProcessingFunction=processSingleMessage
)
self.ethosClient.close()
|
import matplotlib.pyplot as plt
import numpy as np
import math
import keras
# Import the Keras API
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import InputLayer, Input
from tensorflow.python.keras.layers import Reshape, MaxPooling2D
from tensorflow.python.keras.layers import Conv2D, Dense, Flatten
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.keras.optimizers import Adam
import keras.callbacks as cb
class LossHistory(cb.Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
batch_loss = logs.get('loss')
self.losses.append(batch_loss)
# Load the data
data = input_data.read_data_sets('data/MNIST/', one_hot=True)
data.test.cls = np.argmax(data.test.labels, axis=1)
# Define the variables
img_size = 28
img_size_flat = img_size * img_size
# Tuple with the height and width of the images, used to reshape arrays.
# This is used to plot the images.
img_shape = (img_size, img_size)
# Tuple with height, width and depth, used to reshape arrays.
# This is used for reshaping in Keras.
img_shape_full = (img_size, img_size, 1)
# Number of color channels for the images: 1 channel for grayscale.
num_channels = 1
# Number of classes, one class for each of the 10 digits.
num_classes = 10
# Build the neural network sequentially
model = Sequential()
# The input is a flattened array with 784 elements (img_size * img_size),
# but the convolutional layers expect images of shape (28, 28, 1), so we reshape.
model.add(Reshape(img_shape_full))
# First convolutional layer with ReLU activation and max-pooling.
model.add(Conv2D(kernel_size=5, strides=1, filters=16, padding='same', activation='relu', name='layer_conv1'))
model.add(MaxPooling2D(pool_size=2, strides=2))
# Second convolutional layer with ReLU activation and max-pooling.
model.add(Conv2D(kernel_size=5, strides=1, filters=36, padding='same', activation='relu', name='layer_conv2'))
model.add(MaxPooling2D(pool_size=2, strides=2))
# Flatten the 4-rank output of the convolutional layers
# to 2-rank so it can be fed into a fully connected layer.
model.add(Flatten())
# First fully connected layer with ReLU activation.
model.add(Dense(128, activation='relu'))
# Last fully connected layer with softmax activation, used for classification.
model.add(Dense(num_classes, activation='softmax'))
# Add the loss function, an optimizer, and the performance metrics
optimizer = Adam(lr=1e-3)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# Training phase
print("Training starts")
history = LossHistory()
model.fit(x=data.train.images, y=data.train.labels, callbacks=[history], epochs=15, batch_size=128)
result = model.evaluate(x=data.train.images, y=data.train.labels)
for name, value in zip(model.metrics_names, result):
print(name, value)
# Evaluate on the test set
print("Evaluating the test set")
result = model.evaluate(x=data.test.images, y=data.test.labels)
for name, value in zip(model.metrics_names, result):
print(name, value)
print("")
model.summary()
plt.switch_backend('agg')
plt.ioff()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(history.losses)
ax.set_title('Batch losses')
plt.show()
fig.savefig('img1.png')
|
__________________________________________________________________________________________________
sample 108 ms submission
class Solution:
def countVowelPermutation(self, n: int) -> int:
# a -> e
# e -> a, i
# i -> a, e, o, u
# o -> i, u
# u -> a
mod = 1000000007
a, e, i, o, u = 1, 1, 1, 1, 1
for _ in range(n - 1):
a, e, i, o, u = (e + i + u) % mod, (a + i) % mod, (e + o) % mod, i, (i + o) % mod
return (a + e + i + o + u) % mod
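# Hedged sanity check (added, not part of the submission): with n = 1 every vowel is a valid
# string on its own, and the transition table above yields exactly 10 valid two-letter strings.
if __name__ == "__main__":
    assert Solution().countVowelPermutation(1) == 5
    assert Solution().countVowelPermutation(2) == 10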
__________________________________________________________________________________________________
sample 160 ms submission
import numpy as np
class Mint:
def __init__(self, value):
self.value = value % (10**9 + 7)
def __add__(self, other):
return Mint(self.value + other.value)
def __mul__(self, other):
return Mint(self.value * other.value)
class Solution:
def countVowelPermutation(self, n: int) -> int:
O = Mint(0)
I = Mint(1)
transition = np.matrix([
[O, I, O, O, O],
[I, O, I, O, O],
[I, I, O, I, I],
[O, O, I, O, I],
[I, O, O, O, O]
], dtype=object)
x = transition**(n - 1)
res = np.sum(x)
return res.value if type(res) != int else res
__________________________________________________________________________________________________
sample 804 ms submission
from functools import lru_cache
m = 10**9 + 7
import sys
sys.setrecursionlimit(10**6)
class Solution:
def countVowelPermutation(self, n: int) -> int:
@lru_cache(None)
def dp(idx, last):
if idx == n: return 1
if last == 'a':
return dp(idx + 1, 'e') % m
if last == 'e':
return (dp(idx + 1, 'a') + dp(idx + 1, 'i')) % m
if last == 'i':
return sum(dp(idx + 1, j) for j in 'aeou') % m
if last == 'o':
return (dp(idx + 1, 'u') + dp(idx + 1, 'i')) % m
if last == 'u':
return dp(idx + 1, 'a') % m
return sum(dp(1, i) for i in 'aeiou') % m
|
class Reaction:
""" A (possibly lifted) reaction """
def __init__(self, language, name, parameters, condition, effect):
self.name = name
self.language = language
self.parameters = parameters
self.condition = condition
self.effect = effect
self._check_well_formed()
def _check_well_formed(self):
pass
def ident(self):
params = ', '.join([str(o) for o in self.parameters])
return '{}({})'.format(self.name, params)
def dump(self):
return dict(name=self.name,
params=[par.dump() for par in self.parameters],
condition=self.condition.dump(),
                    effect=[eff.dump() for eff in self.effect])
def __str__(self):
tokens = ['reaction {}:'.format(self.name),
'cond: ({})'.format(self.condition),
'eff: ({})'.format(self.effect)]
return '\n'.join(tokens)
|
import socket
import pytest
from airflow.utils import timezone
from airflow.utils.db import create_session
from airflow.utils.state import State
from pytest_socket import disable_socket, SocketBlockedError
from tests import mocks
from tests.factories import DAGFactory, PythonOperatorFactory
def pytest_runtest_setup():
"""
This test suite uses pytest-socket which causes tests to fail immediately if
a call to socket.socket is made, or in other words, tries to access the
internet. However, the boto library tries to resolve addresses by calling
socket.getaddrinfo which will make blocking network calls but is not covered
by pytest-socket.
This test setup will cause tests to fail immediately if socket.getaddrinfo
is called.
"""
def block_lookup(*args, **kwargs):
raise SocketBlockedError
disable_socket()
socket.getaddrinfo = block_lookup
@pytest.fixture
def context():
"""
Generic Airflow context fixture that can be passed to a callable that
requires context
"""
with create_session() as session:
dag = DAGFactory()
previous_task = PythonOperatorFactory(task_id='previous_task', dag=dag)
current_task = PythonOperatorFactory(task_id='current_task', dag=dag)
next_task = PythonOperatorFactory(task_id='next_task', dag=dag)
current_task.set_upstream(previous_task)
current_task.set_downstream(next_task)
dag_run = dag.create_dagrun(
run_id="manual__",
start_date=timezone.utcnow(),
execution_date=timezone.utcnow(),
state=State.RUNNING,
conf=None,
session=session
)
ti = dag_run.get_task_instances()[1]
ti.task = current_task
return ti.get_template_context()
@pytest.fixture
def s3_client(mocker):
"""
mocks boto client
"""
return mocker.patch('boto3.client', new_callable=mocks.s3ClientMock)
|
import numpy as np
import pandas as pd
dataset = pd.read_csv('Bd-Rainfall-prediction.csv')
dataset.describe()
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:,-1].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size =.20,random_state=0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier()
# Model training
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
y_prob = classifier.predict_proba(X_test)[:,1]
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test,y_pred))
from sklearn.metrics import classification_report
print(classification_report(y_test,y_pred))
# Make predictions using new data
prediction_1 = classifier.predict(sc.transform(np.array([[13,2.5,2,75,60,5.7,4.8,300]])))
prediction_1_proba = classifier.predict_proba(sc.transform(np.array([[13,2.5,2,75,60,5.7,4.8,300]])))[:,1]
prediction_2 = classifier.predict(sc.transform(np.array([[27,26,1,90,94,4.6,5.6,275]])))
prediction_2_proba = classifier.predict_proba(sc.transform(np.array([[27,26,1,90,94,4.6,5.6,275]])))[:,1]
# Pickling the model and the StandardScaler
import pickle
model_file = "RandomForest.pickle"
pickle.dump(classifier, open(model_file,'wb'))
scaler_file = "scaller.pickle"
pickle.dump(sc, open(scaler_file,'wb'))
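# Hedged usage sketch (added): reload the pickled classifier and scaler for later inference.
loaded_classifier = pickle.load(open(model_file, 'rb'))
loaded_scaler = pickle.load(open(scaler_file, 'rb'))
print(loaded_classifier.predict(loaded_scaler.transform(np.array([[13, 2.5, 2, 75, 60, 5.7, 4.8, 300]]))))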
|
def expand_int_list(input):
items = []
if not input:
return items
parts = input.split(',')
for part in parts:
if part.strip() != '':
try:
if '-' in part:
start_stop = part.split('-')
start = start_stop[0]
stop = start_stop[-1]
for number in range(int(start), int(stop)+1):
items.append(number)
else:
items.append(int(part))
except Exception:
pass
return items
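# Hedged usage sketch (added): ranges and single values may be mixed in one comma-separated string.
if __name__ == "__main__":
    assert expand_int_list("1-3,7") == [1, 2, 3, 7]
    assert expand_int_list("") == []
    assert expand_int_list("4,oops,6") == [4, 6]  # unparsable parts are silently skipped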
|
import datetime
import sqlalchemy
import pytest
from acondbs.db.sa import sa
from acondbs.models import Map
# __________________________________________________________________||
def test_type(app):
'''confirm the type of the date field
'''
with app.app_context():
map = Map.query.filter_by(name='lat20200120').first()
# The type of the field "date_posted" of Map is "datetime.date"
assert isinstance(map.date_posted, datetime.date)
# __________________________________________________________________||
def test_add(app):
'''A simple test of adding an object with a date field
'''
# date_posted needs to be initialized with a datetime.date
date_posted = datetime.date(2019, 2, 23)
map1 = Map(name="map1", date_posted=date_posted)
with app.app_context():
sa.session.add(map1)
sa.session.commit()
with app.app_context():
map1 = Map.query.filter_by(name='map1').first()
assert datetime.date(2019, 2, 23) == map1.date_posted
# __________________________________________________________________||
def test_add_raise(app):
'''A simple test of adding an object with a wrong type
'''
# It is not impossible to instnaiate a date field with a wrong
# type, e.g, str
map1 = Map(name="map1", date_posted="2019-02-13")
with app.app_context():
# It is also possible to add
sa.session.add(map1)
# However, it is not possible to commit
with pytest.raises(sqlalchemy.exc.StatementError):
sa.session.commit()
# __________________________________________________________________||
|
# This is the script without the need of a FFmpeg installation, pure OpenCV
# This is not useful for image processing (eg: find faces) as there will be more lag, around 6 seconds added.
import socket
from time import time
import cv2
import numpy as np
from goprocam import GoProCamera, constants
gpCam = GoProCamera.GoPro()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
t = time()
gpCam.livestream("start")
cap = cv2.VideoCapture("udp://10.5.5.9:8554")
while True:
nmat, frame = cap.read()
cv2.imshow("GoPro OpenCV", frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
if time() - t >= 2.5:
sock.sendto(b"_GPHD_:0:0:2:0.000000\n", ("10.5.5.9", 8554))
t = time()
# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
|
from apkdecompiler import APKDecompile
|
from datetime import datetime
from random import randint
from typing import List
from crypto_package.candles.get_candles import get_candles
from pandas import DataFrame, to_datetime
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from .models import AnalysisResult, Trade
def get_candles_and_plot(exchange: str, pair: str, candle_size: str, time_start: datetime = None,
time_end: datetime = None, last_n_candles: int = None, trades: AnalysisResult = None,
width: int = 1000, height: int = 600):
candles, _ = get_candles(exchange, pair, candle_size, last_n_candles=last_n_candles, time_start=time_start,
time_end=time_end)
# candles = candles.rename(columns={"time": "date"})
# candles["date"] = to_datetime(candles["date"], unit='s')
plot_candles(candles, trades, pair, width, height)
# return fig
return candles
def plot_candles(candles: DataFrame, trades: AnalysisResult = None, pair=None, width: int = 1000, height: int = 650):
if "time" in candles.columns:
candles = candles.rename(columns={"time": "date"})
if type(candles["date"][0]) is not datetime:
candles["date"] = to_datetime(candles["date"], unit='s')
fig = go.Figure()
fig.add_trace(go.Candlestick(
x=candles['date'],
open=candles['open'],
high=candles['high'],
low=candles['low'],
close=candles['close']))
if trades is not None:
res = trades.trades
if pair is not None:
res = [t for t in trades.trades if t.pair == pair]
buy_trades_price = [tr.price for tr in res if tr.is_buy]
buy_trades_time = [tr.timestamp for tr in res if tr.is_buy]
sell_trades_price = [tr.price for tr in res if not tr.is_buy]
sell_trades_time = [tr.timestamp for tr in res if not tr.is_buy]
fig.add_trace(go.Scatter(
x=buy_trades_time,
y=buy_trades_price,
mode='markers',
name='buy trades',
marker_symbol='diamond',
marker=dict(
color='blue',
line_width=2,
size=7,
)
))
fig.add_trace(go.Scatter(
x=sell_trades_time,
y=sell_trades_price,
mode='markers',
name='sell trades',
marker_symbol='square',
marker=dict(
color='yellow',
line_width=2,
size=7
)
))
fig.update_layout(
title="Candles",
xaxis_title="time",
yaxis_title="price",
width=width,
height=height
)
fig.show()
# return fig
# def plot_indicators(indicators_df: DataFrame, indicators: List[str], width: int = 1000,
# height: int = 650, fig_type:str='lines'): # indicators_df contains columns with indicators and column "date" with datetime
# if "time" in indicators_df.columns:
# indicators_df = indicators_df.rename(columns={"time": "date"})
# if type(indicators_df["date"][0]) is not datetime:
# indicators_df["date"] = to_datetime(indicators_df["date"], unit='s')
#
# for ind in indicators:
# fig = go.Figure()
# fig.add_trace(go.Scatter(
# x=indicators_df['date'],
# y=indicators_df[ind],
# mode=fig_type,
# name=ind,
# ))
# fig.update_layout(
# title=ind,
# xaxis_title="time",
# yaxis_title="value",
# width=width,
# height=height
# )
# fig.show()
#
def plot_patterns_on_candles(indicators_df, indicators, trades=None, pair=None, show_signal=False, lines=[],
width: int = 1000,
height: int = 650): # indicators_df contains columns with indicators and column "date" with datetime
if "time" in indicators_df.columns:
indicators_df = indicators_df.rename(columns={"time": "date"})
if type(indicators_df["date"][0]) is not datetime:
indicators_df["date"] = to_datetime(indicators_df["date"], unit='s')
fig = go.Figure()
fig.add_trace(go.Candlestick(
x=indicators_df['date'],
open=indicators_df['open'],
high=indicators_df['high'],
low=indicators_df['low'],
close=indicators_df['close']))
for ind in indicators:
xup = []
xdown = []
yup = []
ydown = []
for idx, row in indicators_df.iterrows():
if row[ind] == 100:
xup.append(row['date'])
yup.append(((row['high'] + row['low']) / 2) + 1.5 * abs(row['high'] - row['low']))
elif row[ind] == -100:
xdown.append(row['date'])
ydown.append(((row['high'] + row['low']) / 2) - 1.5 * abs(row['high'] - row['low']))
color = 'green' if len(indicators) <= 2 else randint(1, 500)
fig.add_trace(go.Scatter(
x=xup,
y=yup,
mode='markers',
marker=dict(
color=color,
line_width=1,
size=8,
),
marker_symbol='triangle-up',
name=ind + " bullish",
))
color = 'red' if len(indicators) <= 2 else randint(1, 500)
fig.add_trace(go.Scatter(
x=xdown,
y=ydown,
mode='markers',
marker=dict(
color=color,
line_width=1,
size=8,
),
marker_symbol='triangle-down',
name=ind + " bearish",
))
for line in lines:
fig.add_trace(go.Scatter(
x=indicators_df["date"],
y=indicators_df[line],
mode='lines',
name=line,
))
if trades is not None:
res = trades.trades
if pair is not None:
res = [t for t in trades.trades if t.pair == pair]
buy_trades_price = [tr.price for tr in res if tr.is_buy]
buy_trades_time = [tr.timestamp for tr in res if tr.is_buy]
sell_trades_price = [tr.price for tr in res if not tr.is_buy]
sell_trades_time = [tr.timestamp for tr in res if not tr.is_buy]
fig.add_trace(go.Scatter(
x=buy_trades_time,
y=buy_trades_price,
mode='markers',
name='buy trades',
marker_symbol='diamond',
marker=dict(
color='blue',
line_width=2,
size=7,
)
))
fig.add_trace(go.Scatter(
x=sell_trades_time,
y=sell_trades_price,
mode='markers',
name='sell trades',
marker_symbol='square',
marker=dict(
color='yellow',
line_width=2,
size=7
)
))
if show_signal:
buy_sig = [a[0] for a in trades.buy_signals]
buy_sig_time = [a[1] for a in trades.buy_signals]
sell_sig = [a[0] for a in trades.sell_signals]
sell_sig_time = [a[1] for a in trades.sell_signals]
fig.add_trace(go.Scatter(
x=buy_sig_time,
y=buy_sig,
mode='markers',
name='buy signal',
marker_symbol='diamond',
marker=dict(
color='lightblue',
line_width=2,
size=7,
)
))
fig.add_trace(go.Scatter(
x=sell_sig_time,
y=sell_sig,
mode='markers',
name='sell signal',
marker_symbol='square',
marker=dict(
color='lightyellow',
line_width=2,
size=7
)
))
fig.update_layout(
title="Indicators on candles",
xaxis_title="time",
yaxis_title="value",
width=width,
height=height
)
fig.show()
def process_plots_info(one_plot, indicators):
rows = 0
indicators_titles = []
for idx, i in enumerate(one_plot):
if i == False:
rows += 1
indicators_titles.append(indicators[idx])
else:
title = indicators_titles.pop() if len(indicators_titles) > 0 else ""
title = title + " + " + str(indicators[idx])
indicators_titles.append(title)
return rows, indicators_titles
def plot_indicators(indicators_df, indicators, trades=None, pair=None, show_signal=False, plot_candles=False,
one_plot=None, width: int = 1000, height: int = 650,
fig_type: str = 'lines'): # indicators_df contains columns with indicators and column "date" with datetime
if "time" in indicators_df.columns:
indicators_df = indicators_df.rename(columns={"time": "date"})
if type(indicators_df["date"][0]) is not datetime:
indicators_df["date"] = to_datetime(indicators_df["date"], unit='s')
rows = 0
indicators_titles = []
if plot_candles or trades is not None:
rows += 1
indicators_titles = ['candles']
if one_plot is not None:
ind_rows, ind_titles = process_plots_info(one_plot, indicators)
rows += ind_rows
indicators_titles += ind_titles
else:
rows += len(indicators)
indicators_titles += [i for i in indicators]
print(indicators_titles)
fig = make_subplots(rows=rows, cols=1,
subplot_titles=indicators_titles,
shared_xaxes=True,
vertical_spacing=0.05,
x_title="time",
y_title="value"
)
ridx = 0
if plot_candles or trades is not None:
ridx = 1
fig.add_trace(go.Candlestick(
x=indicators_df['date'],
open=indicators_df['open'],
high=indicators_df['high'],
low=indicators_df['low'],
close=indicators_df['close']), row=ridx, col=1
)
if trades is not None:
just_trades = trades.trades
if pair is not None:
just_trades = [t for t in trades.trades if t.pair == pair]
            add_trades(fig, just_trades, show_signal, ridx, trades)
for idx, ind in enumerate(indicators):
if one_plot is None or one_plot[idx] == False:
ridx += 1
fig.add_trace(go.Scatter(
x=indicators_df['date'],
y=indicators_df[ind],
name=ind,
line_color='rgb' + str((randint(0, 255), randint(0, 255), randint(0, 255)))
),
row=ridx,
col=1
)
fig.update_layout(
title_text='indicators',
width=width,
height=height,
xaxis_rangeslider_visible=False,
)
fig.show()
def add_trades(fig, res, show_signal, ridx, analysis=None):
buy_trades_price = [tr.price for tr in res if tr.is_buy]
buy_trades_time = [tr.timestamp for tr in res if tr.is_buy]
sell_trades_price = [tr.price for tr in res if not tr.is_buy]
sell_trades_time = [tr.timestamp for tr in res if not tr.is_buy]
fig.add_trace(go.Scatter(
x=buy_trades_time,
y=buy_trades_price,
mode='markers',
name='buy trades',
marker_symbol='diamond',
marker=dict(
color='blue',
line_width=2,
size=7,
)
), row=ridx, col=1)
fig.add_trace(go.Scatter(
x=sell_trades_time,
y=sell_trades_price,
mode='markers',
name='sell trades',
marker_symbol='square',
marker=dict(
color='yellow',
line_width=2,
size=7
)
), row=ridx, col=1)
    if show_signal and analysis is not None:
        buy_sig = [a[0] for a in analysis.buy_signals]
        buy_sig_time = [a[1] for a in analysis.buy_signals]
        sell_sig = [a[0] for a in analysis.sell_signals]
        sell_sig_time = [a[1] for a in analysis.sell_signals]
fig.add_trace(go.Scatter(
x=buy_sig_time,
y=buy_sig,
mode='markers',
name='buy signal',
marker_symbol='diamond',
marker=dict(
color='lightblue',
line_width=2,
size=7,
)
), row=ridx, col=1)
fig.add_trace(go.Scatter(
x=sell_sig_time,
y=sell_sig,
mode='markers',
name='sell signal',
marker_symbol='square',
marker=dict(
color='lightyellow',
line_width=2,
size=7
)
), row=ridx, col=1)
# def plot_indicators_on_candles(indicators_df: DataFrame, indicators: List[str], width: int = 1000,
# height: int = 650): # indicators_df contains columns with indicators and column "date" with datetime
# if "time" in indicators_df.columns:
# indicators_df = indicators_df.rename(columns={"time": "date"})
# if type(indicators_df["date"][0]) is not datetime:
# indicators_df["date"] = to_datetime(indicators_df["date"], unit='s')
#
# fig = go.Figure()
#
# fig.add_trace(go.Candlestick(
# x=indicators_df['date'],
# open=indicators_df['open'],
# high=indicators_df['high'],
# low=indicators_df['low'],
# close=indicators_df['close']))
#
# for ind in indicators:
# fig.add_trace(go.Scatter(
# x=indicators_df['date'],
# y=indicators_df[ind],
# mode='markers',
# marker=dict(
# color=randint(1,500),
# line_width=2,
# size=7,
# ),
# name=ind,
# ))
# fig.update_layout(
# title="Indicators on candles",
# xaxis_title="time",
# yaxis_title="value",
# width=width,
# height=height
# )
# fig.show()
def calculate_profit_from_trades(transactions: List[Trade], start_datetime, end_datetime):
transactions.sort(key=lambda x: x.timestamp)
tr_buy_amount = {}
start = 0
end = len(transactions)
profit = 0
sell_costs = 0
buy_costs = 0
x_v = [start_datetime]
y_v = [profit]
for trade_id in range(start, end):
trade = transactions[trade_id]
results = []
if trade.amount in tr_buy_amount.keys():
results = tr_buy_amount.get(trade.amount)
if trade.is_buy:
if len(results) == 0:
results = [trade]
else:
results.append(trade)
tr_buy_amount.update({trade.amount: results})
else:
oldest_buy = results.pop(0)
if len(results) > 0:
tr_buy_amount.update({trade.amount: results})
else:
tr_buy_amount.pop(trade.amount)
sell_costs += trade.amount * trade.price
buy_costs += trade.amount * oldest_buy.price
profit = (sell_costs - buy_costs) / buy_costs
# profit = ((trade.amount * trade.price) - (trade.amount * oldest_buy.price)) / (
# trade.amount * oldest_buy.price)
y_v.append(profit*100)
x_v.append(trade.timestamp)
x_v.append(end_datetime)
y_v.append(profit*100)
return x_v, y_v
def calculate_bot_profit(transactions: List[Trade], start_datetime, end_datetime, start_amount):
transactions.sort(key=lambda x: x.timestamp)
tr_buy_amount = {}
start = 0
end = len(transactions)
profit = 0
profit_value = 0
x_v = [start_datetime]
y_v = [profit]
for trade_id in range(start, end):
trade = transactions[trade_id]
results = []
if trade.amount in tr_buy_amount.keys():
results = tr_buy_amount.get(trade.amount)
if trade.is_buy:
if len(results) == 0:
results = [trade]
else:
results.append(trade)
tr_buy_amount.update({trade.amount: results})
else:
oldest_buy = results.pop(0)
if len(results) > 0:
tr_buy_amount.update({trade.amount: results})
else:
tr_buy_amount.pop(trade.amount)
profit_value += trade.amount * trade.price - trade.amount * oldest_buy.price
# buy_costs += trade.amount * oldest_buy.price
profit = profit_value / start_amount
# profit = ((trade.amount * trade.price) - (trade.amount * oldest_buy.price)) / (
# trade.amount * oldest_buy.price)
y_v.append(profit*100)
x_v.append(trade.timestamp)
x_v.append(end_datetime)
y_v.append(profit*100)
return x_v, y_v
def plot_profit(trades: AnalysisResult, width: int = 1000, height: int = 650):
x, y = calculate_profit_from_trades(trades.trades, trades.start_datetime, trades.end_datetime)
fig = go.Figure()
fig.add_trace(go.Scatter(
x=x,
y=y,
mode='lines+markers',
name="profit",
))
fig.update_layout(
title='BacktestingBots profit',
xaxis_title="time",
yaxis_title="value [%]",
width=width,
height=height
)
fig.show()
# return fig
def plot_bot_profit(trades: AnalysisResult, start_amount, width: int = 1000, height: int = 650):
x, y = calculate_bot_profit(trades.trades, trades.start_datetime, trades.end_datetime, start_amount)
fig = go.Figure()
fig.add_trace(go.Scatter(
x=x,
y=y,
mode='lines+markers',
name="profit",
))
fig.update_layout(
title='BacktestingBots profit',
xaxis_title="time",
yaxis_title="value [%]",
width=width,
height=height
)
fig.show()
# return fig
def plot_profit_per_pair(trades: AnalysisResult, pairs: List[str] = None, width: int = 1000, height: int = 650):
pair_trades = {}
if pairs is None:
for trade in trades.trades:
uptrades = pair_trades.get(trade.pair) if trade.pair in pair_trades.keys() else []
uptrades.append(trade)
pair_trades.update({trade.pair: uptrades})
else:
for trade in trades.trades:
if trade.pair in pairs:
uptrades = pair_trades.get(trade.pair) if trade.pair in pair_trades.keys() else []
uptrades.append(trade)
pair_trades.update({trade.pair: uptrades})
fig = go.Figure()
for pair, transactions in pair_trades.items():
x, y = calculate_profit_from_trades(transactions, trades.start_datetime, trades.end_datetime)
fig.add_trace(go.Scatter(
x=x,
y=y,
mode='lines+markers',
name=str(pair),
))
calc_pairs = [item for item in pair_trades.keys()]
fig.update_layout(
title='BacktestingBots profit per pair ' + str(calc_pairs),
xaxis_title="time",
yaxis_title="value [%]",
width=width,
height=height
)
fig.show()
|
import numpy as np
from qiskit import BasicAer
from qiskit.quantum_info import Pauli
from qiskit.aqua import QuantumInstance, aqua_globals
from qiskit.aqua.algorithms import NumPyMinimumEigensolver, VQE
from qiskit.circuit.library import TwoLocal
from qiskit.aqua.components.optimizers import SPSA
from qiskit.aqua.operators import WeightedPauliOperator
from qiskit.optimization import QuadraticProgram
from qiskit.optimization.algorithms import MinimumEigenOptimizer
class QuantumOptimizer:
def __init__(self, instance, n, K):
self.instance = instance
self.n = n
self.K = K
def binary_representation(self, x_sol=0):
instance = self.instance
n = self.n
K = self.K
A = np.max(instance) * 100 # A parameter of cost function
# Determine the weights w
instance_vec = instance.reshape(n ** 2)
w_list = [instance_vec[x] for x in range(n ** 2) if instance_vec[x] > 0]
w = np.zeros(n * (n - 1))
for ii in range(len(w_list)):
w[ii] = w_list[ii]
# Some variables I will use
Id_n = np.eye(n)
Im_n_1 = np.ones([n - 1, n - 1])
Iv_n_1 = np.ones(n)
Iv_n_1[0] = 0
Iv_n = np.ones(n - 1)
neg_Iv_n_1 = np.ones(n) - Iv_n_1
v = np.zeros([n, n * (n - 1)])
for ii in range(n):
count = ii - 1
for jj in range(n * (n - 1)):
if jj // (n - 1) == ii:
count = ii
if jj // (n - 1) != ii and jj % (n - 1) == count:
v[ii][jj] = 1.0
vn = np.sum(v[1:], axis=0)
# Q defines the interactions between variables
Q = A * (np.kron(Id_n, Im_n_1) + np.dot(v.T, v))
# g defines the contribution from the individual variables
g = (
w
- 2 * A * (np.kron(Iv_n_1, Iv_n) + vn.T)
- 2 * A * K * (np.kron(neg_Iv_n_1, Iv_n) + v[0].T)
)
# c is the constant offset
c = 2 * A * (n - 1) + 2 * A * (K ** 2)
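        # Taken together, the cost of a binary vector x is the quadratic form
        #     cost(x) = x^T Q x + g^T x + c
        # which is exactly what `fun` below evaluates for a candidate solution.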
try:
max(x_sol)
# Evaluates the cost distance from a binary representation of a path
fun = (
lambda x: np.dot(np.around(x), np.dot(Q, np.around(x)))
+ np.dot(g, np.around(x))
+ c
)
cost = fun(x_sol)
        except (TypeError, ValueError):
            # x_sol was not an iterable solution vector, so there is no cost to evaluate
            cost = 0
return Q, g, c, cost
def construct_problem(self, Q, g, c) -> QuadraticProgram:
n = self.n
qp = QuadraticProgram()
for i in range(n * (n - 1)):
qp.binary_var(str(i))
qp.objective.quadratic = Q
qp.objective.linear = g
qp.objective.constant = c
return qp
def solve_problem(self, qp):
aqua_globals.random_seed = 10598
quantum_instance = QuantumInstance(
BasicAer.get_backend("qasm_simulator"),
seed_simulator=aqua_globals.random_seed,
seed_transpiler=aqua_globals.random_seed,
)
vqe = VQE(quantum_instance=quantum_instance)
optimizer = MinimumEigenOptimizer(min_eigen_solver=vqe)
result = optimizer.solve(qp)
# compute cost of the obtained result
_, _, _, level = self.binary_representation(x_sol=result.x)
return result.x, level
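# Hedged usage sketch, not part of the original file: build a small random symmetric
# cost matrix (an assumption about the expected n x n input) and drive the optimizer
# end to end. Values for n and K are illustrative only.
if __name__ == "__main__":
    n, K = 3, 2  # 3 nodes, 2 vehicles
    rng = np.random.default_rng(42)
    xc, yc = rng.uniform(-5, 5, n), rng.uniform(-5, 5, n)
    instance = np.zeros((n, n))
    for i in range(n):
        for j in range(i + 1, n):
            instance[i, j] = (xc[i] - xc[j]) ** 2 + (yc[i] - yc[j]) ** 2
            instance[j, i] = instance[i, j]
    qopt = QuantumOptimizer(instance, n, K)
    Q, g, c, _ = qopt.binary_representation()
    qp = qopt.construct_problem(Q, g, c)
    solution, cost = qopt.solve_problem(qp)
    print("solution:", solution, "cost:", cost)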
|
# Each transformer has its own weight; store the weights in a dictionary
# transformersWeight = { "Оптімус": 5000, "Бамблбі": 2500, "Джаз": 3000 }
# What is the total weight of all the transformers in the dictionary?
transformersWeight = { "Оптімус": 5000, "Бамблбі": 2500, "Джаз": 3000 }
total_weight = 0
for value in transformersWeight.values():
    total_weight += value
print("Total weight:", total_weight)
|
"""
Enables programmatic access to the most recent docker images
"""
import pkg_resources
import armory
USER = "twosixarmory"
TAG = armory.__version__
TF1 = f"{USER}/tf1:{TAG}"
TF2 = f"{USER}/tf2:{TAG}"
PYTORCH = f"{USER}/pytorch:{TAG}"
ALL = (
TF1,
TF2,
PYTORCH,
)
ARMORY_BASE = f"{USER}/armory:{TAG}"
TF1_BASE = f"{USER}/tf1-base:{TAG}"
TF2_BASE = f"{USER}/tf2-base:{TAG}"
PYTORCH_BASE = f"{USER}/pytorch-base:{TAG}"
BASES = (
ARMORY_BASE,
TF1_BASE,
TF2_BASE,
PYTORCH_BASE,
)
REPOSITORIES = tuple(x.split(":")[0] for x in (ALL + BASES))
def parse_version(tag):
"""
Return PEP 440 version for given version tag
"""
if not isinstance(tag, str):
raise ValueError(f"tag is a {type(tag)}, not a str")
if tag.endswith(armory.DEV):
numeric_tag = tag[: -len(armory.DEV)]
else:
numeric_tag = tag
if len(numeric_tag.split(".")) != 3:
raise ValueError(f"tag {tag} must be of form 'major.minor.patch[-dev]'")
version = pkg_resources.parse_version(tag)
if not isinstance(version, pkg_resources.extern.packaging.version.Version):
raise ValueError(f"tag {tag} parses to type {type(version)}, not Version")
return version
VERSION = parse_version(armory.__version__)
def is_old(tag: str):
"""
    Return True if tag is an old armory container, False otherwise.
    If the current version is dev, only return True for old "-dev" containers.
"""
if not isinstance(tag, str):
raise ValueError(f"tag must be of type str, not type {type(tag)}")
if tag in ALL:
return False
tokens = tag.split(":")
if len(tokens) != 2:
return False
repo, tag = tokens
if repo in REPOSITORIES:
try:
other = parse_version(tag)
if other < VERSION:
# return True if both prerelease or both not prerelease
return not (other.is_prerelease ^ VERSION.is_prerelease)
except (AttributeError, ValueError):
# Catch empty tag and tag parsing errors
pass
return False
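# Hedged usage sketch (not part of the original module); the version strings below
# are illustrative and assume the usual "repo:major.minor.patch[-dev]" tag layout.
if __name__ == "__main__":
    print(parse_version("0.13.0"))       # parses to a PEP 440 Version
    print(is_old(f"{USER}/tf1:0.1.0"))   # True only if 0.1.0 is older and matches dev/non-dev status
    print(is_old(TF1))                   # False: this is the current-version container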
|
#! Assignment 2
#! Previously in 1_notmnist.ipynb,
#! we created a pickle with formatted datasets for training, development and testing on the notMNIST dataset.
#! The goal of this assignment is to progressively train deeper and more accurate models using TensorFlow.
#! These are all the modules we'll be using later.
#! Make sure you can import them before proceeding further.
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
#! First reload the data we generated in `1_notmnist.ipynb`.
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
#! Reformat into a shape that's more adapted to the models we're going to train:
#! * data as a flat matrix,
#! * labels as float 1-hot encodings.
image_size = 28
num_labels = 10
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
# Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
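#! For example (sketch): an (N, 28, 28) image batch becomes an (N, 784) float matrix,
#! and an integer label such as 3 becomes the one-hot row [0, 0, 0, 1, 0, 0, 0, 0, 0, 0].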
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
#! We're first going to train a multinomial logistic regression using simple gradient descent.
#! TensorFlow works like this:
#! * First you describe the computation that you want to see performed: what the inputs, the variables,
#! and the operations look like. These get created as nodes over a computation graph.
#! This description is all contained within the block below:
#! with graph.as_default():
#! ...
#! * Then you can run the operations on this graph as many times as you want by calling session.run(),
#!   providing the outputs you want fetched from the graph; those fetched values are returned.
#! This runtime operation is all contained in the block below:
#! with tf.Session(graph=graph) as session:
#! ...
#! Let's load all the data into TensorFlow and build the computation graph corresponding to our training:
#! With gradient descent training, even this much data is prohibitive.
#! Subset the training data for faster turnaround.
train_subset = 10000
graph = tf.Graph()
with graph.as_default():
#! Input data.
#! Load the training, validation and test data into constants that are
#! attached to the graph.
tf_train_dataset = tf.constant(train_dataset[:train_subset, :])
tf_train_labels = tf.constant(train_labels[:train_subset])
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
#! Variables.
#! These are the parameters that we are going to be training.
#! The weight matrix will be initialized using random values following a (truncated) normal distribution.
#! The biases get initialized to zero.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
#! Training computation.
#! We multiply the inputs with the weight matrix, and add biases. We compute
#! the softmax and cross-entropy (it's one operation in TensorFlow, because
#! it's very common, and it can be optimized). We take the average of this
#! cross-entropy across all training examples: that's our loss.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
#! Optimizer.
#! We are going to find the minimum of this loss using gradient descent.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
#! Predictions for the training, validation, and test data.
#! These are not part of training,
#! but merely here so that we can report accuracy figures as we train.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
#! Let's run this computation and iterate:
num_steps = 801
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
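#! Worked example (sketch): with predictions [[0.1, 0.9], [0.8, 0.2]] and one-hot
#! labels [[0, 1], [1, 0]], both argmax rows agree, so accuracy(...) returns 100.0.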
with tf.Session(graph=graph) as session:
#! This is a one-time operation which ensures the parameters get initialized as
#! we described in the graph: random weights for the matrix, zeros for the
#! biases.
tf.global_variables_initializer().run()
print('Initialized')
for step in range(num_steps):
#! Run the computations. We tell .run() that we want to run the optimizer,
#! and get the loss value and the training predictions returned as numpy
#! arrays.
_, l, predictions = session.run([optimizer, loss, train_prediction])
if (step % 100 == 0):
print('Loss at step %d: %f' % (step, l))
print('Training accuracy: %.1f%%' % accuracy(
predictions, train_labels[:train_subset, :]))
#! Calling .eval() on valid_prediction is basically like calling run(), but
#! just to get that one numpy array. Note that it recomputes all its graph
#! dependencies.
print('Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
#! Let's now switch to stochastic gradient descent training instead, which is much faster.
#! The graph will be similar, except that instead of holding all the training data into a constant node,
#! we create a Placeholder node which will be fed actual data at every call of session.run().
batch_size = 128
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
#!Let's run it:
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print("Initialized")
for step in range(num_steps):
#! Pick an offset within the training data, which has been randomized.
#! Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
#! Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
#! Prepare a dictionary telling the session where to feed the minibatch.
#! The key of the dictionary is the placeholder node of the graph to be fed,
#! and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
#! Problem
#! Turn the logistic regression example with SGD into a 1-hidden-layer neural network
#! with rectified linear units nn.relu() and 1024 hidden nodes.
#! This model should improve your validation / test accuracy.
batch_size = 128
hidden_layer_nodes = 1024
graph = tf.Graph()
with graph.as_default():
    # Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights_1 = tf.Variable(
tf.truncated_normal([image_size * image_size, hidden_layer_nodes]))
biases_1 = tf.Variable(tf.zeros([hidden_layer_nodes]))
weights_2 = tf.Variable(
tf.truncated_normal([hidden_layer_nodes, num_labels]))
biases_2 = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits_1 = tf.matmul(tf_train_dataset, weights_1) + biases_1
hidden_1 = tf.nn.relu(logits_1)
logits_2 = tf.matmul(hidden_1,weights_2)+biases_2
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits_2))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits_2)
valid_prediction = tf.nn.softmax(
tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset,weights_1)+biases_1),weights_2)+biases_2)
test_prediction = tf.nn.softmax(
tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset,weights_1)+biases_1),weights_2)+biases_2)
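#! The network above is 784 inputs -> 1024 ReLU hidden units -> 10 softmax outputs;
#! it is trained with the same cross-entropy loss and plain gradient descent as before.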
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print("Initialized, Using Neural Network")
for step in range(num_steps):
#! Pick an offset within the training data, which has been randomized.
#! Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
#! Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
#! Prepare a dictionary telling the session where to feed the minibatch.
#! The key of the dictionary is the placeholder node of the graph to be fed,
#! and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
|