import config
from app import make_app
import pytest
from sqlalchemy import create_engine, MetaData
@pytest.fixture
def app():
create_db(config.SQLA_URI)
autoapi_app = make_app()
yield autoapi_app
drop_db(config.SQLA_URI)
def create_db(sqlalchemy_uri):
CREATE_TABLE_SQL = """
CREATE TABLE people (
id int primary key,
name text,
dob date,
number_of_pets int)
"""
INSERT_TABLE_SQL = """
INSERT INTO people (id, name, dob, number_of_pets)
VALUES (?, ?, ?, ?)
"""
    engine = create_engine(sqlalchemy_uri)
connection = engine.connect()
connection.execute(CREATE_TABLE_SQL)
connection.execute(INSERT_TABLE_SQL, 1, 'Tom', '1980-02-26', 0)
connection.execute(INSERT_TABLE_SQL, 2, 'Dick', '1982-03-14', 3)
connection.execute(INSERT_TABLE_SQL, 3, 'Harry', '1972-11-24', 2)
def drop_db(sqlalchemy_uri):
    engine = create_engine(sqlalchemy_uri)
meta = MetaData(bind=engine)
meta.reflect()
meta.drop_all()
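# A minimal sketch of a test that consumes the fixture above (assumptions: the
# autoapi app exposes a Flask-style test_client() and serves the people table at
# /people; adjust to the real application's API surface).
def test_people_endpoint(app):
    client = app.test_client()
    response = client.get('/people')
    assert response.status_code == 200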
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-23 15:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Rule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=32)),
('slug', models.SlugField(max_length=128)),
('mode', models.IntegerField(choices=[(0, 'darf nicht'), (1, 'kann'), (2, 'muss')])),
('description', models.CharField(max_length=2048)),
('rule', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Sentence',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=2048)),
('comma_select', models.CommaSeparatedIntegerField(default='0', max_length=255)),
                ('total_submits', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='SentenceRule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.IntegerField()),
('rule', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trainer.Rule')),
('sentence', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trainer.Sentence')),
],
),
migrations.CreateModel(
name='Solution',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.CharField(max_length=255)),
('solution', models.BigIntegerField()),
('sentence', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trainer.Sentence')),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.CharField(max_length=255)),
('user_rank', models.IntegerField(choices=[(0, 'Kommachaot'), (1, 'Kommakönner'), (2, 'Kommakommandant'), (3, 'Kommakönig')], default=0)),
('comma_type_false', models.CharField(default='A1:0/0, A2:0/0, A3:0/0, A4:0/0, B1.1:0/0, B1.2:0/0, B1.3:0/0, B1.4.1:0/0, B1.4.2:0/0, B1.5:0/0, B2.1:0/0, B2.2:0/0, B2.3:0/0, B2.4.1:0/0, B2.4.2:0/0, B2.5:0/0, C1:0/0, C2:0/0, C3.1:0/0, C3.2:0/0, C4.1:0/0, C4.2:0/0, C5:0/0, C6.1:0/0, C6.2:0/0, C6.3.1:0/0, C6.3.2:0/0, C6.4:0/0, C7:0/0, C8:0/0, D1:0/0, D2:0/0, D3:0/0, E1:0/0', max_length=400)),
],
),
migrations.AddField(
model_name='sentence',
name='rules',
field=models.ManyToManyField(through='trainer.SentenceRule', to='trainer.Rule'),
),
]
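# A partial sketch of the trainer/models.py that this initial migration corresponds to,
# reconstructed from the operations above for reference only (assumption: the real models
# carry additional methods and Meta options; Solution and User follow the same pattern).
from django.db import models


class Rule(models.Model):
    code = models.CharField(max_length=32)
    slug = models.SlugField(max_length=128)
    mode = models.IntegerField(choices=[(0, 'darf nicht'), (1, 'kann'), (2, 'muss')])
    description = models.CharField(max_length=2048)
    rule = models.CharField(max_length=255)


class Sentence(models.Model):
    text = models.CharField(max_length=2048)
    comma_select = models.CommaSeparatedIntegerField(default='0', max_length=255)
    total_submits = models.IntegerField(default=0)
    rules = models.ManyToManyField(Rule, through='SentenceRule')


class SentenceRule(models.Model):
    rule = models.ForeignKey(Rule, on_delete=models.CASCADE)
    sentence = models.ForeignKey(Sentence, on_delete=models.CASCADE)
    position = models.IntegerField()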
|
output_file = None
indent = 0
assign_operators = ['=', '*=', '/=', '%=', '+=', '-=', '<<=', '>>=', '&=', '^=', '|=']
temp_var_count = 0
operator_mappings = {
'!' : 'not',
'||' : 'or',
'&&' : 'and',
}
internal_functions = ['printf', ]
def generate_unique_tempname():
global temp_var_count
temp_var_count += 1
return 'temp_var_%d' % temp_var_count
def print_code(*args):
global output_file
if output_file is None:
print(*args, end='')
else:
print(*args, end='', file=output_file)
def print_operator(op):
real_op = operator_mappings.get(op)
if real_op is not None:
print_code(real_op)
else:
print_code(op)
def type_checking(data_type, value_node):
if data_type in ['int', 'char', 'short', 'long']:
if value_node.data_type in ['float', 'double']:
raise TypeError("%s type cannot be assigned to a %s type." \
% (value_node.data_type, data_type))
else:
value_node.data_type = 'float'
if value_node.value:
value_node.value = float(value_node.value)
class BaseNode(object):
def __init__(self):
pass
def traverse(self):
for key in self.__dict__.keys():
if isinstance(self.__dict__[key], BaseNode):
self.__dict__[key].traverse()
def generate_code(self):
return None
def is_leaf(self):
is_leaf = isinstance(self, ConstantNode) \
or isinstance(self, StringLiteralNode) \
or isinstance(self, IdentifierNode)
return is_leaf
class ConstantNode(BaseNode):
def __init__(self, value):
self.value = value
if isinstance(self.value, int):
self.data_type = 'int'
else:
self.data_type = 'float'
def generate_code(self, table=None):
print_code(self.value)
class StringLiteralNode(BaseNode):
def __init__(self, value):
if value[-3:-1] != '\\0':
self.value = "%s\\0%s" % (value[0:-1], value[-1:])
else:
self.value = value
self.data_type = 'str'
def generate_code(self, table=None):
print_code(self.value)
class IdentifierNode(BaseNode):
def __init__(self, item):
self.value = None
self.item = item
self.data_type = None
def generate_code(self, table=None):
if self.item['data_type'] == 'char' and len(self.item['array_size']) == 0:
print_code('ord(%s)' % self.item['actual_name'])
else:
print_code(self.item['actual_name'])
class ArrayNode(BaseNode):
def __init__(self, item, bias):
self.item = item
self.data_type = item.data_type
self.value = None
self.bias = bias
def generate_code(self, table=None):
pos = self
while isinstance(pos, ArrayNode):
pos = pos.item
item = pos.item
pos = self
flag = True
factor = 1
flattened_bias = 0
i = -1
while isinstance(pos, ArrayNode):
if flag:
if pos.bias.value is not None:
flattened_bias += factor * pos.bias.value
else:
flag = False
last_temp_name = str(flattened_bias)
if flag == False:
temp_name_middle = generate_unique_tempname()
temp_name = generate_unique_tempname()
if pos.bias.is_leaf():
print_code(' ' * indent)
print_code('%s = %d * ' % (temp_name_middle, factor))
pos.bias.generate_code()
print_code('\n')
else:
temp_name_expr = pos.bias.generate_code()
print_code(' ' * indent)
print_code('%s = %d * %s\n' % (temp_name_middle, factor, temp_name_expr))
print_code(' ' * indent)
print_code('%s = %s + %s\n' % (temp_name, temp_name_middle, last_temp_name))
last_temp_name = temp_name
factor *= item['array_size'][i]
i -= 1
pos = pos.item
if flag:
final_name = '%s[%d]' % (item['actual_name'], flattened_bias)
else:
final_name = '%s[%s]' % (item['actual_name'], last_temp_name)
if item['data_type'] == 'char':
return 'ord(%s)' % final_name
return final_name
class FunctionCallNode(BaseNode):
def __init__(self, func, argument_list):
self.func = func
self.argument_list = argument_list
self.data_type = self.func.item['return_type']
self.value = None
if self.func.item['actual_name'] in internal_functions:
return
args_type = []
pos = self.argument_list
while isinstance(pos, ArgumentListNode):
args_type.append(pos.next_arg.data_type)
pos = pos.previous_args
if pos:
args_type.append(pos.data_type)
args_type.reverse()
required_len = len(self.func.item['param_type_list'])
actual_len = len(args_type)
if required_len != actual_len:
#return
            raise SyntaxError('Function %s requires %d arguments but %d were given.' \
                % (self.func.item['actual_name'], required_len, actual_len))
for i in range(required_len):
type_1 = self.func.item['param_type_list'][i]
type_2 = args_type[i]
if type_1 == 'int' and type_2 == 'float':
raise SyntaxError('Function %s requires %s type for %dth argument, but %s type is given.'\
% (self.func.item['actual_name'], type_1, i, type_2))
def generate_code(self, table=None):
temp_args = []
pos = self.argument_list
# calculate values of argument expressions(if necessary)
while isinstance(pos, ArgumentListNode):
if pos.next_arg.is_leaf():
temp_args.append(pos.next_arg)
elif pos.next_arg.value is not None:
temp_args.append(pos.next_arg.value)
else:
temp_args.append(pos.next_arg.generate_code())
pos = pos.previous_args
if pos is None:
pass
elif pos.is_leaf():
temp_args.append(pos)
elif pos.value is not None:
temp_args.append(pos.value)
else:
temp_args.append(pos.generate_code())
temp_args.reverse()
if self.func.item['actual_name'] == 'printf':
return self.print_printf_function(temp_args)
# print code for function names
temp_name = generate_unique_tempname()
print_code(' ' * indent)
print_code("%s = %s" % (temp_name, self.func.item['actual_name']))
print_code('(')
# print code for arguments
for i in range(len(temp_args)):
arg = temp_args[i]
if isinstance(arg, BaseNode):
arg.generate_code()
else:
print_code(arg)
if i < len(temp_args) - 1:
print_code(', ')
print_code(')\n')
return temp_name
def print_printf_function(self, args):
temp_name = generate_unique_tempname()
print_code(' ' * indent)
print_code("%s = %s" % (temp_name, 'print'))
print_code('(')
for i in range(len(args)):
arg = args[i]
if isinstance(arg, BaseNode):
arg.generate_code()
else:
print_code(arg)
if i == 0:
print_code(' % (')
elif i < len(args) - 1:
print_code(', ')
else:
print_code(')')
print_code(')\n')
return temp_name
class ArgumentListNode(BaseNode):
def __init__(self, previous_args, next_arg):
self.previous_args = previous_args
self.next_arg = next_arg
class ExpressionNode(BaseNode):
@classmethod
def cal_unary_expression(cls, op, operator):
if operator == '+':
return op
elif operator == '-':
return -op
elif operator == '!':
return (not op)
else:
raise NotImplementedError('Unary operator %s is not implemented.' % operator)
@classmethod
def cal_binary_expression(cls, op1, operator, op2):
if operator == '||':
return int(op1 or op2)
elif operator == '&&':
return int(op1 and op2)
elif operator == '|':
return op1 | op2
elif operator == '^':
return op1 ^ op2
elif operator == '&':
return op1 & op2
elif operator == '==':
return int(op1 == op2)
elif operator == '!=':
return int(op1 != op2)
elif operator == '<':
return int(op1 < op2)
elif operator == '>':
return int(op1 > op2)
elif operator == '<=':
return int(op1 <= op2)
elif operator == '>=':
return int(op1 >= op2)
elif operator == '<<':
return op1 << op2
elif operator == '>>':
return op1 >> op2
elif operator == '+':
return op1 + op2
elif operator == '-':
return op1 - op2
elif operator == '*':
return op1 * op2
elif operator == '/':
# TODO: type checking
return op1 / op2
elif operator == '%':
return op1 % op2
def __init__(self, op1, operator, op2):
self.op1 = op1
self.op2 = op2
self.operator = operator
# TODO: type checking
self.data_type = op1.data_type
# TODO: type checking
self.value = None
if op2 is None:
if op1.value is not None:
self.value = self.cal_unary_expression(op1.value, operator)
else:
if op1.value is not None and op2.value is not None:
self.value = self.cal_binary_expression(op1.value, operator, op2.value)
def generate_code(self, table=None):
new_symbol_name = None
if self.op2 is not None:
is_leaf_1 = self.op1.is_leaf()
is_leaf_2 = self.op2.is_leaf()
# For complex expressions, deal with sub-expressions first.
if is_leaf_1 == False and self.op1.value is None:
temp_op1 = self.op1.generate_code()
if is_leaf_2 == False and self.op2.value is None:
temp_op2 = self.op2.generate_code()
print_code(' ' * indent)
if self.operator not in assign_operators:
new_symbol_name = generate_unique_tempname()
print_code('%s = ' % new_symbol_name)
if is_leaf_1:
self.op1.generate_code()
elif self.op1.value is not None:
print_code(self.op1.value)
else:
print_code(temp_op1)
print_code(' ')
print_operator(self.operator)
print_code(' ')
if is_leaf_2:
self.op2.generate_code()
elif self.op2.value is not None:
print_code(self.op2.value)
else:
print_code(temp_op2)
print_code('\n')
if self.operator not in assign_operators:
return new_symbol_name
else:
is_leaf_1 = self.op1.is_leaf()
if is_leaf_1 == False and self.op1.value is None:
temp_op1 = self.op1.generate_code()
print_code(' ' * indent)
new_symbol_name = generate_unique_tempname()
print_code('%s = ' % new_symbol_name)
print_operator(self.operator)
if is_leaf_1:
self.op1.generate_code()
elif self.op1.value is not None:
print_code(self.op1.value)
else:
print_code(temp_op1)
print_code('\n')
return new_symbol_name
class ExpressionListNode(BaseNode):
def __init__(self, previous_exprs, next_expr):
self.previous_exprs = previous_exprs
self.next_expr = next_expr
class DeclarationNode(BaseNode):
def __init__(self, data_type, init_declarator_list):
self.data_type = data_type
self.init_declarator_list = init_declarator_list
def add_into_table(self, table):
self.init_declarator_list.add_into_table(self.data_type, table)
def generate_code(self, table=None):
self.init_declarator_list.generate_code()
class DeclarationListNode(BaseNode):
def __init__(self, previous_declarations, next_declaration):
self.previous_declarations = previous_declarations
self.next_declaration = next_declaration
def generate_code(self, table=None):
self.previous_declarations.generate_code()
self.next_declaration.generate_code()
class InitDeclaratorListNode(BaseNode):
def __init__(self, previous_declarators, next_declarator):
self.previous_declarators = previous_declarators
self.next_declarator = next_declarator
def add_into_table(self, data_type, table):
pos = self
while isinstance(pos, InitDeclaratorListNode):
pos.next_declarator.add_into_table(data_type, table)
pos = pos.previous_declarators
pos.add_into_table(data_type, table)
def generate_code(self, table=None):
pos = self
self.previous_declarators.generate_code()
self.next_declarator.generate_code()
class InitDeclaratorNode(BaseNode):
def __init__(self, declarator, initializer):
self.declarator = declarator
self.initializer = initializer
def add_into_table(self, data_type, table):
pos = self.declarator
array_size = list()
while isinstance(pos, DeclaratorArrayNode):
array_size.append(pos.constant_expression.value)
pos = pos.declarator
array_size.reverse()
name = pos.item
# Type checking
if self.initializer:
type_checking(data_type, self.initializer)
pos.item = table.insert(name, data_type, array_size)
def generate_code(self, table=None):
array_size = list()
item = None
if isinstance(self.declarator, DeclaratorArrayNode):
(item, array_size) = self.declarator.array_meta
else:
item = self.declarator.item
if len(array_size) > 0:
flattened_array_size = 1
for n in array_size:
flattened_array_size *= n
print_code(' ' * indent)
if isinstance(self.initializer, StringLiteralNode):
print_code("%s = " % item['actual_name'])
self.initializer.generate_code()
print_code("\n")
else:
print_code("%s = [0] * %d\n" % (item['actual_name'], flattened_array_size))
else:
if self.initializer is not None:
if self.initializer.is_leaf():
print_code(' ' * indent)
print_code("%s = " % item['actual_name'])
self.initializer.generate_code()
print_code('\n')
else:
temp_ini = self.initializer.generate_code()
print_code(' ' * indent)
print_code("%s = %s\n" % (item['actual_name'], temp_ini))
else:
print_code(' ' * indent)
print_code("%s = None\n" % item['actual_name'])
class DeclaratorFunctionNode(BaseNode):
def __init__(self, declarator, param_type_list):
self.declarator = declarator
self.param_type_list = param_type_list
def generate_code(self, table=None):
global indent
print_code(self.declarator.item['actual_name'])
print_code("(")
if self.param_type_list is not None:
self.param_type_list.generate_code()
print_code(")")
class DeclaratorArrayNode(BaseNode):
def __init__(self, declarator, constant_expression):
self.declarator = declarator
self.constant_expression = constant_expression
@property
def array_meta(self):
pos = self
array_size = list()
while isinstance(pos, DeclaratorArrayNode):
array_size.append(pos.constant_expression.value)
pos = pos.declarator
array_size.reverse()
return (pos.item, array_size)
class ParameterTypeListNode(BaseNode):
def __init__(self, previous_declarations, next_declaration):
self.previous_declarations = previous_declarations
self.next_declaration = next_declaration
def generate_code(self, table=None):
self.previous_declarations.generate_code()
print_code(", ")
self.next_declaration.generate_code()
class ParameterDeclarationNode(BaseNode):
def __init__(self, data_type, declarator):
self.data_type = data_type
self.declarator = declarator
def add_into_table(self, table):
pos = self.declarator
array_size = list()
while isinstance(pos, DeclaratorArrayNode):
array_size.append(pos.constant_expression.value)
pos = pos.declarator
array_size.reverse()
name = pos.item
pos.item = table.insert(name, self.data_type, array_size)
def generate_code(self, table=None):
pos = self.declarator
while isinstance(pos, DeclaratorArrayNode):
pos = pos.declarator
print_code(pos.item['actual_name'])
class InitializerNode(BaseNode):
def __init__(self, initializer_list):
self.initializer_list = initializer_list
class InitializerListNode(BaseNode):
def __init__(self, previous_initializers, next_initializer):
self.previous_initializers = previous_initializers
self.next_initializer = next_initializer
class CompoundStatementNode(BaseNode):
def __init__(self, declaration_list, statement_list):
self.declaration_list = declaration_list
self.statement_list = statement_list
def generate_code(self, table=None):
if table is not None:
symbol_count = len(table.current_table.items)
#print(table.current_table.items)
for key in table.current_table.items:
print_code(' ' * indent)
print_code('global %s\n' % table.current_table.items[key]['actual_name'])
if self.declaration_list is not None:
self.declaration_list.generate_code()
if self.statement_list is not None:
self.statement_list.generate_code()
if self.declaration_list is None and self.statement_list is None:
if table is None :
print_code(' ' * indent)
print_code('pass\n')
elif symbol_count == 0:
print_code(' ' * indent)
print_code('pass\n')
class StatementListNode(BaseNode):
def __init__(self, previous_statements, next_statement):
self.previous_statements = previous_statements
self.next_statement = next_statement
def generate_code(self, table=None):
self.previous_statements.generate_code()
self.next_statement.generate_code()
class ExpressionStatementNode(BaseNode):
def __init__(self, expression):
self.expression = expression
def generate_code(self, table=None):
self.expression.generate_code()
class SelectionStatementNode(BaseNode):
def __init__(self, condition, true_statement, false_statement):
self.condition = condition
self.true_statement = true_statement
self.false_statement = false_statement
def generate_code(self, table=None):
global indent
if self.condition.is_leaf() == False:
temp_cond = self.condition.generate_code()
print_code(' ' * indent)
print_code('if ')
if self.condition.is_leaf():
self.condition.generate_code()
else:
print_code(temp_cond)
print_code(':\n')
indent += 4
self.true_statement.generate_code()
indent -= 4
if self.false_statement is not None:
print_code(' ' * indent)
print_code('else:\n')
indent += 4
self.false_statement.generate_code()
indent -= 4
class IterationStatementNode(BaseNode):
def __init__(self, condition, statement):
self.condition = condition
self.statement = statement
def generate_code(self, table=None):
global indent
print_code(' ' * indent)
print_code('while ')
if self.condition.is_leaf():
self.condition.generate_code()
else:
print_code('True')
print_code(':\n')
indent += 4
if self.condition.is_leaf() == False:
temp_cond = self.condition.generate_code()
print_code(' ' * indent)
print_code('if not %s:\n' % temp_cond)
print_code(' ' * (indent + 4))
print_code('break\n')
self.statement.generate_code()
indent -= 4
class JumpStatementNode(BaseNode):
def __init__(self, jump_type, expression):
self.jump_type = jump_type
self.expression = expression
def generate_code(self, table=None):
if self.jump_type == 'continue' or self.jump_type == 'break':
print_code(' ' * indent)
print_code(self.jump_type)
print_code('\n')
elif self.jump_type == 'return':
if self.expression is None:
print_code(' ' * indent)
print_code("return\n")
else:
if self.expression.is_leaf():
print_code(' ' * indent)
print_code("return ")
self.expression.generate_code()
print_code("\n")
else:
ret_val = self.expression.generate_code()
print_code(' ' * indent)
print_code("return %s\n" % ret_val)
class TranslationUnitNode(BaseNode):
def __init__(self, previous_units, next_unit):
self.previous_units = previous_units
self.next_unit = next_unit
def generate_code(self, table):
self.previous_units.generate_code(table)
self.next_unit.generate_code(table)
class FunctionDefinition(BaseNode):
def __init__(self, return_type, declarator, statements):
self.return_type = return_type
self.declarator = declarator
self.statements = statements
def generate_code(self, table):
global indent
print_code("def ")
self.declarator.generate_code()
print_code(":\n")
indent += 4
self.statements.generate_code(table)
print_code("\n")
indent -= 4
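# A minimal sketch of how these nodes emit code when driven by hand rather than by the
# parser that normally builds the AST (assumption: nothing else has consumed the global
# temp-variable counter). Leaf nodes print themselves inline; non-leaf nodes write a
# temporary assignment and return the temporary's name for the caller to reference.
if __name__ == '__main__':
    expr = ExpressionNode(ConstantNode(2), '+', ConstantNode(3))
    temp_name = expr.generate_code()   # emits: temp_var_1 = 2 + 3
    print_code('result = %s\n' % temp_name)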
|
# -*- coding: utf-8 -*-
import json
import logging
from beer_garden.api.http.authorization import authenticated, Permissions
from beer_garden.api.http.base_handler import BaseHandler
class PermissionsAPI(BaseHandler):
logger = logging.getLogger(__name__)
@authenticated(permissions=[Permissions.LOCAL_ADMIN])
def get(self):
"""
---
summary: Retrieve all Permissions
responses:
200:
description: All Permissions
schema:
type: array
items:
type: string
50x:
$ref: '#/definitions/50xError'
tags:
- Permissions
"""
self.set_header("Content-Type", "application/json; charset=UTF-8")
self.write(json.dumps(sorted(Permissions.values)))
|
import pathlib
from argparse import ArgumentParser
import torch
import torchaudio
from torchaudio.prototype.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH
def cli_main():
parser = ArgumentParser()
parser.add_argument(
"--librispeech_path",
type=pathlib.Path,
required=True,
help="Path to LibriSpeech datasets.",
)
args = parser.parse_args()
dataset = torchaudio.datasets.LIBRISPEECH(args.librispeech_path, url="test-clean")
decoder = EMFORMER_RNNT_BASE_LIBRISPEECH.get_decoder()
token_processor = EMFORMER_RNNT_BASE_LIBRISPEECH.get_token_processor()
feature_extractor = EMFORMER_RNNT_BASE_LIBRISPEECH.get_feature_extractor()
streaming_feature_extractor = EMFORMER_RNNT_BASE_LIBRISPEECH.get_streaming_feature_extractor()
hop_length = EMFORMER_RNNT_BASE_LIBRISPEECH.hop_length
num_samples_segment = EMFORMER_RNNT_BASE_LIBRISPEECH.segment_length * hop_length
num_samples_segment_right_context = (
num_samples_segment + EMFORMER_RNNT_BASE_LIBRISPEECH.right_context_length * hop_length
)
for idx in range(10):
sample = dataset[idx]
waveform = sample[0].squeeze()
# Streaming decode.
state, hypothesis = None, None
        for start in range(0, len(waveform), num_samples_segment):
            segment = waveform[start : start + num_samples_segment_right_context]
segment = torch.nn.functional.pad(segment, (0, num_samples_segment_right_context - len(segment)))
with torch.no_grad():
features, length = streaming_feature_extractor(segment)
hypos, state = decoder.infer(features, length, 10, state=state, hypothesis=hypothesis)
hypothesis = hypos[0]
transcript = token_processor(hypothesis.tokens)
if transcript:
print(transcript, end=" ", flush=True)
print()
# Non-streaming decode.
with torch.no_grad():
features, length = feature_extractor(waveform)
hypos = decoder(features, length, 10)
print(token_processor(hypos[0].tokens))
print()
if __name__ == "__main__":
cli_main()
|
# Python - 2.7.6
Test.describe('Basic tests')
Test.assert_equals(year_days(0), '0 has 366 days')
Test.assert_equals(year_days(-64), '-64 has 366 days')
Test.assert_equals(year_days(2016), '2016 has 366 days')
Test.assert_equals(year_days(1974), '1974 has 365 days')
Test.assert_equals(year_days(-10), '-10 has 365 days')
Test.assert_equals(year_days(666), '666 has 365 days')
Test.assert_equals(year_days(1857), '1857 has 365 days')
Test.assert_equals(year_days(2000), '2000 has 366 days')
Test.assert_equals(year_days(-300), '-300 has 365 days')
Test.assert_equals(year_days(-1), '-1 has 365 days')
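# A minimal sketch of a year_days() that satisfies the assertions above (assumption: the
# standard Gregorian leap-year rule, evaluated with Python's modulo so that negative years
# such as -64 and -300 behave as the tests expect; on Codewars the solution is defined
# before these tests run).
def year_days(year):
    leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    return '%d has %d days' % (year, 366 if leap else 365)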
|
#!/usr/bin/env python
# coding:utf-8
import math
import logging
import torch
from torch.optim import Optimizer
from torch.optim import lr_scheduler as lrs
class LRSchedulerWorker(object):
def __init__(self, log_type, logger=None):
super(LRSchedulerWorker, self).__init__()
self.log_level = log_type
self.logger = logger
self._build_map()
def __call__(self, optimizer, scheduler_type, scheduler_setting, scheduler_state=None, **other_params):
scheduler = self.init_scheduler(
scheduler_type, optimizer, group_num=1, **scheduler_setting)
scheduler = self.load_state(scheduler=scheduler, scheduler_state=scheduler_state)
return scheduler
def _build_map(self):
self.name2scheduler = {
'constant': keep_constant,
'power': reduce_with_power,
'plateau': reduce_on_plateau,
'improve': ReduceByImprove,
'cosine': reduce_cosine_annealing,
'cosine_restart': reduce_cosine_annealing_warm_restart
}
self.name2default_setting = {
'constant': {},
'power': {'sleep_epochs': 25, 'reduce_factor': 0.5},
'plateau': {'mode': 'min', 'patience': 3, 'reduce_factor': 0.5, 'cooldown': 3, 'min_lr': 1e-5},
'improve': {'factor': 0.5, 'patience': 3},
'cosine': {'t_max': 4, 'min_lr': 1e-5, 'last_epoch': -1},
'cosine_restart': {'t0': 20, 't_mult': 2, 'min_lr': 1e-5}
}
def init_scheduler(self, scheduler_type, optimizer, group_num=1, **scheduler_setting):
write_log(content='Using scheduler: {}'.format(scheduler_type), level=self.log_level, logger=self.logger)
        default_setting = dict(self.name2default_setting[scheduler_type])  # copy so the shared defaults table is not mutated
if group_num == 1:
default_setting.update(scheduler_setting)
scheduler = self.name2scheduler[scheduler_type](
**{**{'optimizer': optimizer}, **default_setting})
else:
raise NotImplementedError('cannot support more than 1 params group')
return scheduler
def load_state(self, scheduler, scheduler_state=None):
if not scheduler_state:
write_log(content='No exist scheduler state', level=self.log_level, logger=self.logger)
elif isinstance(scheduler_state, (dict, str, tuple, list)):
if isinstance(scheduler_state, str):
already_optimizer_state = torch.load(scheduler_state, map_location=lambda storage, loc: storage)
write_log(content='Loading exist scheduler state from {}'.format(scheduler_state),
level=self.log_level, logger=self.logger)
elif isinstance(scheduler_state, (tuple, list)):
already_optimizer_state = torch.load(
scheduler_state[0], map_location=lambda storage, loc: storage)[scheduler_state[1]]
write_log(content='Loading exist scheduler state from {}'.format('.'.join(scheduler_state)),
level=self.log_level, logger=self.logger)
else:
already_optimizer_state = scheduler_state
write_log(content='Loading exist scheduler state', level=self.log_level, logger=self.logger)
scheduler.load_state_dict(already_optimizer_state)
else:
raise ValueError('unknown scheduler state')
return scheduler
def keep_constant(optimizer, **params):
def lr_constant(epoch):
return 1.
scheduler = lrs.LambdaLR(
optimizer,
lr_lambda=lr_constant,
last_epoch=-1)
return scheduler
def reduce_with_power(optimizer, sleep_epochs, reduce_factor, **params):
def lr_power_epoch(epoch):
if epoch >= sleep_epochs:
factor = math.pow(0.5, (epoch - sleep_epochs + 1) * reduce_factor)
else:
factor = 1.
return factor
scheduler = lrs.LambdaLR(
optimizer,
lr_lambda=lr_power_epoch,
last_epoch=-1)
return scheduler
def reduce_on_plateau(optimizer, mode, patience, reduce_factor, cooldown, min_lr, **params):
scheduler = lrs.ReduceLROnPlateau(
optimizer, mode=mode, factor=reduce_factor, patience=patience, verbose=False, threshold=0.0001,
threshold_mode='rel', cooldown=cooldown, min_lr=min_lr, eps=1e-08)
return scheduler
def reduce_cosine_annealing(optimizer, t_max, min_lr, last_epoch, **other_params):
scheduler = lrs.CosineAnnealingLR(optimizer=optimizer, T_max=t_max, eta_min=min_lr, last_epoch=last_epoch)
return scheduler
def reduce_cosine_annealing_warm_restart(optimizer, t0, t_mult, min_lr, **other_params):
scheduler = lrs.CosineAnnealingWarmRestarts(optimizer, T_0=t0, T_mult=t_mult, eta_min=min_lr, last_epoch=-1)
return scheduler
class ReduceByImprove(object):
def __init__(self, optimizer, factor=0.1, patience=10, **params):
super(ReduceByImprove, self).__init__()
assert factor < 1.0, 'Factor should be < 1.0.'
self.factor = factor
# Attach optimizer
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(type(optimizer).__name__))
self.optimizer = optimizer
self.patience = patience
def step(self, no_improve):
if no_improve >= self.patience:
self._reduce_lr()
def _reduce_lr(self):
for i, param_group in enumerate(self.optimizer.param_groups):
old_lr = float(param_group['lr'])
new_lr = old_lr * self.factor
param_group['lr'] = new_lr
def state_dict(self):
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict):
self.__dict__.update(state_dict)
def write_log(content, logger=None, level=None, **other_params):
"""
write log
:param content: content need to write
:param level: level of content or None
:param logger: False or logger
:param other_params: reserved interface
:return: None
"""
if not logger:
pass
elif logger == 'print':
print(content)
elif isinstance(logger, logging.Logger):
if not level:
pass
else:
assert level in ['debug', 'info', 'warning', 'error', 'critical'], 'unknown level'
getattr(logger, level)(content)
else:
raise NotImplementedError('unknown logger')
return None
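# A minimal usage sketch (assumptions: a throwaway parameter and SGD optimizer stand in
# for a real model, and 'print' is passed as the logger so messages go to stdout).
if __name__ == '__main__':
    dummy_params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.SGD(dummy_params, lr=0.1)
    worker = LRSchedulerWorker(log_type='info', logger='print')
    scheduler = worker(optimizer, 'plateau', {'patience': 5})
    scheduler.step(0.5)  # ReduceLROnPlateau steps on a validation metric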
|
print('Accessing private members in Class:')
print('-'*35)
class Human():
# Private var
__privateVar = "this is __private variable"
# Constructor method
def __init__(self):
self.className = "Human class constructor"
self.__privateVar = "this is redefined __private variable"
# Public method
def showName(self, name):
self.name = name
return self.__privateVar + " with name: " + name
# Private method
def __privateMethod(self):
return "Private method"
def _protectedMethod(self):
return 'Protected Method'
# Public method that returns a private variable
def showPrivate(self):
return self.__privateMethod()
def showProtecded(self):
return self._protectedMethod()
class Male(Human):
def showClassName(self):
return "Male"
def showPrivate(self):
return self.__privateMethod()
def showProtected(self):
return self._protectedMethod()
class Female(Human):
def showClassName(self):
return "Female"
def showPrivate(self):
return self.__privateMethod()
human = Human()
print(f'\nCalling the: {human.className} from the Human class.')
print(f'\nAccessing the public method of Human class: {human.showName("Ling-Ling")}')
print(f'\nAccessing the private method of the Human class: {human.showPrivate()}, from Human Class.')
# print(f'Accessing the protected Method of the Human Class : {human.showProtected()}, from Human Class.') -->AttributeError:'Human' object has no attribute 'showProtected'
male = Male()
print(f'\ncalling the {male.className} from the Male class')
print(f'\nAccessing the Public method of Male class: {male.showClassName()}, from male class')
print(f'\nAccessing the protected method of Male class: {male.showProtected()}, from male class.')
# print(f'Accessing the private method of Male class: {male.Human__showPrivate()}, from male Class.') --> AttributeError: 'Male' object has no attribute '_Male__privateMethod'
female = Female()
print(f'\ncalling the {female.className} from the Female class')
print(f'\nAccessing the Public method of female class: {female.showClassName()}, from Female class')
# print(f'Accessing the protected method of female class: {female.showProtected()}, from Female class.') --> AttributeError: 'Female' object has no attribute 'showProtected'
# print(f'Accessing the private method of female class: {female.showPrivate()}, from Female Class.') AttributeError: 'Female' object has no attribute '_Female__privateMethod'
print('\n'+'-'*25+"Method 2 -- Accessing private members in Class"+'-'*25)
print('\n'+'Example: Public Attributes: ')
print('-'*20)
class Employee:
def __init__(self,name,sal):
self.name=name #Public attribute
self.salary=sal #Public attribute
e1=Employee('Ling1',30000)
print(f'Accessing the Public Attributes: {e1.name} : {e1.salary}')
# if attribute is public then the value can be modified too
e1.salary=40000
print(f'Accessing the Public Attributes after modifying: {e1.name} : {e1.salary}')
print('\n'+'Example: Protected Attributes: ')
'''Python's convention for marking an instance variable as protected is to prefix it with a single underscore (_).
This is a convention only: it signals that the attribute should not be touched outside the class or its subclasses, though Python does not enforce it.'''
print('-'*25)
class Employee:
def __init__(self,name,sal):
self._name=name #protected attribute
self._salary=sal #protected attribute
e2=Employee('Ling2',50000)
print(f'Accessing the Protected Attributes: {e2._name} : {e2._salary}')
#even if attribute is protected the value can be modified too
e2._salary=44000
print(f'Accessing the Protected Attributes after modifying: {e2._name} : {e2._salary}')
print('\n'+'Example: Private Attributes: ')
'''a double underscore __ prefixed to a variable makes it private.
It gives a strong suggestion not to touch it from outside the class.
Any attempt to do so will result in an AttributeError.'''
print('-'*25)
class Employee:
def __init__(self,name,sal):
self.__name=name # private attribute
self.__salary=sal # private attribute
e3=Employee('Ling3',60000)
# print(f'Accessing the Private Attributes: {e3.__name} : {e3.__salary}') --> AttributeError: 'Employee' object has no attribute '__name'
'''To reach such attributes from outside, note that Python performs name mangling on private variables:
every member with a double-underscore prefix is renamed to _ClassName__variable and is accessed as object._ClassName__variable.'''
print(f'Accessing the Private Attributes: {e3._Employee__name} : {e3._Employee__salary}')
# even though the attribute is private, its value can still be modified through the mangled name
e3._Employee__salary=15000
print(f'Accessing the Private Attributes after modifying: {e3._Employee__name} : {e3._Employee__salary}')
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 26 19:54:16 2020
@author: tobia
"""
import spacy
import pandas as pd
import numpy as np
import nltk
from nltk.stem import SnowballStemmer
from spacy.lang.es import Spanish
nltk.download('wordnet')
spacy.load("es_core_news_md")
nltk.download('stopwords')
#open csv file to construct the data
data = pd.read_csv('') #load network_data.csv
mom = list()
for time in data['retrieved']:
if time[38] == "1":
mom.append(2021)
elif time[38] == "4":
mom.append(2020)
else:
print("ERROR!")
data['mom'] = mom
data['subgraph'] = np.where(data['mom'] == 2021, data['subgraph'].astype(str) + "_2021", data['subgraph'].astype(str))
data = data.iloc[:,1:21]
prep_text = np.array(data['text'])
#correct for doubled exceptional threads
manual_stopwords = [ #append this list to take out all the stuff that we cannot process for the later topic modelling
">",
">>"
"negrin",
"negrines",
"negrín",
"negro",
"negros",
"anon",
"URL",
"number",
",",
".",
"?",
"¿",
"@",
"¡",
"!",
"(",
")",
"q",
"ser",
] #for the strings that start with digits in the beginning of every post
parser = Spanish()
stemmer = SnowballStemmer('spanish')
stop_list = nltk.corpus.stopwords.words('spanish')
stop_list.extend(manual_stopwords)
es_stop = set(stop_list)
for index in (range(0,len(prep_text))):
    # strip markup, digits and punctuation; multi-character tokens are handled before
    # the single characters they contain (e.g. '(OP)' before '(' and 'https:' before ':')
    for junk in ['>', '<', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
                 '(OP)', '/', '\\', '(', ')', '*', '[', ']', ';', '^', '"', '!', '¡',
                 '?', '¿', '@', '.', '#', '&', '%', 'https:', 'http:', ':', '`', '-',
                 '=', "'", 'xd']:
        prep_text[index] = str(prep_text[index]).replace(junk, ' ')
    print(str(index) + ': text cleaned')
#tokenizer and lemmatizer
def tokenize(text):
lda_tokens = list()
tokens = parser(text)
#### one possibility for adaptation is: [lda_tokens.append(token.lower_) for token in tokens if token.isalpha()] ########
for token in tokens:
if token.orth_.isspace():
continue
elif token.like_url:
lda_tokens.append('URL')
elif token.orth_.isdigit():
lda_tokens.append('number')
else:
lda_tokens.append(token.lower_)
return lda_tokens
#a good idea could be a curseword stoplist
def prepare_text_for_lda(text):
lemmas = tokenize(text)
lemmas = [token for token in lemmas if token not in es_stop]
lemmas = [stemmer.stem(token) for token in lemmas]
return lemmas
#make the analyzable data structure
data['prep_text'] = prep_text
bodies = data['prep_text']
bodies = list(bodies)
text_data = list()
thread_len = list()
#tokenize the different bodies
counter = 1
for body in bodies:
#try to tokenize
try:
text_data.append(prepare_text_for_lda(body))
        print(str(counter) + ' bodies tokenized and lemmatized')
#handles empty nodes
except TypeError:
text_data.append([])
print('empty text node')
        print(str(counter) + ' bodies tokenized and lemmatized')
counter = counter + 1
#dictionary = corpora.Dictionary(text_data) #those couple of lines are going to be excluded in the final version
#corpus = [dictionary.doc2bow(text) for text in text_data] #we just keep them as reference until we have made the corpora per topic
#pickle.dump(corpus, open('C:\\Users\\tobia\\Desktop\\Working Copies\\Projects\\phd thesis\\code\\graphs\\corpus.pkl', 'wb'))
#pickle.dump(text_data, open('C:\\Users\\tobia\\Desktop\\Working Copies\\Projects\\phd thesis\\code\\graphs\\lemmatisedtext.pkl', 'wb'))
#dictionary.save('C:\\Users\\tobia\\Desktop\\Working Copies\\Projects\\phd thesis\\code\\graphs\\dictionary.gensim')
#set in a character for missing text
new_text_data = list()
for list_ in text_data:
if len(list_) == 0:
new_text_data.append(['?'])
elif list_ == ['nan']:
new_text_data.append(['?'])
elif len(list_) != 0:
new_text_data.append(list_)
#here we can add all the stuff that we will need to calculate later such as message length, mean centralities per topic and so on
#operate on the text_data array rather than the text_data dataframe column
data['text_data'] = new_text_data
filepath = ""
data.to_csv('', index = True) #create network_data_prep.csv
print("data saved. FINISH")
|
#!/usr/bin/env python3
"""
Author : shv <shv@email.arizona.edu>
Date : 2021-11-16
Purpose: Run length encoding
"""
import argparse
import os
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description="", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("str", metavar="str", help="DNA text or file")
args = parser.parse_args()
if os.path.isfile(args.str):
args.str = open(args.str).read()
return args
# --------------------------------------------------
def main():
"""Zoom"""
args = get_args()
for seq in args.str.splitlines():
print(rle(seq))
# --------------------------------------------------
def rle(seq: str) -> str:
"creating RLE"
encoded_dna = ""
i = 0
while i <= len(seq) - 1:
count = 1
ch = seq[i]
j = i
while j < len(seq) - 1:
if seq[j] == seq[j + 1]:
count = count + 1
j = j + 1
else:
break
encoded_dna = encoded_dna + ch + (str(count) if count > 1 else "")
i = j + 1
return encoded_dna
# -------------------------------------------------------
def test_rle():
"""Test rle"""
assert rle("A") == "A"
assert rle("ACGT") == "ACGT"
assert rle("AA") == "A2"
assert rle("AAAAA") == "A5"
assert rle("ACCGGGTTTT") == "AC2G3T4"
# --------------------------------------------------
if __name__ == "__main__":
main()
|
class StopRequest:
stop_id = 0
components = []
    def __init__(self, stop_id, components=None):
        self.stop_id = stop_id
        # use None as the default to avoid sharing one mutable list across instances
        self.components = components if components is not None else []
|
from typing import TYPE_CHECKING, Optional
from requests import Session
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
if TYPE_CHECKING:
from requests import PreparedRequest # pylint: disable=ungrouped-imports
class TimeoutAdapter(HTTPAdapter):
""" Adds a default timeout to request Requests. """
DEFAULT_TIMEOUT = 10
def __init__(self, *args, **kwargs):
_timeout = kwargs.pop("timeout", None)
if _timeout is None:
self.timeout = self.DEFAULT_TIMEOUT
else:
self.timeout = _timeout
super().__init__(*args, **kwargs)
def __getstate__(self):
state = super().__getstate__()
state["timeout"] = self.timeout
return state
def send(
self, request: "PreparedRequest", *args, **kwargs
): # pragma: no cover # pylint: disable=signature-differs
kwargs.setdefault("timeout", self.timeout)
return super().send(request, *args, **kwargs)
retry_strategy = Retry(
total=3,
status_forcelist=[429, 500, 502, 503, 504],
backoff_factor=3,
)
timeout_retry_adapter = TimeoutAdapter(max_retries=retry_strategy)
DEFAULT_HEADERS = {"Accept": "application/json"}
def session_factory(session: Optional[Session] = None) -> Session:
""" Creates or modifies `requests.Session` by attaching a timeout adapter with a retry strategy and default headers. """
if session is None:
session = Session()
session.mount("https://", timeout_retry_adapter)
session.mount("http://", timeout_retry_adapter)
session.headers.update(DEFAULT_HEADERS)
return session
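# A minimal usage sketch (assumption: https://example.org stands in for a real endpoint).
# The factory-built session applies the 10s default timeout, the retry strategy for
# 429/5xx responses, and the JSON Accept header to every request it makes.
if __name__ == "__main__":
    session = session_factory()
    response = session.get("https://example.org/")
    print(response.status_code, response.headers.get("Content-Type"))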
|
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import math
import pathlib as path
from PIL import Image
import random
print(tf.__version__)
MODEL_FILE = "mnist.h5"
def pre_process():
def _process(p):
return np.asfarray(Image.open(
p).resize((28, 28), Image.BICUBIC).convert('L').point(lambda x: 255 - x)) / 255
root_dir = path.Path("English")
train_dir = [i for i in root_dir.iterdir()
if i.name.startswith("Sample")]
test_dir = [i for i in root_dir.iterdir()
if i.name.startswith("Test")]
train_dir.sort()
test_dir.sort()
test_data = [[_process(str(j)) for j in i.iterdir()] for i in test_dir]
train_data = [[_process(str(j)) for j in i.iterdir()] for i in train_dir]
return train_data, test_data
def build():
train_data, test_data = pre_process()
train_labels = [i for i in range(len(train_data))
for j in train_data[i]]
test_labels = [i for i in range(len(test_data))
for j in test_data[i]]
train_data = [j for i in train_data for j in i]
test_data = [j for i in test_data for j in i]
test_dl = list(zip(test_data, test_labels))
train_dl = list(zip(train_data, train_labels))
random.shuffle(test_dl)
random.shuffle(train_dl)
test_data, test_labels = zip(*test_dl)
train_data, train_labels = zip(*train_dl)
test_data = np.asarray(test_data)
test_labels = np.asarray(test_labels)
train_data = np.asarray(train_data)
train_labels = np.asarray(train_labels)
# Define the model architecture
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
# keras.layers.Dense(128, activation=tf.nn.relu),
# Optional: You can replace the dense layer above with the convolution layers below to get higher accuracy.
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(filters=32, kernel_size=(3, 3),
activation=tf.nn.relu),
keras.layers.Conv2D(filters=64, kernel_size=(3, 3),
activation=tf.nn.relu),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Dropout(0.25),
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dropout(0.5),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Train the digit classification model
model.fit(train_data, train_labels, epochs=5)
# Evaluate the model using test dataset.
test_loss, test_acc = model.evaluate(test_data, test_labels)
print('Test accuracy:', test_acc)
# Predict the labels of digit images in our test dataset.
# predictions = model.predict(test_data)
# Then plot the first 25 test images and their predicted labels.
# show_sample(test_images,
# ['Predicted: %d' % np.argmax(result) for result in predictions])
model.save(MODEL_FILE)
def recognize(images) -> list:
if not path.Path(MODEL_FILE).exists():
build()
model = keras.models.load_model(MODEL_FILE)
predictions = model.predict(images)
return [np.argmax(result) for result in predictions]
class Recognizer:
def __init__(self):
if not path.Path(MODEL_FILE).exists():
build()
self.model = keras.models.load_model(MODEL_FILE)
def predict(self, images):
predictions = self.model.predict(images)
return [np.argmax(result) for result in predictions]
if __name__ == "__main__":
build()
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" PauliExpectation Class """
import logging
from typing import Union
import numpy as np
from qiskit.opflow.converters.abelian_grouper import AbelianGrouper
from qiskit.opflow.converters.pauli_basis_change import PauliBasisChange
from qiskit.opflow.expectations.expectation_base import ExpectationBase
from qiskit.opflow.list_ops.composed_op import ComposedOp
from qiskit.opflow.list_ops.list_op import ListOp
from qiskit.opflow.operator_base import OperatorBase
from qiskit.opflow.primitive_ops.pauli_sum_op import PauliSumOp
from qiskit.opflow.primitive_ops.primitive_op import PrimitiveOp
from qiskit.opflow.state_fns.operator_state_fn import OperatorStateFn
from qiskit.opflow.state_fns.state_fn import StateFn
logger = logging.getLogger(__name__)
class PauliExpectation(ExpectationBase):
r"""
An Expectation converter for Pauli-basis observables by changing Pauli measurements to a
diagonal ({Z, I}^n) basis and appending circuit post-rotations to the measured state function.
Optionally groups the Paulis with the same post-rotations (those that commute with one
another, or form Abelian groups) into single measurements to reduce circuit execution
overhead.
"""
def __init__(self, group_paulis: bool = True) -> None:
"""
Args:
group_paulis: Whether to group the Pauli measurements into commuting sums, which all
have the same diagonalizing circuit.
"""
self._grouper = AbelianGrouper() if group_paulis else None
def convert(self, operator: OperatorBase) -> OperatorBase:
"""Accepts an Operator and returns a new Operator with the Pauli measurements replaced by
diagonal Pauli post-rotation based measurements so they can be evaluated by sampling and
averaging.
Args:
operator: The operator to convert.
Returns:
The converted operator.
"""
if isinstance(operator, ListOp):
return operator.traverse(self.convert).reduce()
if isinstance(operator, OperatorStateFn) and operator.is_measurement:
# Change to Pauli representation if necessary
if (
isinstance(operator.primitive, (ListOp, PrimitiveOp))
and not isinstance(operator.primitive, PauliSumOp)
and {"Pauli", "SparsePauliOp"} < operator.primitive_strings()
):
logger.warning(
"Measured Observable is not composed of only Paulis, converting to "
"Pauli representation, which can be expensive."
)
# Setting massive=False because this conversion is implicit. User can perform this
# action on the Observable with massive=True explicitly if they so choose.
pauli_obsv = operator.primitive.to_pauli_op(massive=False)
operator = StateFn(pauli_obsv, is_measurement=True, coeff=operator.coeff)
if self._grouper and isinstance(operator.primitive, (ListOp, PauliSumOp)):
grouped = self._grouper.convert(operator.primitive)
operator = StateFn(grouped, is_measurement=True, coeff=operator.coeff)
# Convert the measurement into diagonal basis (PauliBasisChange chooses
# this basis by default).
cob = PauliBasisChange(replacement_fn=PauliBasisChange.measurement_replacement_fn)
return cob.convert(operator).reduce()
return operator
def compute_variance(self, exp_op: OperatorBase) -> Union[list, float, np.ndarray]:
def sum_variance(operator):
if isinstance(operator, ComposedOp):
sfdict = operator.oplist[1]
measurement = operator.oplist[0]
average = np.asarray(measurement.eval(sfdict))
variance = sum(
(v * (np.asarray(measurement.eval(b)) - average)) ** 2
for (b, v) in sfdict.primitive.items()
)
return operator.coeff * variance
elif isinstance(operator, ListOp):
return operator.combo_fn([sum_variance(op) for op in operator.oplist])
return 0.0
return sum_variance(exp_op)
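# A minimal usage sketch, guarded so it only runs when this file is executed directly
# (assumption: the opflow primitives X, Z and Plus are importable from qiskit.opflow,
# as in the opflow tutorials; they are not defined in this module).
if __name__ == "__main__":
    from qiskit.opflow import X, Z, Plus

    observable = (2 * X) + Z
    measurement = StateFn(observable, is_measurement=True) @ Plus
    converted = PauliExpectation().convert(measurement)
    print(converted.eval())  # expectation of 2X + Z in the |+> state, i.e. 2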
|
import sys
import os
import pytest
import pandas as pd
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import json_to_excel.json_to_excel as source_code
def test_create_sample_json():
count=1
test_json = source_code.create_sample_json(count=count)
assert isinstance(test_json, list)
assert test_json[0]['Ref'] == f'ABC_{count}'
assert test_json[0]['sample'] == f'sample_status_{count}'
def test_create_sample_json_error():
test_json = source_code.create_sample_json(count='1')
assert test_json == "Exception occured!"
with pytest.raises(Exception):
test_json = source_code.create_sample_json(count=1)
assert test_json[1]['Ref'] == f'ABC_{1}'
def test_read_from_excel():
test_obj = source_code.ExcelHandling()
assert test_obj.read_from_excel()
def test_write_to_excel():
DUMMY_JSON_LOAD = [{'Ref': 'test_1', 'sample': 'test_1'}]
process_file = r"C:\Users\JP\OneDrive\Documents\GitHub\json_to_excel\json_to_excel\processing\excel_database.xlsx"
test_obj = source_code.ExcelHandling(json_load=DUMMY_JSON_LOAD)
test_obj.read_from_excel(process_file=process_file)
assert test_obj.write_to_excel()
data_ = pd.read_excel(process_file, sheet_name='Sheet1')
assert data_.values[0][1] == 'test_1'
assert data_.values[0][2] == 'test_1'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.test import vis_detections
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import os, sys
# Make sure that caffe is on the python path:
caffe_root = './caffe-fast-rcnn/'
os.chdir(caffe_root)
sys.path.insert(0, os.path.join(caffe_root, 'python'))
import caffe
import cv2
import argparse
from PIL import Image
CLASSES = ('__background__','hand')
def demo(net, im, nframe):
"""Detect object classes in an image using pre-computed object proposals."""
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.95
NMS_THRESH = 0.01
dets = np.hstack((boxes, scores)).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
if len(inds) == 0:
print "not detected hand"
# im = im[:, :, (2, 1, 0)]
else:
print "scores:\n",scores
print "boxes:\n", boxes
print "dets:\n", dets
for i in xrange(dets.shape[0]):
if(dets[i][4]>CONF_THRESH):
cv2.rectangle(im, (dets[i][0], dets[i][1]),(dets[i][2], dets[i][3]),(255,0,0),1)
cv2.putText(im,str(dets[i][4] ), (dets[i][0], dets[i][1]), cv2.FONT_HERSHEY_PLAIN, 1, (0,255,0))
#
# cv2.imshow("detections", im);
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
prototxt = "/Users/momo/wkspace/caffe_space/detection/py-faster-rcnn/models/MMCV5S8/faster_rcnn_end2end/test.prototxt"
caffemodel = "/Users/momo/wkspace/caffe_space/detection/py-faster-rcnn/models/MMCV5S8/mmcv5stride8bn128_neg01_iter_100000.caffemodel"
# prototxt = "/Users/momo/Desktop/gesture/from113/MMCV5_stride16/test.prototxt"
# caffemodel = "/Users/momo/Desktop/sdk/momocv2_model/original_model/object_detect/mmcv5stride16_iter_5250000.caffemodel"
caffe.set_mode_cpu()
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
cfg.TEST.SCALES = [144,]
cfg.TEST.MAX_SIZE = 256
cfg.DEDUP_BOXES = 1./8.
CONF_THRESH = 0.98
NMS_THRESH = 0.01
fromDir = "/Volumes/song/testVideos/0627/all/"
readFile = open(fromDir + "../testlists.txt", "r")
retfilename = '627_'+caffemodel.split('.')[0].split('/')[-1] + '_' + str(CONF_THRESH).split('.')[-1]
toDir = '/Users/momo/wkspace/caffe_space/detection/py-faster-rcnn/retTests/ret/' + retfilename
print toDir
if not os.path.isdir(toDir):
os.makedirs(toDir)
writeFile = open(toDir + "/../" + retfilename + ".txt", "w")
filelists = readFile.readlines()
print filelists
for filename in filelists:
print "filename:", filename
video_name = filename.split()[0]
video = cv2.VideoCapture(fromDir + video_name + '.mp4')
success, im = video.read()
numFrame = 0
while success:
if video_name == "20180627_momo_0007":
im = cv2.transpose(im)
if video_name == "20180627_momo_0023":
im = cv2.flip(im, 0)
numFrame += 1
savename = filename.split()[0] + '_f' + str(numFrame) + '.jpg'
scores, boxes = im_detect(net, im)
dets = np.hstack((boxes, scores)).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
nhand = 0
for i in xrange(dets.shape[0]):
if (dets[i][4] > CONF_THRESH):
nhand += 1
if nhand > 0:
writeFile.write(savename +' '+ str(nhand)+ ' ')
for i in xrange(dets.shape[0]):
if (dets[i][4] > CONF_THRESH):
writeFile.write('hand ' \
+ str( int( dets[i][0]) ) +' ' \
+ str( int( dets[i][1]) ) +' ' \
+ str( int( dets[i][2]) ) +' ' \
+ str( int( dets[i][3]) ) + ' ')
writeFile.write('\n')
# for i in xrange(dets.shape[0]):
# if (dets[i][4] > CONF_THRESH):
# cv2.rectangle(im, (dets[i][0], dets[i][1]), (dets[i][2], dets[i][3]), (255, 0, 0), 1)
# cv2.putText(im, str(dets[i][4]), (dets[i][0], dets[i][1]), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
# cv2.imwrite(toDir+'/' + savename, im)
# cv2.imshow("capture", im)
# cv2.waitKey(1)
success, im = video.read()
writeFile.close()
readFile.close()
|
from hypothesis_auto import auto_pytest_magic
from streamdeck_ui import api
auto_pytest_magic(api.set_button_command)
auto_pytest_magic(api.get_button_command)
auto_pytest_magic(api.set_button_switch_page)
auto_pytest_magic(api.get_button_switch_page)
auto_pytest_magic(api.set_button_keys)
auto_pytest_magic(api.get_button_keys)
auto_pytest_magic(api.set_button_write)
auto_pytest_magic(api.get_button_write)
auto_pytest_magic(api.set_brightness, auto_allow_exceptions_=(KeyError,))
auto_pytest_magic(api.get_brightness)
auto_pytest_magic(api.change_brightness, auto_allow_exceptions_=(KeyError,))
auto_pytest_magic(api.set_page)
auto_pytest_magic(api.get_page)
auto_pytest_magic(api.render)
|
import unittest
import json
from dataclasses import dataclass, field
from rlbottraining.history.metric import Metric
from .utils.example_metrics import ExampleMetric, ExampleMetric2
class MetricsTest(unittest.TestCase):
def test_dataclass(self):
"""
Just make sure we understand Data Classes properly.
"""
red_dude = ExampleMetric2(5, (3,0,4))
self.assertEqual(repr(red_dude), 'ExampleMetric2(speed=5, momentum=(3, 0, 4), violence=True)')
self.assertIsInstance(red_dude, Metric)
red_dude2 = ExampleMetric2(5, (3,0,4), True)
self.assertEqual(red_dude, red_dude2)
self.assertEqual(len(set([red_dude, red_dude2])), 1) # yay, easily hashable
def test_dataclass_2(self):
class NonDataClassBase:
foo:int = 3
def __init__(self, bar):
self.bar = bar
pass
@dataclass
class Rectangle(NonDataClassBase):
width: float
height: float
rect = Rectangle(3,4)
self.assertEqual(repr(rect), 'MetricsTest.test_dataclass_2.<locals>.Rectangle(width=3, height=4)')
self.assertTrue(hasattr(rect, 'foo'))
self.assertFalse(hasattr(rect, 'bar'))
class Cuboid(Rectangle):
def __init__(self, width, height, depth):
super().__init__(width, height)
self.depth = depth
cube = Cuboid(1,2,3)
self.assertEqual(cube.width, 1)
self.assertEqual(cube.height, 2)
self.assertEqual(cube.depth, 3)
def test_dataclass_3(self):
'''
Check how overriding default values works.
'''
@dataclass
class Rectangle:
width: float = 0
height: float = 0
@dataclass
class SpecificWidthRectangle(Rectangle):
width: float = 5
specificWidth: bool = True
self.assertEqual(
# Same argument-order as Rectangle
repr(SpecificWidthRectangle(3)),
'MetricsTest.test_dataclass_3.<locals>.SpecificWidthRectangle(width=3, height=0, specificWidth=True)'
)
self.assertEqual(
repr(SpecificWidthRectangle(height=3)),
'MetricsTest.test_dataclass_3.<locals>.SpecificWidthRectangle(width=5, height=3, specificWidth=True)'
)
self.assertEqual(
SpecificWidthRectangle(3).__dict__,
{'width': 3, 'height': 0, 'specificWidth': True}
)
@dataclass
class SpecificHeightRectangle(Rectangle):
height: float = 4
specificHeight: bool = True
'''
How does diamond-shaped multiple-inheritance work?
        It grabs the overriding defaults of one, including its base class.
'''
@dataclass
class SpecificWidthHeightRectangle(SpecificWidthRectangle, SpecificHeightRectangle):
pass
self.assertEqual(
repr(SpecificWidthHeightRectangle()),
'MetricsTest.test_dataclass_3.<locals>.SpecificWidthHeightRectangle(width=5, height=0, specificHeight=True, specificWidth=True)'
)
@dataclass
class CompositeRectangle(Rectangle):
width: float = SpecificWidthRectangle.width
height: float = SpecificHeightRectangle.height
self.assertEqual(
repr(CompositeRectangle()),
'MetricsTest.test_dataclass_3.<locals>.CompositeRectangle(width=5, height=4)'
)
        # A dataclass subclass may not add a field without a default after inherited fields that have defaults.
try:
@dataclass
class Cuboid(Rectangle):
depth: float
except TypeError:
pass
else:
self.fail()
if __name__ == '__main__':
unittest.main()
|
# Font recognition for the two (2) puzzles that have invoked it so far (2021 Day 13 and ... that one with the flying points from 2018?)
from typing import Set, Tuple
# Yes, this was generated by trawling through github for various inputs for day 13 and running them
# No, it might not have all the possible letters that are generated
# Yes, it seems to exclude ambiguous letters, almost surely intentionally (also notably 'M' or 'W' which would require variable width characters)
# Yes, getting all those inputs was tedious
LETTERS = {
('A', ('.##.', '#..#', '#..#', '####', '#..#', '#..#')),
('B', ('###.', '#..#', '###.', '#..#', '#..#', '###.')),
('C', ('.##.', '#..#', '#...', '#...', '#..#', '.##.')),
('E', ('####', '#...', '###.', '#...', '#...', '####')),
('F', ('####', '#...', '###.', '#...', '#...', '#...')),
('G', ('.##.', '#..#', '#...', '#.##', '#..#', '.###')),
('H', ('#..#', '#..#', '####', '#..#', '#..#', '#..#')),
('J', ('..##', '...#', '...#', '...#', '#..#', '.##.')),
('K', ('#..#', '#.#.', '##..', '#.#.', '#.#.', '#..#')),
('L', ('#...', '#...', '#...', '#...', '#...', '####')),
('P', ('###.', '#..#', '#..#', '###.', '#...', '#...')),
('R', ('###.', '#..#', '#..#', '###.', '#.#.', '#..#')),
('U', ('#..#', '#..#', '#..#', '#..#', '#..#', '.##.')),
('Z', ('####', '...#', '..#.', '.#..', '#...', '####'))
}
HEIGHT = 6
WIDTH = 4
def recognize(points: Set[Tuple[int, int]], min_x: int, max_x: int, min_y: int, max_y: int) -> str:
width = max_x - min_x + 1
height = max_y - min_y + 1
assert (width + 1) % (WIDTH + 1) == 0, 'Uneven amount of letters: width %d' % width
assert height == HEIGHT, 'Font height %d != 6' % height
dx = 0
letters = []
while dx < width:
for letter, pattern in LETTERS:
if dx + WIDTH <= width: # This letter can fit in the provided width
if all((pattern[y][x] == '#') == ((min_x + dx + x, min_y + y) in points)
for x in range(WIDTH)
for y in range(HEIGHT)):
letters.append(letter)
dx += WIDTH + 1
break
else:
raise ValueError('No matching text found! Found start = %s at offset dx = %d' % (letters, dx))
return ''.join(letters)
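# Hypothetical usage sketch (not part of the original module): build the point set
# for the 'A' glyph defined in LETTERS above and check that recognize() returns it.
if __name__ == '__main__':
    a_pattern = ('.##.', '#..#', '#..#', '####', '#..#', '#..#')
    a_points = {(x, y) for y, row in enumerate(a_pattern) for x, ch in enumerate(row) if ch == '#'}
    assert recognize(a_points, 0, 3, 0, 5) == 'A'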
|
"""
Copyright (c) 2020 Tyler Cone
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .register import Register
from .field import Field
class CtrlUART1(Register):
def __init__(self, address=0x24, value=0x00004007):
fields = {
"crc_poly": Field(0, 8),
"noise_cancel_en": Field(8),
"break_on_err": Field(9),
"crc_en": Field(14),
"lsb_first": Field(15),
"timeout": Field(16, 8)
}
super(CtrlUART1, self).__init__(address, value, fields)
class CtrlUART2(Register):
def __init__(self, address=0x26, value=0x00000683):
fields = {
"baud": Field(0, 16),
"frame_delay": Field(16, 8)
}
super(CtrlUART2, self).__init__(address, value, fields)
class CtrlUARTStatus(Register):
def __init__(self, address=0x28, value=0x00000000):
fields = {
"irq_uart_crc_error": Field(1),
"irq_time_out_err": Field(2),
"irq_frame_err": Field(3),
"irq_noise_err": Field(4),
"irq_rx_ovr": Field(5),
"irq_tx_ovr": Field(6),
"irq_rx_full": Field(8),
"irq_tx_empty": Field(9),
"irq_read_error": Field(10),
"irq_write_error": Field(11),
"irq_spi_crc_error": Field(12),
"irq_underrun": Field(13),
"irq_overrun": Field(14),
"break_received": Field(16),
"uart_crc_error": Field(17),
"time_out_err": Field(18),
"frame_err": Field(19),
"noise_err": Field(20),
"rx_ovr": Field(21),
"tx_ovr": Field(22),
"rx_full": Field(24),
"tx_empty": Field(25),
"read_error": Field(26),
"write_error": Field(27),
"spi_crc_error": Field(28),
"underrun": Field(29),
"overrun": Field(30)
}
super(CtrlUARTStatus, self).__init__(address, value, fields)
|
from unittest.mock import call
from .. import service
from ..models import File
def test_stat():
file = service.stat("somefile")
assert isinstance(file, File)
def test_ls(mocker, settings):
def list_of_files(path):
assert path == "/data/subdir/somedir"
return ["file1", "file2", "file3", "dir2", "dir1"]
def is_file(_, name):
return name.startswith("file")
settings.CP_ROOT_DIR = "/data"
listdir_mock = mocker.patch("cutepaste.files.service.os.listdir")
listdir_mock.side_effect = list_of_files
file_mock = mocker.patch("cutepaste.files.service.File.is_file")
file_mock.side_effect = is_file
files = service.ls("subdir/somedir")
assert len(files) == 5
# Check ordering
assert files[0].name == "dir1"
assert files[1].name == "dir2"
assert files[2].name == "file1"
assert files[3].name == "file2"
assert files[4].name == "file3"
def test_ls_with_absolute_dir(mocker, settings):
def ensure_listdir_inside_data_dir(path):
assert path == "/data/root"
return []
settings.CP_ROOT_DIR = "/data"
listdir_mock = mocker.patch("cutepaste.files.service.os.listdir")
listdir_mock.side_effect = ensure_listdir_inside_data_dir
service.ls("/root")
def test_move(mocker, settings):
settings.CP_ROOT_DIR = "/data"
move_mock = mocker.patch("cutepaste.files.service.shutil.move")
service.move(["file1", "file2"], "/some/other/dir")
assert move_mock.mock_calls == [call("/data/file1", "/data/some/other/dir/"),
call("/data/file2", "/data/some/other/dir/")]
def test_move_absolute_dir(mocker, settings):
settings.CP_ROOT_DIR = "/data"
move_mock = mocker.patch("cutepaste.files.service.shutil.move")
service.move(["/file1"], "/some/dir")
assert move_mock.mock_calls == [call("/data/file1", "/data/some/dir/")]
def test_rename(mocker, settings):
settings.CP_ROOT_DIR = "/data"
move_mock = mocker.patch("cutepaste.files.service.shutil.move")
service.rename("file1", "file2")
assert move_mock.mock_calls == [call("/data/file1", "/data/file2")]
def test_rename_absolute(mocker, settings):
settings.CP_ROOT_DIR = "/data"
move_mock = mocker.patch("cutepaste.files.service.shutil.move")
service.rename("/etc/hosts", "/etc/resolv.conf")
assert move_mock.mock_calls == [call("/data/etc/hosts", "/data/etc/resolv.conf")]
def test_copy(mocker, settings):
settings.CP_ROOT_DIR = "/data"
copy_mock = mocker.patch("cutepaste.files.service.shutil.copy")
service.copy(["file1", "file2"], "/some/dir")
assert copy_mock.mock_calls == [call("/data/file1", "/data/some/dir/"),
call("/data/file2", "/data/some/dir/")]
def test_copy_absolute(mocker, settings):
settings.CP_ROOT_DIR = "/data"
copy_mock = mocker.patch("cutepaste.files.service.shutil.copy")
service.copy(["/etc/passwd"], "/tmp")
assert copy_mock.mock_calls == [call("/data/etc/passwd", "/data/tmp/")]
def test_remove_files(mocker, settings):
def is_file_results(path):
return path in ["/data/file1", "/data/file2"]
def is_dir_results(path):
return path in ["/data/dir1"]
settings.CP_ROOT_DIR = "/data"
remove_mock = mocker.patch("cutepaste.files.service.os.remove")
rmtree_mock = mocker.patch("cutepaste.files.service.shutil.rmtree")
isfile_mock = mocker.patch("cutepaste.files.service.path.isfile")
isfile_mock.side_effect = is_file_results
isdir_mock = mocker.patch("cutepaste.files.service.path.isdir")
isdir_mock.side_effect = is_dir_results
service.remove(["file1", "file2", "dir1"])
assert remove_mock.mock_calls == [call("/data/file1"), call("/data/file2")]
assert rmtree_mock.mock_calls == [call("/data/dir1")]
def test_remove_absolute(mocker, settings):
settings.CP_ROOT_DIR = "/data"
remove_mock = mocker.patch("cutepaste.files.service.os.remove")
isfile_mock = mocker.patch("cutepaste.files.service.path.isfile")
isfile_mock.return_value = True
service.remove(["/etc/hosts"])
assert remove_mock.mock_calls == [call("/data/etc/hosts")]
|
import re
import setuptools
import pathlib
WORK_DIR = pathlib.Path(__file__).parent
with open("README.md", "r") as fh:
long_description = fh.read()
def get_version():
"""
Read version
:return: str
"""
txt = (WORK_DIR / 'aiomanybots' / '__init__.py').read_text('utf-8')
try:
        return re.findall(r"^__version__ = ('|\")([^'\"]+)\1$", txt, re.M)[0][1]
except IndexError:
raise RuntimeError('Unable to determine version.')
setuptools.setup(
name="aiomanybots",
version=get_version(),
author="drforse",
author_email="george.lifeslice@gmail.com",
description="Library for running bots concurrently on aiogram",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/drforse/aiogram_many_bots_lib",
packages=setuptools.find_packages(),
install_requires=[
'manybots@git+git://github.com/Senderman/manybotslib.git#egg=manybots',
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
)
|
#Python program for stem plot
#with Amplitude = 5 units
import numpy as np
from matplotlib import pyplot as plt
#Time axis: 1 second sampled every 10 ms
t = np.arange(0,1,0.01)
#frequency = 2 Hz
f = 2
#Sine wave with amplitude = 5 units
a = 5*np.sin(2*np.pi*f*t)
plt.stem(t,a,linefmt='grey',markerfmt='D',bottom =0.0)
plt.xlabel('Time')
#Y-axis label
plt.ylabel('Amplitude')
plt.grid(True, which='both')
plt.title('Discrete Time Sine Wave')
plt.show()
|
import json, inspect
from helper.User import User
from helper.cEmbed import granted_msg, denied_msg
from helper.GitHub import GitHub
from helper.cLog import elog
from helper.Algorithm import Algorithm
from cDatabase.DB_Algorithm import DB_Algorithm
config = json.load(open('config.json', 'r'))
path = __file__.split(config['split_path'])
file = path[len(path) - 1][:-3]
github_api = GitHub()
db_algo = DB_Algorithm('db_algorithms')
# ------------------ [ is_admin_only() ] ------------------ #
# Limits this command to only admin users (i.e. Ahmad, Khaled, MB, Miguel)
def is_admin_only(): return True
# ------------------ [ usage() ] ------------------ #
# Returns how the command is called ex. "[prefix][command]"
def usage(): return file + " [algorithm] ([code] OR [file_attachment])"
# ------------------ [ description() ] ------------------ #
# Returns a short explanation of what the function does
def description():
return (
"Creates a new file in the repository.\n" +
"```" + file + " [algorithm] [code]\n" +
file + " [algorithm] [file_attachment]```\n" +
"Notes: \n```\n" +
"`[code]` format: '''[lang]\\n [code] \\n'''\n" +
"`[file_attachment]` name format:\n" +
" .zip: [algo]__[lang].zip \n" +
" .cpp, .java, .py: [algo].[lang] \n```"
)
# ------------------ [ check_args() ] ------------------ #
# Checks if the command called by the user is valid
async def check_args(msg, args):
author = User(id = str(msg.author.id))
if not author.is_admin():
description = msg.author.mention + " You are not allowed to use this function."
await msg.reply(embed = denied_msg("Admin Command", description))
return False
flag = (len(msg.attachments) == 0 and len(args) >= 4 and len(args[0].split()) == 3)
flag = flag or (len(msg.attachments) == 1 and len(args) == 1 and len(args[0].split()) == 3)
if not flag:
await msg.reply(embed = denied_msg("Invalid Command Format", usage()))
return False
if len(msg.attachments) == 1:
algo = args[0].split()[-1]
filename = msg.attachments[0].filename.split('.')
extension = filename[-1]
if extension == 'zip':
if len(filename) != 2 or len(filename[0].split("__")) != 2:
await msg.reply(embed= denied_msg("Invalid File Name", ""))
return False
file_path = config['module_cmds_loc'] + "/algo/code.zip"
await msg.attachments[0].save(file_path)
filename = filename[0].split("__")
algo = Algorithm(algo= filename[0], lang= filename[1], is_zip= True)
else:
if len(filename) != 2:
await msg.reply(embed= denied_msg("Invalid File Name", ""))
return False
file_path = config['module_cmds_loc'] + "/algo/code.txt"
await msg.attachments[0].save(file_path)
with open(file_path, 'r') as f: code = f.read()
algo = Algorithm(algo= filename[0], lang= extension, code= code, is_zip= False)
else:
algo = args[0].split()[-1]
lang = args[1].strip('`')
code = '\n'.join(args[2 : -1])
file_path = config['module_cmds_loc'] + "/algo/code.txt"
with open(file_path, 'w') as f: f.write(code)
algo = Algorithm(algo= algo, lang= lang, code= code, is_zip= False)
if algo.lang not in ['cpp', 'java', 'py']:
await msg.reply(embed = denied_msg("Invalid Language", "Try one of `cpp`, `java`, `py`"))
return False
if algo.is_found():
await msg.reply(embed = denied_msg("Error", "Algorithm already exists in this language"))
return False
return algo
async def execute(msg, args, client):
try:
algo = await check_args(msg, args)
if algo == False: return
result = algo.commit()
if result == True:
algo.add()
            await msg.channel.send(embed = granted_msg("Algorithm Added Successfully", str(algo)))
else:
elog(result, inspect.stack())
desc = "We faced an error while uploading the file.\n"
desc += "Consider trying again in a couple of minutes."
await msg.reply(embed = denied_msg("Error", desc))
except Exception as ex:
elog(ex, inspect.stack())
await msg.reply(embed = denied_msg())
|
# coding: utf-8
"""
Apteco API
An API to allow access to Apteco Marketing Suite resources # noqa: E501
The version of the OpenAPI document: v2
Contact: support@apteco.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class AudienceExportDetail(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'audience_update_id': 'int',
'timestamp': 'datetime',
'fast_stats_build_date': 'datetime',
'user': 'UserDisplayDetails',
'nett_counts': 'list[Count]',
'urn_file_path': 'str',
'maximum_number_of_rows_to_browse': 'int',
'return_browse_rows': 'bool',
'file_path': 'str',
'output': 'Output',
'columns': 'list[Column]',
'rows': 'list[Row]'
}
attribute_map = {
'audience_update_id': 'audienceUpdateId',
'timestamp': 'timestamp',
'fast_stats_build_date': 'fastStatsBuildDate',
'user': 'user',
'nett_counts': 'nettCounts',
'urn_file_path': 'urnFilePath',
'maximum_number_of_rows_to_browse': 'maximumNumberOfRowsToBrowse',
'return_browse_rows': 'returnBrowseRows',
'file_path': 'filePath',
'output': 'output',
'columns': 'columns',
'rows': 'rows'
}
def __init__(self, audience_update_id=None, timestamp=None, fast_stats_build_date=None, user=None, nett_counts=None, urn_file_path=None, maximum_number_of_rows_to_browse=None, return_browse_rows=None, file_path=None, output=None, columns=None, rows=None): # noqa: E501
"""AudienceExportDetail - a model defined in OpenAPI""" # noqa: E501
self._audience_update_id = None
self._timestamp = None
self._fast_stats_build_date = None
self._user = None
self._nett_counts = None
self._urn_file_path = None
self._maximum_number_of_rows_to_browse = None
self._return_browse_rows = None
self._file_path = None
self._output = None
self._columns = None
self._rows = None
self.discriminator = None
self.audience_update_id = audience_update_id
self.timestamp = timestamp
self.fast_stats_build_date = fast_stats_build_date
self.user = user
self.nett_counts = nett_counts
self.urn_file_path = urn_file_path
self.maximum_number_of_rows_to_browse = maximum_number_of_rows_to_browse
self.return_browse_rows = return_browse_rows
if file_path is not None:
self.file_path = file_path
if output is not None:
self.output = output
self.columns = columns
if rows is not None:
self.rows = rows
@property
def audience_update_id(self):
"""Gets the audience_update_id of this AudienceExportDetail. # noqa: E501
The id of the update (audience version) that the export was created from # noqa: E501
:return: The audience_update_id of this AudienceExportDetail. # noqa: E501
:rtype: int
"""
return self._audience_update_id
@audience_update_id.setter
def audience_update_id(self, audience_update_id):
"""Sets the audience_update_id of this AudienceExportDetail.
The id of the update (audience version) that the export was created from # noqa: E501
:param audience_update_id: The audience_update_id of this AudienceExportDetail. # noqa: E501
:type: int
"""
if audience_update_id is None:
raise ValueError("Invalid value for `audience_update_id`, must not be `None`") # noqa: E501
self._audience_update_id = audience_update_id
@property
def timestamp(self):
"""Gets the timestamp of this AudienceExportDetail. # noqa: E501
The date and time that the export was produced # noqa: E501
:return: The timestamp of this AudienceExportDetail. # noqa: E501
:rtype: datetime
"""
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""Sets the timestamp of this AudienceExportDetail.
The date and time that the export was produced # noqa: E501
:param timestamp: The timestamp of this AudienceExportDetail. # noqa: E501
:type: datetime
"""
if timestamp is None:
raise ValueError("Invalid value for `timestamp`, must not be `None`") # noqa: E501
self._timestamp = timestamp
@property
def fast_stats_build_date(self):
"""Gets the fast_stats_build_date of this AudienceExportDetail. # noqa: E501
The date and time that the FastStats system used to create this export was built # noqa: E501
:return: The fast_stats_build_date of this AudienceExportDetail. # noqa: E501
:rtype: datetime
"""
return self._fast_stats_build_date
@fast_stats_build_date.setter
def fast_stats_build_date(self, fast_stats_build_date):
"""Sets the fast_stats_build_date of this AudienceExportDetail.
The date and time that the FastStats system used to create this export was built # noqa: E501
:param fast_stats_build_date: The fast_stats_build_date of this AudienceExportDetail. # noqa: E501
:type: datetime
"""
if fast_stats_build_date is None:
raise ValueError("Invalid value for `fast_stats_build_date`, must not be `None`") # noqa: E501
self._fast_stats_build_date = fast_stats_build_date
@property
def user(self):
"""Gets the user of this AudienceExportDetail. # noqa: E501
:return: The user of this AudienceExportDetail. # noqa: E501
:rtype: UserDisplayDetails
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this AudienceExportDetail.
:param user: The user of this AudienceExportDetail. # noqa: E501
:type: UserDisplayDetails
"""
if user is None:
raise ValueError("Invalid value for `user`, must not be `None`") # noqa: E501
self._user = user
@property
def nett_counts(self):
"""Gets the nett_counts of this AudienceExportDetail. # noqa: E501
The set of overall counts for the audience behind this export # noqa: E501
:return: The nett_counts of this AudienceExportDetail. # noqa: E501
:rtype: list[Count]
"""
return self._nett_counts
@nett_counts.setter
def nett_counts(self, nett_counts):
"""Sets the nett_counts of this AudienceExportDetail.
The set of overall counts for the audience behind this export # noqa: E501
:param nett_counts: The nett_counts of this AudienceExportDetail. # noqa: E501
:type: list[Count]
"""
if nett_counts is None:
raise ValueError("Invalid value for `nett_counts`, must not be `None`") # noqa: E501
self._nett_counts = nett_counts
@property
def urn_file_path(self):
"""Gets the urn_file_path of this AudienceExportDetail. # noqa: E501
If a URN file was generated as part of this export then this will be its path within the FastStats system # noqa: E501
:return: The urn_file_path of this AudienceExportDetail. # noqa: E501
:rtype: str
"""
return self._urn_file_path
@urn_file_path.setter
def urn_file_path(self, urn_file_path):
"""Sets the urn_file_path of this AudienceExportDetail.
If a URN file was generated as part of this export then this will be its path within the FastStats system # noqa: E501
:param urn_file_path: The urn_file_path of this AudienceExportDetail. # noqa: E501
:type: str
"""
if urn_file_path is None:
raise ValueError("Invalid value for `urn_file_path`, must not be `None`") # noqa: E501
self._urn_file_path = urn_file_path
@property
def maximum_number_of_rows_to_browse(self):
"""Gets the maximum_number_of_rows_to_browse of this AudienceExportDetail. # noqa: E501
The requested maximum number of rows to return when browsing the data # noqa: E501
:return: The maximum_number_of_rows_to_browse of this AudienceExportDetail. # noqa: E501
:rtype: int
"""
return self._maximum_number_of_rows_to_browse
@maximum_number_of_rows_to_browse.setter
def maximum_number_of_rows_to_browse(self, maximum_number_of_rows_to_browse):
"""Sets the maximum_number_of_rows_to_browse of this AudienceExportDetail.
The requested maximum number of rows to return when browsing the data # noqa: E501
:param maximum_number_of_rows_to_browse: The maximum_number_of_rows_to_browse of this AudienceExportDetail. # noqa: E501
:type: int
"""
if maximum_number_of_rows_to_browse is None:
raise ValueError("Invalid value for `maximum_number_of_rows_to_browse`, must not be `None`") # noqa: E501
self._maximum_number_of_rows_to_browse = maximum_number_of_rows_to_browse
@property
def return_browse_rows(self):
"""Gets the return_browse_rows of this AudienceExportDetail. # noqa: E501
Whether data rows were requested to be returned or whether the data was exported directly to the specified file # noqa: E501
:return: The return_browse_rows of this AudienceExportDetail. # noqa: E501
:rtype: bool
"""
return self._return_browse_rows
@return_browse_rows.setter
def return_browse_rows(self, return_browse_rows):
"""Sets the return_browse_rows of this AudienceExportDetail.
Whether data rows were requested to be returned or whether the data was exported directly to the specified file # noqa: E501
:param return_browse_rows: The return_browse_rows of this AudienceExportDetail. # noqa: E501
:type: bool
"""
if return_browse_rows is None:
raise ValueError("Invalid value for `return_browse_rows`, must not be `None`") # noqa: E501
self._return_browse_rows = return_browse_rows
@property
def file_path(self):
"""Gets the file_path of this AudienceExportDetail. # noqa: E501
If specified, the path of a file that the data was exported to # noqa: E501
:return: The file_path of this AudienceExportDetail. # noqa: E501
:rtype: str
"""
return self._file_path
@file_path.setter
def file_path(self, file_path):
"""Sets the file_path of this AudienceExportDetail.
If specified, the path of a file that the data was exported to # noqa: E501
:param file_path: The file_path of this AudienceExportDetail. # noqa: E501
:type: str
"""
self._file_path = file_path
@property
def output(self):
"""Gets the output of this AudienceExportDetail. # noqa: E501
:return: The output of this AudienceExportDetail. # noqa: E501
:rtype: Output
"""
return self._output
@output.setter
def output(self, output):
"""Sets the output of this AudienceExportDetail.
:param output: The output of this AudienceExportDetail. # noqa: E501
:type: Output
"""
self._output = output
@property
def columns(self):
"""Gets the columns of this AudienceExportDetail. # noqa: E501
The list of columns that have been included in this export # noqa: E501
:return: The columns of this AudienceExportDetail. # noqa: E501
:rtype: list[Column]
"""
return self._columns
@columns.setter
def columns(self, columns):
"""Sets the columns of this AudienceExportDetail.
The list of columns that have been included in this export # noqa: E501
:param columns: The columns of this AudienceExportDetail. # noqa: E501
:type: list[Column]
"""
if columns is None:
raise ValueError("Invalid value for `columns`, must not be `None`") # noqa: E501
self._columns = columns
@property
def rows(self):
"""Gets the rows of this AudienceExportDetail. # noqa: E501
If data rows were requested to be returned then the set of rows containing data for the given columns selected by the audience queries # noqa: E501
:return: The rows of this AudienceExportDetail. # noqa: E501
:rtype: list[Row]
"""
return self._rows
@rows.setter
def rows(self, rows):
"""Sets the rows of this AudienceExportDetail.
If data rows were requested to be returned then the set of rows containing data for the given columns selected by the audience queries # noqa: E501
:param rows: The rows of this AudienceExportDetail. # noqa: E501
:type: list[Row]
"""
self._rows = rows
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AudienceExportDetail):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
from datetime import date
from messaging.messaging_system import db
from messaging.models import DatabaseModel
from messaging.models.users import User
class Message(DatabaseModel):
__tablename__ = 'messages'
id = db.Column(db.Integer, primary_key=True)
sender = db.Column(db.ForeignKey(User.id), nullable=False)
receiver = db.Column(db.ForeignKey(User.id), nullable=False)
subject = db.Column(db.String(50), nullable=False)
message = db.Column(db.Text, nullable=False)
is_read = db.Column(db.Boolean, nullable=False, default=False)
creation_date = db.Column(db.Date, nullable=False, default=date.today)
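# Hypothetical usage sketch (assumes DatabaseModel behaves like a standard
# Flask-SQLAlchemy declarative model and that a session is available):
#
#   msg = Message(sender=1, receiver=2, subject="Hello", message="Hi there")
#   db.session.add(msg)
#   db.session.commit()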
|
"""Store Aisle Monitor"""
"""
Copyright (c) 2018 Intel Corporation.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit person to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import sys
import time
from argparse import ArgumentParser
import pathlib
import cv2
import numpy as np
import json
from azure.storage.blob import BlockBlobService, PublicAccess
from inference import Network
is_async_mode = True
CONFIG_FILE = '../resources/config.json'
# Weightage/ratio to merge (for Heatmap) the original frame and the colorMap frame (sum of both should be 1)
INITIAL_FRAME_WEIGHTAGE = 0.65
COLORMAP_FRAME_WEIGHTAGE = 0.35
# Weightage/ratio to merge (for integrated output) the people count frame and the colorMap frame (sum of both should be 1)
P_COUNT_FRAME_WEIGHTAGE = 0.65
COLORMAP_FRAME_WEIGHTAGE_1 = 0.35
# Multiplication factor to compute time interval for uploading snapshots to the cloud
MULTIPLICATION_FACTOR = 5
# Azure Blob container name
CONTAINER_NAME = 'store-aisle-monitor-snapshots'
# To get current working directory
CWD = os.getcwd()
# Creates subdirectory to save output snapshots
pathlib.Path(CWD + '/output_snapshots/').mkdir(parents=True, exist_ok=True)
def build_argparser():
parser = ArgumentParser()
parser.add_argument("-m", "--model",
help="Path to an .xml file with a trained model.",
required=True, type=str)
parser.add_argument("-l", "--cpu_extension",
help="MKLDNN (CPU)-targeted custom layers. Absolute "
"path to a shared library with the kernels impl.",
type=str, default=None)
parser.add_argument("-d", "--device",
help="Specify the target device to infer on; "
"CPU, GPU, FPGA, HDDL or MYRIAD is acceptable. Application"
" will look for a suitable plugin for device "
"specified (CPU by default)", default="CPU", type=str)
parser.add_argument("-pt", "--prob_threshold",
help="Probability threshold for detections filtering",
default=0.5, type=float)
parser.add_argument("-an", "--account_name",
help="Account name of Azure cloud storage container",
default=None, type=str)
parser.add_argument("-ak", "--account_key",
help="Account key of Azure cloud storage container",
default=None, type=str)
parser.add_argument("-f", "--flag", help="sync or async", default="async", type=str)
return parser
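# Hypothetical invocation sketch (the script name, model path and threshold below are
# placeholders, not taken from this repository):
#   python store_aisle_monitor.py -m /path/to/person-detection-model.xml -d CPU -pt 0.7 -f async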
def apply_time_stamp_and_save(image, people_count, upload_azure):
"""
Saves snapshots with timestamps.
"""
current_date_time = time.strftime("%y-%m-%d_%H:%M:%S", time.gmtime())
file_name = current_date_time + "_PCount_" + str(people_count) + ".png"
file_path = CWD + "/output_snapshots/"
local_file_name = "output_" + file_name
file_name = file_path + local_file_name
cv2.imwrite(file_name, image)
    if upload_azure == 1:
upload_snapshot(file_path, local_file_name)
def create_cloud_container(account_name, account_key):
"""
Creates a BlockBlobService container on cloud.
"""
global BLOCK_BLOB_SERVICE
# Create the BlockBlobService to call the Blob service for the storage account
BLOCK_BLOB_SERVICE = BlockBlobService(account_name, account_key)
# Create BlockBlobService container
BLOCK_BLOB_SERVICE.create_container(CONTAINER_NAME)
# Set the permission so that the blobs are public
BLOCK_BLOB_SERVICE.set_container_acl(CONTAINER_NAME, public_access=PublicAccess.Container)
def upload_snapshot(file_path, local_file_name):
"""
Uploads snapshots to cloud storage container.
"""
try:
full_path_to_file = file_path + local_file_name
print("\nUploading to cloud storage as blob : " + local_file_name)
# Upload the snapshot, with local_file_name as the blob name
BLOCK_BLOB_SERVICE.create_blob_from_path(CONTAINER_NAME, local_file_name, full_path_to_file)
except Exception as e:
print(e)
def main():
global CONFIG_FILE
global is_async_mode
args = build_argparser().parse_args()
account_name = args.account_name
account_key = args.account_key
    if account_name == "" or account_key == "":
print("Invalid account name or account key!")
sys.exit(1)
elif account_name is not None and account_key is None:
print("Please provide account key using -ak option!")
sys.exit(1)
elif account_name is None and account_key is not None:
print("Please provide account name using -an option!")
sys.exit(1)
elif account_name is None and account_key is None:
upload_azure = 0
else:
print("Uploading the results to Azure storage \""+ account_name+ "\"" )
upload_azure = 1
create_cloud_container(account_name, account_key)
assert os.path.isfile(CONFIG_FILE), "{} file doesn't exist".format(CONFIG_FILE)
config = json.loads(open(CONFIG_FILE).read())
for idx, item in enumerate(config['inputs']):
if item['video'].isdigit():
input_stream = int(item['video'])
cap = cv2.VideoCapture(input_stream)
if not cap.isOpened():
print("\nCamera not plugged in... Exiting...\n")
sys.exit(0)
else:
input_stream = item['video']
cap = cv2.VideoCapture(input_stream)
if not cap.isOpened():
print("\nUnable to open video file... Exiting...\n")
sys.exit(0)
fps = cap.get(cv2.CAP_PROP_FPS)
if args.flag == "async":
is_async_mode = True
print('Application running in async mode')
else:
is_async_mode = False
print('Application running in sync mode')
# Initialise the class
infer_network = Network()
# Load the network to IE plugin to get shape of input layer
n, c, h, w = infer_network.load_model(args.model, args.device, 1, 1, 2, args.cpu_extension)[1]
print("To stop the execution press Esc button")
initial_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
initial_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
frame_count = 1
accumulated_image = np.zeros((initial_h, initial_w), np.uint8)
mog = cv2.createBackgroundSubtractorMOG2()
ret, frame = cap.read()
cur_request_id = 0
next_request_id = 1
while cap.isOpened():
ret, next_frame = cap.read()
if not ret:
break
frame_count = frame_count + 1
in_frame = cv2.resize(next_frame, (w, h))
# Change data layout from HWC to CHW
in_frame = in_frame.transpose((2, 0, 1))
in_frame = in_frame.reshape((n, c, h, w))
# Start asynchronous inference for specified request.
inf_start = time.time()
        if is_async_mode:
infer_network.exec_net(next_request_id, in_frame)
else:
infer_network.exec_net(cur_request_id, in_frame)
# Wait for the result
if infer_network.wait(cur_request_id) == 0:
det_time = time.time() - inf_start
people_count = 0
# Converting to Grayscale
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Remove the background
fgbgmask = mog.apply(gray)
# Thresholding the image
thresh = 2
max_value = 2
threshold_image = cv2.threshold(fgbgmask, thresh, max_value,
cv2.THRESH_BINARY)[1]
# Adding to the accumulated image
accumulated_image = cv2.add(threshold_image, accumulated_image)
colormap_image = cv2.applyColorMap(accumulated_image, cv2.COLORMAP_HOT)
# Results of the output layer of the network
res = infer_network.get_output(cur_request_id)
for obj in res[0][0]:
# Draw only objects when probability more than specified threshold
if obj[2] > args.prob_threshold:
xmin = int(obj[3] * initial_w)
ymin = int(obj[4] * initial_h)
xmax = int(obj[5] * initial_w)
ymax = int(obj[6] * initial_h)
class_id = int(obj[1])
# Draw bounding box
color = (min(class_id * 12.5, 255), min(class_id * 7, 255),
min(class_id * 5, 255))
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
people_count = people_count + 1
people_count_message = "People Count : " + str(people_count)
            inf_time_message = "Inference time: N/A for async mode" if is_async_mode else\
"Inference time: {:.3f} ms".format(det_time * 1000)
cv2.putText(frame, inf_time_message, (15, 25), cv2.FONT_HERSHEY_COMPLEX, 1,
(255, 255, 255), 2)
cv2.putText(frame, people_count_message, (15, 65), cv2.FONT_HERSHEY_COMPLEX, 1,
(255, 255, 255), 2)
final_result_overlay = cv2.addWeighted(frame, P_COUNT_FRAME_WEIGHTAGE,
colormap_image,
COLORMAP_FRAME_WEIGHTAGE_1, 0)
cv2.imshow("Detection Results", final_result_overlay)
time_interval = MULTIPLICATION_FACTOR * fps
if frame_count % time_interval == 0:
apply_time_stamp_and_save(final_result_overlay, people_count, upload_azure)
frame = next_frame
            if is_async_mode:
cur_request_id, next_request_id = next_request_id, cur_request_id
key = cv2.waitKey(1)
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
infer_network.clean()
if __name__ == '__main__':
sys.exit(main() or 0)
|
import discord
from discord.ext import commands
from PIL import Image
import inspect
import random
import io
def random_color():
color = ('#%06x' % random.randint(8, 0xFFFFFF))
color = int(color[1:], 16)
color = discord.Color(value=color)
return color
class Utility:
'''Useful commands to make your life easier'''
def __init__(self, bot):
self.bot = bot
@commands.command(name='logout')
async def _logout(self, ctx):
'''
Shuts down the selfbot,
equivalent to a restart if you are hosting on heroku
'''
await ctx.send('`Selfbot Logging out...`')
await self.bot.logout()
@commands.command(name='help')
async def new_help_command(self, ctx, *commands: str):
'''Shows this message.'''
destination = ctx.message.author if self.bot.pm_help else ctx.message.channel
def repl(obj):
return self.bot._mentions_transforms.get(obj.group(0), '')
# help by itself just lists our own commands.
if len(commands) == 0:
pages = await self.bot.formatter.format_help_for(ctx, self.bot)
elif len(commands) == 1:
# try to see if it is a cog name
name = self.bot._mention_pattern.sub(repl, commands[0])
command = None
if name in self.bot.cogs:
command = self.bot.cogs[name]
else:
command = self.bot.all_commands.get(name)
if command is None:
await destination.send(self.bot.command_not_found.format(name))
return
pages = await self.bot.formatter.format_help_for(ctx, command)
else:
name = self.bot._mention_pattern.sub(repl, commands[0])
command = self.bot.all_commands.get(name)
if command is None:
await destination.send(self.bot.command_not_found.format(name))
return
for key in commands[1:]:
try:
key = self.bot._mention_pattern.sub(repl, key)
command = command.all_commands.get(key)
if command is None:
await destination.send(self.bot.command_not_found.format(key))
return
except AttributeError:
await destination.send(self.bot.command_has_no_subcommands.format(command, key))
return
pages = await self.bot.formatter.format_help_for(ctx, command)
if self.bot.pm_help is None:
characters = sum(map(lambda l: len(l), pages))
# modify destination based on length of pages.
if characters > 1000:
destination = ctx.message.author
color = random_color()
for embed in pages:
em = discord.Embed(title='Command Help', color=color)
embed = embed.strip('```')
em.description = embed
await ctx.send(embed=em)
@commands.command()
async def tinyurl(self, ctx, *, link: str):
'''Makes a link shorter using the tinyurl api'''
await ctx.message.delete()
url = 'http://tinyurl.com/api-create.php?url=' + link
async with ctx.session.get(url) as resp:
new = await resp.text()
emb = discord.Embed(color=random_color())
emb.add_field(name="Original Link", value=link, inline=False)
emb.add_field(name="Shortened Link", value=new, inline=False)
emb.set_footer(text='Selfbot made by SharpBit | Powered by cr-api',
icon_url='http://cr-api.com/static/img/branding/cr-api-logo.png')
await ctx.send(embed=emb)
@commands.command()
async def hastebin(self, ctx, *, code):
'''Hastebin-ify your code!'''
async with ctx.session.post("https://hastebin.com/documents", data=code) as resp:
data = await resp.json()
key = data['key']
await ctx.message.edit(content=f"Hastebin-ified! <https://hastebin.com/{key}.py>")
@commands.command()
async def source(self, ctx, *, command):
'''See the source code for any command.'''
source = str(inspect.getsource(self.bot.get_command(command).callback))
fmt = '```py\n' + source.replace('`', '\u200b`') + '\n```'
if len(fmt) > 2000:
async with ctx.session.post("https://hastebin.com/documents", data=source) as resp:
data = await resp.json()
key = data['key']
return await ctx.send(f'Command source: <https://hastebin.com/{key}.py>')
else:
return await ctx.send(fmt)
def setup(bot):
bot.add_cog(Utility(bot))
|
import keyboard, pyautogui, pygame
pygame.init()
appVersion = 'python_pygame_gui_template'
dev = False
white = (255,255,255)
black = (0,0,0)
red = (200,0,0)
light_red = (255,0,0)
yellow = (200,200,0)
light_yellow = (255,255,0)
green = (34,177,76)
light_green = (0,255,0)
darkblue = (21,35,45)
lightblue = (22,48,66)
textgrey = (170,170,170)
display_width = 300
display_height = 80
FPS = 30
botMainApp = True
font = pygame.font.SysFont(None, 25)
pygame.display.set_caption(appVersion)
gameDisplay = pygame.display.set_mode((display_width,display_height))
clock = pygame.time.Clock()
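# Hypothetical main-loop sketch (not part of the original template): a minimal
# pygame event/draw loop built on the display, clock and colours defined above.
def run_template_loop():
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        # Clear the window and draw the app version label
        gameDisplay.fill(darkblue)
        gameDisplay.blit(font.render(appVersion, True, textgrey), (10, 10))
        pygame.display.update()
        clock.tick(FPS)
    pygame.quit()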
|
import asyncio
from ..pool import ConnectionPool, ClosedPool, EmptyPool
from .aioconnection import AIOLDAPConnection
MYPY = False
if MYPY:
from ..ldapclient import LDAPClient
class AIOPoolContextManager:
def __init__(self, pool, *args, **kwargs):
self.pool = pool
self.__conn = None
async def __aenter__(self):
if self.pool.closed:
await self.pool.open()
self.__conn = await self.pool.get()
return self.__conn
async def __aexit__(self, type, value, traceback):
await self.pool.put(self.__conn)
class AIOConnectionPool(ConnectionPool):
"""
    A connection pool that can be used with asyncio tasks. It's inherited from
:class:`bonsai.pool.ConnectionPool`.
:param LDAPClient client: the :class:`bonsai.LDAPClient` that's used to create
connections.
:param int minconn: the minimum number of connections that's created
after the pool is opened.
:param int maxconn: the maximum number of connections in the pool.
:param \\*\\*kwargs: additional keyword arguments that are passed to
the :meth:`bonsai.LDAPClient.connect` method.
:raises ValueError: when the minconn is negative or the maxconn is less
than the minconn.
"""
def __init__(
self,
client: "LDAPClient",
minconn: int = 1,
maxconn: int = 10,
loop=None,
**kwargs
):
super().__init__(client, minconn, maxconn, **kwargs)
self._loop = loop
try:
# The loop parameter is deprecated since 3.8, removed in 3.10
# and it raises TypeError.
self._lock = asyncio.Condition(loop=self._loop)
except TypeError:
self._lock = asyncio.Condition()
async def open(self) -> None:
async with self._lock:
for _ in range(
self._minconn - self.idle_connection - self.shared_connection
):
conn = await self._client.connect(
is_async=True, loop=self._loop, **self._kwargs
)
self._idles.add(conn)
self._closed = False
async def get(self) -> AIOLDAPConnection:
async with self._lock:
if self._closed:
raise ClosedPool("The pool is closed.")
await self._lock.wait_for(lambda: not self.empty or self._closed)
try:
conn = self._idles.pop()
except KeyError:
if len(self._used) < self._maxconn:
conn = await self._client.connect(
is_async=True, loop=self._loop, **self._kwargs
)
else:
raise EmptyPool("Pool is empty.") from None
self._used.add(conn)
self._lock.notify()
return conn
async def put(self, conn: AIOLDAPConnection) -> None:
async with self._lock:
super().put(conn)
self._lock.notify()
async def close(self) -> None:
async with self._lock:
super().close()
self._lock.notify_all()
def spawn(self, *args, **kwargs):
return AIOPoolContextManager(self, *args, **kwargs)
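# Hypothetical usage sketch (assumes a reachable LDAP server and bonsai's LDAPClient;
# the URL below is a placeholder):
#
#   import asyncio
#   import bonsai
#
#   async def whoami():
#       pool = AIOConnectionPool(bonsai.LDAPClient("ldap://localhost"), minconn=1, maxconn=5)
#       async with pool.spawn() as conn:
#           return await conn.whoami()
#
#   asyncio.run(whoami())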
|
""" Bases for sqlalchemy data models """
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Union
from sqlalchemy import Column, text
from sqlalchemy.schema import PrimaryKeyConstraint
from sqlalchemy.sql.base import ImmutableColumnCollection
from sqlalchemy.sql.elements import BinaryExpression, TextClause
from sqlalchemy.sql.functions import Function
import util
import util.jsontools
from db import db
if TYPE_CHECKING:
from db.models.bases import Model
class ProxyBase:
def __init__(self, model: Model):
self.model: Model = model
class ColumnProxy(ProxyBase):
""" Proxy object for a data model's columns """
def __iter__(self):
for col in self.columns:
yield col
def __repr__(self):
return util.jsontools.make_repr(self.names)
def __getitem__(self, item):
try: # item is int
return self.columns[item]
except TypeError: # item is not int
pass
return self.dict()[item]
def dict(self) -> Dict[str, Column]:
return dict(self.sa_obj) # type: ignore
@property
def sa_obj(self) -> ImmutableColumnCollection:
""" Reference to the underlying sqlalchemy object """
return self.model.__table__.columns
@property
def columns(self) -> List[Column]:
return list(self.sa_obj)
@property
def names(self) -> List[str]:
return [x.name for x in self.columns]
@property
def pytypes(self) -> Dict[str, Any]:
""" Return a mapping of the model's field names to Python types.
Example:
>>> model.columns.pytypes
>>> {"id": int, "name": str}
Returns:
Dict[str, Any]
"""
dtypes = {}
for col in self.columns:
dtypes[col.name] = col.type.python_type
return dtypes
@property
def dtypes(self) -> Dict[str, Any]:
""" Return a mapping of the model's field names to SQL column types.
Example:
>>> model.columns.dtypes
>>> {'id': BigInteger(), 'first_name': String(length=100)}
Returns:
Dict[str, Any]
"""
dtypes = {}
for col in self.columns:
dtypes[col.name] = col.type
return dtypes
class PrimaryKeyProxy(ColumnProxy):
""" Proxy object for a data model's primary key attributes """
def dict(self) -> Dict:
return dict(self.sa_obj.columns) # type: ignore
@property
def sa_obj(self) -> PrimaryKeyConstraint: # type: ignore
""" Reference to the underlying sqlalchemy object """
return self.model.__table__.primary_key
@property
def columns(self) -> List[Column]:
return list(self.sa_obj.columns)
@property
async def values(self) -> List[Any]:
values: List
async with db.Session() as session:
async with session.begin():
values = (await session.execute(self.model.select(*self.columns))).all()
return [util.reduce(v) for v in values]
class AggregateProxy(ProxyBase):
""" Proxy object for invoking aggregate queries against a model's underlying data """
def __repr__(self):
return f"AggregateProxy: {self.model.__module__}"
@property
def _pk(self) -> PrimaryKeyProxy:
return self.model.pk
@property
def _c(self) -> ColumnProxy:
return self.model.c
@property
def default_column(self) -> Column:
return self._pk[0] if len(list(self._pk)) > 0 else self._c[0]
def ensure_column(self, column: Union[str, Column] = None) -> Column:
col: Column
if isinstance(column, str):
col = self._c[column]
elif column is None:
col = self.default_column
else:
raise ValueError(
f"No column named '{column}' on {self.model.__name__} model"
)
return col
# TODO: test with and without filter
async def agg(
self,
funcs: Union[Function, List[Function]],
filter: Union[str, TextClause, BinaryExpression] = None,
) -> Dict[str, Union[int, float]]:
func_map: Dict[str, Function] = {f.name: f for f in util.ensure_list(funcs)}
stmt = self.model.select(*func_map.values())
if filter is not None:
if not isinstance(filter, (TextClause, BinaryExpression)):
filter = text(filter)
stmt = stmt.where(filter)
result: db.Row
async with db.Session() as session:
async with session.begin():
result = (await session.execute(stmt)).one()
return dict(zip(func_map, result))
async def count(self, filter: Union[str, TextClause] = None) -> int:
""" Get the model's rowcount """
result = await self.agg(db.func.count(self.default_column), filter=filter)
return util.reduce(result.values())
async def max(
self, column: Union[str, Column] = None, filter: Union[str, TextClause] = None
) -> int:
""" Get the maximum value of the given column. If no column is specified,
the max value of the first primary key column is returned.
"""
func: Function = db.func.max(self.ensure_column(column))
result = await self.agg(func, filter=filter)
return util.reduce(result.values())
async def min(
self, column: Union[str, Column] = None, filter: Union[str, TextClause] = None
) -> int:
""" Get the minimum value of the given column. If no column is specified,
the min value of the first primary key column is returned.
"""
func: Function = db.func.min(self.ensure_column(column))
result = await self.agg(func, filter=filter)
return util.reduce(result.values())
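# Hypothetical usage sketch (assumes a concrete Model subclass exposes these proxies,
# e.g. as `User.c`, `User.pk` and `User.agg`, mirroring the docstring examples above):
#
#   User.c.names            # -> ['id', 'first_name', ...]
#   User.c.pytypes          # -> {'id': int, 'first_name': str, ...}
#   await User.agg.count()  # -> total number of rows
#   await User.agg.max("id")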
|
from machine import Pin
try:
import usocket as socket
except:
import socket
# Set this with the corresponding pin of your board.
led_gpio = 2
led = Pin(led_gpio, Pin.OUT)
web_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
web_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
web_server.bind(("", 80))
web_server.listen(5)
# Just a simple web interface for reference.
def web_page():
if led.value() == 0:
led_state = "ON"
else:
led_state = "OFF"
html = """
<html>
<head>
<title>ESP Web Server</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
</head>
<body>
<h1>ESP Web Server</h1>
<p>GPIO state: <strong>{0}</strong></p>
<form>
<p><button name="led" value="on">ON</button></p>
<p><button name="led" value="off">OFF</button></p>
</form>
</body>
</html>
""".format(led_state)
return html
def main():
client, addr = web_server.accept()
request = b""
while "\r\n\r\n" not in request:
request += client.recv(128)
request = str(request)
led_on = request.find("/?led=on")
led_off = request.find("/?led=off")
if led_on == 6:
led.value(0)
print("LED IS ON!")
if led_off == 6:
led.value(1)
print("LED IS OFF!")
client.send("HTTP/1.1 200 OK\r\n")
client.send("Content-Type: text/html\r\n")
client.sendall(web_page())
client.close()
print("Starting web server...")
while True:
main()
|
def threeSum(nums):
res = set()
nums.sort()
for i in range(len(nums)-2):
target = -(nums[i])
left = i + 1
right = len(nums)-1
while left < right:
sum = nums[left] + nums[right]
if sum == target:
res.add((nums[i],nums[left],nums[right]))
left += 1
elif sum < target:
left += 1
else:
right -= 1
print(list(res))
arr = [-1,0,1,2,-1,-4]
threeSum(arr)
# For each number in the list, find a pair of other numbers whose sum equals its negative, so that all three sum to 0.
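# Example: for [-1, 0, 1, 2, -1, -4] the printed triplets are (-1, -1, 2) and (-1, 0, 1).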
|
# -*- coding: utf-8 -*-
import glob
import json
import numpy as np
from sklearn.model_selection import train_test_split
from keras.utils import np_utils, plot_model
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
import keras_model
# Batch size
BATCH_SIZE = 10
# Number of epochs
MAX_EPOCH = 10
# Number of output classes
N_OUT = len(glob.glob('image/*'))
# Function to load the dataset
def load_dataset():
image_data = np.load("./data/image_data.npy")
label_data = np.load("./data/label_data.npy")
    # Split into training and test data
    train_image, test_image, train_label, test_label = train_test_split(image_data, label_data, train_size=0.8, shuffle=True)
    # Convert labels to one-hot format
train_label = np_utils.to_categorical(train_label, N_OUT)
test_label = np_utils.to_categorical(test_label, N_OUT)
print("train_data: {0}\ttest_data: {1}".format(len(train_image), len(test_image)))
return train_image, train_label, test_image, test_label
# Function to build the model
def build_model(in_shape):
N_OUT = len(glob.glob('image/*'))
model = keras_model.my_model(in_shape, N_OUT)
return model
# Function to train the model
def train_model(model, x, y):
model.fit(x, y, batch_size=BATCH_SIZE, epochs=MAX_EPOCH, shuffle=True)
    # Path to save the model weights
    save_model_path = "./log/model.hdf5"
    # Save the model weights
model.save_weights(save_model_path)
return model
# Function to evaluate the model
def evaluate_model(model, x, y):
    score = model.evaluate(x, y)
    print("test loss: {:.04f}\ttest accuracy: {:.04f}".format(score[0], score[1]))
if __name__ == '__main__':
    # Load the dataset
    train_image, train_label, test_image, test_label = load_dataset()
    # Input shape
    in_shape = train_image.shape[1:]
    # Build the model
    model = build_model(in_shape)
    # Visualize the model architecture
    plot_model(model, to_file='./log/model.png')
    # Train
    model_train = train_model(model, train_image, train_label)
    # Evaluate
evaluate_model(model_train, test_image, test_label)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Tong Zhang <zhangt@frib.msu.edu>
# 2016-10-16 20:20:57 PM EDT
#
from flame import Machine
import numpy as np
import matplotlib.pyplot as plt
lat_fid = open('test.lat', 'r')
m = Machine(lat_fid)
## all BPMs and Correctors (both horizontal and vertical)
bpm_ids, cor_ids = m.find(type='bpm'), m.find(type='orbtrim')
corh_ids = cor_ids[0::2]
corv_ids = cor_ids[1::2]
observe_ids = bpm_ids
## before distortion
s = m.allocState({})
r = m.propagate(s, 0, len(m), observe=range(len(m)))
x, y = np.array([[r[i][1].moment0_env[j] for i in range(len(m))] for j in [0,2]])
pos = np.array([r[i][1].pos for i in range(len(m))])
fig1 = plt.figure(figsize=(10, 8), dpi=120)
ax1 = fig1.add_subplot(111)
linex, = ax1.plot(pos[observe_ids], x[observe_ids], 'r-',
alpha=0.6,
label='$\mathrm{ref\;orbit}$')
linex.set_lw(2)
## apply random kicks
N = 1
#corh_ids_enabled = np.random.choice(corh_ids, size=N)
#corh_val_enabled = 5e-3 * (np.random.random(size=N) * (2 - 1) + 1)
corh_ids_enabled = np.array([392])
corh_val_enabled = np.array([0.005])
for i, v in zip(corh_ids_enabled, corh_val_enabled):
m.reconfigure(i, {'theta_x': v})
"""
for i, v in zip(corh_sel, corh_val):
m.reconfigure(i, {'theta_x': v})
for i, v in zip(corv_sel, corv_val):
m.reconfigure(i, {'theta_y': v})
"""
s_tmp = m.allocState({})
r_tmp = m.propagate(s_tmp, 0, len(m), observe=range(len(m)))
x_tmp, y_tmp = np.array([[r_tmp[i][1].moment0_env[j] for i in range(len(m))] for j in [0,2]])
pos = np.array([r_tmp[i][1].pos for i in range(len(m))])
# data plot
linex_tmp, = ax1.plot(pos[observe_ids], x_tmp[observe_ids], 'b--',
alpha=0.8,
label='$\mathrm{kicked\;orbit}$')
linex_tmp.set_lw(2)
## mark the enabled kickers
corr = ax1.scatter(pos[corh_ids_enabled], x_tmp[corh_ids_enabled],
c='m', alpha=0.8, s=100,
label=r"$\mathrm{Kicker}$")
#plt.show()
## correct orbit
# define objective function to minimize
#def obj_func(cor_val, cor_ids):
# """ Objective function for `minimize`, calculate the distance
# to the ideal trajectory
#
# :param cor_val: corrector strength/values, list/array, [rad]
# :param cor_ids: corrector id numbers, list/array
# :return: sum of the square of the deviation between present
# ideal trajectory
# """
# for i, v in zip(cor_ids, cor_val):
# m.reconfigure(i, {'theta_x': v}) # horizontal only
#
# s_tmp = m.allocState({})
# r_tmp = m.propagate(s_tmp, 0, len(m), observe=range(len(m)))
# x_tmp, y_tmp = np.array([[r_tmp[i][1].moment0_env[j]
# for i in observe_ids]
# for j in [0, 2]])
# #return np.sum((x_tmp - x)**2)
# #return np.sum((x_tmp)**2)
# xsq = x_tmp * x_tmp
# return xsq.mean() * xsq.max()
# #return np.sum(xsq)
#
#print obj_func(corh_ids_enabled, corh_val_enabled)
def obj_func(cor_val, cor_ids):
""" Objective function for `minimize`, calculate the distance
to the ideal trajectory
:param cor_val: corrector strength/values, list/array, [rad]
:param cor_ids: corrector id numbers, list/array
:return: sum of the square of the deviation between present
ideal trajectory
"""
corh_val, corv_val = cor_val[0::2], cor_val[1::2]
corh_ids, corv_ids = cor_ids[0::2], cor_ids[1::2]
for i, v in zip(corh_ids, corh_val):
m.reconfigure(i, {'theta_x': v})
for i, v in zip(corv_ids, corv_val):
m.reconfigure(i, {'theta_y': v})
s_tmp = m.allocState({})
r_tmp = m.propagate(s_tmp, 0, len(m), observe=range(len(m)))
x_tmp, y_tmp = np.array([[r_tmp[i][1].moment0_env[j]
for i in observe_ids]
for j in [0, 2]])
#return np.sum((x_tmp - x)**2)
#return np.sum((x_tmp)**2)
xsq = x_tmp * x_tmp
return np.sum(xsq)
#return xsq.mean() * xsq.max()
# select correctors, H
#NC = 20
#corh_ids_se = np.random.choice(corh_ids, size=NC)
#corh_val_se = 0. * (np.random.random(size=NC) * (2 - 1) + 1)
#corh_ids_se = corh_ids_enabled
#corh_val_se = [0.005]
cor_ids_se = m.find(type='orbtrim')[45:61]
#cor_ids_se = m.find(type='orbtrim')[34:50]
#cor_ids_se = m.find(type='orbtrim')[44:50]
#print cor_ids_se
#import sys
#sys.exit(1)
#corh_ids_se = cor_ids_se[0::2]
#corv_ids_se = cor_ids_se[1::2]
cor_val_se = [1e-4]*len(cor_ids_se)
#corh_val_se = [1e-4] * len(corh_ids_se)
#corv_val_se = [1e-4] * len(corv_ids_se)
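## quick sanity check (added sketch, not in the original script): evaluate the
## objective once at the initial guess to confirm it returns a finite scalar
## before handing it to the optimizer
#print(obj_func(cor_val_se, cor_ids_se))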
from scipy.optimize import minimize
res = minimize(obj_func, cor_val_se, args=(cor_ids_se,),
#method='Nelder-Mead',
method='L-BFGS-B', options={'disp':True}
#method='SLSQP', options={'maxiter':200, 'disp':True}
)
print(res.x)
cor_val = res.x
# show corrected result
corh_val, corv_val = cor_val[0::2], cor_val[1::2]
corh_ids, corv_ids = cor_ids_se[0::2], cor_ids_se[1::2]
for i, v in zip(corh_ids, corh_val):
m.reconfigure(i, {'theta_x': v})
for i, v in zip(corv_ids, corv_val):
m.reconfigure(i, {'theta_y': v})
s_oc = m.allocState({})
r_oc = m.propagate(s_oc, 0, len(m), observe=range(len(m)))
x_oc, y_oc = np.array([[r_oc[i][1].moment0_env[j]
for i in observe_ids]
for j in [0, 2]])
x_oc_all, y_oc_all = np.array([[r_oc[i][1].moment0_env[j]
for i in range(len(m))]
for j in [0, 2]])
pos_oc = np.array([r_oc[i][1].pos for i in observe_ids])
pos_oc_all = np.array([r_oc[i][1].pos for i in range(len(m))])
linex_oc, = ax1.plot(pos_oc, x_oc, 'g-',
alpha=0.9,
                     label=r'$\mathrm{corrected\;orbit}$')
linex_oc.set_lw(2)
# setup ax1
ax1.set_xlim([0,160])
ax1.set_title(r"$\mathrm{kick\;of}\;\theta_x = %.3f\;\mathrm{is\;applied\;at\;corrector\;id:}\;%d$" % (corh_val_enabled[0], corh_ids_enabled[0]), fontsize=18)
ax1.set_xlabel(r'$z\,\mathrm{[m]}$', fontsize=20)
ax1.set_ylabel(r'$x_{env}\,\mathrm{[mm]}$', fontsize=20)
ax1.legend(loc=3)
#ax1.text(20, 16,
# r'$\mathrm{Orbit\;is\;corrected\;back\;by\;applying}\;\theta_x=%.4f$' % (corh_val),
# fontdict={'fontsize':18})
corr1 = ax1.scatter(pos_oc_all[cor_ids_se], x_oc_all[cor_ids_se],
c='m', alpha=0.8, s=100,
label=r"$\mathrm{Kicker}$")
np.savetxt('zxy_scipy_3.dat', np.vstack((pos_oc, x_oc, y_oc)).T)
# show
plt.show()
import sys
sys.exit(1)
#corr1 = ax1.scatter(pos[corh_ids_se], x[corh_ids_se],
# c='k', alpha=0.8, s=80)
# show with x-rms
xrms_tmp, yrms_tmp = np.array([[r_tmp[i][1].moment0_rms[j]
                                for i in range(len(m))]
for j in [0, 2]])
fig_tmp = plt.figure()
ax_tmp = fig_tmp.add_subplot(111)
linex_tmp, = ax_tmp.plot(pos, x_tmp, 'r', lw=2)
fillx_tmp = ax_tmp.fill_between(pos, x_tmp - xrms_tmp,
x_tmp + xrms_tmp, alpha=0.2, color='b')
plt.show()
|
#!/usr/bin/python3
import io
from bot.bot import Bot
from bot.handler import MessageHandler, NewChatMembersHandler
from bot.types import Format
class Myteam():
def __init__(self, token, chat):
self.token = token
self.bot = Bot(token=self.token, api_url_base='https://api.internal.myteam.mail.ru/bot/v1')
self.chat = chat
def send_report_text(self, message):
self.bot.send_text(chat_id=self.chat, text=message, parse_mode="HTML")
    def file_send(self, job_id):
# file = open(f'out/2cf0a00c-85eb-4839-8198-623935169090_confluence.csv')
with io.StringIO() as file:
file.write(u'x' * 100)
file.name = "123123.txt"
file.seek(0)
response = self.bot.send_file(chat_id=self.chat, file=file.read(), caption=f"Report-{job_id}")
file_id = response.json()['fileId']
def run(self, message):
print("START MYTEAM")
print(message)
self.send_report_text(message)
if __name__ == '__main__':
print("Can't start script")
exit()
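# Usage sketch (assumes this module is imported by another script; the token,
# chat id and job id are placeholders, not real values):
#   client = Myteam(token="<BOT_TOKEN>", chat="<CHAT_ID>")
#   client.run("Report is ready")
#   client.file_send(job_id="<JOB_ID>")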
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from xml.etree.ElementTree import fromstring as xml_to_tree
from xml.dom.minidom import parseString as xml_to_dom
from nova import context
from nova import db
from nova import flags
from nova import test
from nova import utils
from nova.api.ec2 import cloud
from nova.auth import manager
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
flags.DECLARE('instances_path', 'nova.compute.manager')
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
libvirt_conn._late_load_cheetah()
self.flags(fake_call=True)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
FLAGS.instances_path = ''
        self.test_ip = '10.11.12.13'
        self.test_instance = {'memory_kb': '1024000',
                              'basepath': '/some/path',
                              'bridge_name': 'br100',
                              'mac_address': '02:12:34:46:56:67',
                              'vcpus': 2,
                              'project_id': 'fake',
                              'bridge': 'br101',
                              'instance_type': 'm1.small'}
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=False)
def test_xml_and_uri_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=True)
def test_xml_and_uri_rescue(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=True, rescue=True)
def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel,
rescue=False):
user_context = context.RequestContext(project=self.project,
user=self.user)
instance_ref = db.instance_create(user_context, instance)
host = self.network.get_network_host(user_context.elevated())
network_ref = db.project_get_network(context.get_admin_context(),
self.project.id)
fixed_ip = {'address': self.test_ip,
'network_id': network_ref['id']}
ctxt = context.get_admin_context()
fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
db.fixed_ip_update(ctxt, self.test_ip,
{'allocated': True,
'instance_id': instance_ref['id']})
type_uri_map = {'qemu': ('qemu:///system',
[(lambda t: t.find('.').get('type'), 'qemu'),
(lambda t: t.find('./os/type').text, 'hvm'),
(lambda t: t.find('./devices/emulator'), None)]),
'kvm': ('qemu:///system',
[(lambda t: t.find('.').get('type'), 'kvm'),
(lambda t: t.find('./os/type').text, 'hvm'),
(lambda t: t.find('./devices/emulator'), None)]),
'uml': ('uml:///system',
[(lambda t: t.find('.').get('type'), 'uml'),
(lambda t: t.find('./os/type').text, 'uml')]),
'xen': ('xen:///',
[(lambda t: t.find('.').get('type'), 'xen'),
(lambda t: t.find('./os/type').text, 'linux')]),
}
for hypervisor_type in ['qemu', 'kvm', 'xen']:
check_list = type_uri_map[hypervisor_type][1]
if rescue:
check = (lambda t: t.find('./os/kernel').text.split('/')[1],
'kernel.rescue')
check_list.append(check)
check = (lambda t: t.find('./os/initrd').text.split('/')[1],
'ramdisk.rescue')
check_list.append(check)
else:
if expect_kernel:
check = (lambda t: t.find('./os/kernel').text.split(
'/')[1], 'kernel')
else:
check = (lambda t: t.find('./os/kernel'), None)
check_list.append(check)
if expect_ramdisk:
check = (lambda t: t.find('./os/initrd').text.split(
'/')[1], 'ramdisk')
else:
check = (lambda t: t.find('./os/initrd'), None)
check_list.append(check)
common_checks = [
(lambda t: t.find('.').tag, 'domain'),
(lambda t: t.find(
'./devices/interface/filterref/parameter').get('name'), 'IP'),
(lambda t: t.find(
'./devices/interface/filterref/parameter').get(
'value'), '10.11.12.13'),
(lambda t: t.findall(
'./devices/interface/filterref/parameter')[1].get(
'name'), 'DHCPSERVER'),
(lambda t: t.findall(
'./devices/interface/filterref/parameter')[1].get(
'value'), '10.0.0.1'),
(lambda t: t.find('./devices/serial/source').get(
'path').split('/')[1], 'console.log'),
(lambda t: t.find('./memory').text, '2097152')]
if rescue:
common_checks += [
(lambda t: t.findall('./devices/disk/source')[0].get(
'file').split('/')[1], 'disk.rescue'),
(lambda t: t.findall('./devices/disk/source')[1].get(
'file').split('/')[1], 'disk')]
else:
common_checks += [(lambda t: t.findall(
'./devices/disk/source')[0].get('file').split('/')[1],
'disk')]
common_checks += [(lambda t: t.findall(
'./devices/disk/source')[1].get('file').split('/')[1],
'disk.local')]
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri = conn.get_uri()
self.assertEquals(uri, expected_uri)
xml = conn.to_xml(instance_ref, rescue)
tree = xml_to_tree(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
'%s failed check %d' % (xml, i))
for i, (check, expected_result) in enumerate(common_checks):
self.assertEqual(check(tree),
expected_result,
'%s failed common check %d' % (xml, i))
# This test is supposed to make sure we don't override a specifically
# set uri
#
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri = conn.get_uri()
self.assertEquals(uri, testuri)
def tearDown(self):
super(LibvirtConnTestCase, self).tearDown()
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
class IptablesFirewallTestCase(test.TestCase):
def setUp(self):
super(IptablesFirewallTestCase, self).setUp()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext('fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
class FakeLibvirtConnection(object):
pass
self.fake_libvirt_connection = FakeLibvirtConnection()
self.fw = libvirt_conn.IptablesFirewallDriver(
get_connection=lambda: self.fake_libvirt_connection)
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
super(IptablesFirewallTestCase, self).tearDown()
in_rules = [
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
'-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
'-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
'-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ',
'-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
in6_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
'# Completed on Tue Jan 18 23:47:56 2011',
]
def test_static_filters(self):
instance_ref = db.instance_create(self.context,
{'user_id': 'fake',
'project_id': 'fake',
'mac_address': '56:12:12:12:12:12'})
ip = '10.11.12.13'
network_ref = db.project_get_network(self.context,
'fake')
fixed_ip = {'address': ip,
'network_id': network_ref['id']}
admin_ctxt = context.get_admin_context()
db.fixed_ip_create(admin_ctxt, fixed_ip)
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
'instance_id': instance_ref['id']})
secgroup = db.security_group_create(admin_ctxt,
{'user_id': 'fake',
'project_id': 'fake',
'name': 'testgroup',
'description': 'test group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': 8,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'cidr': '192.168.10.0/24'})
db.instance_add_security_group(admin_ctxt, instance_ref['id'],
secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
# self.fw.add_instance(instance_ref)
def fake_iptables_execute(cmd, process_input=None):
if cmd == 'sudo ip6tables-save -t filter':
return '\n'.join(self.in6_rules), None
if cmd == 'sudo iptables-save -t filter':
return '\n'.join(self.in_rules), None
if cmd == 'sudo iptables-restore':
self.out_rules = process_input.split('\n')
return '', ''
if cmd == 'sudo ip6tables-restore':
self.out6_rules = process_input.split('\n')
return '', ''
self.fw.execute = fake_iptables_execute
self.fw.prepare_instance_filter(instance_ref)
self.fw.apply_instance_filter(instance_ref)
in_rules = filter(lambda l: not l.startswith('#'), self.in_rules)
for rule in in_rules:
if not 'nova' in rule:
self.assertTrue(rule in self.out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
for rule in self.out_rules:
# This is pretty crude, but it'll do for now
if '-d 10.11.12.13 -j' in rule:
instance_chain = rule.split(' ')[-1]
break
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
for rule in self.out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
break
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \
security_group_chain in self.out_rules,
"ICMP acceptance rule wasn't added")
self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type '
'8 -j ACCEPT' % security_group_chain in self.out_rules,
"ICMP Echo Request acceptance rule wasn't added")
self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport '
'--dports 80:81 -j ACCEPT' % security_group_chain \
in self.out_rules,
"TCP port 80/81 acceptance rule wasn't added")
class NWFilterTestCase(test.TestCase):
def setUp(self):
super(NWFilterTestCase, self).setUp()
class Mock(object):
pass
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext(self.user, self.project)
self.fake_libvirt_connection = Mock()
self.fw = libvirt_conn.NWFilterFirewall(
lambda: self.fake_libvirt_connection)
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
def test_cidr_rule_nwfilter_xml(self):
cloud_controller = cloud.CloudController()
cloud_controller.create_security_group(self.context,
'testgroup',
'test group description')
cloud_controller.authorize_security_group_ingress(self.context,
'testgroup',
from_port='80',
to_port='81',
ip_protocol='tcp',
cidr_ip='0.0.0.0/0')
security_group = db.security_group_get_by_name(self.context,
'fake',
'testgroup')
xml = self.fw.security_group_to_nwfilter_xml(security_group.id)
dom = xml_to_dom(xml)
self.assertEqual(dom.firstChild.tagName, 'filter')
rules = dom.getElementsByTagName('rule')
self.assertEqual(len(rules), 1)
# It's supposed to allow inbound traffic.
self.assertEqual(rules[0].getAttribute('action'), 'accept')
self.assertEqual(rules[0].getAttribute('direction'), 'in')
# Must be lower priority than the base filter (which blocks everything)
self.assertTrue(int(rules[0].getAttribute('priority')) < 1000)
ip_conditions = rules[0].getElementsByTagName('tcp')
self.assertEqual(len(ip_conditions), 1)
self.assertEqual(ip_conditions[0].getAttribute('srcipaddr'), '0.0.0.0')
self.assertEqual(ip_conditions[0].getAttribute('srcipmask'), '0.0.0.0')
self.assertEqual(ip_conditions[0].getAttribute('dstportstart'), '80')
self.assertEqual(ip_conditions[0].getAttribute('dstportend'), '81')
self.teardown_security_group()
def teardown_security_group(self):
cloud_controller = cloud.CloudController()
cloud_controller.delete_security_group(self.context, 'testgroup')
def setup_and_return_security_group(self):
cloud_controller = cloud.CloudController()
cloud_controller.create_security_group(self.context,
'testgroup',
'test group description')
cloud_controller.authorize_security_group_ingress(self.context,
'testgroup',
from_port='80',
to_port='81',
ip_protocol='tcp',
cidr_ip='0.0.0.0/0')
return db.security_group_get_by_name(self.context, 'fake', 'testgroup')
def test_creates_base_rule_first(self):
# These come pre-defined by libvirt
self.defined_filters = ['no-mac-spoofing',
'no-ip-spoofing',
'no-arp-spoofing',
'allow-dhcp-server']
self.recursive_depends = {}
for f in self.defined_filters:
self.recursive_depends[f] = []
def _filterDefineXMLMock(xml):
dom = xml_to_dom(xml)
name = dom.firstChild.getAttribute('name')
self.recursive_depends[name] = []
for f in dom.getElementsByTagName('filterref'):
ref = f.getAttribute('filter')
self.assertTrue(ref in self.defined_filters,
('%s referenced filter that does ' +
'not yet exist: %s') % (name, ref))
dependencies = [ref] + self.recursive_depends[ref]
self.recursive_depends[name] += dependencies
self.defined_filters.append(name)
return True
self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock
instance_ref = db.instance_create(self.context,
{'user_id': 'fake',
'project_id': 'fake'})
inst_id = instance_ref['id']
ip = '10.11.12.13'
network_ref = db.project_get_network(self.context,
'fake')
fixed_ip = {'address': ip,
'network_id': network_ref['id']}
admin_ctxt = context.get_admin_context()
db.fixed_ip_create(admin_ctxt, fixed_ip)
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
'instance_id': instance_ref['id']})
def _ensure_all_called():
instance_filter = 'nova-instance-%s' % instance_ref['name']
secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
for required in [secgroup_filter, 'allow-dhcp-server',
'no-arp-spoofing', 'no-ip-spoofing',
'no-mac-spoofing']:
self.assertTrue(required in
self.recursive_depends[instance_filter],
"Instance's filter does not include %s" %
required)
self.security_group = self.setup_and_return_security_group()
db.instance_add_security_group(self.context, inst_id,
self.security_group.id)
instance = db.instance_get(self.context, inst_id)
self.fw.setup_basic_filtering(instance)
self.fw.prepare_instance_filter(instance)
self.fw.apply_instance_filter(instance)
_ensure_all_called()
self.teardown_security_group()
|
from model.group import Group
__author__ = "Grzegorz Holak"
testdata = [
Group(name="nazwa1", footer="stopka1", header="naglowek1"),
Group(name="nazwa2", header="naglowek2", footer="stopka2")
]
|
""" Calculate properties for BpForms modeled in Bouhaddou et al., PLoS Comput Biol, 2018.
This example uses both the Python API and the JSON REST API
Bouhaddou M, Barrette AM, Stern AD, Koch RJ, DiStefano MS, Riesel EA, Santos LC, Tan AL, Mertz AE & Birtwistle MR.
A mechanistic pan-cancer pathway model informed by multi-omics data interprets stochastic cell fate responses to drugs and mitogens.
PLoS Comput Biol 2018, 14(3): e1005985. doi: `10.1371/journal.pcbi.1005985 <http://dx.plos.org/10.1371/journal.pcbi.1005985>`_.
:Author: Jonathan Karr <karr@mssm.edu>
:Author: Marc Birtwistle <mbirtwi@clemson.edu>
:Author: Cemal Erdem <cemale@clemson.edu>
:Date: 2019-06-18
:Copyright: 2019, Karr Lab
:License: MIT
"""
from Bio import SeqIO
import bpforms
import csv
import os.path
import requests
IN_FILENAME = os.path.join('examples', 'bouhaddou_et_al_plos_comput_biol_2018.fasta')
OUT_FILENAME = os.path.join('examples', 'bouhaddou_et_al_plos_comput_biol_2018.tsv')
ENDPOINT = 'https://www.bpforms.org'
def run():
# read BpForms from FASTA file
seqs = []
with open(IN_FILENAME, "r") as file:
for record in SeqIO.parse(file, "fasta"):
seqs.append({'id': record.id, 'seq': str(record.seq)})
    # compute the properties twice, first with the Python API and then with the
    # JSON REST API; the REST result overwrites the first and is what gets saved
    seq_props = calc_bpforms_props_with_python_api(seqs)
    seq_props = calc_bpforms_props_with_rest_api(seqs)
# save computed properties to .tsv file
with open(OUT_FILENAME, 'w') as file:
writer = csv.DictWriter(file, fieldnames=['Species', 'Formula', 'Molecular weight', 'Charge', 'Length'], dialect='excel-tab')
writer.writeheader()
for seq_prop in seq_props:
writer.writerow(seq_prop)
def calc_bpforms_props_with_python_api(seqs):
# calculate properties
seq_props = []
for seq in seqs:
form = bpforms.ProteinForm().from_str(seq['seq'])
seq_props.append({
'Species': seq['id'],
'Formula': form.get_formula(),
'Molecular weight': form.get_mol_wt(),
'Charge': form.get_charge(),
'Length': len(form.seq),
})
return seq_props
def calc_bpforms_props_with_rest_api(seqs):
seq_props = []
for seq in seqs:
data = {
"alphabet": "protein",
"seq": seq['seq'],
"circular": False,
"major_tautomer": False,
"dearomatize": False,
}
response = requests.post(ENDPOINT + '/api/bpform/', json=data)
response.raise_for_status()
props = response.json()
seq_props.append({
'Species': seq['id'],
'Formula': props['formula'],
'Molecular weight': props['mol_wt'],
'Charge': props['charge'],
'Length': props['length'],
})
return seq_props
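# Entry-point sketch (assumption: the example is meant to be run directly; the
# original may instead be invoked from a separate build or test script):
if __name__ == '__main__':
    run()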
|
from spanet.network.jet_reconstruction import JetReconstructionModel
from spanet.dataset import JetReconstructionDataset
from spanet.options import Options
|
import os, requests, logging
def send_metric(measurement, tags, values, num_retries=0, must_succeed=False):
url = os.environ.get("DPP_INFLUXDB_URL")
db = os.environ.get("DPP_INFLUXDB_DB")
if tags and url and db:
line_tags = ",".join(["{}={}".format(k, v) for k, v in tags.items()])
line_values = ",".join(["{}={}".format(k, v) for k, v in values.items()])
old_logging_level = logging.getLogger().level
logging.getLogger().setLevel(logging.ERROR)
res = requests.post("{url}/write?db={db}".format(url=url, db=db),
'{measurement},{tags} {values}'.format(measurement=measurement,
tags=line_tags, values=line_values))
logging.getLogger().setLevel(old_logging_level)
if res.status_code == 404 and res.json()["error"].startswith("database not found:"):
if num_retries > 0:
raise Exception("Failed to create InfluxDB database")
logging.getLogger().setLevel(logging.ERROR)
res = requests.post("{url}/query".format(url=url), {"q": "CREATE DATABASE {db}".format(db=db)})
logging.getLogger().setLevel(old_logging_level)
res.raise_for_status()
return send_metric(measurement, tags, values, num_retries+1)
elif res.status_code == 200:
return True
else:
res.raise_for_status()
elif must_succeed:
raise Exception("missing required environment variables")
def send_metric_parameters(measurement, tags, values, parameters):
metric_tags = parameters.get("metric-tags", {})
if len(metric_tags) > 0:
tags.update(metric_tags)
return send_metric(measurement, tags, values)
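# Usage sketch (assumes DPP_INFLUXDB_URL and DPP_INFLUXDB_DB point to a
# reachable InfluxDB instance; the measurement, tag and field names below are
# hypothetical):
#   send_metric("pipeline_run",
#               tags={"pipeline": "my-pipeline", "status": "ok"},
#               values={"duration_seconds": 12.5, "rows": 1000})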
|
import modulo_restaurante as mr
minha_cozinha = mr.Restaurante(
nome_restaurante='pão com ovo',
tipo_cozinha='normal'
)
minha_cozinha.descrição_restaurante()
minha_cozinha.restaurante_aberto()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module implements the interface to Denon AVR receivers.
:copyright: (c) 2016 by Oliver Goetz.
:license: MIT, see LICENSE for more details.
"""
import logging
import time
from typing import Callable, Dict, List, Optional
import attr
import httpx
from .decorators import run_async_synchronously
from .foundation import DenonAVRFoundation, set_api_host, set_api_timeout
from .const import (
DENON_ATTR_SETATTR, MAIN_ZONE, VALID_ZONES)
from .exceptions import AvrCommandError
from .audyssey import DenonAVRAudyssey, audyssey_factory
from .input import DenonAVRInput, input_factory
from .soundmode import DenonAVRSoundMode, sound_mode_factory
from .tonecontrol import DenonAVRToneControl, tone_control_factory
from .volume import DenonAVRVolume, volume_factory
_LOGGER = logging.getLogger(__name__)
@attr.s(auto_attribs=True, on_setattr=DENON_ATTR_SETATTR)
class DenonAVR(DenonAVRFoundation):
"""
Representing a Denon AVR Device.
Initialize MainZone of DenonAVR.
:param host: IP or HOSTNAME.
:type host: str
:param name: Device name, if None FriendlyName of device is used.
:type name: str or None
    :param show_all_inputs: If True, deleted input functions are also shown
:type show_all_inputs: bool
:param timeout: Timeout when calling device APIs.
:type timeout: float
    :param add_zones: Additional zones for which instances are created
:type add_zones: dict [str, str] or None
"""
_host: str = attr.ib(
converter=str, on_setattr=[*DENON_ATTR_SETATTR, set_api_host])
_name: Optional[str] = attr.ib(
converter=attr.converters.optional(str), default=None)
_show_all_inputs: bool = attr.ib(converter=bool, default=False)
_add_zones: Optional[Dict[str, str]] = attr.ib(
validator=attr.validators.optional(attr.validators.deep_mapping(
attr.validators.in_(VALID_ZONES),
attr.validators.optional(attr.validators.instance_of(str)),
attr.validators.instance_of(dict))),
default=None)
_timeout: float = attr.ib(
converter=float,
on_setattr=[*DENON_ATTR_SETATTR, set_api_timeout],
default=2.0)
_zones: Dict[str, DenonAVRFoundation] = attr.ib(
validator=attr.validators.deep_mapping(
attr.validators.in_(VALID_ZONES),
attr.validators.instance_of(DenonAVRFoundation),
attr.validators.instance_of(dict)),
default=attr.Factory(dict),
init=False)
audyssey: DenonAVRAudyssey = attr.ib(
validator=attr.validators.instance_of(DenonAVRAudyssey),
default=attr.Factory(audyssey_factory, takes_self=True),
init=False)
input: DenonAVRInput = attr.ib(
validator=attr.validators.instance_of(DenonAVRInput),
default=attr.Factory(input_factory, takes_self=True),
init=False)
soundmode: DenonAVRSoundMode = attr.ib(
validator=attr.validators.instance_of(DenonAVRSoundMode),
default=attr.Factory(sound_mode_factory, takes_self=True),
init=False)
tonecontrol: DenonAVRToneControl = attr.ib(
validator=attr.validators.instance_of(DenonAVRToneControl),
default=attr.Factory(tone_control_factory, takes_self=True),
init=False)
vol: DenonAVRVolume = attr.ib(
validator=attr.validators.instance_of(DenonAVRVolume),
default=attr.Factory(volume_factory, takes_self=True),
init=False)
def __attrs_post_init__(self) -> None:
"""Initialize special attributes."""
# Set host and timeout again to start its custom setattr function
self._host = self._host
self._timeout = self._timeout
# Add own instance to zone dictionary
self._zones[self._device.zone] = self
# Create instances of additional zones if requested
if self._device.zone == MAIN_ZONE and self._add_zones is not None:
self.create_zones(self._add_zones)
def create_zones(self, add_zones):
"""Create instances of additional zones for the receiver."""
for zone, zname in add_zones.items():
# Name either set explicitly or name of Main Zone with suffix
zonename = None
if zname is None and self._name is not None:
zonename = "{} {}".format(self._name, zone)
zone_device = attr.evolve(self._device, zone=zone)
zone_inst = DenonAVR(
host=self._host,
device=zone_device,
name=zonename,
timeout=self._timeout,
show_all_inputs=self._show_all_inputs)
self._zones[zone] = zone_inst
async def async_setup(self) -> None:
"""Ensure that configuration is loaded from receiver asynchronously."""
# Device setup
await self._device.async_setup()
if self._name is None:
self._name = self._device.friendly_name
# Setup other functions
self.input.setup()
await self.soundmode.async_setup()
self.tonecontrol.setup()
self.vol.setup()
self.audyssey.setup()
self._is_setup = True
@run_async_synchronously(async_func=async_setup)
def setup(self) -> None:
"""Ensure that configuration is loaded from receiver."""
async def async_update(self):
"""
Get the latest status information from device asynchronously.
Method executes the update method for the current receiver type.
"""
# Ensure that the device is setup
if self._is_setup is False:
await self.async_setup()
# Create a cache id for this global update
cache_id = time.time()
# Verify update method
await self._device.async_verify_avr_2016_update_method(
cache_id=cache_id)
# Update device
await self._device.async_update(global_update=True, cache_id=cache_id)
# Update other functions
await self.input.async_update(global_update=True, cache_id=cache_id)
await self.soundmode.async_update(
global_update=True, cache_id=cache_id)
await self.tonecontrol.async_update(
global_update=True, cache_id=cache_id)
await self.vol.async_update(global_update=True, cache_id=cache_id)
# AppCommand0300.xml interface is very slow, thus it is not included
# into main update
# await self.audyssey.async_update(
# global_update=True, cache_id=cache_id)
@run_async_synchronously(async_func=async_update)
def update(self):
"""
Get the latest status information from device.
Method executes the update method for the current receiver type.
"""
async def async_update_tonecontrol(self):
"""Get Tonecontrol settings."""
await self.tonecontrol.async_update()
@run_async_synchronously(async_func=async_update_tonecontrol)
def update_tonecontrol(self):
"""Get Tonecontrol settings."""
async def async_update_audyssey(self):
"""Get Audyssey settings."""
await self.audyssey.async_update()
@run_async_synchronously(async_func=async_update_audyssey)
def update_audyssey(self):
"""Get Audyssey settings."""
async def async_get_command(self, request: str) -> str:
"""Send HTTP GET command to Denon AVR receiver asynchronously."""
return await self._device.api.async_get_command(request)
@run_async_synchronously(async_func=async_get_command)
def get_command(self, request: str) -> str:
"""Send HTTP GET command to Denon AVR receiver."""
@run_async_synchronously(async_func=async_get_command)
def send_get_command(self, request: str) -> str:
"""Send HTTP GET command to Denon AVR receiver...for compatibility."""
##############
# Properties #
##############
@property
def zone(self) -> str:
"""Return Zone of this instance."""
return self._device.zone
@property
def zones(self) -> Dict[str, DenonAVRFoundation]:
"""Return all Zone instances of the device."""
zones = self._zones.copy()
return zones
@property
def name(self) -> Optional[str]:
"""Return the name of the device as string."""
return self._name
@property
def host(self) -> str:
"""Return the host of the device as string."""
return self._host
@property
def manufacturer(self) -> Optional[str]:
"""Return the manufacturer of the device as string."""
return self._device.manufacturer
@property
def model_name(self) -> Optional[str]:
"""Return the model name of the device as string."""
return self._device.model_name
@property
def serial_number(self) -> Optional[str]:
"""Return the serial number of the device as string."""
return self._device.serial_number
@property
def power(self) -> Optional[str]:
"""
Return the power state of the device.
Possible values are: "ON", "STANDBY" and "OFF"
"""
return self._device.power
@property
def state(self) -> Optional[str]:
"""
Return the state of the device.
Possible values are: "on", "off", "playing", "paused"
"playing" and "paused" are only available for input functions
in PLAYING_SOURCES.
"""
return self.input.state
@property
def muted(self) -> bool:
"""
Boolean if volume is currently muted.
Return "True" if muted and "False" if not muted.
"""
return self.vol.muted
@property
def volume(self) -> float:
"""
Return volume of Denon AVR as float.
        Volume is sent in a format like -50.0.
        Minimum is -80.0, maximum is 18.0.
"""
return self.vol.volume
@property
def input_func(self) -> Optional[str]:
"""Return the current input source as string."""
return self.input.input_func
@property
def input_func_list(self) -> List[str]:
"""Return a list of available input sources as string."""
return self.input.input_func_list
@property
def support_sound_mode(self) -> Optional[bool]:
"""Return True if sound mode supported."""
return self.soundmode.support_sound_mode
@property
def sound_mode(self) -> Optional[str]:
"""Return the matched current sound mode as a string."""
return self.soundmode.sound_mode
@property
def sound_mode_list(self) -> List[str]:
"""Return a list of available sound modes as string."""
return self.soundmode.sound_mode_list
@property
def sound_mode_map(self) -> Dict[str, str]: # returns an OrderedDict
"""Return a dict of available sound modes with their mapping values."""
return self.soundmode.sound_mode_map
@property
def sound_mode_map_rev(self) -> Dict[str, str]:
"""Return a dict to map each sound_mode_raw to matching sound_mode."""
return self.soundmode.sound_mode_map_rev
@property
def sound_mode_raw(self) -> Optional[str]:
"""Return the current sound mode as string as received from the AVR."""
return self.soundmode.sound_mode_raw
@property
def image_url(self) -> Optional[str]:
"""Return image URL of current playing media when powered on."""
return self.input.image_url
@property
def title(self) -> Optional[str]:
"""Return title of current playing media as string."""
return self.input.title
@property
def artist(self) -> Optional[str]:
"""Return artist of current playing media as string."""
return self.input.artist
@property
def album(self) -> Optional[str]:
"""Return album name of current playing media as string."""
return self.input.album
@property
def band(self) -> Optional[str]:
"""Return band of current radio station as string."""
return self.input.band
@property
def frequency(self) -> Optional[str]:
"""Return frequency of current radio station as string."""
return self.input.frequency
@property
def station(self) -> Optional[str]:
"""Return current radio station as string."""
return self.input.station
@property
def netaudio_func_list(self) -> List[str]:
"""Return list of network audio devices.
Those devices should react to play, pause, next and previous
track commands.
"""
return self.input.netaudio_func_list
@property
def playing_func_list(self) -> List[str]:
"""Return list of playing devices.
Those devices offer additional information about what they are playing
(e.g. title, artist, album, band, frequency, station, image_url).
"""
return self.input.playing_func_list
@property
    def receiver_port(self) -> Optional[int]:
"""Return the receiver's port."""
if self._device.receiver is None:
return None
return self._device.receiver.port
@property
def receiver_type(self) -> Optional[str]:
"""Return the receiver's type."""
if self._device.receiver is None:
return None
return self._device.receiver.type
@property
def show_all_inputs(self) -> Optional[bool]:
"""Indicate if all inputs are shown or just active one."""
return self._show_all_inputs
@property
def bass(self) -> Optional[int]:
"""Return value of bass."""
return self.tonecontrol.bass
@property
def bass_level(self) -> Optional[str]:
"""Return level of bass."""
return self.tonecontrol.bass_level
@property
def treble(self) -> Optional[int]:
"""Return value of treble."""
return self.tonecontrol.treble
@property
def treble_level(self) -> Optional[str]:
"""Return level of treble."""
return self.tonecontrol.treble_level
@property
def dynamic_eq(self) -> Optional[bool]:
"""Return value of Dynamic EQ."""
return self.audyssey.dynamic_eq
@property
def reference_level_offset(self) -> Optional[str]:
"""Return value of Reference Level Offset."""
return self.audyssey.reference_level_offset
@property
def reference_level_offset_setting_list(self) -> List[str]:
"""Return a list of available reference level offset settings."""
return self.audyssey.reference_level_offset_setting_list
@property
def dynamic_volume(self) -> Optional[str]:
"""Return value of Dynamic Volume."""
return self.audyssey.dynamic_volume
@property
def dynamic_volume_setting_list(self) -> List[str]:
"""Return a list of available Dynamic Volume settings."""
return self.audyssey.dynamic_volume_setting_list
@property
def multi_eq(self) -> Optional[str]:
"""Return value of MultiEQ."""
return self.audyssey.multi_eq
@property
def multi_eq_setting_list(self) -> List[str]:
"""Return a list of available MultiEQ settings."""
return self.audyssey.multi_eq_setting_list
async def async_dynamic_eq_off(self) -> None:
"""Turn DynamicEQ off."""
await self.audyssey.async_dynamiceq_off()
##########
# Setter #
##########
def set_async_client_getter(
self,
async_client_getter: Callable[[], httpx.AsyncClient]) -> None:
"""
Set a custom httpx.AsyncClient getter for this instance.
The function provided must return an instance of httpx.AsyncClient.
This is a non-blocking method.
"""
if not callable(async_client_getter):
raise AvrCommandError("Provided object is not callable")
self._device.api.async_client_getter = async_client_getter
@run_async_synchronously(async_func=async_dynamic_eq_off)
def dynamic_eq_off(self) -> None:
"""Turn DynamicEQ off."""
async def async_dynamic_eq_on(self) -> None:
"""Turn DynamicEQ on."""
await self.audyssey.async_dynamiceq_on()
@run_async_synchronously(async_func=async_dynamic_eq_on)
def dynamic_eq_on(self) -> None:
"""Turn DynamicEQ on."""
async def async_toggle_dynamic_eq(self) -> None:
"""Toggle DynamicEQ."""
await self.audyssey.async_toggle_dynamic_eq()
@run_async_synchronously(async_func=async_toggle_dynamic_eq)
def toggle_dynamic_eq(self) -> None:
"""Toggle DynamicEQ."""
async def async_set_input_func(self, input_func: str) -> None:
"""
Set input_func of device.
Valid values depend on the device and should be taken from
"input_func_list".
"""
await self.input.async_set_input_func(input_func)
@run_async_synchronously(async_func=async_set_input_func)
def set_input_func(self, input_func: str) -> None:
"""
Set input_func of device.
Valid values depend on the device and should be taken from
"input_func_list".
"""
async def async_set_sound_mode(self, sound_mode: str) -> None:
"""
Set sound_mode of device.
Valid values depend on the device and should be taken from
"sound_mode_list".
"""
await self.soundmode.async_set_sound_mode(sound_mode)
@run_async_synchronously(async_func=async_set_sound_mode)
def set_sound_mode(self, sound_mode: str) -> None:
"""
Set sound_mode of device.
Valid values depend on the device and should be taken from
"sound_mode_list".
"""
async def async_toggle_play_pause(self) -> None:
"""Toggle play pause media player."""
await self.input.async_toggle_play_pause()
@run_async_synchronously(async_func=async_toggle_play_pause)
def toggle_play_pause(self) -> None:
"""Toggle play pause media player."""
async def async_play(self) -> None:
"""Send play command to receiver command via HTTP post."""
await self.input.async_play()
@run_async_synchronously(async_func=async_play)
def play(self) -> None:
"""Send play command to receiver command via HTTP post."""
async def async_pause(self) -> None:
"""Send pause command to receiver command via HTTP post."""
await self.input.async_pause()
@run_async_synchronously(async_func=async_pause)
def pause(self) -> None:
"""Send pause command to receiver command via HTTP post."""
async def async_previous_track(self) -> None:
"""Send previous track command to receiver command via HTTP post."""
await self.input.async_previous_track()
@run_async_synchronously(async_func=async_previous_track)
def previous_track(self) -> None:
"""Send previous track command to receiver command via HTTP post."""
async def async_next_track(self) -> None:
"""Send next track command to receiver command via HTTP post."""
await self.input.async_next_track()
@run_async_synchronously(async_func=async_next_track)
def next_track(self) -> None:
"""Send next track command to receiver command via HTTP post."""
async def async_power_on(self) -> None:
"""Turn on receiver via HTTP get command."""
await self._device.async_power_on()
@run_async_synchronously(async_func=async_power_on)
def power_on(self) -> None:
"""Turn on receiver via HTTP get command."""
async def async_power_off(self) -> None:
"""Turn off receiver via HTTP get command."""
await self._device.async_power_off()
@run_async_synchronously(async_func=async_power_off)
def power_off(self) -> None:
"""Turn off receiver via HTTP get command."""
async def async_volume_up(self) -> None:
"""Volume up receiver via HTTP get command."""
await self.vol.async_volume_up()
@run_async_synchronously(async_func=async_volume_up)
def volume_up(self) -> None:
"""Volume up receiver via HTTP get command."""
async def async_volume_down(self) -> None:
"""Volume down receiver via HTTP get command."""
await self.vol.async_volume_down()
@run_async_synchronously(async_func=async_volume_down)
def volume_down(self) -> None:
"""Volume down receiver via HTTP get command."""
async def async_set_volume(self, volume: float) -> None:
"""
Set receiver volume via HTTP get command.
        Volume is sent in a format like -50.0.
        Minimum is -80.0, maximum is 18.0.
"""
await self.vol.async_set_volume(volume)
@run_async_synchronously(async_func=async_set_volume)
def set_volume(self, volume: float) -> None:
"""
Set receiver volume via HTTP get command.
        Volume is sent in a format like -50.0.
        Minimum is -80.0, maximum is 18.0.
"""
async def async_mute(self, mute: bool) -> None:
"""Mute receiver via HTTP get command."""
await self.vol.async_mute(mute)
@run_async_synchronously(async_func=async_mute)
def mute(self, mute: bool) -> None:
"""Mute receiver via HTTP get command."""
async def async_enable_tone_control(self) -> None:
"""Enable tone control to change settings like bass or treble."""
await self.tonecontrol.async_enable_tone_control()
@run_async_synchronously(async_func=async_enable_tone_control)
def enable_tone_control(self) -> None:
"""Enable tone control to change settings like bass or treble."""
async def async_disable_tone_control(self) -> None:
"""Disable tone control to change settings like bass or treble."""
await self.tonecontrol.async_disable_tone_control()
@run_async_synchronously(async_func=async_disable_tone_control)
def disable_tone_control(self) -> None:
"""Disable tone control to change settings like bass or treble."""
async def async_set_bass(self, value: int) -> None:
"""
Set receiver bass.
Minimum is 0, maximum at 12
Note:
Doesn't work, if Dynamic Equalizer is active.
"""
await self.tonecontrol.async_set_bass(value)
@run_async_synchronously(async_func=async_set_bass)
def set_bass(self, value: int) -> None:
"""
Set receiver bass.
Minimum is 0, maximum at 12
Note:
Doesn't work, if Dynamic Equalizer is active.
"""
async def async_bass_up(self) -> None:
"""
Increase level of Bass.
Note:
Doesn't work, if Dynamic Equalizer is active
"""
await self.tonecontrol.async_bass_up()
@run_async_synchronously(async_func=async_bass_up)
def bass_up(self) -> None:
"""
Increase level of Bass.
Note:
Doesn't work, if Dynamic Equalizer is active
"""
async def async_bass_down(self) -> None:
"""
Decrease level of Bass.
Note:
Doesn't work, if Dynamic Equalizer is active
"""
await self.tonecontrol.async_bass_down()
@run_async_synchronously(async_func=async_bass_down)
def bass_down(self) -> None:
"""
Decrease level of Bass.
Note:
Doesn't work, if Dynamic Equalizer is active
"""
async def async_set_treble(self, value: int) -> None:
"""
Set receiver treble.
Minimum is 0, maximum at 12
Note:
Doesn't work, if Dynamic Equalizer is active.
"""
await self.tonecontrol.async_set_treble(value)
@run_async_synchronously(async_func=async_set_treble)
def set_treble(self, value: int) -> None:
"""
Set receiver treble.
Minimum is 0, maximum at 12
Note:
Doesn't work, if Dynamic Equalizer is active.
"""
async def async_treble_up(self) -> None:
"""
Increase level of Treble.
Note:
Doesn't work, if Dynamic Equalizer is active
"""
await self.tonecontrol.async_treble_up()
@run_async_synchronously(async_func=async_treble_up)
def treble_up(self) -> None:
"""
Increase level of Treble.
Note:
Doesn't work, if Dynamic Equalizer is active
"""
async def async_treble_down(self) -> None:
"""
Decrease level of Treble.
Note:
Doesn't work, if Dynamic Equalizer is active
"""
await self.tonecontrol.async_treble_down()
@run_async_synchronously(async_func=async_treble_down)
def treble_down(self) -> None:
"""
Decrease level of Treble.
Note:
Doesn't work, if Dynamic Equalizer is active
"""
|
# coding:utf-8
###############################
# Example of Python code encryption and license control
# This is the script protected by the license check
###############################
import socket, fcntl, datetime, os, struct
from Crypto.Cipher import AES
from binascii import b2a_hex, a2b_hex
import time
class Get_License(object):
def __init__(self):
super(Get_License, self).__init__()
        # define the secret key material
self.seperateKey = "d#~0^38J:"
self.aesKey = "123456789abcdefg"
self.aesIv = "abcdefg123456789"
self.aesMode = AES.MODE_CBC
def getHwAddr(self, ifname):
"""
        Get the host's physical (MAC) address
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
return ''.join(['%02x' % ord(char) for char in info[18:24]])
def decrypt(self, text):
"""
        Decrypt the host address from the .lic file
"""
try:
cryptor = AES.new(self.aesKey, self.aesMode, self.aesIv)
plain_text = cryptor.decrypt(a2b_hex(text))
return plain_text.rstrip('\0')
except:
return ""
def getLicenseInfo(self, filePath = None):
if filePath == None:
filePath = "./license.lic"
if not os.path.isfile(filePath):
print("请将 license.lic 文件放在当前路径下")
os._exit(0)
return False, 'Invalid'
encryptText = ""
with open(filePath, "r") as licFile:
encryptText = licFile.read()
licFile.close()
try:
hostInfo = self.getHwAddr('eth0')
except IOError:
hostInfo = self.getHwAddr('eno1')
decryptText = self.decrypt(encryptText)
pos = decryptText.find(self.seperateKey)
if -1 == pos:
return False, "Invalid"
licHostInfo = self.decrypt(decryptText[0:pos])
licenseStr = decryptText[pos + len(self.seperateKey):]
if licHostInfo == hostInfo:
return True, licenseStr
else:
return False, 'Invalid'
# The following is one example of applying the license check; for reference only
License = Get_License()
condition, LicInfo = License.getLicenseInfo()
class Today():
def get_time(self):
if condition==True and LicInfo=='Valid':
print(datetime.datetime.now())
else:
            print('Not authorized!')
def say(self):
if condition==True and LicInfo=='Valid':
print('hello world!')
localtime = time.asctime( time.localtime(time.time()) )
print("The local time is now:", localtime)
else:
            print('Not authorized!')
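# License-generation sketch (assumption: this helper is NOT part of the original
# script; it only illustrates the layout that getLicenseInfo above expects).
# license.lic holds hex(AES(inner + seperateKey + "Valid")), where
# inner = hex(AES(mac_address)) and every plaintext is null-padded to a
# multiple of the 16-byte AES block size before encryption.
#
#   def _pad(text):
#       return text + '\0' * ((16 - len(text) % 16) % 16)
#
#   def make_license(mac_address, lic_str='Valid'):
#       key, iv = '123456789abcdefg', 'abcdefg123456789'
#       inner = b2a_hex(AES.new(key, AES.MODE_CBC, iv).encrypt(_pad(mac_address)))
#       payload = inner + "d#~0^38J:" + lic_str
#       outer = b2a_hex(AES.new(key, AES.MODE_CBC, iv).encrypt(_pad(payload)))
#       with open('license.lic', 'w') as lic_file:
#           lic_file.write(outer)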
|
import random
import math
import numpy as np
from GraphEngine import GraphicEngine
class FourierTransform:
def __init__(self, input_signal_vector):
self.number_of_data = len(input_signal_vector)
self.number_of_transform = int(self.number_of_data / 2)
self.input_signal_vector = input_signal_vector
self.component_real = np.zeros((self.number_of_transform, 1), dtype=float)
self.component_imaginary = np.zeros((self.number_of_transform, 1), dtype=float)
self.component_phases = np.zeros((self.number_of_transform, 1), dtype=float)
self.component_magnitude = np.zeros((self.number_of_transform, 1), dtype=float)
self.synthesis_signal = np.zeros((self.number_of_data, 1), dtype=float)
self.graph_engine = GraphicEngine()
def fourier_transformation(self):
for k in range(self.number_of_transform):
self.component_real[k] = sum([self.input_signal_vector[i] * math.cos(2 * math.pi * k * i / self.number_of_data) for i in range(self.number_of_data)])
self.component_imaginary[k] = sum([-self.input_signal_vector[i] * math.sin(2 * math.pi * k * i / self.number_of_data) for i in range(self.number_of_data)])
def fourier_calculate_phase_and_mag(self):
# self.component_phases = math.atan(self.component_imaginary/self.component_real)
self.component_magnitude = [math.sqrt(self.component_real[i] ** 2 + self.component_imaginary[i] ** 2) for i in range(self.number_of_transform)]
def fourier_synthesis(self):
for i in range(self.number_of_data):
self.synthesis_signal[i] = sum(
[self.component_real[k] / self.number_of_data * math.cos(2 * math.pi * k * i / self.number_of_data) - self.component_imaginary[k] / self.number_of_data * math.sin(
2 * math.pi * k * i / self.number_of_data) for k in
range(self.number_of_transform)])
def plot_signal(self):
        self.graph_engine.plot_signal([self.input_signal_vector, self.component_real, self.component_imaginary, self.synthesis_signal], [-1, 1])
if __name__ == "__main__":
input_vector_signal = np.zeros((100, 1), dtype=float)
for i in range(100):
        input_vector_signal[i] = random.uniform(-1, 1)  # random test signal in [-1, 1)
fourier_transform = FourierTransform(input_vector_signal)
fourier_transform.fourier_transformation()
fourier_transform.fourier_calculate_phase_and_mag()
fourier_transform.fourier_synthesis()
fourier_transform.plot_signal()
|
# Generic imports
from pathlib import Path
from math import log2, ceil
import numpy as np
# FPGA-specific imports
from msdsl import MixedSignalModel, VerilogGenerator, sum_op, clamp_op, to_uint, to_sint
from msdsl.expr.expr import array, concatenate
from msdsl.expr.extras import if_
from msdsl.expr.format import SIntFormat
from msdsl.function import PlaceholderFunction
# DragonPHY imports
from dragonphy import (Filter, get_file, get_dragonphy_real_type,
add_placeholder_inputs)
class AnalogSlice:
def __init__(self, filename=None, **system_values):
# set a fixed random seed for repeatability
np.random.seed(0)
module_name = Path(filename).stem
build_dir = Path(filename).parent
#This is a wonky way of validating this.. :(
assert (all([req_val in system_values for req_val in self.required_values()])), \
f'Cannot build {module_name}, Missing parameter in config file'
m = MixedSignalModel(module_name, dt=system_values['dt'], build_dir=build_dir,
real_type=get_dragonphy_real_type())
# Random number generator seeds (defaults generated with random.org)
m.add_digital_input('jitter_seed', width=32)
m.add_digital_input('noise_seed', width=32)
# Chunk of bits from the history; corresponding delay is bit_idx/freq_tx delay
m.add_digital_input('chunk', width=system_values['chunk_width'])
m.add_digital_input('chunk_idx', width=int(ceil(log2(system_values['num_chunks']))))
# Control code for the corresponding PI slice
m.add_digital_input('pi_ctl', width=system_values['pi_ctl_width'])
# Indicates sequencing of ADC slice within a bank (typically a static value)
m.add_digital_input('slice_offset', width=int(ceil(log2(system_values['slices_per_bank']))))
# Control codes that affect states in the slice
m.add_digital_input('sample_ctl')
m.add_digital_input('incr_sum')
m.add_digital_input('write_output')
# ADC sign and magnitude
m.add_digital_output('out_sgn')
m.add_digital_output('out_mag', width=system_values['n_adc'])
# Emulator clock and reset
m.add_digital_input('clk')
m.add_digital_input('rst')
# Noise controls
m.add_analog_input('jitter_rms')
m.add_analog_input('noise_rms')
# Create "placeholder function" that can be updated
# at runtime with the channel function
chan_func = PlaceholderFunction(
domain=system_values['func_domain'],
order=system_values['func_order'],
numel=system_values['func_numel'],
coeff_widths=system_values['func_widths'],
coeff_exps=system_values['func_exps']
)
# Check the function on a representative test case
chan = Filter.from_file(get_file('build/chip_src/adapt_fir/chan.npy'))
self.check_func_error(chan_func, chan.interp)
# Add digital inputs that will be used to reconfigure the function at runtime
wdata, waddr, we = add_placeholder_inputs(m=m, f=chan_func)
# Sample the pi_ctl code
m.add_digital_state('pi_ctl_sample', width=system_values['pi_ctl_width'])
m.set_next_cycle(m.pi_ctl_sample, m.pi_ctl, clk=m.clk, rst=m.rst, ce=m.sample_ctl)
# compute weights to apply to pulse responses
weights = []
for k in range(system_values['chunk_width']):
# create a weight value for this bit
weights.append(
m.add_analog_state(
f'weights_{k}',
range_=system_values['vref_tx']
)
)
# select a single bit from the chunk. chunk_width=1 is unfortunately
# a special case because some simulators don't support the bit-selection
# syntax on a single-bit variable
chunk_bit = m.chunk[k] if system_values['chunk_width'] > 1 else m.chunk
# write the weight value
m.set_next_cycle(
weights[-1],
if_(
chunk_bit,
system_values['vref_tx'],
-system_values['vref_tx']
),
clk=m.clk,
rst=m.rst
)
# Compute the delay due to the PI control code
delay_amt_pre = m.bind_name(
'delay_amt_pre',
m.pi_ctl_sample / ((2.0**system_values['pi_ctl_width'])*system_values['freq_rx'])
)
# Add jitter to the sampling time
if system_values['use_jitter']:
# create a signal to represent jitter
delay_amt_jitter = m.set_gaussian_noise(
'delay_amt_jitter',
std=m.jitter_rms,
lfsr_init=m.jitter_seed,
clk=m.clk,
ce=m.sample_ctl,
rst=m.rst
)
# add jitter to the delay amount (which might possibly yield a negative value)
delay_amt_noisy = m.bind_name('delay_amt_noisy', delay_amt_pre + delay_amt_jitter)
# make the delay amount non-negative
delay_amt = m.bind_name('delay_amt', if_(delay_amt_noisy >= 0.0, delay_amt_noisy, 0.0))
else:
delay_amt = delay_amt_pre
# Compute the delay due to the slice offset
t_slice_offset = m.bind_name('t_slice_offset', m.slice_offset/system_values['freq_rx'])
# Add the delay amount to the slice offset
t_samp_new = m.bind_name('t_samp_new', t_slice_offset + delay_amt)
# Determine if the new sampling time happens after the end of this period
t_one_period = m.bind_name('t_one_period', system_values['slices_per_bank']/system_values['freq_rx'])
exceeds_period = m.bind_name('exceeds_period', t_samp_new >= t_one_period)
# Save the previous sample time
t_samp_prev = m.add_analog_state('t_samp_prev', range_=system_values['slices_per_bank']/system_values['freq_rx'])
m.set_next_cycle(t_samp_prev, t_samp_new-t_one_period, clk=m.clk, rst=m.rst, ce=m.sample_ctl)
# Save whether the previous sample time exceeded one period
prev_exceeded = m.add_digital_state('prev_exceeded')
m.set_next_cycle(prev_exceeded, exceeds_period, clk=m.clk, rst=m.rst, ce=m.sample_ctl)
# Compute the sample time to use for this period
t_samp_idx = m.bind_name('t_samp_idx', concatenate([exceeds_period, prev_exceeded]))
t_samp = m.bind_name(
't_samp',
array(
[
t_samp_new, # 0b00: exceeds_period=0, prev_exceeded=0
t_samp_new, # 0b01: exceeds_period=0, prev_exceeded=1
0.0, # 0b10: exceeds_period=1, prev_exceeded=0
t_samp_prev # 0b11: exceeds_period=1, prev_exceeded=1
], t_samp_idx
)
)
# Evaluate the step response function. Note that the number of evaluation times is the
# number of chunks plus one.
f_eval = []
for k in range(system_values['chunk_width']+1):
# compute change time as an integer multiple of the TX period
chg_idx = m.bind_name(
f'chg_idx_{k}', (
system_values['slices_per_bank']*system_values['num_banks']
- (m.chunk_idx+1)*system_values['chunk_width']
+ k
)
)
# scale by TX period
t_chg = m.bind_name(f't_chg_{k}', chg_idx/system_values['freq_tx'])
# compute the kth evaluation time
t_eval = m.bind_name(f't_eval_{k}', t_samp - t_chg)
# evaluate the function (the last three inputs are used for updating the function contents)
f_eval.append(m.set_from_sync_func(f'f_eval_{k}', chan_func, t_eval, clk=m.clk, rst=m.rst,
wdata=wdata, waddr=waddr, we=we))
# Compute the pulse responses for each bit
pulse_resp = []
for k in range(system_values['chunk_width']):
pulse_resp.append(
m.bind_name(
f'pulse_resp_{k}',
weights[k]*(f_eval[k] - f_eval[k+1])
)
)
# sum up all of the pulse responses
pulse_resp_sum = m.bind_name('pulse_resp_sum', sum_op(pulse_resp))
# update the overall sample value
sample_value_pre = m.add_analog_state('analog_sample_pre', range_=5*system_values['vref_rx'])
m.set_next_cycle(
sample_value_pre,
if_(m.incr_sum, sample_value_pre + pulse_resp_sum, pulse_resp_sum),
clk=m.clk,
rst=m.rst
)
# add noise to the sample value
if system_values['use_noise']:
sample_noise = m.set_gaussian_noise(
'sample_noise',
std=m.noise_rms,
clk=m.clk,
rst=m.rst,
ce=m.write_output,
lfsr_init=m.noise_seed
)
sample_value = m.bind_name('sample_value', sample_value_pre + sample_noise)
else:
sample_value = sample_value_pre
# there is a special case in which the output should not be updated:
# when the previous cycle did not exceed the period, but this one did
# in that case the sample value should be held constant
should_write_output = m.bind_name('should_write_output',
(prev_exceeded | (~exceeds_period)) & m.write_output)
# determine out_sgn (note that the definition is opposite of the typical
# meaning; "0" means negative)
out_sgn = if_(sample_value < 0, 0, 1)
m.set_next_cycle(m.out_sgn, out_sgn, clk=m.clk, rst=m.rst, ce=should_write_output)
# determine out_mag
vref_rx, n_adc = system_values['vref_rx'], system_values['n_adc']
abs_val = m.bind_name('abs_val', if_(sample_value < 0, -1.0*sample_value, sample_value))
code_real_unclamped = m.bind_name('code_real_unclamped', (abs_val / vref_rx) * ((2**(n_adc-1))-1))
code_real = m.bind_name('code_real', clamp_op(code_real_unclamped, 0, (2**(n_adc-1))-1))
# TODO: clean this up -- since real ranges are not intervals, we need to tell MSDSL
# that the range of the signed integer is smaller
code_sint = to_sint(code_real, width=n_adc+1)
code_sint.format_ = SIntFormat(width=n_adc+1, min_val=0, max_val=(2**(n_adc-1))-1)
code_sint = m.bind_name('code_sint', code_sint)
code_uint = m.bind_name('code_uint', to_uint(code_sint, width=n_adc))
m.set_next_cycle(m.out_mag, code_uint, clk=m.clk, rst=m.rst, ce=should_write_output)
# generate the model
m.compile_to_file(VerilogGenerator())
self.generated_files = [filename]
@staticmethod
def check_func_error(placeholder, func):
        # calculate coefficients
coeffs = placeholder.get_coeffs(func)
# determine test points
samp = np.random.uniform(placeholder.domain[0],
placeholder.domain[1],
1000)
# evaluate the function at those test points using
# the calculated coefficients
approx = placeholder.eval_on(samp, coeffs)
# determine the values the function should have at
# the test points
exact = func(samp)
# calculate the error
err = np.max(np.abs(exact-approx))
# display the error
print(f'Worst-case error: {err}')
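    # A hedged, standalone illustration of the same worst-case-error check
    # (illustrative only, not part of this class): fit an ordinary numpy
    # polynomial in place of the MSDSL function placeholder and report the
    # maximum absolute error over random sample points, e.g.
    #     samp = np.random.uniform(-1.0, 1.0, 1000)
    #     coeffs = np.polyfit(samp, np.sin(samp), 5)
    #     approx = np.polyval(coeffs, samp)
    #     print(f'Worst-case error: {np.max(np.abs(np.sin(samp) - approx))}')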
@staticmethod
def required_values():
return ['dt', 'func_order', 'func_numel', 'chunk_width', 'num_chunks',
'slices_per_bank', 'num_banks', 'pi_ctl_width', 'vref_rx',
'vref_tx', 'n_adc', 'freq_tx', 'freq_rx', 'func_widths',
'func_exps', 'func_domain', 'use_jitter', 'use_noise']
|
from itertools import cycle
import dwave_networkx as dwnx
import networkx as nx
from matplotlib import pyplot as plt
from matplotlib.cm import get_cmap
from placeandroute.tilebased.heuristic import Constraint
def create(w):
ret = dwnx.chimera_graph(w, w)
return ret, dwnx.chimera_layout(ret)
def cnf_to_constraints(clauses, num_vars):
"""from each 3-cnf clause, generate a Constraint with a fresh ancilla variable"""
ancilla = num_vars + 1
for clause in clauses:
        # pair the ancilla with one literal; the remaining two literals are grouped together
clause = list(abs(l) for l in clause)
assert len(clause) == 3, clause
c = Constraint()
for literal in clause:
c.add_possible_placement([[ancilla, literal],[l for l in clause if l != literal]])
yield c
ancilla += 1
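# Hedged usage sketch (illustrative only; assumes a small 3-CNF formula and the
# Constraint API used above): each clause yields one Constraint whose placements
# pair a fresh ancilla with one literal and group the other two literals.
#     clauses = [[1, -2, 3], [2, 3, -4]]
#     constraints = list(cnf_to_constraints(clauses, num_vars=4))
#     print(len(constraints))  # 2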
def show_result(cg, xdict, layout=dwnx.chimera_layout):
"""Display a Chimera embedding using matplotlib"""
layout = layout(cg)
color = cycle(get_cmap("tab20").colors)
nx.draw(cg, layout, node_color="gray", edge_color="gray")
for k, vs in xdict.items():
col = next(color)
subcg = cg.subgraph(vs)
nx.draw(subcg, layout, node_color=[col] * subcg.number_of_nodes(), edge_color=[col] * subcg.number_of_edges())
plt.savefig("result.png")
|
# coding=utf-8
# Copyright 2015 Brave Labs sp. z o.o.
# All rights reserved.
#
# This source code and all resulting intermediate files are CONFIDENTIAL and
# PROPRIETARY TRADE SECRETS of Brave Labs sp. z o.o.
# Use is subject to license terms. See NOTICE file of this project for details.
# from celery import shared_task
import logging
from django.contrib import messages
from django.views.debug import CLEANSED_SUBSTITUTE, SafeExceptionReporterFilter
from django.views.generic import View
log = logging.getLogger(__name__)
def show_debug_toolbar(request):
from debug_toolbar.middleware import show_toolbar
return show_toolbar(request) or "debug" in request.GET or request.user.is_superuser
class ErrView(View):
"""Raise an error on purpose to test any health monitoring features"""
def get(self, request):
messages.warning(request, u"Error was tested ążźśęćńół")
from website.misc.signals import signal_raise_error
results = signal_raise_error.send_robust(self)
for receiver, response in results:
if isinstance(response, Exception):
log.warning(response, exc_info=(type(response), response, response.__traceback__))
raise Exception(u"Błąd ążśźęćńół")
class SaferExceptionReporterFilter(SafeExceptionReporterFilter):
"""Works even if tb_frame has no f_back attribute"""
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = getattr(tb_frame, 'f_back', None)
sensitive_variables = None
while current_frame is not None:
if current_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in current_frame.f_locals:
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed[name] = CLEANSED_SUBSTITUTE
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in tb_frame.f_locals:
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed['func_args'] = CLEANSED_SUBSTITUTE
cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
return cleansed.items()
def debug_who_disabled_a_logger(): # pragma: no cover
"""
If you need to trace what code might set handler.disabled to True (it is 0, so false, by default),
    you can replace the attribute with a property.
"""
# http://stackoverflow.com/a/28694704/260480
import logging
import sys
def get_disabled(self):
return self._disabled
def set_disabled(self, disabled):
# noinspection PyProtectedMember
frame = sys._getframe(1)
if disabled:
print('{}:{} disabled the {} logger'.format(
frame.f_code.co_filename, frame.f_lineno, self.name))
self._disabled = disabled
logging.Logger.disabled = property(get_disabled, set_disabled)
|
from model.board import Board, Location, NumberAssignmentError
from model.evaluator import Evaluator
from itertools import permutations
class Result:
complete = False
solution = Board()
success = False
class Solver:
def __init__(self):
self._debut_board = Board()
self._result = Result()
def _attempt(self):
missing_numbers = self._debut_board.get_missing_numbers()
empty_cells = self._debut_board.get_empty_cell_locations()
number_combinations = permutations(missing_numbers, len(missing_numbers))
for combo in number_combinations:
cloned_board = self._debut_board.clone()
cell_index = 0
for empty_cell in empty_cells:
try:
cloned_board.set_cell_value(empty_cell.get_x(), empty_cell.get_y(), combo[cell_index])
cell_index = cell_index + 1
except NumberAssignmentError:
break
if cloned_board.has_empty_cell():
continue
evaluator = Evaluator()
evaluator.evaluate(cloned_board)
if evaluator.is_complete() and evaluator.is_valid():
self._result.complete = True
self._result.success = True
self._result.solution = cloned_board
return
def solve(self, board: Board) -> Result:
self._result = Result()
self._debut_board = board
self._attempt()
return self._result
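# Hedged usage sketch (illustrative only; the Board and Evaluator APIs are those
# imported above): fill a partially completed board, then solve it.
#     board = Board()  # populate the known cells, e.g. via board.set_cell_value(x, y, value)
#     result = Solver().solve(board)
#     print(result.success, result.complete)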
|
from matplotlib import pyplot as plt
import tensorflow as tf
import numpy as np
#hyper params
lr = 0.1
epochs = 100
x_train = np.linspace(-1, 1,101)
y_train = 2 * x_train + np.random.randn(*x_train.shape) * 0.33
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
def model(X,w):
return tf.multiply(X,w)
w = tf.Variable(0.0,name='weights')
y_model = model(X, w)
cost = tf.square(Y-y_model)
tf_op = tf.train.GradientDescentOptimizer(lr).minimize(cost)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
for epoch in range(epochs):
for (x,y) in zip(x_train,y_train):
sess.run(tf_op,feed_dict = {X:x,Y:y})
w_val = sess.run(w)
sess.close()
plt.scatter(x_train, y_train)
y_learned = x_train*w_val
plt.plot(x_train,y_learned,'r')
plt.show()
|
"""
Trellis Histogram
-----------------
This example shows how to make a basic trellis histogram.
https://vega.github.io/vega-lite/examples/trellis_bar_histogram.html
"""
# category: histograms
import altair as alt
from vega_datasets import data
source = data.cars()
(
alt.Chart(source)
.mark_bar()
.encode(alt.X("Horsepower:Q", bin=True), y="count()", row="Origin")
)
|
from ..converter import Converter
from ..oval_node import restore_dict_to_tree
from .client_html_output import ClientHtmlOutput
from .client_json_input import ClientJsonInput
START_OF_FILE_NAME = 'graph-of-'
class JsonToHtml(ClientHtmlOutput, ClientJsonInput):
def __init__(self, args):
super().__init__(args)
self.hide_passing_tests = self.arg.hide_passing_tests
self.oval_tree = None
def get_only_fail_rule(self, rules):
"""
        Process the array of matched rule IDs in the selected file and
        return the IDs of the rules that failed.
"""
raise NotImplementedError
def _get_rows_of_unselected_rules(self):
"""
        Return the rows whose rule IDs were not selected in the selected file.
"""
raise NotImplementedError
def _get_message(self):
return {
'description': 'Client for visualization of JSON created by command arf-to-json',
'source_filename': 'JSON file',
}
def load_json_to_oval_tree(self, rule):
dict_of_tree = self.json_data_file[rule]
try:
return restore_dict_to_tree(dict_of_tree)
except Exception:
raise ValueError('Data is not valid for OVAL tree.')
def create_dict_of_oval_node(self, oval_node):
converter = Converter(oval_node)
return converter.to_JsTree_dict(self.hide_passing_tests)
def create_dict_of_rule(self, rule):
self.oval_tree = self.load_json_to_oval_tree(rule)
return self.create_dict_of_oval_node(self.oval_tree)
def _put_to_dict_oval_trees(self, dict_oval_trees, rule):
dict_oval_trees[rule.replace(
START_OF_FILE_NAME, '')] = self.create_dict_of_rule(rule)
def _get_src_for_one_graph(self, rule):
return self.get_save_src(rule.replace(START_OF_FILE_NAME, ''))
def prepare_parser(self, parser):
super().prepare_parser(parser)
self.prepare_args_when_output_is_html(parser)
|
"""Ensemble module."""
import numpy as np
from tqdm import tqdm
from .parser import Parser
from ..utils.metrics import timing
from .conformation import Conformation
from .transform import Transform
class Ensemble:
"""Default class for Ensemble."""
def __init__(
self,
base_conformation: Conformation,
transform: Transform,
parser: Parser,
debug_mode: bool
):
"""Ensemble initialization."""
self.base_conformation = base_conformation
self.transform = transform
self.parser = parser
self.object_length = len(self.base_conformation.coords_in_center_of_mass[0])
self.conformations = []
self.debug_mode = debug_mode
def count_rotation_matrix(self, axis, theta):
"""
The function to calculate a rotation matrix using quaternion multiplication.
Parameters:
axis (A 3-by-1 numpy array):
The axis, around which the rotation is to be performed.
theta (Scalar):
                Angle (in radians) by which the desired 3D object
                frame is rotated around the given axis.
Returns:
matrix (A 3-by-3 numpy array): Rotation matrix.
"""
c = np.cos(theta)
s = np.sin(theta)
matrix = np.array(
[
[
axis[0] * axis[0] * (1.0 - c) + c,
axis[0] * axis[1] * (1.0 - c) - axis[2] * s,
axis[0] * axis[2] * (1.0 - c) + axis[1] * s,
],
[
axis[1] * axis[0] * (1.0 - c) + axis[2] * s,
axis[1] * axis[1] * (1.0 - c) + c,
axis[1] * axis[2] * (1.0 - c) - axis[0] * s,
],
[
axis[2] * axis[0] * (1.0 - c) - axis[1] * s,
axis[2] * axis[1] * (1.0 - c) + axis[0] * s,
axis[2] * axis[2] * (1.0 - c) + c,
],
]
)
return matrix
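    # Hedged sanity check (illustrative only): count_rotation_matrix does not use
    # `self`, so the axis-angle (Rodrigues) formula above can be checked standalone:
    #     R = Ensemble.count_rotation_matrix(None, np.array([0.0, 0.0, 1.0]), np.pi / 2)
    # R should be close to [[0, -1, 0], [1, 0, 0], [0, 0, 1]], i.e. a 90-degree
    # rotation about the z axis.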
@timing
def rmsd_angle(self, chosen_axis):
"""
The function to calculate the rotation angle for a given combination of partition and RMSD parameters.
Parameters:
chosen_axis (A 3-by-1 numpy array): The axis, which serves as a basis for performing rotation calculations.
Returns:
trans_angle (Float): The rotation angle, corresponding to the given partition and RMSD.
partition (Float): Redefined partition parameter,
changes in case there is no suitable rotation angle for a given set of parameters.
"""
partition = self.transform.partition
ntn = np.dot(
chosen_axis, np.dot(self.base_conformation.inertia_tensor, chosen_axis.T)
)
in_arcsin = np.sqrt(
np.power(self.transform.rmsd, 2)
* partition
* self.base_conformation.total_weight
/ (4 * ntn)
)
if in_arcsin > 1:
trans_angle = np.pi
partition = (4 / self.base_conformation.total_weight * ntn) / np.power(
self.transform.rmsd, 2
)
print(
"Warning: arcsin exceeds 1 for the given conformation, replacing with pi instead."
)
else:
trans_angle = 2 * np.arcsin(in_arcsin)
return trans_angle, partition
@timing
def generate_conformation(self, chosen_axis):
"""
The function to generate a conformation, which was transformed using the given partition and RMSD parameters.
Parameters:
chosen_axis (A 3-by-1 numpy array):
The axis, which serves as a basis for performing rotation calculations.
Returns:
conformation (An N-by-3 numpy array):
3D object point array, which was transformed using the given partition and RMSD parameters.
"""
angle, partition = self.rmsd_angle(chosen_axis)
rotation_matrix = self.count_rotation_matrix(chosen_axis, angle)
conformation = np.sum(
[
np.matmul(
self.base_conformation.coords_in_center_of_mass, rotation_matrix
),
self.transform.rmsd * np.sqrt(1 - partition) * chosen_axis,
],
axis=0,
)
if self.debug_mode:
self.check_rmsd(conformation)
conformation = conformation + self.base_conformation.center_of_mass
return conformation
@timing
def check_rmsd(self, conformation):
"""
The function which performs trivial RMSD calculation for comparison with Eurecon.
Parameters:
conformation (An N-by-3 numpy array): 3D object point array,
which was transformed using the given partition and RMSD parameters.
Returns:
None.
"""
trivial_rmsd = self.calc_trivial_rmsd(conformation)
if (
not self.transform.rmsd - 0.00001
< trivial_rmsd
< self.transform.rmsd + 0.00001
):
raise ValueError('Wrong RMSD')
def calc_trivial_rmsd(self, a):
"""
The function which calculates trivial RMSD.
Parameters:
a (An N-by-3 numpy array): 3D object point array,
which was transformed using the given partition and RMSD parameters.
Returns:
RMSD value.
"""
b = self.base_conformation.coords_in_center_of_mass
return np.sqrt(
(1 / self.base_conformation.object_length) * np.sum((a - b) ** 2)
)
def generate_ensemble(self, stdout_mode: bool = True):
"""
The function which generates an ensemble of conformations.
Parameters:
stdout_mode (Boolean): Parameter which allows/prohibits writing conformation ensembles into files.
Returns:
None.
"""
POINT_CLOUD = ("pcd", "pts", "xyz")
TRIANGLE_MESH = ("stl", "obj", "off", "gltf", "ply")
        # the file extension determines whether the object is a point cloud or a mesh
        object_type = str(self.base_conformation.data_file_name).rsplit('.', 1)[-1]
if object_type in POINT_CLOUD:
print_type = "Point Cloud"
else:
print_type = "Mesh"
        print(
            f'Augmenting {self.base_conformation.data_file_name} object'
            f' | Type: {print_type} | RMSD Value: {self.transform.rmsd}'
        )
name_of_bar = 'Processing conformations' if stdout_mode else 'Generating conformations'
bar = tqdm(total=len(self.transform.axes), desc=name_of_bar)
for counter, axis in enumerate(self.transform.axes):
new_conformation = self.generate_conformation(axis)
if stdout_mode:
self.parser.write_conformation(
self.base_conformation.data_object,
new_conformation,
self.base_conformation.data_file_name,
counter,
self.base_conformation.object_length,
)
else:
self.conformations.append(new_conformation)
bar.update()
bar.close()
def write(self):
"""
The function which performs the file generation.
Parameters:
None.
Returns:
None.
"""
self.parser.write_all_conformations(self.conformations, self.base_conformation)
|
'''
The MIT License (MIT)
Copyright (c) 2014 Stefan Lohmaier
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import json
import unittest
from slojsonrpc import SLOJSONRPC
from .api import sessionfaker
try:
import cherrypy
class fake_request:
def __init__(self, method, body):
self.method = method
class fake_body:
def __init__(self, body):
self.body = body
def read(self):
return self.body
self.body = fake_body(body)
class JSONRPCTestCherryPy(unittest.TestCase):
def setUp(self):
self.jsonrpc = SLOJSONRPC(sessionfaker())
def test_request(self):
cherrypy.request = fake_request(
'PUT', '{"jsonrpc": "2.0", "method": "ping", "id": 1}')
self.assertEqual(
json.loads('{"jsonrpc": "2.0", "id": 1, "result": "pong"}'),
json.loads(self.jsonrpc()))
def test_invalid_method(self):
cherrypy.request = fake_request(
'PUT', '{"jsonrpc": "2.0", "method": "ping", "id": 1}')
self.assertEqual(
json.loads('{"jsonrpc": "2.0", "id": 1, "result": "pong"}'),
json.loads(self.jsonrpc()))
cherrypy.request = fake_request(
'POST', '{"jsonrpc": "2.0", "method": "ping", "id": 1}')
self.assertEqual(
json.loads('{"jsonrpc": "2.0", "id": 1, "result": "pong"}'),
json.loads(self.jsonrpc()))
cherrypy.request = fake_request(
'PUTT', '{"jsonrpc": "2.0", "method": "ping", "id": 1}')
self.assertEqual('Method "PUTT" not allowed.', self.jsonrpc())
except:
pass
|
from frowns import Smiles
from frowns import Smarts
mol = Smiles.smilin("CCN")
pattern = Smarts.compile("CCN")
# simple match
match = pattern.match(mol)
assert match
index = 1
for path in match:
print "match", index
print "\tatoms", path.atoms
print "\tbond", path.bonds
index = index + 1
print "*"*33
# more complicated match
pattern = Smarts.compile("C*")
match = pattern.match(mol)
assert match
index = 1
for path in match:
print "match", index
print "\tatoms", path.atoms
print "\tbond", path.bonds
index = index + 1
print "*"*33
pattern = Smarts.compile("[!N]-[!C]")
match = pattern.match(mol)
assert match
index = 1
for path in match:
print "match", index
print "\tatoms", path.atoms
print "\tbond", path.bonds
index = index + 1
|
#
# (c) Copyright 2016 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017-2018 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import logging.config
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import dsa
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from ardana_configurationprocessor.cp.model.VariablePlugin \
import VariablePlugin
from ardana_configurationprocessor.cp.model.CPLogging \
import CPLogging as KenLog
LOG = logging.getLogger(__name__)
class RandomSshKeyVariable(VariablePlugin):
def __init__(self, instructions, models, controllers):
super(RandomSshKeyVariable, self).__init__(
2.0, instructions, models, controllers,
'random-sshkey-2.0')
LOG.info('%s()' % KenLog.fcn())
def calculate(self, payload=None):
LOG.info('%s()' % KenLog.fcn())
if not payload:
payload = dict()
if 'algorithm' not in payload:
payload['algorithm'] = 'RSA'
elif payload['algorithm'] not in ('RSA', 'DSA',):
self.add_error('algorithm must be one of RSA or DSA')
return None
if 'length' not in payload:
payload['length'] = 2048
if 'comment' not in payload:
payload['comment'] = None
if 'passphrase' not in payload:
payload['passphrase'] = None
return self._calculate(payload)
def is_immutable(self):
return True
def _calculate(self, payload):
LOG.info('%s()' % KenLog.fcn())
algorithm = payload['algorithm']
length = payload['length']
passphrase = payload['passphrase']
comment = payload['comment']
if algorithm == 'DSA':
new_key = dsa.generate_private_key(
key_size=length, backend=default_backend())
else:
new_key = rsa.generate_private_key(
public_exponent=3, key_size=length, backend=default_backend())
key_encryption = serialization.NoEncryption()
if passphrase:
key_encryption = serialization.BestAvailableEncryption(
passphrase)
private_key = new_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=key_encryption
)
public_key = new_key.public_key().public_bytes(
encoding=serialization.Encoding.OpenSSH,
format=serialization.PublicFormat.OpenSSH
)
value = {
'private': private_key,
'public': public_key,
}
if comment is not None:
value['public'] += " " + comment
return value
@property
def instructions(self):
return self._instructions
@property
def models(self):
return self._models
@property
def controllers(self):
return self._controllers
|
import json
from qc_bot.qc_bot import bot
from qc_bot import bot_commands, bot_events
# Modules bot_commands, bot_events are imported
# just to run the code in them
if __name__ == "__main__":
config = None
with open("config.json", "r") as config_file:
config = json.load(config_file)
discord_api_key = config["API_KEY"]
bot.run(discord_api_key)
|
#!/bin/python
from os import path
from shutil import copyfile, copy
from io import open
import fnmatch
import os
resxfiles = "oem_files.conf"
corpconf = "oem.properties"
rcconf = "oem_resources.conf"
src = path.abspath("resources")
dst = path.abspath(r"..\..")
resx=[]
def getResx(dir, resx):
for root, dirs, files in os.walk(dir):
for f in fnmatch.filter(files, '*.resx'):
resx.append(os.path.join(root, f))
getResx(r'..\..\XenAdmin', resx)
getResx(r'..\..\XenModel', resx)
# get oem name
old = []
new = []
cf = open(corpconf, encoding='utf-8')
for kv in cf:
if kv.startswith('#'):
continue
    if not kv.strip():
        continue
(k, v) = kv.split('=')
old.append(k)
new.append(v.rstrip())
cf.close()
print("---------------------------------------------------------")
for o, n in zip(old, new):
print(o + "=====>" + n)
print("---------------------------------------------------------")
for resx_file in resx:
print(resx_file)
try:
lines = []
with open(resx_file, encoding='utf-8') as infile:
for line in infile:
for o,n in zip(old, new):
line = line.replace(o, n)
lines.append(line)
with open(resx_file, 'w', encoding='utf-8') as outfile:
for line in lines:
outfile.write(line)
except:
lines = []
with open(resx_file) as infile:
for line in infile:
for o,n in zip(old, new):
line = line.replace(o, n)
lines.append(line)
with open(resx_file, 'w') as outfile:
for line in lines:
outfile.write(line)
# replace the resx files
ff = open(resxfiles)
encode = 'ascii'
for sf in ff:
f = path.join(dst, path.normpath(sf.rstrip()))
print(f)
try:
lines = []
with open(f, encoding='utf-8') as infile:
for line in infile:
for o,n in zip(old, new):
line = line.replace(o, n)
lines.append(line)
with open(f, 'w', encoding='utf-8') as outfile:
for line in lines:
outfile.write(line)
except:
lines = []
with open(f) as infile:
for line in infile:
for o,n in zip(old, new):
line = line.replace(o, n)
lines.append(line)
with open(f, 'w') as outfile:
for line in lines:
outfile.write(line)
ff.close()
# replace the images
rf = open(rcconf)
for pic in rf:
(k, v) = pic.split('=')
copyfile(path.join(src, path.normpath(k)), path.join(dst, path.normpath(v.rstrip())))
|
"""
Get records from the table
returns dict if `dictionary is True`
==> https://dev.mysql.com/doc/connector-python/en/connector-python-api-mysqlconnection-cursor.html
"""
import json
from mysql.connector import Error as mysqlError
from typing import List
from helperFunc import getRecordsCount
# get all records
def readTableAll(db_connection,
table_name: str,
close_connection_afterward: bool = True) -> List[tuple]:
"""
Public function. Query full table data.
Args
---
db_connection: `(class) MySQLConnection`, Connection to a MySQL Server
table_name: `str`, the table you want to insert data in
close_connection_afterward: `bool`, default `True`. Choose to close `db_cursor` and `mysql connection` after operation.
Queried result from MySQL
---
`List[tuple]`: e.g. `[(id, column1, column2, ...), (...), ...]`
Return
---
`Iterable[dict]`, a `list of dict` in json format.
e.g.
[
{'Title': 'Breaking Bad', 'Year': 2008},
{'Title': 'Game of Thrones', 'Year': 2011},
...
]
Remark
------
    `json.dumps(the_return_dict_list)` converts the returned list of dicts into a JSON string.
"""
print(f'mysql> Reading records from `{table_name}` table in `{db_connection.database}` database... ', end='')
# creating a db_cursor to perform a sql operation
# returns dict list if `dictionary is True` ==> https://dev.mysql.com/doc/connector-python/en/connector-python-api-mysqlconnection-cursor.html
db_cursor = db_connection.cursor(dictionary=True)
# sql query
query = f'''SELECT * FROM {table_name};'''
record = None
try:
count = getRecordsCount(cursor=db_cursor, table_name=table_name)
if count == 0:
print(f'\n\t==> Fail.')
print(f'\tmysql> No data present in `{table_name}` table in `{db_connection.database}` database.')
else:
# execute the command
db_cursor.execute(query)
record = db_cursor.fetchall()
print(f'==> Done!')
except(Exception, mysqlError) as error:
print(f'\n\t==> Fail.')
print(f'\t> Error = `{error}`')
if close_connection_afterward:
if db_connection.is_connected():
db_cursor.close()
db_connection.close()
print('mysql>>> MySQL connection is closed\n')
return record #json.dumps(record, indent=4, sort_keys=True, default=str)
# driver code
# if __name__ == '__main__':
# # connect to database and get all data
# readTableAll(connect())
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 20 10:29:38 2017
@author: neh69
"""
import os
import sys
import numpy as np
import pandas as pd
import lmfit as lm
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from PyQt5 import QtCore, QtWidgets
import visionplot_widgets
import mriplotwidget
from ImageData import T2imageData
def openStudyDir():
dlg = QtWidgets.QFileDialog()
returned_data = dlg.getExistingDirectory(None, "Study Directory", "")
print("openStudyDir\n",returned_data, type(returned_data))
# tree_window.setRootIndex(tree_window.model.index(returned_data))
def openNiftiAnalyzeFile():
dlg = QtWidgets.QFileDialog()
returned_data = dlg.getOpenFileName(None, "MRI data nifti/analyze", procDataDirPath, "nii files (*.nii);;analyze files (*.img);;All files (*)")
print(returned_data)
def getH5file():
dlg = QtWidgets.QFileDialog()
returned_data = dlg.getOpenFileName(None, "select results file", procDataDirPath, "CSV files (*.csv);;All files (*)")
pathandfilename = returned_data[0]
#self.hd5_store = pd.HDFStore(pathandfilename)
if len(pathandfilename) > 0:
### attempt to extract details from data
print(pathandfilename)
imageData.readin_alldata_from_results_filename( os.path.abspath(pathandfilename))
if imageData.read_T2_img_hdr_files():
print("just before read_T2_data()")
if imageData.read_T2_data():
imageData.read_Dixon_data()
print("just after read_T2_data()")
mainWindow.setWindowTitle(imageData.T2resultsFilenameAndPath)
#### Update image displayed in window
imageData.overlayRoisOnImage(0, imageData.fittingParam)
# mri_window.update_plot(imageData.img1)
mri_window.update_plot(imageData.mriSliceIMG, imageData.maskedROIs)
print("type(imageData.ImageDataT2)",type(imageData.ImageDataT2))
hist_window.update_plot([1,imageData.T2slices,imageData.dixonSlices], [imageData.t2_data_summary_df, imageData.dixon_data_summary_df], "T2m")
bar_window.update_plot([1,imageData.T2slices,imageData.dixonSlices], [imageData.t2_data_summary_df, imageData.dixon_data_summary_df], "T2m")
#### set min max on sliders
mri_window.slicesSlider.setMinimum(0)
mri_window.slicesSlider.setMaximum(imageData.numSlicesT2-1)
mri_window.slicesSlider.setValue(0)
mri_window.echoesSlider.setMinimum(0)
mri_window.echoesSlider.setMaximum(imageData.numEchoesT2-1)
mri_window.slicesSlider.setValue(0)
else:
print(imageData.t2_image_hdr_pathfilename, " not found")
def fileQuit(self):
self.close()
def closeEvent(self, ce):
self.fileQuit()
if __name__ == "__main__":
lmparams = {}
epgt2fitparams = lm.Parameters()
azzt2fitparams = lm.Parameters()
epgt2fitparams.add('T2fat', value = 180.0, min=0, max=5000, vary=False)
epgt2fitparams.add('T2muscle', value = 35, min=0, max=100, vary=True )
epgt2fitparams.add('Afat', value = 0.20, min=0, max=10, vary=True )
epgt2fitparams.add('Amuscle', value = 0.80, min=0, max=10, vary=True )
epgt2fitparams.add('T1fat', value = 365.0, vary=False)
epgt2fitparams.add('T1muscle', value = 1400, vary=False)
epgt2fitparams.add('echo', value = 10.0, vary=False)
epgt2fitparams.add('B1scale', value = 1.0, min=0, max=2, vary=True )
azzt2fitparams.add_many(('Afat', 60.0, True, 0, 250, None),
('Amuscle', 40.0, True, 0, 250, None),
('T2muscle', 40.0, True, 0, 100, None),
('c_l', 0.55, False, 0, 2000, None),
('c_s', 0.45, False, 0, 2000, None),
('t2_fl', 250.0, False, 0, 2000, None),
('t2_fs', 43.0, False, 0, 2000, None),
('echo', 10.0, False, 0, 2000, None))
lmparams['epgt2fitparams'] = epgt2fitparams
lmparams['azzt2fitparams'] = azzt2fitparams
params=azzt2fitparams
matplotlib.use('Qt5Agg')
plt.style.context('seaborn-colorblind')
sns.set(font_scale = 0.6)
# sns.set_palette("pastel")
procDataDirPath = r"/home/eric/Documents/projects/programming/2019/mri_progs/T2EPGviewer/studyData/testStudy/HC-001/sess-1/upperleg/T2/results/muscle/AzzEPG"
progname = os.path.basename(sys.argv[0])
qApp = QtWidgets.QApplication(sys.argv)
imageData = T2imageData()
print("imageData.fittingParam:",imageData.fittingParam)
mainWindow = QtWidgets.QMainWindow()
mainWindow.setAttribute(QtCore.Qt.WA_DeleteOnClose)
mainWindow.setWindowTitle("application main window")
file_menu = QtWidgets.QMenu('&File', mainWindow)
# file_menu.addAction("&Open study Directory", openStudyDir)
file_menu.addAction('&Choose Study Results File', getH5file, QtCore.Qt.CTRL + QtCore.Qt.Key_H)
# file_menu.addAction('&Open nifti/analyze image File', openNiftiAnalyzeFile )
# file_menu.addAction('&Choose Rois', imageData.getRoiFiles, QtCore.Qt.CTRL + QtCore.Qt.Key_R)
# file_menu.addAction('&Quit', fileQuit, QtCore.Qt.CTRL + QtCore.Qt.Key_Q)
mainWindow.menuBar().addMenu(file_menu)
main_widget = QtWidgets.QWidget(mainWindow)
mainlayout = QtWidgets.QHBoxLayout(main_widget)
# mainWindow.setCentralWidget(main_widget)
# plot_window1 = mri_widget(main_widget)
npts = 256*100
iii = np.random.permutation(np.arange(255*255))[:npts]
ddd = np.random.randn(npts)*100+500
data_df = pd.DataFrame({'iii': iii, 'ddd':ddd})
leftwindow = QtWidgets.QWidget()
rightwindow = QtWidgets.QWidget()
splitHwidget = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
hlayout = QtWidgets.QHBoxLayout(leftwindow)
vlayout = QtWidgets.QVBoxLayout(rightwindow)
mri_window = mriplotwidget.MRIPlotWidget( imageData=imageData)
rbtns_window = visionplot_widgets.radiobuttons_fitWidget(mri_window=mri_window)
t2plot_window = visionplot_widgets.T2PlotWidget( lmparams, showToolbar=False)
bar_window = visionplot_widgets.BarPlotWidget( showToolbar=False, data_df=data_df, image_size=256)
hist_window = visionplot_widgets.HistogramPlotWidget( mri_plot=mri_window, showToolbar=True,data_df=data_df, image_size=256)
mainlayout.addWidget(splitHwidget)
hlayout.addWidget(rbtns_window)
hlayout.addWidget(mri_window)
vlayout.addWidget(t2plot_window)
vlayout.addWidget(bar_window)
vlayout.addWidget(hist_window)
splitHwidget.addWidget(leftwindow)
splitHwidget.addWidget(rightwindow )
mri_window.register_PlotWidgets(t2plot_window, bar_window, hist_window, rbtns_window)
main_widget.setFocus()
mainWindow.setCentralWidget(main_widget)
mainWindow.show()
sys.exit(qApp.exec_())
|
import random
import time
import cPickle
import numpy as np
import subprocess as sub
class Optimizer(object):
def __init__(self, pop, selection_func, mutation_func, evaluation_func, num_rand_inds=1):
self.pop = pop
self.select = selection_func
self.mutate = mutation_func
self.evaluate = evaluation_func
self.num_rand_inds = num_rand_inds
self.continued_from_checkpoint = False
self.start_time = None
self.max_fitness = 1e10
self.autosuspended = False
def elapsed_time(self, units="s"):
if self.start_time is None:
self.start_time = time.time()
s = time.time() - self.start_time
if units == "s":
return s
elif units == "m":
return s / 60.0
elif units == "h":
return s / 3600.0
def save_checkpoint(self, directory, gen):
sub.call("mkdir {0}/pickledPops{1}".format(directory, self.pop.seed), shell=True)
random_state = random.getstate()
numpy_random_state = np.random.get_state()
data = [self, random_state, numpy_random_state]
with open('{0}/pickledPops{1}/Gen_{2}.pickle'.format(directory, self.pop.seed, gen), 'wb') as handle:
cPickle.dump(data, handle, protocol=cPickle.HIGHEST_PROTOCOL)
def run(self, max_hours_runtime, max_gens, checkpoint_every, directory="."):
self.start_time = time.time()
if self.autosuspended:
sub.call("rm %s/AUTOSUSPENDED" % directory, shell=True)
if not self.continued_from_checkpoint: # generation zero
self.evaluate(self.pop)
self.select(self.pop) # only produces dominated_by stats, no selection happening (population not replaced)
while self.pop.gen < max_gens:
if self.pop.gen % checkpoint_every == 0: # and self.pop.gen > 0
print "Saving checkpoint at generation {0}".format(self.pop.gen+1)
self.save_checkpoint(directory, self.pop.gen)
if self.elapsed_time(units="h") > max_hours_runtime or self.pop.best_fit_so_far == self.max_fitness:
self.autosuspended = True
print "Autosuspending at generation {0}".format(self.pop.gen+1)
self.save_checkpoint(directory, self.pop.gen)
# keep going but checkpoint every gen at this point
# break
self.pop.gen += 1
# update ages
self.pop.update_ages()
# mutation
print "Mutation starts"
new_children = self.mutate(self.pop)
print "Mutation ends: successfully generated %d new children." % (len(new_children))
# combine children and parents for selection
print "Now creating new population"
self.pop.append(new_children)
for _ in range(self.num_rand_inds):
print "Random individual added to population"
self.pop.add_random_individual()
print "New population size is %d" % len(self.pop)
# evaluate fitness
print "Starting fitness evaluation"
eval_timer = time.time()
self.evaluate(self.pop)
print "Fitness evaluation finished in {} seconds".format(time.time()-eval_timer)
# perform selection by pareto fronts
new_population = self.select(self.pop)
# replace population with selection
self.pop.individuals = new_population
print "Population size reduced to %d" % len(self.pop)
if not self.autosuspended: # print end of run stats
print "Finished {0} generations".format(self.pop.gen + 1)
print "DONE!"
|
import re
import struct
import unicodedata
from .regex import (
EMAIL_REGEX,
EMOJI_REGEX,
HASHTAG_REGEX,
MULTIWHITESPACE_REGEX,
NON_ALNUMWHITESPACE_REGEX,
TELEGRAM_LINK_REGEX,
URL_REGEX,
)
def add_surrogate(text):
return ''.join(
# SMP -> Surrogate Pairs (Telegram offsets are calculated with these).
# See https://en.wikipedia.org/wiki/Plane_(Unicode)#Overview for more.
''.join(chr(y) for y in struct.unpack('<HH', x.encode('utf-16le')))
if (0x10000 <= ord(x) <= 0x10FFFF) else x for x in text
)
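# Hedged worked example (illustrative only): characters outside the BMP expand
# into their UTF-16 surrogate pair, matching how Telegram counts offsets, e.g.
#     len(add_surrogate('a\U0001F600b'))  ->  4   (the non-BMP character becomes two surrogate code units)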
def cast_string_to_single_string(s):
processed = MULTIWHITESPACE_REGEX.sub(' ', NON_ALNUMWHITESPACE_REGEX.sub(' ', s))
processed = processed.strip().replace(' ', '-')
return processed
def clean_text(text):
text = remove_markdown(remove_emoji(text))
text = remove_url(text)
text = despace_smart(text)
return text.strip()
def despace(text):
text = re.sub(r'\n+', '\n', text)
text = re.sub(r'[ \t]+', ' ', text)
text = re.sub(r'\n[ \t]+', '\n', text)
return text
def despace_full(text):
return re.sub(r'\s+', ' ', text).strip()
def despace_smart(text):
text = re.sub(r'\n\s*[-•]+\s*', r'\n', text)
text = re.sub(r'\n{2,}', r'\n', text).strip()
text = re.sub(r'\.?(\s+)?\n', r'. ', text)
text = re.sub(r'\s+', ' ', text)
return text
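# Hedged worked example (illustrative only): despace_smart collapses bulleted,
# multi-line text into a single sentence-like line, e.g.
#     despace_smart('a\n\n- b\n')  ->  'a. b'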
def escape_format(text):
text = text.replace("__", "_").replace("**", "*").replace("`", "'")
text = text.replace('[', r'`[`').replace(']', r'`]`')
return text
def remove_markdown(text):
text = re.sub('[*_~]{2,}', '', text)
text = re.sub('[`]+', '', text)
text = re.sub(r'\[\s*(.*?)(\s*)\]\(.*?\)', r'\g<1>\g<2>', text, flags=re.MULTILINE)
return text
def normalize_string(string):
string = re.sub('[^a-zA-Z0-9_\\-]+', '', string.lower().strip().replace(' ', '-'))
return unicodedata.normalize('NFKD', string).encode('ascii', 'ignore').decode('utf-8')
def remove_emails(text):
return re.sub(EMAIL_REGEX, '', text)
def remove_emoji(text):
text = re.sub(EMOJI_REGEX, '', text)
text = re.sub(u'\ufe0f', '', text)
return text
def remove_hashtags(text):
return re.sub(HASHTAG_REGEX, '', text)
def remove_url(text):
return re.sub(URL_REGEX, '', text)
def replace_telegram_link(text):
return re.sub(TELEGRAM_LINK_REGEX, r'@\1', text)
def split_at(s, pos):
if len(s) < pos:
return s
pos -= 10
pos = max(0, pos)
for p in range(pos, min(pos + 20, len(s) - 1)):
if s[p] in [' ', '\n', '.', ',', ':', ';', '-']:
return s[:p] + '...'
return s[:pos] + '...'
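# Hedged worked example (illustrative only): split_at backs up 10 characters from
# the requested position and then advances to the next separator, e.g.
#     split_at('hello world this is a long sentence', 15)  ->  'hello...'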
def unwind_hashtags(text):
return re.sub(HASHTAG_REGEX, r'\2', text)
|
from skimage.measure import compare_ssim
import imutils
import cv2
imageA = cv2.imread("images/t.png")
imageB = cv2.imread("images/t1.png")
grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)
(score, diff) = compare_ssim(grayA, grayB, full=True)
diff = (diff * 255).astype("uint8")
print("SSIM: {}".format(score))
thresh = cv2.threshold(diff, 0, 255,cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
for c in cnts:
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(imageA, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.rectangle(imageB, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.imshow("ImageA", imageA)
cv2.imshow("ImageB", imageB)
cv2.waitKey(0)
|
class MicroarrayModule:
def __init__(self, owner):
self.owner = owner
def get_parameter_set(self):
return self.owner.get_parameter_set()
def get_m_query_id(self):
return self.owner.get_m_query_id()
def get_t_metadata(self):
return self.owner.get_t_metadata()
def set_t_metadata(self,metadata):
return self.owner.set_t_metadata(metadata)
def get_t_gene_annotation(self):
return self.owner.get_t_gene_annotation()
|
class MessageBus(object):
def __init__(self):
self.handlers = {}
def register_handler(self, event, handler):
if event in self.handlers:
self.handlers[event].append(handler)
else:
self.handlers[event] = [handler]
def fire_event(self, event, **params):
if event not in self.handlers:
return
for handler in self.handlers[event]:
result = handler(**params)
if result:
return result
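if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): handlers run in registration
    # order and fire_event returns the first truthy handler result.
    bus = MessageBus()
    bus.register_handler("greet", lambda name: "hello " + name)
    bus.register_handler("greet", lambda name: "never reached")
    print(bus.fire_event("greet", name="world"))  # -> hello world
    print(bus.fire_event("unknown"))              # -> None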
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### Demonstration of how to estimate constituents of QC500 index based on the company fundamentals
### The algorithm creates a default tradable and liquid universe containing 500 US equities
### which are chosen at the first trading day of each month.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="universes" />
### <meta name="tag" content="coarse universes" />
### <meta name="tag" content="fine universes" />
class ConstituentsQC500GeneratorAlgorithm(QCAlgorithm):
def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must be initialized.'''
self.UniverseSettings.Resolution = Resolution.Daily
self.SetStartDate(2018, 1, 1) # Set Start Date
self.SetEndDate(2019, 1, 1) # Set End Date
self.SetCash(100000) # Set Strategy Cash
# Add QC500 Universe
self.AddUniverse(self.Universe.QC500)
|
"""
Slack Bot Commands Loader
"""
import os
from ebr_trackerbot import module_loader
module_loader.load(os.path.dirname(__file__), "ebr_trackerbot.storage")
|
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse, parse_qs
from cowpy import cow
import json
import sys
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
parsed_path = urlparse(self.path)
parsed_qs = parse_qs(parsed_path.query)
# import pdb; pdb.set_trace()
if parsed_path.path == '/':
self.send_response(200)
self.end_headers()
self.wfile.write(return_html_string())
return
elif parsed_path.path == '/cowsay':
self.send_response(200)
self.end_headers()
self.wfile.write(b'Helpful instructions about this application')
return
elif parsed_path.path == '/cow':
try:
# import pdb; pdb.set_trace()
msg = parsed_qs['msg'][0]
print(msg)
except (KeyError, json.decoder.JSONDecodeError):
self.send_response(400)
self.end_headers()
self.wfile.write(b'You did a bad thing')
return
cheese = cow.Moose(thoughts=True)
message = cheese.milk(msg)
self.send_response(200)
self.end_headers()
self.wfile.write(message.encode('utf8'))
return
else:
self.send_response(404)
self.end_headers()
self.wfile.write(b'Not Found')
def do_POST(self):
parsed_path = urlparse(self.path)
parsed_qs = parse_qs(parsed_path.query)
if parsed_path.path == '/cow':
try:
msg = parsed_qs['msg'][0]
cheese = cow.Moose(thoughts=True)
message = cheese.milk(msg)
post_dict = {}
post_dict['content'] = message
self.send_response(200)
self.end_headers()
self.wfile.write(json.dumps(post_dict).encode('utf8'))
return
except (KeyError, json.decoder.JSONDecodeError):
self.send_response(400)
self.end_headers()
self.wfile.write(b'You did a bad thing')
return
def create_server():
return HTTPServer(('127.0.0.1', 3000), SimpleHTTPRequestHandler)
def run_forever():
server = create_server()
try:
print('Starting server on port 3000')
server.serve_forever()
except KeyboardInterrupt:
server.shutdown()
server.server_close()
# sys.exit()
def return_html_string():
return b'''<!DOCTYPE html>
<html>
<head>
<title> cowsay </title>
</head>
<body>
<header>
<nav>
<ul>
<li><a href="/cowsay">cowsay</a></li>
</ul>
</nav>
    </header>
<main>
<!-- project description -->
</main>
</body>
</html>'''
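# Hedged usage sketch (illustrative only; assumes the server is running via
# run_forever() on 127.0.0.1:3000): the /cow endpoint reads a `msg` query parameter.
#     GET  http://127.0.0.1:3000/cow?msg=hello   -> plain-text cow ASCII art
#     POST http://127.0.0.1:3000/cow?msg=hello   -> {"content": "<cow art>"}
# For example, with the standard library:
#     from urllib.request import urlopen
#     print(urlopen('http://127.0.0.1:3000/cow?msg=hello').read().decode('utf8'))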
if __name__ == '__main__':
run_forever()
|
#!/usr/bin/python
def stringWrap(toWrap):
'''
@param {String} toWrap
@return ""toWrap""
Ie, if you call
    print(stringWrap('mesh'));
> "\"mesh\""
'''
if not toWrap:
return '""'
return '"\\"' + toWrap + '\\""'
def numWrap(toWrap):
return str(toWrap);
##Note: this vector is ordered to agree with the order in which
##arguments for the scene.db file read by csvobjectfactory
DEFAULT_VAL_VECTOR = [
['objtype', 'mesh', stringWrap],
['pos_x', 0, numWrap],
['pos_y', 0, numWrap],
['pos_z', 0, numWrap],
['orient_x', 0, numWrap],
['orient_y', 0, numWrap],
['orient_z', 1, numWrap],
['orient_w', 0, numWrap],
['vel_x', 0, numWrap],
['vel_y', 0, numWrap],
['vel_z', 0, numWrap],
["rot_axis_x", 0, numWrap],
["rot_axis_y", 0, numWrap],
["rot_axis_z", 0, numWrap],
["rot_speed", 0, numWrap],
["meshURI", "meerkat:///test/multimtl.dae/original/0/multimtl.dae", stringWrap],
["scale", 1, numWrap],
["script_type", '', stringWrap],
["script_contents", '', stringWrap],
["solid_angle", 12, numWrap],
["physics", '', stringWrap]
];
def getCSVHeaderAsCSVString():
returner = '';
for s in DEFAULT_VAL_VECTOR:
returner += s[0] + ',';
return returner;
class Entity:
'''
An Entity object contains all information about how to
initialize an entity. This eventually gets output to a scene.db file.
'''
def __init__ (self, *args, **kwargs):
'''
@see DEFAULT_VAL_VECTOR for a list of argument names that we can use.
'''
for s in DEFAULT_VAL_VECTOR:
self.checkArgsLoadDefaults(s[0],kwargs,s[1],s[2]);
def checkArgsLoadDefaults(self,toCheckFor,toCheckIn,default,wrapFn):
toExec = 'self.' + toCheckFor + '=';
if (toCheckFor in toCheckIn):
toExec += wrapFn(toCheckIn[toCheckFor]);
else:
toExec += wrapFn(default);
exec (toExec);
'''
@return {String} returns a comma-separated string with all the
constructor info necessary for csvobjectfactory to construct an
    entity with specified characteristics.
'''
def getConstructorRow(self):
returner = '';
#iterate through default val vector to get order to write
#field names
for s in DEFAULT_VAL_VECTOR:
returner += str(self.getSelfData(s[0])) + ',';
return returner;
def getSelfData(self,fieldName):
exec ('returner = self.' + fieldName);
return returner;
#Only to test to ensure that this code is working correctly.
if __name__ == "__main__":
cInfo1= Entity(pos_x=-20,pos_y=5,pos_z=1.25, scale=20, meshURI='meerkat:///kittyvision/hedgehog.dae/optimized/0/hedgehog.dae');
print('\n\n');
print(getCSVHeaderAsCSVString());
print(cInfo1.getConstructorRow());
print('\n\n');
|
from django.apps import AppConfig
class PagesConfig(AppConfig):
name = 'glitter.pages'
label = 'glitter_pages'
verbose_name = 'Pages'
def ready(self):
super().ready()
from . import listeners # noqa
|
"""
Recursive Digit
We define super digit of an integer x using the following rules:
If x has only 1 digit, then its super digit is x.
Otherwise, the super digit of x is equal to the super digit of the sum of the digits of x.
For example, the super digit of x = 9875 will be calculated as:
super_digit(9875) 9+8+7+5 = 29
super_digit(29) 2 + 9 = 11
super_digit(11) 1 + 1 = 2
super_digit(2) = 2
You are given two numbers n and k. The number p is created by concatenating the string n k times. Continuing the above example where n = 9875, assume your value k=4. Your initial p = 9875 9875 9875 9875 (spaces added for clarity).
super_digit(p) = super_digit(9875987598759875)
5+7+8+9+5+7+8+9+5+7+8+9+5+7+8+9 = 116
super_digit(p) = super_digit(116)
1+1+6 = 8
super_digit(p) = super_digit(8)
All of the digits of p sum to 116. The digits of 116 sum to 8. 8 is only one digit, so it's the super digit.
Complete the super_digit() method. It must return the calculated super digit as an integer.
superDigit has the following parameter(s):
n: a string representation of an integer.
k: an integer, the times to concatenate n to make p.
Examples
super_digit("148", 3) ➞ 3
super_digit("123", 3) ➞ 9
super_digit("99999999999999999999999999", 104500) ➞ 9
"""
def super_digit(n, k):
    # sum the digits of n once, scale by k, then keep summing digits
    # until only a single digit remains
    total = sum(int(d) for d in n) * k
    while total >= 10:
        total = sum(int(d) for d in str(total))
    return total
#super_digit("123", 3) #, 9)
#super_digit("9875", 4) #, 8)
#super_digit("148", 3) #, 3)
#super_digit("111", 10) #, 3)
#super_digit("543", 100) #, 3)
super_digit("99999999999999999999999999", 104500) #➞ 9
|
"""The tests for the litejet component."""
import logging
from openpeerpower.components import light
from openpeerpower.components.light import ATTR_BRIGHTNESS
from openpeerpower.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON
from . import async_init_integration
_LOGGER = logging.getLogger(__name__)
ENTITY_LIGHT = "light.mock_load_1"
ENTITY_LIGHT_NUMBER = 1
ENTITY_OTHER_LIGHT = "light.mock_load_2"
ENTITY_OTHER_LIGHT_NUMBER = 2
async def test_on_brightness(opp, mock_litejet):
"""Test turning the light on with brightness."""
await async_init_integration(opp)
assert opp.states.get(ENTITY_LIGHT).state == "off"
assert opp.states.get(ENTITY_OTHER_LIGHT).state == "off"
assert not light.is_on(opp, ENTITY_LIGHT)
await opp.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_BRIGHTNESS: 102},
blocking=True,
)
mock_litejet.activate_load_at.assert_called_with(ENTITY_LIGHT_NUMBER, 39, 0)
async def test_on_off(opp, mock_litejet):
"""Test turning the light on and off."""
await async_init_integration(opp)
assert opp.states.get(ENTITY_LIGHT).state == "off"
assert opp.states.get(ENTITY_OTHER_LIGHT).state == "off"
assert not light.is_on(opp, ENTITY_LIGHT)
await opp.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_LIGHT},
blocking=True,
)
mock_litejet.activate_load.assert_called_with(ENTITY_LIGHT_NUMBER)
await opp.services.async_call(
light.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_LIGHT},
blocking=True,
)
mock_litejet.deactivate_load.assert_called_with(ENTITY_LIGHT_NUMBER)
async def test_activated_event(opp, mock_litejet):
"""Test handling an event from LiteJet."""
await async_init_integration(opp)
# Light 1
mock_litejet.get_load_level.return_value = 99
mock_litejet.get_load_level.reset_mock()
mock_litejet.load_activated_callbacks[ENTITY_LIGHT_NUMBER]()
await opp.async_block_till_done()
mock_litejet.get_load_level.assert_called_once_with(ENTITY_LIGHT_NUMBER)
assert light.is_on(opp, ENTITY_LIGHT)
assert not light.is_on(opp, ENTITY_OTHER_LIGHT)
assert opp.states.get(ENTITY_LIGHT).state == "on"
assert opp.states.get(ENTITY_OTHER_LIGHT).state == "off"
assert opp.states.get(ENTITY_LIGHT).attributes.get(ATTR_BRIGHTNESS) == 255
# Light 2
mock_litejet.get_load_level.return_value = 40
mock_litejet.get_load_level.reset_mock()
mock_litejet.load_activated_callbacks[ENTITY_OTHER_LIGHT_NUMBER]()
await opp.async_block_till_done()
mock_litejet.get_load_level.assert_called_once_with(ENTITY_OTHER_LIGHT_NUMBER)
assert light.is_on(opp, ENTITY_LIGHT)
assert light.is_on(opp, ENTITY_OTHER_LIGHT)
assert opp.states.get(ENTITY_LIGHT).state == "on"
assert opp.states.get(ENTITY_OTHER_LIGHT).state == "on"
assert (
int(opp.states.get(ENTITY_OTHER_LIGHT).attributes.get(ATTR_BRIGHTNESS)) == 103
)
async def test_deactivated_event(opp, mock_litejet):
"""Test handling an event from LiteJet."""
await async_init_integration(opp)
# Initial state is on.
mock_litejet.get_load_level.return_value = 99
mock_litejet.load_activated_callbacks[ENTITY_OTHER_LIGHT_NUMBER]()
await opp.async_block_till_done()
assert light.is_on(opp, ENTITY_OTHER_LIGHT)
# Event indicates it is off now.
mock_litejet.get_load_level.reset_mock()
mock_litejet.get_load_level.return_value = 0
mock_litejet.load_deactivated_callbacks[ENTITY_OTHER_LIGHT_NUMBER]()
await opp.async_block_till_done()
# (Requesting the level is not strictly needed with a deactivated
# event but the implementation happens to do it. This could be
# changed to an assert_not_called in the future.)
mock_litejet.get_load_level.assert_called_with(ENTITY_OTHER_LIGHT_NUMBER)
assert not light.is_on(opp, ENTITY_OTHER_LIGHT)
assert not light.is_on(opp, ENTITY_LIGHT)
assert opp.states.get(ENTITY_LIGHT).state == "off"
assert opp.states.get(ENTITY_OTHER_LIGHT).state == "off"
|
import random
import threading
import time
try:
import Queue
except:
import queue as Queue
class producer:
def __init__(self):
self.food = ["ham", "soup", "salad"]
self.nexttime = 0
    def run(self):
        global q
        # time.clock() was removed in Python 3.8; track elapsed wall-clock time
        # with perf_counter and run for roughly 10 seconds.
        start = time.perf_counter()
        while time.perf_counter() - start < 10:
            if self.nexttime < time.perf_counter() - start:
                f = self.food[random.randrange(len(self.food))]
                q.put(f)
                print("Adding " + f)
                self.nexttime += random.random()
class consumer:
def __init__(self):
self.nexttime = 0
    def run(self):
        global q
        start = time.perf_counter()
        while time.perf_counter() - start < 10:
            if self.nexttime < time.perf_counter() - start and not q.empty():
                f = q.get()
                print("Removing " + f)
                self.nexttime += random.random() * 2
if __name__ == '__main__':
q = Queue.Queue(10)
p = producer()
c = consumer()
pt = threading.Thread(target=p.run, args=())
ct = threading.Thread(target=c.run, args=())
pt.start()
ct.start()
|
"""
JSON and meta-data blocks, primarily used for SEO purposes.
"""
import json
from django import forms
from django.utils.translation import ugettext_lazy as _
from wagtail.core import blocks
from coderedcms import schema
from .base_blocks import MultiSelectBlock
class OpenHoursValue(blocks.StructValue):
"""
Renders selected days as a json list.
"""
@property
def days_json(self):
"""
Custom property to return days as json list instead of default python list.
"""
return json.dumps(self['days'])
class OpenHoursBlock(blocks.StructBlock):
"""
Holds day and time combination for business open hours.
"""
days = MultiSelectBlock(
required=True,
verbose_name=_('Days'),
help_text=_('For late night hours past 23:59, define each day in a separate block.'),
widget=forms.CheckboxSelectMultiple,
choices=(
('Monday', _('Monday')),
('Tuesday', _('Tuesday')),
('Wednesday', _('Wednesday')),
('Thursday', _('Thursday')),
('Friday', _('Friday')),
('Saturday', _('Saturday')),
('Sunday', _('Sunday')),
))
start_time = blocks.TimeBlock(verbose_name=_('Opening time'))
end_time = blocks.TimeBlock(verbose_name=_('Closing time'))
class Meta:
template = 'coderedcms/blocks/struct_data_hours.json'
label = _('Open Hours')
value_class = OpenHoursValue
class StructuredDataActionBlock(blocks.StructBlock):
"""
Action object from schema.org
"""
action_type = blocks.ChoiceBlock(
verbose_name=_('Action Type'),
required=True,
choices=schema.SCHEMA_ACTION_CHOICES
)
target = blocks.URLBlock(verbose_name=_('Target URL'))
language = blocks.CharBlock(
verbose_name=_('Language'),
help_text=_('If the action is offered in multiple languages, create separate actions for each language.'),
default='en-US'
)
result_type = blocks.ChoiceBlock(
required=False,
verbose_name=_('Result Type'),
help_text=_('Leave blank for OrderAction'),
choices=schema.SCHEMA_RESULT_CHOICES
)
result_name = blocks.CharBlock(
required=False,
verbose_name=_('Result Name'),
help_text=_('Example: "Reserve a table", "Book an appointment", etc.')
)
extra_json = blocks.RawHTMLBlock(
required=False,
verbose_name=_('Additional action markup'),
classname='monospace',
help_text=_('Additional JSON-LD inserted into the Action dictionary. Must be properties of https://schema.org/Action.')
)
class Meta:
template = 'coderedcms/blocks/struct_data_action.json'
label = _('Action')
|
from argparse import (
ArgumentParser,
Namespace,
_SubParsersAction,
)
import logging
import pathlib
import rlp
from eth_utils import ValidationError
from eth.abc import ChainAPI
from eth.db.atomic import AtomicDB
from eth.db.backends.level import LevelDB
from eth.exceptions import HeaderNotFound
from eth.vm.forks.frontier.blocks import FrontierBlock
from eth.vm.interrupt import EVMMissingData
from trinity.config import (
Eth1AppConfig,
TrinityConfig,
)
from trinity.extensibility import Application
from trinity.initialization import (
ensure_eth1_dirs,
initialize_database,
)
from .rlp_decode import decode_all
def get_chain(trinity_config: TrinityConfig) -> ChainAPI:
app_config = trinity_config.get_app_config(Eth1AppConfig)
ensure_eth1_dirs(app_config)
base_db = LevelDB(db_path=app_config.database_dir)
chain_config = app_config.get_chain_config()
chain = chain_config.full_chain_class(AtomicDB(base_db))
initialize_database(chain_config, chain.chaindb, base_db)
return chain
class ImportBlockComponent(Application):
"""
    Import blocks from an RLP encoded file.
"""
logger = logging.getLogger('trinity.components.BlockImport')
@classmethod
def configure_parser(cls,
arg_parser: ArgumentParser,
subparser: _SubParsersAction) -> None:
import_parser = subparser.add_parser(
'import',
help='Import blocks from a file (RLP encoded)',
)
import_parser.add_argument(
'file_path',
type=pathlib.Path,
help='Specify the file to import from'
)
import_parser.set_defaults(func=cls.run_import)
@classmethod
def run_import(cls, args: Namespace, trinity_config: TrinityConfig) -> None:
with open(args.file_path, 'rb') as import_file:
# This won't handle large files.
# TODO: Make this stream based: https://github.com/ethereum/trinity/issues/1282
file_bytes = import_file.read()
blocks = decode_all(file_bytes, sedes=FrontierBlock)
cls.logger.info("Importing %s blocks", len(blocks))
chain = get_chain(trinity_config)
for block in blocks:
try:
chain.import_block(block)
except (EVMMissingData, ValidationError) as exc:
cls.logger.error(exc)
cls.logger.error("Import failed")
else:
cls.logger.info("Successfully imported %s", block)
class ExportBlockComponent(Application):
"""
Export blocks to an RLP encoded file.
"""
logger = logging.getLogger('trinity.components.BlockExport')
@classmethod
def configure_parser(cls,
arg_parser: ArgumentParser,
subparser: _SubParsersAction) -> None:
export_parser = subparser.add_parser(
'export',
help='Export blocks to a file (RLP encoded)',
)
export_parser.add_argument(
'file_path',
type=pathlib.Path,
help='Specify the file to export to'
)
export_parser.add_argument(
'block_number',
type=int,
help='Specify the block number to be exported'
)
export_parser.add_argument(
"--append",
action="store_true",
help="Disable peer discovery",
)
export_parser.add_argument(
"--overwrite",
action="store_true",
help="Disable peer discovery",
)
export_parser.set_defaults(func=cls.run_export)
@classmethod
def run_export(cls, args: Namespace, trinity_config: TrinityConfig) -> None:
chain = get_chain(trinity_config)
try:
block = chain.get_canonical_block_by_number(args.block_number)
except HeaderNotFound:
cls.logger.error("Block number %s does not exist in the database", args.block_number)
return
cls.logger.info("Exporting %s", block)
block_bytes = rlp.encode(block)
if args.file_path.exists() and not (args.append or args.overwrite):
cls.logger.error(
"%s does exist. Must use `--append` or `--overwrite` to proceed.", args.file_path
)
return
parent_dir = args.file_path.parent
if not parent_dir.exists():
parent_dir.mkdir(parents=True)
write_mode = 'w+b' if not args.append else 'a+b'
cls.logger.info("Writing %s bytes to %s", len(block_bytes), args.file_path)
with open(args.file_path, write_mode) as export_file:
export_file.write(block_bytes)
cls.logger.info("Successfully exported %s", block)
|
__author__ = 'Andreas M. Wahl'
from autosim.SimulationAutomator import SimulationAutomator
from configurator.ranges.RangeGenerator import RangeGenerator
from configurator.visualization.Plotter import Plotter
from configurator.persistence.PersistenceManager import PersistenceManager
import yaml
from configurator.util.util import sanitize_results, sanitize_dict
class QuickTest:
def __init__(self, simulations_file, config_file):
        self.config = yaml.safe_load(open(config_file))
        self.generator = RangeGenerator(yaml.safe_load(open(simulations_file)))
self.automator = SimulationAutomator(self.config["SimulationAutomator"])
self.persistence = PersistenceManager(self.config["Persistence"]["database"])
self.plotter = Plotter(self.persistence)
def setup(self):
self.persistence.start_db()
def run_simulations(self, tag):
simulation_scenarios = self.generator.generate_memory()
#print len(simulation_scenarios)
automator_input = []
for scenario in simulation_scenarios:
automator_entry = {}
automator_entry["parameters"] = scenario
automator_entry["strategies"] = None
automator_input.append(automator_entry)
results, fails = self.automator.run(automator_input, tag)
self.persistence.initialise_database_client()
sanitized_results = sanitize_results(results)
#print sanitized_results
self.persistence.persist_to_database(self.config["Persistence"]["database"]["collection_name"], sanitized_results)
return results, fails
def plot2d(self, tag, x_param, y_param, filter, output_filename, title="", x_label="", y_label=""):
self.persistence.initialise_database_client()
self.plotter.plot2d_from_database(tag, x_param, y_param, filter, output_filename, title, x_label, y_label)
def plot3d(self, tag, x_param, y_param, z_param, filter, output_filename, title="", x_label="", y_label="", z_label=""):
self.persistence.initialise_database_client()
self.plotter.plot3d_from_database(tag, x_param, y_param, z_param, filter, output_filename, title, x_label, y_label, z_label)
def teardown(self):
self.persistence.shutdown_db()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 6 11:35:30 2020
@author: christian
"""
import speaknspell
import matplotlib.pyplot as plt
import numpy as np
from scipy.io.wavfile import read
plt.style.use('paper')
data = read("./Voice Recordings/Short _a_.wav")
y = data[1][:, 0]
x = np.arange(0, len(y), 1)
# plt.figure(0)
# plt.plot(x, y)
test = speaknspell.SnS(np.array([x, y]).T, "Short _a_"[:-1], 15)
test.synth_signal()
test.play_sound()
#filenames = ["Short _a_", "Short _e_", "Short _i_", "Short _o_",
# "Short _u_", "Long _a_", "Long _e_", "Long _i_",
# "Long _o_", "Long _u_", "_Ah_"]
#for filename in filenames:
# data = read("./Voice Recordings/" + filename + ".wav")
# y = data[1][:, 0]
# x = np.arange(0, len(y), 1)
## plt.figure(0)
## plt.plot(x, y)
# test = speaknspell.SnS(np.array([x, y]).T, filename[:-1], 15)
# test.synth_signal()
# test.play_sound()
#fnames = ['Long_a_15', 'Long_e_15', 'Long_i_15',
# 'Long_o_15', 'Long_u_15']
#compname = "AEIOU"
#test.synthspeech(fnames, compname)
|
from django.contrib.auth.models import User
from django.db import models
from Cinema.models import Movie
class Comment(models.Model):
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    movie = models.ForeignKey(Movie, on_delete=models.CASCADE)
    text = models.CharField(max_length=200)
    date_created = models.DateTimeField(auto_now_add=True)
class Rating(models.Model):
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    movie = models.ForeignKey(Movie, on_delete=models.CASCADE)
    score = models.FloatField(default=0.0)
|
tot = barato = contamil = contador = 0
baratonome = ''
while True:
nome = str(input('Informe o nome do produto: '))
preco = float(input('Informe o preço do produto: R$'))
contador += 1
if contador == 1 or preco < barato:
barato = preco
baratonome = nome
tot += preco
if preco > 1000:
contamil += 1
    esc = str(input('Deseja continuar: [S/N] ')).strip().upper()[0]
    print('=-' * 20)
    while esc not in 'SN':
        esc = str(input('OPÇÃO INVÁLIDA! Deseja continuar: [S/N] ')).strip().upper()[0]
    if esc == 'N':
        break
print(f'O total gasto na compra foi de: R${tot:.2f}')
print(f'{contamil} produtos custaram mais de R$1000.00.')
print(f'O produto mais barato foi: {baratonome}, com o valor de R${barato:.2f}.')
|
import pandas as pd
import os
from upload_dados import *
import seaborn as sns
import matplotlib.pyplot as plt
os.system('cls')
# 3. Are there correlations between a listing's characteristics and its revenue?
#    a. Which ones? Explain.
# filter the data down to listings that were actually booked
data_df = data_df.loc[data_df['booked_on'] != 'blank']
# group all the data by listing, summing the values to get total revenue
billings = data_df[['ad_name','number_of_bedrooms','number_of_bathrooms','star_rating','is_superhost','price_string','number_of_reviews']].groupby('ad_name').sum()
print(billings.sort_values(by='price_string', ascending=False))
# compute the correlation matrix for analysis
print('----- Correlação dos dados -----')
print(billings.corr())
sns.heatmap(billings.corr(), annot=True, vmin=-1, vmax=1, cmap='coolwarm')
plt.show()
print('--------------------------------')
print('Podemos ver que o faturamento está correlacionado ao número de quartos e número de banheiros, \
e podemos dizer que a nota do anuncio também tem uma certa relevância na análise.')
print('--------------------------------')
|
"""
* Copyright 2009 Mark Renouf
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
"""
from pyjamas.media.Media import Media
from pyjamas import DOM
"""*
* An HTML5 AUDIO element
"""
class Audio(Media):
def __init__(self, src=None, **kwargs):
self.setElement(DOM.createElement("audio"))
if src:
self.setSrc(src)
Media.__init__(self, **kwargs)
|
"""
models/one_d.py
Author: Ankit Gupta
Implementations of the core DenseNet model
This module contains helper functions that define a DenseNet computational graph in Keras.
Note that these functions are not immediately usable for classification, as the outputs
are not softmaxed, and the functions have not been wrapped in keras.models.Model objects.
"""
from tensorflow.keras.layers import Conv1D, BatchNormalization, Activation, MaxPooling1D, GlobalAveragePooling1D
from densenet.blocks.one_d import dense_block, transition_block, squeeze_excite_block
def DenseNet(
k,
block_sizes,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling,
se=False):
def f(x):
x = Conv1D(
initial_filters,
initial_conv_width,
strides=initial_stride,
padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = MaxPooling1D(
pool_size=initial_pool_width,
strides=initial_pool_stride,
padding="same")(x)
if se:
x = squeeze_excite_block(x)
# Add all but the last dense block
for block_size in block_sizes[:-1]:
x = dense_block(
k,
block_size,
conv_kernel_width,
bottleneck_size,
se=se)(x)
x = transition_block(
pool_size=transition_pool_size,
stride=transition_pool_stride,
theta=theta,
se=se)(x)
# Add the last dense block
final_block_size = block_sizes[-1]
x = dense_block(
k,
final_block_size,
conv_kernel_width,
bottleneck_size)(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
if use_global_pooling:
x = GlobalAveragePooling1D()(x)
return x
return f
def DenseNet121(
k,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling):
block_sizes = [6, 12, 24, 16]
return DenseNet(
k,
block_sizes,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling)
def DenseNet169(
k,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling):
block_sizes = [6, 12, 32, 32]
return DenseNet(
k,
block_sizes,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling)
def DenseNet201(
k,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling):
block_sizes = [6, 12, 48, 32]
return DenseNet(
k,
block_sizes,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling)
def DenseNet264(
k,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling):
block_sizes = [6, 12, 64, 48]
return DenseNet(
k,
block_sizes,
conv_kernel_width,
bottleneck_size,
transition_pool_size,
transition_pool_stride,
theta,
initial_conv_width,
initial_stride,
initial_filters,
initial_pool_width,
initial_pool_stride,
use_global_pooling)
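# Minimal usage sketch (not part of the original module): as the module docstring
# notes, these builders return un-softmaxed feature tensors and are not wrapped in
# keras.models.Model, so the caller adds the classification head. Every numeric
# hyperparameter below is illustrative only, not a value prescribed by this module.
if __name__ == "__main__":
    from tensorflow.keras.layers import Dense, Input
    from tensorflow.keras.models import Model
    inputs = Input(shape=(1024, 1))  # assumed: 1-D signal with a single channel
    features = DenseNet121(
        k=32,
        conv_kernel_width=3,
        bottleneck_size=4,
        transition_pool_size=2,
        transition_pool_stride=2,
        theta=0.5,
        initial_conv_width=7,
        initial_stride=2,
        initial_filters=64,
        initial_pool_width=3,
        initial_pool_stride=2,
        use_global_pooling=True)(inputs)
    outputs = Dense(10, activation="softmax")(features)  # hypothetical 10-class head
    model = Model(inputs=inputs, outputs=outputs)
    model.summary()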
|
from django.shortcuts import redirect
from django.http import HttpResponse
from django.template import loader
def index(request):
if request.user.is_authenticated:
user = request.user
context = {
'user': user,
}
template = loader.get_template('patienthistory/home.html')
return HttpResponse(template.render(context, request))
else:
return redirect('login')
|
from sparksampling.core.job.base_job import BaseJob
from pyspark.sql import DataFrame
class BasicStatisticsJob(BaseJob):
def __init__(self, *args, **kwargs):
super(BasicStatisticsJob, self).__init__()
def _statistics(self, df: DataFrame, *args, **kwargs) -> dict:
self.logger.info("Generating description...")
return df.describe().toPandas().to_dict('records')
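# Illustrative result shape (an assumption about typical input, not original code):
# Spark's describe() yields one row per statistic (count, mean, stddev, min, max)
# with string-typed values, so for a numeric column "x" the returned records look
# like [{'summary': 'count', 'x': '3'}, {'summary': 'mean', 'x': '2.0'}, ...].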
|
# Copyright (c) 2020 the Eclipse BaSyx Authors
#
# This program and the accompanying materials are made available under the terms of the MIT License, available in
# the LICENSE file of this project.
#
# SPDX-License-Identifier: MIT
"""
Module for the creation of an example :class:`~aas.model.submodel.Submodel` template containing all kind of
:class:`SubmodelElements <aas.model.submodel.SubmodelElement>` where the kind is
always `TEMPLATE`.
"""
import logging
from ... import model
from ._helper import AASDataChecker
logger = logging.getLogger(__name__)
def create_example_submodel_template() -> model.Submodel:
"""
Creates an example :class:`~aas.model.submodel.Submodel` template containing all kind of
:class:`~aas.model.submodel.SubmodelElement` objects where the kind is always
`TEMPLATE`
:return: example submodel
"""
submodel_element_property = model.Property(
id_short='ExampleProperty',
value_type=model.datatypes.String,
value=None,
value_id=None, # TODO
category='CONSTANT',
description={'en-us': 'Example Property object',
'de': 'Beispiel Property Element'},
parent=None,
semantic_id=model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/Properties/ExampleProperty',
id_type=model.KeyType.IRI),)),
qualifier=None,
kind=model.ModelingKind.TEMPLATE)
submodel_element_multi_language_property = model.MultiLanguageProperty(
id_short='ExampleMultiLanguageProperty',
value=None,
value_id=None, # TODO
category='CONSTANT',
description={'en-us': 'Example MultiLanguageProperty object',
'de': 'Beispiel MulitLanguageProperty Element'},
parent=None,
semantic_id=model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/MultiLanguageProperties/'
'ExampleMultiLanguageProperty',
id_type=model.KeyType.IRI),)),
qualifier=None,
kind=model.ModelingKind.TEMPLATE)
submodel_element_range = model.Range(
id_short='ExampleRange',
value_type=model.datatypes.Int,
min=None,
max=100,
category='PARAMETER',
description={'en-us': 'Example Range object',
'de': 'Beispiel Range Element'},
parent=None,
semantic_id=model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/Ranges/ExampleRange',
id_type=model.KeyType.IRI),)),
qualifier=None,
kind=model.ModelingKind.TEMPLATE)
submodel_element_range_2 = model.Range(
id_short='ExampleRange2',
value_type=model.datatypes.Int,
min=0,
max=None,
category='PARAMETER',
description={'en-us': 'Example Range object',
'de': 'Beispiel Range Element'},
parent=None,
semantic_id=model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/Ranges/ExampleRange',
id_type=model.KeyType.IRI),)),
qualifier=None,
kind=model.ModelingKind.TEMPLATE)
submodel_element_blob = model.Blob(
id_short='ExampleBlob',
mime_type='application/pdf',
value=None,
category='PARAMETER',
description={'en-us': 'Example Blob object',
'de': 'Beispiel Blob Element'},
parent=None,
semantic_id=model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/Blobs/ExampleBlob',
id_type=model.KeyType.IRI),)),
qualifier=None,
kind=model.ModelingKind.TEMPLATE)
submodel_element_file = model.File(
id_short='ExampleFile',
mime_type='application/pdf',
value=None,
category='PARAMETER',
description={'en-us': 'Example File object',
'de': 'Beispiel File Element'},
parent=None,
semantic_id=model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/Files/ExampleFile',
id_type=model.KeyType.IRI),)),
qualifier=None,
kind=model.ModelingKind.TEMPLATE)
submodel_element_reference_element = model.ReferenceElement(
id_short='ExampleReferenceElement',
value=None,
category='PARAMETER',
description={'en-us': 'Example Reference Element object',
'de': 'Beispiel Reference Element Element'},
parent=None,
semantic_id=model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/ReferenceElements/ExampleReferenceElement',
id_type=model.KeyType.IRI),)),
qualifier=None,
kind=model.ModelingKind.TEMPLATE)
submodel_element_relationship_element = model.RelationshipElement(
id_short='ExampleRelationshipElement',
first=model.AASReference((model.Key(type_=model.KeyElements.PROPERTY,
local=True,
value='ExampleProperty',
id_type=model.KeyType.IDSHORT),),
model.Property),
second=model.AASReference((model.Key(type_=model.KeyElements.PROPERTY,
local=True,
value='ExampleProperty',
id_type=model.KeyType.IDSHORT),),
model.Property),
category='PARAMETER',
description={'en-us': 'Example RelationshipElement object',
'de': 'Beispiel RelationshipElement Element'},
parent=None,
semantic_id=model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/RelationshipElements/'
'ExampleRelationshipElement',
id_type=model.KeyType.IRI),)),
qualifier=None,
kind=model.ModelingKind.TEMPLATE)
submodel_element_annotated_relationship_element = model.AnnotatedRelationshipElement(
id_short='ExampleAnnotatedRelationshipElement',
first=model.AASReference((model.Key(type_=model.KeyElements.PROPERTY,
local=True,
value='ExampleProperty',
id_type=model.KeyType.IDSHORT),),
model.Property),
second=model.AASReference((model.Key(type_=model.KeyElements.PROPERTY,
local=True,
value='ExampleProperty',
id_type=model.KeyType.IDSHORT),),
model.Property),
annotation=None,
category='PARAMETER',
description={'en-us': 'Example AnnotatedRelationshipElement object',
'de': 'Beispiel AnnotatedRelationshipElement Element'},
parent=None,
semantic_id=model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/RelationshipElements/'
'ExampleAnnotatedRelationshipElement',
id_type=model.KeyType.IRI),)),
qualifier=None,
kind=model.ModelingKind.TEMPLATE)
submodel_element_operation_variable_input = model.OperationVariable(
value=submodel_element_property)
submodel_element_operation_variable_output = model.OperationVariable(
value=submodel_element_property)
submodel_element_operation_variable_in_output = model.OperationVariable(
value=submodel_element_property)
submodel_element_operation = model.Operation(
id_short='ExampleOperation',
input_variable=[submodel_element_operation_variable_input],
output_variable=[submodel_element_operation_variable_output],
in_output_variable=[submodel_element_operation_variable_in_output],
category='PARAMETER',
description={'en-us': 'Example Operation object',
'de': 'Beispiel Operation Element'},
parent=None,
semantic_id=model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/Operations/'
'ExampleOperation',
id_type=model.KeyType.IRI),)),
qualifier=None,
kind=model.ModelingKind.TEMPLATE)
submodel_element_capability = model.Capability(
id_short='ExampleCapability',
category='PARAMETER',
description={'en-us': 'Example Capability object',
'de': 'Beispiel Capability Element'},
parent=None,
semantic_id=model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/Capabilities/'
'ExampleCapability',
id_type=model.KeyType.IRI),)),
qualifier=None,
kind=model.ModelingKind.TEMPLATE)
submodel_element_basic_event = model.BasicEvent(
id_short='ExampleBasicEvent',
observed=model.AASReference((model.Key(type_=model.KeyElements.PROPERTY,
local=True,
value='ExampleProperty',
id_type=model.KeyType.IDSHORT),),
model.Property),
category='PARAMETER',
description={'en-us': 'Example BasicEvent object',
'de': 'Beispiel BasicEvent Element'},
parent=None,
semantic_id=model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/Events/'
'ExampleBasicEvent',
id_type=model.KeyType.IRI),)),
qualifier=None,
kind=model.ModelingKind.TEMPLATE)
submodel_element_submodel_element_collection_ordered = model.SubmodelElementCollectionOrdered(
id_short='ExampleSubmodelCollectionOrdered',
value=(submodel_element_property,
submodel_element_multi_language_property,
submodel_element_range,
submodel_element_range_2),
category='PARAMETER',
description={'en-us': 'Example SubmodelElementCollectionOrdered object',
'de': 'Beispiel SubmodelElementCollectionOrdered Element'},
parent=None,
semantic_id=model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/SubmodelElementCollections/'
'ExampleSubmodelElementCollectionOrdered',
id_type=model.KeyType.IRI),)),
qualifier=None,
kind=model.ModelingKind.TEMPLATE)
submodel_element_submodel_element_collection_unordered = model.SubmodelElementCollectionUnordered(
id_short='ExampleSubmodelCollectionUnordered',
value=(submodel_element_blob,
submodel_element_file,
submodel_element_reference_element),
category='PARAMETER',
description={'en-us': 'Example SubmodelElementCollectionUnordered object',
'de': 'Beispiel SubmodelElementCollectionUnordered Element'},
parent=None,
semantic_id=model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/SubmodelElementCollections/'
'ExampleSubmodelElementCollectionUnordered',
id_type=model.KeyType.IRI),)),
qualifier=None,
kind=model.ModelingKind.TEMPLATE)
submodel_element_submodel_element_collection_unordered_2 = model.SubmodelElementCollectionUnordered(
id_short='ExampleSubmodelCollectionUnordered2',
value=(),
category='PARAMETER',
description={'en-us': 'Example SubmodelElementCollectionUnordered object',
'de': 'Beispiel SubmodelElementCollectionUnordered Element'},
parent=None,
semantic_id=model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/SubmodelElementCollections/'
'ExampleSubmodelElementCollectionUnordered',
id_type=model.KeyType.IRI),)),
qualifier=None,
kind=model.ModelingKind.TEMPLATE)
submodel = model.Submodel(
identification=model.Identifier(id_='https://acplt.org/Test_Submodel_Template',
id_type=model.IdentifierType.IRI),
submodel_element=(submodel_element_relationship_element,
submodel_element_annotated_relationship_element,
submodel_element_operation,
submodel_element_capability,
submodel_element_basic_event,
submodel_element_submodel_element_collection_ordered,
submodel_element_submodel_element_collection_unordered,
submodel_element_submodel_element_collection_unordered_2),
id_short='TestSubmodel',
category=None,
description={'en-us': 'An example submodel for the test application',
'de': 'Ein Beispiel-Teilmodell für eine Test-Anwendung'},
parent=None,
administration=model.AdministrativeInformation(version='0.9',
revision='0'),
semantic_id=model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/SubmodelTemplates/'
'ExampleSubmodel',
id_type=model.KeyType.IRI),)),
qualifier=None,
kind=model.ModelingKind.TEMPLATE)
return submodel
##############################################################################
# functions for checking whether a given object is the same as the example      #
##############################################################################
def check_example_submodel(checker: AASDataChecker, submodel: model.Submodel) -> None:
expected_submodel = create_example_submodel_template()
checker.check_submodel_equal(submodel, expected_submodel)
def check_full_example(checker: AASDataChecker, obj_store: model.DictObjectStore) -> None:
example_data: model.DictObjectStore[model.Identifiable] = model.DictObjectStore()
example_data.add(create_example_submodel_template())
checker.check_object_store(example_data, obj_store)
|
from selenium import webdriver
from selenium.webdriver.support.select import Select
from bs4 import BeautifulSoup
import re
import Course
#Functions
def remove_html_tags(string):
return re.sub(r'<[^<]+?>', '', string)
# Place asterisk in course name and convert to upper
def prep_course(course_name):
g = re.match(r"(\D+)(\d+)", course_name, re.IGNORECASE)
ret = g.groups()[0].upper() + "*" + g.groups()[1]
return ret
# get the schedule for a course
def get_schedule(term, level, course_name, location):
# Setup return variable
ret = ""
# prep course_name
course_name = prep_course(course_name)
# Parse subject
subject = course_name.split("*")[0]
#Setup browser
driver = './env/bin/geckodriver'
options = webdriver.FirefoxOptions()
options.add_argument('-headless')
browser = webdriver.Firefox(executable_path=driver, options=options)
browser.get('http://es.unb.ca/apps/timetable/')
# Select website parameters
select_term = Select(browser.find_element_by_id('term'))
select_term.select_by_value(term)
select_level = Select(browser.find_element_by_id('level'))
select_level.select_by_value(level)
select_subject = Select(browser.find_element_by_id('subject'))
select_subject.select_by_value(subject)
select_subject = Select(browser.find_element_by_id('location'))
select_subject.select_by_value(location)
submit = browser.find_element_by_xpath('/html/body/div[1]/div/div/div/div/div/div[4]/form/input[4]')
submit.click()
# Get page source
source = browser.page_source
#Save html to file
#with open("page.html", "w") as f:
# f.write(source)
browser.quit()
# Get main table
soup = BeautifulSoup(source, 'html.parser')
table = soup.find('table', id='course-list')
# Find what index the CourseID is under
headings = table.find('thead').find_all('th')
    for i in range(len(headings)-1):
        # compare against the header text, not the bs4 Tag object
        heading_text = headings[i].get_text()
        if 'ID' in heading_text:
            indID = i
        elif 'Course' in heading_text:
            indCourse = i
        elif 'Section' in heading_text:
            indSection = i
        elif 'Title' in heading_text:
            indTitle = i
        elif 'Instructor' in heading_text:
            indInstructor = i
        elif 'Days' in heading_text:
            indDays = i
        elif 'Times' in heading_text:
            indTimes = i
        elif 'Room' in heading_text:
            indRoom = i
# Get all rows
rows = table.find('tbody').find_all('tr')
# Find the row containing the course_name specified
coursefound = False
courseduplicate = False
course_list = list()
for i in range(len(rows)-1):
        # if this course is offered multiple times
if coursefound and course_name in rows[i].get_text():
courseduplicate = True
elif course_name in rows[i].get_text() and not coursefound:
ret += '\nTimes for ' + course_name + ':\n'
coursefound = True
cols = rows[i].find_all('td')
# Place days and times into lists
days = str(cols[indDays]).split("<br/>")
times = str(cols[indTimes]).split("<br/>")
# remove html tags
for index in range(len(days)):
days[index] = remove_html_tags(days[index])
for index in range(len(times)):
times[index] = remove_html_tags(times[index])
# Place days and times in list
sessions = []
for index in range(len(days)):
sessions.append([days[index], times[index]])
# Create course
course_list.append(Course.Course(course_name))
# Print class time slots
ret += 'Lecture'
for session in sessions:
ret += '\t' + session[0]
ret += '\t' + session[1] + '\n'
course_list[len(course_list)-1].add_time('Lecture', session[0], session[1])
# Print following time slots
# Do while the current line has a time slot within it:
j=i
checkNext = True
while checkNext:
checkNext = False
# If the row contains a time slot BUT not a courseCode then read the row
if re.search(r'\d\d:\d\d..-\d\d:\d\d..', rows[j+1].get_text()) and not course_name in rows[j+1].get_text():
# This row should be printed and the next should be checked
checkNext = True
# Column indices
secCols = rows[j+1].find_all('td')
# Place days and times into lists
secdays = str(secCols[2]).split("<br/>")
sectimes = str(secCols[3]).split("<br/>")
# remove html tags
for index in range(len(secdays)):
secdays[index] = remove_html_tags(secdays[index])
for index in range(len(sectimes)):
sectimes[index] = remove_html_tags(sectimes[index])
# Place days and times in list
sessions = []
for index in range(len(secdays)):
sessions.append([secCols[0].get_text(), secdays[index], sectimes[index]])
for session in sessions:
ret += session[0] + '\t'
ret += session[1] + '\t'
ret += session[2] + '\n'
course_list[len(course_list)-1].add_time(session[0], session[1], session[2])
j = j + 1
print(course_list[len(course_list)-1].to_string())
if courseduplicate:
ret += "This course is a duplicate"
return ret
# #Input courseIDs
# input_line = input("Enter course IDs: ")
# # Separate input by each course
# courses = input_line.split()
# # Place asterisk between each course_name
# for index in range(len(courses)):
# courses[index] = prep_course(courses[index])
# # Testing loop
# for course in courses:
# get_schedule('2019/FA','UG', course, 'FR')
|
from selenium.webdriver import Firefox
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import (
url_changes
)
url = 'https://selenium.dunossauro.live/aula_10_c.html'
browser = Firefox()
browser.get(url)
wdw = WebDriverWait(browser, 10)
link = browser.find_element_by_css_selector('.body_b a')
link.click()
wdw.until(
url_changes(url),
'URL não mudou'
)
print(url, browser.current_url)
|
# -*- coding: utf8 -*-
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import absolute_import, division, print_function
import logging
from .base_swfilter import BaseSWFilter
class NormSWFilter(BaseSWFilter):
"""
    Sliding-window filter that rescales each window's first value to [0, 1]
    using the window's min and max (min-max normalization).
"""
__logger = logging.getLogger(__name__)
class Options(object):
"""
Initial config for filter-options.
"""
min_size = 2
strict_windows = False
def aggregate_window_item(self, window, **kwargs):
"""
        :param Iterable window: current sliding window of values.
        :param kwargs: unused extra options.
        :return: the first window value rescaled to [0, 1] by the window's min and max.
"""
item_list = list(window)
value = item_list[0]
max_item = max(item_list)
min_item = min(item_list)
# noinspection PyUnresolvedReferences
rng = max_item - min_item
new_value = (value - min_item) / rng
# print (value, new_value, max_item, min_item, item_list)
return new_value
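# Worked example (illustrative only): for a window [4, 2, 10] the aggregated value
# is the first item rescaled by the window's extremes: (4 - 2) / (10 - 2) = 0.25.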
|
# SPDX-License-Identifier: Apache-2.0
from onnx import onnx_pb as onnx_proto
from ...common._registration import register_converter, register_shape_calculator
from ...common.utils import check_input_and_output_numbers
from ...common.data_types import *
def convert_sparkml_vector_indexer(scope, operator, container):
feature_count = operator.raw_operator.numFeatures
category_map = operator.raw_operator.categoryMaps
split_output_names = [ scope.get_unique_variable_name('split_tensor_%d' % i) for i in range(0, feature_count)]
if feature_count > 1:
container.add_node('Split', operator.inputs[0].full_name, split_output_names,
name=scope.get_unique_operator_name('Split'),
op_version=2,
axis=1,
split=[1]*feature_count)
else:
split_output_names = operator.input_full_names
concat_inputs = split_output_names.copy()
for i in category_map.keys():
converted_output = scope.get_unique_variable_name('converted_tensor_%d' % i)
container.add_node('Cast', split_output_names[i], converted_output,
name=scope.get_unique_operator_name('Cast'),
op_version=9,
to=onnx_proto.TensorProto.STRING)
attrs = {
'name': scope.get_unique_operator_name('LabelEncoder'),
'classes_strings': ['{0:g}'.format(c) for c in category_map[i].keys()],
'default_string': '__unknown__'
}
encoded_output_name = scope.get_unique_variable_name('indexed_tensor_%d' % i)
container.add_node('LabelEncoder', converted_output, encoded_output_name,
op_domain='ai.onnx.ml',
**attrs)
converted_float_output = scope.get_unique_variable_name('converted_float_tensor_%d' % i)
if feature_count == 1:
converted_float_output = operator.output_full_names[0]
container.add_node('Cast', encoded_output_name, converted_float_output,
name=scope.get_unique_operator_name('Cast'),
op_version=9,
to=onnx_proto.TensorProto.FLOAT)
concat_inputs[i] = converted_float_output
# add the final Concat
if feature_count > 1:
container.add_node('Concat', concat_inputs, operator.output_full_names[0],
name=scope.get_unique_operator_name('Concat'),
op_version=4,
axis=1)
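# Summary of the graph emitted above: the input vector is Split column-wise, each
# categorical column (a key of categoryMaps) is Cast to string, mapped through a
# LabelEncoder built from that column's category values, Cast back to float, and
# all columns (categorical and pass-through) are re-assembled with Concat. The
# Split/Concat pair is skipped when numFeatures == 1.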
register_converter('pyspark.ml.feature.VectorIndexerModel', convert_sparkml_vector_indexer)
def calculate_vector_indexer_shapes(operator):
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
N = operator.inputs[0].type.shape[0]
operator.outputs[0].type = FloatTensorType([N, operator.raw_operator.numFeatures])
register_shape_calculator('pyspark.ml.feature.VectorIndexerModel', calculate_vector_indexer_shapes)
|
"""
All constants, imports and functions.
by Bobobert
"""
### Imports
import matplotlib.pyplot as plt
import numpy as np
from abc import ABC
from math import ceil, floor
FLOAT_DEFT = np.float32
UINT_DEFT = np.uint8
INT_DEFT = np.int32
|
from typing import *
import datetime
import numpy
import pandas
import pandas._libs.tslibs.offsets
import pandas._libs.tslibs.timedeltas
import pandas._libs.tslibs.timestamps
import pandas.core.indexes.datetimes
import pytz
@overload
def date_range(start: Literal["2000-01-01"], periods: int):
"""
usage.xarray: 27
"""
...
@overload
def date_range(start: Literal["1999-01-05"], periods: int):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["2000-02-01"], periods: int):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["2010-01-01"], periods: int, freq: Literal["1D"]):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["2000-02-01"], periods: int, freq: Literal["A"]):
"""
usage.xarray: 2
"""
...
@overload
def date_range(start: Literal["2000-02-01"], periods: int, freq: Literal["M"]):
"""
usage.xarray: 2
"""
...
@overload
def date_range(start: Literal["2000-02-01"], periods: int, freq: Literal["D"]):
"""
usage.xarray: 2
"""
...
@overload
def date_range(start: Literal["2000"], periods: int):
"""
usage.dask: 1
usage.xarray: 5
"""
...
@overload
def date_range(
start: Literal["2000-01-02T01:03:51"], periods: int, freq: Literal["1777S"]
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["2000-01-01T12:07:01"], periods: int, freq: Literal["8003D"]
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["2000-01-01T12:07:01"], periods: int, freq: Literal["6H"]
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["2000-01-01T12:07:01"], periods: int, freq: Literal["3D"]
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["2000-01-01T12:07:01"], periods: int, freq: Literal["11D"]
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["2000-01-01T12:07:01"], periods: int, freq: Literal["3MS"]
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["2000-01-01T12:07:01"], periods: int, freq: Literal["7M"]
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["2000-01-01T12:07:01"], periods: int, freq: Literal["43QS-AUG"]
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["2000-01-01T12:07:01"], periods: int, freq: Literal["11Q-JUN"]
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["2000-01-01T12:07:01"], periods: int, freq: Literal["3AS-MAR"]
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["2000-01-01T12:07:01"], periods: int, freq: Literal["7A-MAY"]
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["2004-01-01T12:07:01"], periods: int, freq: Literal["3D"]
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["1970-01-01"], periods: int, freq: Literal["h"]):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["2000-01-01"], periods: int, freq: Literal["h"]):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["2001-04-01-00"], end: Literal["2001-04-30-23"], freq: Literal["H"]
):
"""
usage.xarray: 2
"""
...
@overload
def date_range(
start: Literal["2001-04-01"], end: Literal["2001-04-05"], freq: Literal["D"]
):
"""
usage.xarray: 2
"""
...
@overload
def date_range(
start: Literal["2001-05-01"], end: Literal["2001-05-05"], freq: Literal["D"]
):
"""
usage.xarray: 2
"""
...
@overload
def date_range(start: Literal["2000-01-16"], periods: int):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["2000-01-01"], periods: int, freq: Literal["MS"]):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["2100"], periods: int):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["2000-01-01"], periods: int, freq: Literal["D"]):
"""
usage.xarray: 2
"""
...
@overload
def date_range(start: Literal["2000-01-01"], periods: int, freq: Literal["6H"]):
"""
usage.xarray: 20
"""
...
@overload
def date_range(start: Literal["2000"], periods: int, freq: Literal["D"]):
"""
usage.dask: 1
usage.xarray: 2
"""
...
@overload
def date_range(start: Literal["2000-01-01"], periods: int, freq: Literal["1D"]):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["2007-02-01"], end: Literal["2007-03-01"], freq: Literal["D"]
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["2016-01-01"], end: Literal["2016-03-31"], freq: Literal["1D"]
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["20130101"], periods: int):
"""
usage.dask: 2
usage.xarray: 5
"""
...
@overload
def date_range(start: Literal["2000"], periods: int, freq: Literal["2MS"]):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["2000-01-01"], periods: int, freq: Literal["H"]):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["1/1/2011"], periods: int, freq: Literal["H"]):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["2000-01-01"], periods: int, freq: Literal["M"]):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["20100101"], periods: int):
"""
usage.dask: 2
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["2000-01-01"], periods: int, freq: Literal["3H"]):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["20130101"], periods: int, tz: Literal["US/Eastern"]):
"""
usage.dask: 2
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["15/12/1999"], periods: int):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["2000"], periods: int, freq: Literal["7D"]):
"""
usage.xarray: 2
"""
...
@overload
def date_range(
start: Literal["2010-08-01"], end: Literal["2010-08-15"], freq: Literal["15min"]
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["2010-08-01"], end: Literal["2010-08-15"], freq: Literal["24H"]
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["01-01-2001"], periods: int, freq: Literal["D"]):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["01-01-2001"], periods: int, freq: Literal["H"]):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["2001-01-01"], periods: int, freq: Literal["H"]):
"""
usage.xarray: 2
"""
...
@overload
def date_range(start: Literal["20000101"], periods: int):
"""
usage.xarray: 2
"""
...
@overload
def date_range(start: Literal["2000-01-01"], end: Literal["2000-01-10"]):
"""
usage.xarray: 2
"""
...
@overload
def date_range(start: Literal["2000-1-1"], periods: int):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["15/12/1999"],
periods: int,
freq: pandas._libs.tslibs.offsets.DateOffset,
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["2011-09-01"], periods: int):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["10-09-2010"], periods: int, freq: Literal["1y"]):
"""
usage.xarray: 2
"""
...
@overload
def date_range(
start: Literal["2000-01-01"], periods: int, freq: Literal["1h"], tz: object
):
"""
usage.xarray: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"],
periods: int,
freq: None,
tz: None,
name: Literal["Date"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: Literal["2000"], end: Literal["2015"], freq: Literal["6M"]):
"""
usage.dask: 2
"""
...
@overload
def date_range(
start: Literal["2000"],
end: Literal["2000"],
freq: Literal["1H"],
name: Literal["timestamp"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: Literal["2000"], end: Literal["2015"], freq: Literal["3M"]):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["2000-01-01"], end: Literal["2000-12-31"], freq: Literal["1M"]
):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: Literal["2000"], end: Literal["2001"], freq: Literal["3M"]):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"],
periods: int,
freq: pandas._libs.tslibs.offsets.Hour,
tz: None,
name: Literal["timestamp"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: Literal["2000"], end: Literal["2001"], freq: Literal["6M"]):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: pandas._libs.tslibs.timestamps.Timestamp,
end: pandas._libs.tslibs.timestamps.Timestamp,
freq: Literal["1D"],
name: Literal["timestamp"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: Literal["20120101"], periods: int):
"""
usage.dask: 2
"""
...
@overload
def date_range(start: Literal["2019-01-01"], periods: int, freq: Literal["1T"]):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"],
periods: int,
freq: None,
tz: None,
name: Literal["date"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: Literal["2011-01-01"], periods: int, freq: Literal["D"]):
"""
usage.dask: 3
"""
...
@overload
def date_range(
start: Literal["1970-01-01"],
periods: int,
freq: pandas._libs.tslibs.offsets.Day,
tz: None,
name: None,
):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: Literal["2000"], periods: int, freq: Literal["B"]):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"], periods: int, freq: None, tz: None, name: None
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"],
periods: int,
freq: pandas._libs.tslibs.offsets.BusinessDay,
tz: None,
name: None,
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: pandas._libs.tslibs.timestamps.Timestamp,
end: pandas._libs.tslibs.timestamps.Timestamp,
freq: Literal["15s"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["2015-01-01 00:00"],
end: Literal[" 2015-05-01 23:50"],
freq: Literal["10min"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: pandas._libs.tslibs.timestamps.Timestamp,
end: pandas._libs.tslibs.timestamps.Timestamp,
freq: Literal["1M"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: Literal["2016-01-01"], periods: int):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: Literal["2015-01-01"], periods: int, freq: Literal["1T"]):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: Literal["2000"], periods: int, freq: Literal["H"]):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"],
periods: int,
freq: pandas._libs.tslibs.offsets.Hour,
tz: None,
name: None,
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1/1/2000"], end: Literal["1/1/2001"], freq: Literal["12h"]
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1/1/2000"], end: Literal["1/1/2001"], freq: Literal["D"]
):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: Literal["01.01.2015"], end: Literal["05.05.2015"]):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["2000-01-01"], end: Literal["2000-01-31"], freq: Literal["1d"]
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: pandas._libs.tslibs.timestamps.Timestamp,
end: pandas._libs.tslibs.timestamps.Timestamp,
freq: Literal["1s"],
name: Literal["timestamp"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: int, periods: int):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["2000-01-01"], end: Literal["2000-04-01"], freq: Literal["1D"]
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["2016-01-01"], end: Literal["2016-01-31"], freq: Literal["12h"]
):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: Literal["2011-01-01"], periods: int, freq: Literal["H"]):
"""
usage.dask: 5
"""
...
@overload
def date_range(
start: Literal["1970-01-01"],
periods: int,
freq: None,
tz: None,
name: Literal["time"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"],
periods: int,
freq: pandas._libs.tslibs.offsets.Day,
tz: None,
name: Literal["time"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: Literal["2019-08-01"], periods: int, freq: Literal["1D"]):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["2016-01-01 00:00:00"], periods: int, freq: Literal["1s"]
):
"""
usage.dask: 2
"""
...
@overload
def date_range(
start: Literal["1970-01-01"], periods: int, freq: None, tz: None, name: Literal["a"]
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"],
periods: int,
freq: pandas._libs.tslibs.offsets.Day,
tz: None,
name: Literal["a"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: Literal["2000"], end: Literal["2004"], freq: Literal["1M"]):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: pandas._libs.tslibs.timestamps.Timestamp,
end: pandas._libs.tslibs.timestamps.Timestamp,
freq: Literal["2H"],
name: Literal["timestamp"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"],
periods: int,
freq: None,
tz: None,
name: Literal["timestamp"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"],
periods: int,
freq: None,
tz: None,
name: Literal["notz"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"],
periods: int,
freq: pandas._libs.tslibs.offsets.Day,
tz: object,
name: Literal["tz"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"], periods: int, freq: None, tz: None, name: Literal["x"]
):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: Literal["2000"], periods: int, tz: Literal["US/Central"]):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"],
periods: int,
freq: None,
tz: object,
name: Literal["A"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(start: Literal["2000"], end: Literal["2001"], freq: Literal["1M"]):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["2016-01-01"], periods: int, tz: Literal["America/New_York"]
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"],
periods: int,
freq: pandas._libs.tslibs.offsets.Day,
tz: object,
name: Literal["foo"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1-1-2000"], end: Literal["2-15-2000"], freq: Literal["h"]
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["4-15-2000"], end: Literal["5-15-2000"], freq: Literal["h"]
):
"""
usage.dask: 2
"""
...
@overload
def date_range(
start: pandas._libs.tslibs.timestamps.Timestamp,
end: pandas._libs.tslibs.timestamps.Timestamp,
freq: pandas._libs.tslibs.offsets.Minute,
name: None,
closed: Literal["left"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: pandas._libs.tslibs.timestamps.Timestamp,
end: pandas._libs.tslibs.timestamps.Timestamp,
freq: pandas._libs.tslibs.offsets.Minute,
name: None,
closed: None,
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: pandas._libs.tslibs.timestamps.Timestamp,
end: pandas._libs.tslibs.timestamps.Timestamp,
freq: pandas._libs.tslibs.offsets.Hour,
name: None,
closed: Literal["left"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: pandas._libs.tslibs.timestamps.Timestamp,
end: pandas._libs.tslibs.timestamps.Timestamp,
freq: pandas._libs.tslibs.offsets.Hour,
name: None,
closed: None,
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: pandas._libs.tslibs.timestamps.Timestamp,
end: pandas._libs.tslibs.timestamps.Timestamp,
freq: pandas._libs.tslibs.offsets.Day,
name: None,
closed: Literal["left"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: pandas._libs.tslibs.timestamps.Timestamp,
end: pandas._libs.tslibs.timestamps.Timestamp,
freq: pandas._libs.tslibs.offsets.Day,
name: None,
closed: None,
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: pandas._libs.tslibs.timestamps.Timestamp,
end: pandas._libs.tslibs.timestamps.Timestamp,
freq: pandas._libs.tslibs.offsets.Week,
name: None,
closed: None,
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: pandas._libs.tslibs.timestamps.Timestamp,
end: pandas._libs.tslibs.timestamps.Timestamp,
freq: pandas._libs.tslibs.offsets.Week,
name: None,
closed: Literal["left"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: pandas._libs.tslibs.timestamps.Timestamp,
end: pandas._libs.tslibs.timestamps.Timestamp,
freq: pandas._libs.tslibs.offsets.MonthEnd,
name: None,
closed: Literal["left"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: pandas._libs.tslibs.timestamps.Timestamp,
end: pandas._libs.tslibs.timestamps.Timestamp,
freq: pandas._libs.tslibs.offsets.MonthEnd,
name: None,
closed: None,
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["2000-01-01"], end: Literal["2000-02-15"], freq: Literal["h"]
):
"""
usage.dask: 4
"""
...
@overload
def date_range(
start: Literal["1-1-2000"], end: Literal["2-15-2000"], freq: Literal["D"]
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["4-15-2000"], end: Literal["5-15-2000"], freq: Literal["D"]
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"],
periods: int,
freq: None,
tz: pytz.UTC,
name: Literal["Time"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"],
periods: int,
freq: pandas._libs.tslibs.offsets.Day,
tz: pytz.UTC,
name: Literal["Time"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["2012-01-02"], end: Literal["2012-02-02"], freq: Literal["H"]
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["2012-03-02"], end: Literal["2012-04-02"], freq: Literal["H"]
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["2012-01-02 00:00:00"],
end: Literal["2012-01-02 01:00:00"],
freq: Literal["T"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["2012-01-02 06:00:00"],
end: Literal["2012-01-02 08:00:00"],
freq: Literal["T"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["2020-08-17T23:31:00.596478"],
end: Literal["2020-09-06T23:31:00.596478"],
freq: Literal["D"],
):
"""
usage.dask: 1
"""
...
@overload
def date_range(
start: Literal["1970-01-01"], periods: int, freq: None, tz: object, name: None
):
"""
usage.dask: 1
"""
...
def date_range(
start: Union[pandas._libs.tslibs.timestamps.Timestamp, int, str],
periods: int = ...,
end: Union[pandas._libs.tslibs.timestamps.Timestamp, str] = ...,
freq: object = ...,
tz: object = ...,
name: Union[None, str] = ...,
closed: Union[None, Literal["left"]] = ...,
):
"""
usage.dask: 94
usage.xarray: 122
"""
...
class DatetimeIndex:
# usage.dask: 1
__module__: ClassVar[object]
# usage.dask: 2
__name__: ClassVar[object]
# usage.dask: 1
array: object
# usage.dask: 1
day: object
# usage.xarray: 1
dayofweek: object
# usage.xarray: 1
dayofyear: object
# usage.dask: 4
# usage.xarray: 5
dtype: object
# usage.dask: 4
freq: None
# usage.xarray: 1
hour: object
# usage.dask: 1
is_all_dates: object
# usage.dask: 2
# usage.xarray: 2
is_monotonic: object
# usage.xarray: 2
is_unique: object
# usage.dask: 4
# usage.xarray: 1
month: object
# usage.dask: 17
# usage.xarray: 5
name: Union[None, Literal["timestamp", "index"]]
# usage.dask: 1
names: object
# usage.xarray: 1
ndim: object
# usage.xarray: 1
shape: object
# usage.xarray: 5
size: object
# usage.xarray: 1
time: object
# usage.dask: 2
tz: object
# usage.xarray: 16
values: object
@overload
def __add__(self, _0: pandas._libs.tslibs.offsets.Hour, /):
"""
usage.dask: 1
usage.xarray: 1
"""
...
@overload
def __add__(self, _0: datetime.timedelta, /):
"""
usage.xarray: 1
"""
...
@overload
def __add__(self, _0: Union[numpy.ndarray, numpy.timedelta64, numpy.datetime64], /):
"""
usage.pandas: 103
"""
...
@overload
def __add__(self, _0: pandas._libs.tslibs.offsets.Nano, /):
"""
usage.dask: 1
"""
...
@overload
def __add__(self, _0: pandas._libs.tslibs.offsets.Minute, /):
"""
usage.dask: 1
"""
...
@overload
def __add__(self, _0: pandas._libs.tslibs.offsets.Day, /):
"""
usage.dask: 2
"""
...
@overload
def __add__(self, _0: pandas._libs.tslibs.offsets.Week, /):
"""
usage.dask: 1
"""
...
@overload
def __add__(self, _0: pandas._libs.tslibs.offsets.MonthEnd, /):
"""
usage.dask: 1
"""
...
@overload
def __add__(self, _0: pandas._libs.tslibs.offsets.QuarterEnd, /):
"""
usage.dask: 1
"""
...
def __add__(self, _0: object, /):
"""
usage.dask: 8
usage.pandas: 103
usage.xarray: 2
"""
...
def __contains__(self, _0: pandas._libs.tslibs.timestamps.Timestamp, /):
"""
usage.dask: 2
"""
...
def __eq__(self, _0: Union[numpy.ndarray, numpy.datetime64], /):
"""
usage.pandas: 32
"""
...
def __ge__(self, _0: Union[numpy.ndarray, numpy.datetime64], /):
"""
usage.pandas: 3
"""
...
@overload
def __getitem__(self, _0: int, /):
"""
usage.dask: 18
usage.xarray: 6
"""
...
@overload
def __getitem__(self, _0: slice[None, int, None], /):
"""
usage.xarray: 1
"""
...
@overload
def __getitem__(self, _0: slice[int, None, int], /):
"""
usage.xarray: 2
"""
...
@overload
def __getitem__(self, _0: slice[int, int, int], /):
"""
usage.dask: 2
usage.xarray: 1
"""
...
@overload
def __getitem__(self, _0: numpy.ndarray, /):
"""
usage.xarray: 2
"""
...
@overload
def __getitem__(self, _0: slice[None, None, None], /):
"""
usage.dask: 1
usage.xarray: 7
"""
...
@overload
def __getitem__(self, _0: slice[None, None, None], /):
"""
usage.xarray: 1
"""
...
def __getitem__(
self,
_0: Union[
int,
numpy.ndarray,
slice[Union[None, int], Union[int, None], Union[None, int]],
],
/,
):
"""
usage.dask: 21
usage.xarray: 20
"""
...
@overload
def __gt__(self, _0: Union[numpy.ndarray, numpy.datetime64], /):
"""
usage.pandas: 2
"""
...
@overload
def __gt__(self, _0: pandas._libs.tslibs.timestamps.Timestamp, /):
"""
usage.dask: 1
"""
...
def __gt__(
self,
_0: Union[
pandas._libs.tslibs.timestamps.Timestamp, numpy.datetime64, numpy.ndarray
],
/,
):
"""
usage.dask: 1
usage.pandas: 2
"""
...
def __iadd__(self, _0: Union[numpy.ndarray, numpy.timedelta64], /):
"""
usage.pandas: 4
"""
...
def __isub__(self, _0: Union[numpy.ndarray, numpy.timedelta64], /):
"""
usage.pandas: 3
"""
...
def __le__(self, _0: Union[numpy.ndarray, numpy.datetime64], /):
"""
usage.pandas: 5
"""
...
@overload
def __lt__(self, _0: numpy.ndarray, /):
"""
usage.pandas: 1
"""
...
@overload
def __lt__(self, _0: pandas._libs.tslibs.timestamps.Timestamp, /):
"""
usage.dask: 1
"""
...
def __lt__(
self, _0: Union[pandas._libs.tslibs.timestamps.Timestamp, numpy.ndarray], /
):
"""
usage.dask: 1
usage.pandas: 1
"""
...
def __ne__(self, _0: numpy.ndarray, /):
"""
usage.pandas: 2
"""
...
def __radd__(
self, _0: Union[numpy.datetime64, numpy.timedelta64, numpy.ndarray], /
):
"""
usage.pandas: 59
"""
...
def __rsub__(
self, _0: Union[numpy.ndarray, numpy.timedelta64, numpy.datetime64], /
):
"""
usage.pandas: 32
"""
...
@overload
def __sub__(self, _0: pandas._libs.tslibs.timestamps.Timestamp, /):
"""
usage.xarray: 3
"""
...
@overload
def __sub__(self, _0: numpy.timedelta64, /):
"""
usage.xarray: 1
"""
...
@overload
def __sub__(self, _0: Union[numpy.ndarray, numpy.timedelta64, numpy.datetime64], /):
"""
usage.pandas: 75
"""
...
def __sub__(
self,
_0: Union[
numpy.datetime64,
numpy.timedelta64,
numpy.ndarray,
pandas._libs.tslibs.timestamps.Timestamp,
],
/,
):
"""
usage.pandas: 75
usage.xarray: 4
"""
...
def ceil(self, /, *args: Literal["v", "t"]):
"""
usage.xarray: 3
"""
...
def copy(self, /, deep: bool):
"""
usage.xarray: 1
"""
...
def equals(self, /, other: pandas.core.indexes.datetimes.DatetimeIndex):
"""
usage.xarray: 2
"""
...
def floor(self, /, *args: Literal["v", "t"]):
"""
usage.xarray: 3
"""
...
@overload
def get_indexer(self, /, target: numpy.ndarray, method: None, tolerance: None):
"""
usage.xarray: 1
"""
...
@overload
def get_indexer(
self, /, target: numpy.ndarray, method: Literal["pad"], tolerance: None
):
"""
usage.xarray: 1
"""
...
@overload
def get_indexer(
self, /, target: numpy.ndarray, method: Literal["backfill"], tolerance: None
):
"""
usage.xarray: 1
"""
...
@overload
def get_indexer(
self, /, target: numpy.ndarray, method: Literal["nearest"], tolerance: None
):
"""
usage.xarray: 1
"""
...
@overload
def get_indexer(
self,
/,
target: numpy.ndarray,
method: Literal["pad"],
tolerance: Literal["12H"],
):
"""
usage.xarray: 1
"""
...
@overload
def get_indexer(
self,
/,
target: numpy.ndarray,
method: Literal["backfill"],
tolerance: Literal["12H"],
):
"""
usage.xarray: 1
"""
...
@overload
def get_indexer(
self,
/,
target: numpy.ndarray,
method: Literal["nearest"],
tolerance: Literal["6H"],
):
"""
usage.xarray: 1
"""
...
def get_indexer(
self,
/,
target: numpy.ndarray,
method: Union[Literal["nearest", "backfill", "pad"], None],
tolerance: Union[Literal["6H", "12H"], None],
):
"""
usage.xarray: 7
"""
...
@overload
def get_loc(self, /, key: numpy.datetime64, method: Literal["nearest"]):
"""
usage.xarray: 2
"""
...
@overload
def get_loc(self, /, key: Literal["2000-01-01"], method: None, tolerance: None):
"""
usage.xarray: 1
"""
...
def get_loc(
self,
/,
key: Union[Literal["2000-01-01"], numpy.datetime64],
method: Union[None, Literal["nearest"]],
tolerance: None = ...,
):
"""
usage.xarray: 3
"""
...
def max(self, /):
"""
usage.dask: 1
"""
...
def min(self, /):
"""
usage.dask: 1
"""
...
def round(self, /, *args: Literal["v", "t"]):
"""
usage.xarray: 3
"""
...
@overload
def shift(self, /, periods: int, freq: Literal["S"]):
"""
usage.dask: 3
"""
...
@overload
def shift(self, /, periods: int, freq: Literal["W"]):
"""
usage.dask: 3
"""
...
@overload
def shift(self, /, periods: int, freq: pandas._libs.tslibs.timedeltas.Timedelta):
"""
usage.dask: 3
"""
...
@overload
def shift(self, /, periods: int, freq: None):
"""
usage.dask: 2
"""
...
def shift(
self,
/,
periods: int,
freq: Union[None, Literal["W", "S"], pandas._libs.tslibs.timedeltas.Timedelta],
):
"""
usage.dask: 11
"""
...
@overload
def to_frame(self, /, index: bool, name: None):
"""
usage.dask: 1
"""
...
@overload
def to_frame(self, /, name: Literal["foo"]):
"""
usage.dask: 1
"""
...
@overload
def to_frame(self, /, index: bool, name: Literal["foo"]):
"""
usage.dask: 1
"""
...
def to_frame(self, /, name: Union[Literal["foo"], None], index: bool = ...):
"""
usage.dask: 3
"""
...
def to_numpy(self, /, dtype: Literal["datetime64[ns]"]):
"""
usage.xarray: 1
"""
...
def to_series(self, /):
"""
usage.dask: 1
"""
...
@overload
def tz_localize(self, /, tz: None, nonexistent: Literal["shift_forward"]):
"""
usage.dask: 1
"""
...
@overload
def tz_localize(self, /, tz: object, nonexistent: Literal["shift_forward"]):
"""
usage.dask: 1
"""
...
def tz_localize(self, /, tz: object, nonexistent: Literal["shift_forward"]):
"""
usage.dask: 2
"""
...
|
# GUI reader side: like pipes-gui1, but make root window and mainloop explicit
from tkinter import *
from PP4E.Gui.Tools.guiStreams import redirectedGuiShellCmd
def launch():
redirectedGuiShellCmd('python -u pipe-nongui.py')
window = Tk()
Button(window, text='GO!', command=launch).pack()
window.mainloop()
|
from . import builtin
|
import requests
import json
parameters = {
# 'address' : '19.108914019118583, 72.86535472954193'
'address' : '19.1067657, 72.8639412'
}
response = requests.get("https://plus.codes/api", params=parameters)
r = json.loads(response.text)
print(r['plus_code']['global_code'])
|
import argparse
import pprint
from typing import Any
import torch
from transformers import AutoTokenizer, BartForConditionalGeneration
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def pretty_convert(x: Any) -> Any:
if isinstance(x, torch.Tensor):
return x.tolist()
else:
return x
def pretty_print(x: dict) -> None:
y = {k: pretty_convert(v) for k, v in x.items()}
pp = pprint.PrettyPrinter(indent=4, compact=True, width=120)
pp.pprint(y)
def main(args=None) -> None:
parser = argparse.ArgumentParser()
parser.add_argument("--model", default="facebook/bart-base")
parser.add_argument("--input", required=True)
    args = parser.parse_args(args)
pretty_print(args.__dict__)
tokenizer: PreTrainedTokenizerBase = AutoTokenizer.from_pretrained(args.model)
tokenized_inputs = tokenizer(
[args.input],
padding="longest",
return_tensors="pt",
add_special_tokens=True,
return_attention_mask=False,
)
pretty_print(tokenized_inputs)
back_decoded = tokenizer.batch_decode(
tokenized_inputs["input_ids"],
skip_special_tokens=False,
clean_up_tokenization_spaces=False,
)
pretty_print({"back_decoded": back_decoded})
if __name__ == "__main__":
main()
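# Hedged usage note (not part of the original script): with the arguments defined
# above, a typical invocation would look roughly like
#   python this_script.py --input "Hello world" --model facebook/bart-base
# which prints the parsed arguments, the tokenized input_ids, and the text decoded
# back from those ids.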
|
"""
fs.appdirfs
===========
A collection of filesystems that map to application specific locations.
These classes abstract away the different requirements for user data across platforms,
which vary in their conventions. They are all subclasses of :class:`fs.osfs.OSFS`,
all that differs from `OSFS` is the constructor which detects the appropriate
location given the name of the application, author name and other parameters.
Uses `appdirs` (https://github.com/ActiveState/appdirs), written by Trent Mick and Sridhar Ratnakumar <trentm at gmail com; github at srid name>
"""
from fs.osfs import OSFS
from fs.appdirs import AppDirs
__all__ = ['UserDataFS',
'SiteDataFS',
'UserCacheFS',
'UserLogFS']
class UserDataFS(OSFS):
"""A filesystem for per-user application data."""
def __init__(self, appname, appauthor=None, version=None, roaming=False, create=True):
"""
:param appname: the name of the application
:param appauthor: the name of the author (used on Windows)
:param version: optional version string, if a unique location per version of the application is required
:param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx
:param create: if True (the default) the directory will be created if it does not exist
"""
app_dirs = AppDirs(appname, appauthor, version, roaming)
super(UserDataFS, self).__init__(app_dirs.user_data_dir, create=create)
class SiteDataFS(OSFS):
"""A filesystem for application site data."""
def __init__(self, appname, appauthor=None, version=None, roaming=False, create=True):
"""
:param appname: the name of the application
:param appauthor: the name of the author (not used on linux)
:param version: optional version string, if a unique location per version of the application is required
:param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx
:param create: if True (the default) the directory will be created if it does not exist
"""
app_dirs = AppDirs(appname, appauthor, version, roaming)
super(SiteDataFS, self).__init__(app_dirs.site_data_dir, create=create)
class UserCacheFS(OSFS):
"""A filesystem for per-user application cache data."""
def __init__(self, appname, appauthor=None, version=None, roaming=False, create=True):
"""
:param appname: the name of the application
:param appauthor: the name of the author (not used on linux)
:param version: optional version string, if a unique location per version of the application is required
:param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx
:param create: if True (the default) the directory will be created if it does not exist
"""
app_dirs = AppDirs(appname, appauthor, version, roaming)
super(UserCacheFS, self).__init__(app_dirs.user_cache_dir, create=create)
class UserLogFS(OSFS):
"""A filesystem for per-user application log data."""
def __init__(self, appname, appauthor=None, version=None, roaming=False, create=True):
"""
:param appname: the name of the application
:param appauthor: the name of the author (not used on linux)
:param version: optional version string, if a unique location per version of the application is required
:param roaming: if True, use a *roaming* profile on Windows, see http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx
:param create: if True (the default) the directory will be created if it does not exist
"""
app_dirs = AppDirs(appname, appauthor, version, roaming)
super(UserLogFS, self).__init__(app_dirs.user_log_dir, create=create)
if __name__ == "__main__":
udfs = UserDataFS('exampleapp', appauthor='pyfs')
    print(udfs)
    udfs2 = UserDataFS('exampleapp2', appauthor='pyfs', create=False)
    print(udfs2)
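    # Hedged extension of the demo above (not part of the original module): the
    # other filesystems defined in this file are constructed the same way, and
    # 'exampleapp' / 'pyfs' remain placeholder names.
    ucfs = UserCacheFS('exampleapp', appauthor='pyfs')
    print(ucfs)
    ulfs = UserLogFS('exampleapp', appauthor='pyfs')
    print(ulfs)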
|
#!/usr/bin/env python3
import re
import urllib.request
import os
from dockerfile_parse import DockerfileParser
from whalelinter.app import App
from whalelinter.utils import DockerfileCommand
class Parser(DockerfileParser):
def __init__(self, filename):
DockerfileParser.__init__(self, cache_content=True)
if self.is_url(filename) is not None:
response = urllib.request.urlopen(filename)
if self.is_content_type_plain_text(response):
self.content = response.read().decode('utf-8')
App._dockerfile['is_remote'] = True
App._dockerfile['url'] = filename
else:
print('ERROR: file format not supported. Plain text expected\n')
exit(-1)
elif os.path.isfile(filename):
self.dockerfile_path = filename
self.content = open(filename, encoding='utf-8').read()
App._dockerfile['is_remote'] = False
elif self.is_github_repo(filename):
filename = 'https://raw.githubusercontent.com/' + filename + '/master/Dockerfile'
self.content = urllib.request.urlopen(filename).read().decode('utf-8')
App._dockerfile['is_remote'] = True
App._dockerfile['url'] = filename
else:
print('ERROR: file format not supported\n')
exit(-1)
self.commands = self.dict_to_command_object(self.structure)
@property
def content(self):
pass
@content.setter
def content(self, content):
pass
def is_github_repo(self, filename):
regex = re.compile(r'^[-_.0-9a-z]+/[-_.0-9a-z]+$', re.IGNORECASE)
return True if regex.match(filename) is not None else False
def is_url(self, filename):
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$',
re.IGNORECASE)
return regex.match(filename)
def is_content_type_plain_text(self, response):
content_type = response.getheader('Content-Type')
regex = re.compile(r'text/plain')
return True if regex.search(content_type) is not None else False
def dict_to_command_object(self, lst):
commands = []
for element in lst:
command = DockerfileCommand()
command.instruction = element.get('instruction').upper()
command.arguments = element.get('value').split(' ')
command.line = element.get('startline') + 1
command._raw = element.get('content')
commands.append(command)
return commands
|
import six
import json
import threading
from rdflib import URIRef, BNode, Literal, Namespace
from rdflib.namespace import RDF, XSD
from ckanext.dcat.profiles import RDFProfile, VCARD, DCAT, DCT, FOAF, SKOS, ADMS, SPDX, LOCN, GSP
from ckanext.dcat.utils import resource_uri, url_quote
from ckan.plugins import toolkit as p
import logging
log = logging.getLogger(__name__)
ADFI = Namespace('http://avoindata.fi/ns#')
GEODCAT = Namespace('http://data.europa.eu/930/#')
namespaces = {
'dct': DCT,
'dcat': DCAT,
'vcard': VCARD,
'foaf': FOAF,
'skos': SKOS,
'adms': ADMS,
'xsd': XSD,
'spdx': SPDX,
'adfi': ADFI,
'locn': LOCN,
'gsp': GSP,
'geodcat': GEODCAT,
}
def as_dict(value):
if type(value) is dict:
return value
elif type(value) in (str, six.text_type):
return as_dict(json.loads(value))
else:
raise ValueError()
def get_dict(d, key):
return as_dict(d.get(key, {}))
def uriref(uri, *args, **kwargs):
return URIRef(url_quote(uri.encode('utf-8')), *args, **kwargs)
thread_local = threading.local()
thread_local.organization_list = []
thread_local.group_list = []
def get_organization(org_id):
if hasattr(thread_local, 'organization_list'):
organization = next((organization for organization in thread_local.organization_list
if organization['id'] == org_id), None)
else:
organization = None
# If organization does not exist in previously fetched list, use organization_show to fetch it
if not organization:
organization = p.get_action('organization_show')({}, {'id': org_id,
'include_users': False,
'include_dataset_count': False,
'include_groups': False,
'include_tags': False,
'include_followers': False})
return organization
def get_group(group_id):
if hasattr(thread_local, 'group_list'):
group = next((group for group in thread_local.group_list if group['id'] == group_id), None)
else:
group = None
# If group does not exist in previously fetched list, use group_show to fetch it
if not group:
group = p.get_action('group_show')({}, {'id': group_id})
return group
class AvoindataDCATAPProfile(RDFProfile):
'''
An RDF profile for Avoindata based on DCAT-AP 2.0.0 extension at
<https://www.avoindata.fi/ns/#>
Supported dataset fields:
Mandatory: dct:title, dct:description
Recommended: dcat:contactPoint, dcat:distribution, dcat:keyword
dct:publisher, dcat:theme
Optional: dcat:landingPage, dct:spatial, dct:accuralPeriodicity, dct:type,
dct:identifier, dct:temporal, dct:issued
Supported distribution fields:
Mandatory: dct:accessUrl
Recommended: dct:description
Optional: dct:title, dct:downloadUrl, adms:status, dct:license, dct:format,
dcat:byteSize, dcat:temporalResolution, spdx:checksum, dct:rights,
dct:conformsTo
'''
#
def parse_dataset(self, dataset_dict, dataset_ref):
return dataset_dict
def graph_from_dataset(self, dataset_dict, dataset_ref):
g = self.g
        for prefix, namespace in namespaces.items():
g.bind(prefix, namespace)
g.add((dataset_ref, RDF.type, DCAT.Dataset))
# Flatten extras
for extra in dataset_dict.get('extras', []):
key = extra['key']
if key not in dataset_dict:
dataset_dict[key] = extra['value']
# dct:title
titles = set(t for t in get_dict(dataset_dict, 'title_translated').values() if t)
if not titles:
titles.add(dataset_dict.get('title', ''))
for title in titles:
g.add((dataset_ref, DCT.title, Literal(title)))
# dct:description
descriptions = set(d for d in get_dict(dataset_dict, 'notes_translated').values() if d)
if not descriptions:
descriptions.add(dataset_dict.get('notes', ''))
for description in descriptions:
g.add((dataset_ref, DCT.description, Literal(description)))
# dct:contactPoint
contact_details = BNode()
g.add((contact_details, RDF.type, VCARD.Organization))
g.add((dataset_ref, DCAT.contactPoint, contact_details))
self._add_triple_from_dict(dataset_dict, contact_details, VCARD.fn, 'maintainer')
self._add_triple_from_dict(dataset_dict, contact_details, VCARD.hasEmail, 'maintainer_email',
_type=URIRef, value_modifier=self._add_mailto)
self._add_triple_from_dict(dataset_dict, contact_details, VCARD.hasUrl, 'maintainer_website', _type=URIRef)
contact_email = dataset_dict.get('contact_email')
if contact_email:
g.add((dataset_ref, DCAT.contactPoint, uriref(contact_email)))
# dcat:distribution
for resource_dict in dataset_dict.get('resources', []):
resource_dict = as_dict(resource_dict)
distribution = BNode()
g.add((distribution, RDF.type, DCAT.Distribution))
g.add((dataset_ref, DCAT.distribution, distribution))
# dct:title
titles = set(t for t in get_dict(resource_dict, 'name_translated').values() if t)
if not titles:
titles.add(resource_dict.get('title', ''))
for title in titles:
g.add((distribution, DCT.title, Literal(title)))
# dct:description
descriptions = set(d for d in get_dict(resource_dict, 'description_translated').values() if d)
if not descriptions:
descriptions.add(dataset_dict.get('description', ''))
for description in descriptions:
g.add((distribution, DCT.description, Literal(description)))
# dct:rights
copyright_notices = set(n for n in get_dict(dataset_dict, 'copyright_notice_translated').values() if n)
if copyright_notices:
rights_statement = BNode()
g.add((rights_statement, RDF.type, DCT.RightsStatement))
g.add((rights_statement, DCT.description, Literal('\n\n'.join(copyright_notices))))
g.add((distribution, DCT.rights, rights_statement))
# dcat:accessUrl
g.add((distribution, DCAT.accessURL, uriref(resource_uri(resource_dict))))
# dcat:downloadUrl
resource_url = resource_dict.get('url')
if resource_url:
g.add((distribution, DCAT.downloadURL, uriref(resource_url)))
# adms:status
maturity = resource_dict.get('maturity')
if maturity:
status = BNode()
g.add((status, RDF.type, SKOS.Concept))
g.add((status, SKOS.prefLabel, Literal(maturity)))
g.add((distribution, ADMS.status, status))
# dct:license
license_url = dataset_dict.get('license_url')
if license_url:
license_ref = uriref(license_url)
g.add((license_ref, RDF.type, DCT.LicenseDocument))
g.add((distribution, DCT.license, license_ref))
# dct:format
file_format = resource_dict.get('format')
if file_format:
media_type = BNode()
g.add((media_type, RDF.type, DCT.MediaTypeOrExtent))
g.add((media_type, RDF.value, Literal(file_format)))
g.add((distribution, DCT['format'], media_type))
# dct:conformsTo
position_info = resource_dict.get('position_info')
if position_info:
standard = BNode()
g.add((standard, RDF.type, DCT.Standard))
g.add((standard, DCT.identifier, Literal(position_info)))
g.add((distribution, DCT.conformsTo, standard))
spatial_reference_system = dataset_dict.get('spatial-reference-system')
if spatial_reference_system:
standard = BNode()
g.add((standard, RDF.type, DCT.Standard))
g.add((standard, DCT.identifier, Literal(spatial_reference_system)))
g.add((distribution, DCT.conformsTo, standard))
# dcat:byteSize
file_size = resource_dict.get('size')
if file_size:
g.add((distribution, DCAT.byteSize, Literal(file_size, datatype=XSD.decimal)))
# spdx:checksum
checksum_value = resource_dict.get('sha256')
if checksum_value:
checksum = BNode()
g.add((checksum, RDF.type, SPDX.Checksum))
g.add((checksum, SPDX.checksumValue, Literal(checksum_value)))
g.add((checksum, SPDX.algorithm, SPDX.checksumAlgorithm_sha1))
g.add((distribution, SPDX.checksum, checksum))
# dct:temporal
temporal_coverage_from = resource_dict.get('temporal_coverage_from')
temporal_coverage_to = resource_dict.get('temporal_coverage_to')
if temporal_coverage_from or temporal_coverage_to:
period = BNode()
g.add((distribution, DCT.temporal, period))
g.add((period, RDF.type, DCT.PeriodOfTime))
if temporal_coverage_from:
g.add((period, DCAT.startDate, Literal(temporal_coverage_from)))
if temporal_coverage_to:
g.add((period, DCAT.endDate, Literal(temporal_coverage_to)))
# dcat:temporalResolution
temporal_granularities = set(t for lang in get_dict(resource_dict, 'temporal_granularity').values()
for t in lang if t)
if temporal_granularities:
g.add((distribution, DCAT.temporalResolution,
Literal(', '.join(temporal_granularities), datatype=XSD.duration)))
# dcat:keyword
keywords = set(
keyword
for keyword_language in get_dict(dataset_dict, 'keywords').values()
for keyword in keyword_language)
for keyword in keywords:
g.add((dataset_ref, DCAT.keyword, Literal(keyword)))
# dct:publisher
organization = get_organization(dataset_dict['owner_org'])
# If organization is not approved, it won't be available in organization list
if organization:
publisher = URIRef(p.url_for(controller='organization', action='read', id=organization['id'], qualified=True))
g.add((publisher, RDF.type, FOAF.Agent))
g.add((dataset_ref, DCT.publisher, publisher))
organization_titles = (t for t in get_dict(organization, 'title_translated').values() if t)
for title in organization_titles:
g.add((publisher, FOAF.name, Literal(title)))
self._add_triple_from_dict(organization, publisher, FOAF.homepage, 'homepage')
# dcat:theme
groups = dataset_dict.get('groups', [])
for group_item in groups:
group_dict = get_group(group_item['id'])
theme = URIRef(p.url_for(controller='group', action='read', id=group_dict['id'], qualified=True))
g.add((theme, RDF.type, SKOS.Concept))
g.add((dataset_ref, DCAT.theme, theme))
group_titles = (t for t in get_dict(group_dict, 'title_translated').values() if t)
for title in group_titles:
g.add((theme, SKOS.prefLabel, Literal(title)))
# dcat:landingPage
external_urls = (u for u in dataset_dict.get('external_urls', []) if u)
for external_url in external_urls:
# some external urls have whitespace in them
external_url = external_url.strip()
document = URIRef(url_quote(external_url.encode('utf-8')))
g.add((document, RDF.type, FOAF.Document))
g.add((dataset_ref, DCAT.landingPage, document))
# dct:spatial
locations = []
geographical_coverages = set(g for g in dataset_dict.get('geographical_coverage', []) if g)
for geographical_coverage in geographical_coverages:
locations.append((DCT.identifier, Literal(geographical_coverage)))
bbox_field_names = ['bbox-south-lat', 'bbox-west-long', 'bbox-north-lat', 'bbox-east-long']
bbox_fields = [dataset_dict.get(field) for field in bbox_field_names]
if all(bbox_fields):
log.debug('bbox fields are present: %s', bbox_fields)
from lxml import etree
gml = 'http://www.opengis.net/gml/3.2.1#'
nsmap = {'gml': gml}
envelope = etree.Element(etree.QName(gml, 'Envelope'),
srsName='http://www.opengis.net/def/crs/OGC/1.3/CRS84',
nsmap=nsmap)
lowerCorner = etree.SubElement(envelope, etree.QName(gml, 'lowerCorner'), nsmap=nsmap)
upperCorner = etree.SubElement(envelope, etree.QName(gml, 'upperCorner'), nsmap=nsmap)
lowerCorner.text = ' '.join(bbox_fields[:2])
upperCorner.text = ' '.join(bbox_fields[2:])
locations.append((DCAT.bbox, Literal(etree.tostring(envelope))))
spatial_field = dataset_dict.get('spatial')
if spatial_field:
log.debug('spatial field is present: %s', spatial_field)
locations.append((LOCN.geometry, Literal(spatial_field, datatype=GSP.geoJSONLiteral)))
if dataset_dict.get('name') == 'keski-suomen-maakuntakaavayhdistelma7':
from pprint import pformat
log.debug(pformat(dataset_dict))
if locations:
location = BNode()
g.add((dataset_ref, DCT.spatial, location))
g.add((location, RDF.type, DCT.Location))
for prop, value in locations:
g.add((location, prop, value))
# geodcat:custodian
responsible_party = dataset_dict.get('responsible-party')
if responsible_party:
try:
custodians_data = json.loads(responsible_party)
custodians_data = custodians_data if type(custodians_data) is list else [custodians_data]
for custodian_data in custodians_data:
custodian = BNode()
g.add((custodian, RDF.type, FOAF.Agent))
custodian_fields = [('name', FOAF.name), ('email', VCARD.hasEmail)]
for field, prop in custodian_fields:
value = custodian_data.get(field)
if value:
g.add((custodian, prop, Literal(value)))
g.add((dataset_ref, GEODCAT.custodian, custodian))
except ValueError:
log.debug('Invalid JSON value in field responsible-party')
# dct:accuralPeriodicity
update_frequencies = set(u for lang in get_dict(dataset_dict, 'update_frequency').values() for u in lang if u)
spatial_frequency_of_update = dataset_dict.get('frequency-of-update')
if spatial_frequency_of_update:
update_frequencies.add(spatial_frequency_of_update)
if update_frequencies:
accrual_periodicity = BNode()
g.add((accrual_periodicity, RDF.type, DCT.Frequency))
g.add((accrual_periodicity, RDF.value, Literal(', '.join(update_frequencies))))
g.add((dataset_ref, DCT.accrualPeriodicity, accrual_periodicity))
# dct:type
content_types = set(t for lang in get_dict(dataset_dict, 'content_type').values() for t in lang if t)
if content_types:
concept = BNode()
g.add((concept, RDF.type, SKOS.Concept))
for content_type in content_types:
g.add((concept, SKOS.prefLabel, Literal(content_type)))
g.add((dataset_ref, DCT.type, concept))
# dct:identifier
g.add((dataset_ref, DCT.identifier, Literal(dataset_dict.get('id'))))
# dct:temporal
valid_from = dataset_dict.get('valid_from') or dataset_dict.get('temporal-extent-begin')
valid_till = dataset_dict.get('valid_till') or dataset_dict.get('temporal-extent-end')
if valid_from or valid_till:
period = BNode()
g.add((dataset_ref, DCT.temporal, period))
g.add((period, RDF.type, DCT.PeriodOfTime))
if valid_from:
g.add((period, DCAT.startDate, Literal(valid_from)))
if valid_till:
g.add((period, DCAT.endDate, Literal(valid_till)))
# dct:issued
date_released = dataset_dict.get('dataset-reference-date') or dataset_dict.get('metadata_created')
if date_released:
issued_date = Literal(date_released)
g.add((dataset_ref, DCT.issued, issued_date))
def graph_from_catalog(self, catalog_dict, catalog_ref):
# Fetch organization list for graph_from_dataset to use
context = {'user': p.c.user}
thread_local.organization_list = \
p.get_action('organization_list')(context, {"all_fields": True, "include_extras": True,
"include_dataset_count": False})
thread_local.group_list = p.get_action('group_list')(context, {"all_fields": True, "include_extras": True,
"include_dataset_count": False})
g = self.g
        for prefix, namespace in namespaces.items():
g.bind(prefix, namespace)
g.add((catalog_ref, RDF.type, DCAT.Catalog))
# Basic fields
title = p.config.get('ckan.site_title', '')
g.add((catalog_ref, DCT.title, Literal(title)))
description = p.config.get('ckan.site_description', '')
g.add((catalog_ref, DCT.description, Literal(description)))
homepage = URIRef(p.config.get('ckan.site_url', ''))
g.add((catalog_ref, FOAF.homepage, homepage))
language = p.config.get('ckan.locale_default', 'en')
linguistic_system = URIRef('http://id.loc.gov/vocabulary/iso639-1/%s' % language)
g.add((linguistic_system, RDF.type, DCT.LinguisticSystem))
g.add((catalog_ref, DCT.language, linguistic_system))
publisher = BNode()
g.add((publisher, RDF.type, FOAF.Organization))
g.add((publisher, FOAF.hasSite, URIRef(p.config.get('ckan.site_url', ''))))
g.add((publisher, FOAF.name, Literal(p.config.get('ckan.site_title'))))
g.add((catalog_ref, DCT.publisher, publisher))
# Dates
modified = self._last_catalog_modification()
if modified:
self._add_date_triple(catalog_ref, DCT.modified, modified)
|
import numpy as np
class Problem:
def __init__(self):
self.states = []
def p(self, s, next_s, action):
return 0
def r(self, s, next_s, action):
return 0
def a(self, s):
return []
def value_iteration(problem, gamma=0.9, theta=0.001):
value = np.zeros(len(problem.states))
p = problem.p
r = problem.r
while True:
delta = 0
for s in problem.states:
actions = problem.a(s)
v = value[s]
value[s] = max([sum([p(s, next_s, a) * (r(s, next_s, a) + gamma * value[next_s])
for next_s in problem.states]) for a in actions])
delta = max(delta, abs(v - value[s]))
if delta < theta:
break
policy = np.zeros(len(problem.states), dtype=int)
for s in problem.states:
actions = problem.a(s)
policy[s] = actions[np.argmax(
[sum([p(s, next_s, a) * (r(s, next_s, a) + gamma * value[next_s]) for next_s in problem.states]) for a in actions])]
return policy, value
def eval_policy(problem, policy, gamma=0.9, theta=0.01):
value = np.zeros(len(problem.states))
p = problem.p
r = problem.r
while True:
delta = 0
for s in problem.states:
v = value[s]
value[s] = sum([p(s, next_s, policy[s]) * (r(s, next_s, policy[s]) + gamma * value[next_s]) for next_s in problem.states])
delta = max(delta, abs(v - value[s]))
if delta < theta:
return value
def improve_policy(problem, policy, value, gamma=0.9):
p = problem.p
r = problem.r
stable = True
for s in problem.states:
actions = problem.a(s)
b = policy[s]
policy[s] = actions[np.argmax(
[sum([p(s, next_s, a) * (r(s, next_s, a) + gamma * value[next_s]) for next_s in problem.states]) for a in actions])]
if b != policy[s]:
stable = False
return stable
def policy_iteration(problem, gamma=0.9, theta=0.01):
policy = np.array([np.random.choice(problem.a(s)) for s in problem.states])
stable = False
while not stable:
values = eval_policy(problem, policy, gamma, theta)
stable = improve_policy(problem, policy, values, gamma)
return policy, values
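# Hedged usage sketch (not part of the original module): a tiny deterministic
# two-state MDP used to exercise value_iteration and policy_iteration. The
# state/action encoding and rewards below are made up for illustration.
class TwoStateProblem(Problem):
    def __init__(self):
        # states are integer indices so they can index the value array directly
        self.states = [0, 1]
    def a(self, s):
        # action 0: stay in the current state, action 1: move to the other state
        return [0, 1]
    def p(self, s, next_s, action):
        target = s if action == 0 else 1 - s
        return 1.0 if next_s == target else 0.0
    def r(self, s, next_s, action):
        # reward only for landing in state 1
        return 1.0 if next_s == 1 else 0.0
if __name__ == "__main__":
    problem = TwoStateProblem()
    vi_policy, vi_value = value_iteration(problem)
    pi_policy, pi_value = policy_iteration(problem)
    print("value iteration :", vi_policy, vi_value)
    print("policy iteration:", pi_policy, pi_value)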
|
from ceci import PipelineStage
from descformats import TextFile, HDFFile, YamlFile
import subprocess
import os
# This class runs the python3 version of BPZ from the command line
class BPZpipeStage1(PipelineStage):
"""Pipeline stage to run the BPZpy3 version of BPZ
The inputs and outputs are named in the associated yml file used by ceci
inputs:
-------
input_photometry_file: hdf5 file
file containing the ugrizy photometry and errors
configuration options:
----------------------
param_file: ascii file containing the BPZ parameters other than INTERP, infile, and outfiles.
Many of the parameters will be the same run to run, so we can use a default file for many
      healpix pixels' worth of data.
INTERP: integer (though read in as a string):
the number of interpolated SEDs to be created
in between the grid. The default value is 0.
    OUTPUT_CUT: float (read in as string)
      If OUTPUT_CUT is set, then BPZ masks values with M_0 (the reference band used in the prior)
      and only computes and outputs photo-z's for galaxies with M_0 < OUTPUT_CUT
    path_to_bpz: string
      The path to the installed BPZpy3 version, will be set as the $BPZPY3PATH env. variable
outputs:
--------
photoz_pointfile: ascii file usually ending in ".bpz" that contains the point estimate data
created by BPZ
photoz_probsfile: ascii file usually ending in "_probs.out" containing the p(z) for each object
in the catalog
-----
    For now, the code assumes that this is being run on cori and using the version of BPZ installed
    there; you can specify a different installation via the "path_to_bpz" config option.
"""
name = "BPZpipeStage1"
#
inputs = [
('input_photometry_file', HDFFile),
]
outputs = [
('output_photoz_pointfile', HDFFile),
('output_photoz_probsfile', HDFFile)
]
config_options = {
'INTERP': '0', #Interpolate between input templates N times
'OUTPUT_CUT': "Null", #if you want to only compute and output M_0 < OUTPUT_CUT set here
'param_file': "test.pars",
'path_to_bpz': "/global/projecta/projectdirs/lsst/groups/PZ/BPZ/BPZpy3/pz_pdf/pz/BPZ"
}
def run(self):
interp = self.config['INTERP']
pfile = self.config['param_file']
infile = self.get_input('input_photometry_file')
bpz_path = self.config['path_to_bpz']
outfile_point = self.get_output('output_photoz_pointfile')
outfile_probs= self.get_output('output_photoz_probsfile')
        # BPZ uses paths relative to an environment variable, so set it here
os.environ["BPZPY3PATH"]=bpz_path
#BPZ is so old that it has some leftover references to NUMERIX
os.environ["NUMERIX"]="numpy"
print ("Running BPZ...")
#Set up the command line command to run bpz_py3_hdf5. The format is
#bpz_py3_hdf5.py [infile] -P [parsfile] [specific BPZ keywords not in pars file, e.g. -INTERP]
bpz_exec = bpz_path+"/bpz_py3_readwritehdf5.py"
args = ['python3',
bpz_exec,
infile,
'-P',pfile,'-OUTPUT',
outfile_point,'-PROBS_LITE',outfile_probs]
        # Run BPZ and wait for it to finish so the output files exist before the stage returns
        subprocess.run(args, check=True)
# You would normally call some other function or method
# here to generate some output. You can use self.comm,
# self.rank, and self.size to use MPI.
print ("finished")
|