import pyOcean_cpu as ocean
a = ocean.asTensor([1,2,3])
s = a.storage
b = ocean.int8(9)
x = ocean.ensure(a, a.dtype)
print(x.obj == a.obj)
print(x.storage.obj == a.storage.obj)
x = ocean.ensure(a, ocean.float)
print(x)
ocean.ensure(a, ocean.int8, ocean.cpu, True)
print(a)
x = ocean.ensure(s, s.dtype)
print(x is s)
print(x.obj == s.obj)
x = ocean.ensure(s, ocean.half)
print(x)
ocean.ensure(s, ocean.cfloat, True)
print(s)
print(a)
x = ocean.ensure(b, b.dtype)
print(x)
x = ocean.ensure(b, ocean.double)
print(x)
ocean.ensure(b, ocean.float)
print(b)
|
import os
import shutil
import build_utils
import build_config
def get_supported_targets(platform):
if platform == 'win32':
return ['win32']
else:
return []
def get_dependencies_for_target(target):
return []
def build_for_target(target, working_directory_path, root_project_path):
if target == 'win32':
_build_win32(working_directory_path, root_project_path)
def get_download_info():
return 'Libs/libpvr_win32'
def _build_win32(working_directory_path, root_project_path):
    source_folder_path = os.path.join(root_project_path, get_download_info())
    (build_folder_x86, build_folder_x64) = build_utils.build_and_copy_libraries_win32_cmake(
os.path.join(working_directory_path, 'gen'),
source_folder_path,
root_project_path,
"PVRTexLib.sln",
"PVRTexLib",
"PVRTexLib.lib", "PVRTexLib.lib",
"PVRTexLib.lib", "PVRTexLib.lib",
"PVRTexLib.lib", "PVRTexLib.lib",
static_runtime=True)
    # PVRTexLib is built as a DLL that wraps the original static library PVRTexLib.lib
    # to keep the msvc2017 linker happy, so only the release version of the DLL and
    # its import library are copied.
    lib_path_x86 = os.path.join(root_project_path, 'Libs/lib_CMake/win/x86')
    build_folder_x86 = os.path.join(build_folder_x86, 'Release')
shutil.copyfile(os.path.join(build_folder_x86, 'PVRTexLib.lib'), os.path.join(lib_path_x86, 'Debug/PVRTexLib.lib'))
shutil.copyfile(os.path.join(build_folder_x86, 'PVRTexLib.lib'), os.path.join(lib_path_x86, 'Release/PVRTexLib.lib'))
shutil.copyfile(os.path.join(build_folder_x86, 'PVRTexLib.dll'), os.path.join(lib_path_x86, 'Release/PVRTexLib.dll'))
    lib_path_x64 = os.path.join(root_project_path, 'Libs/lib_CMake/win/x64')
    build_folder_x64 = os.path.join(build_folder_x64, 'Release')
shutil.copyfile(os.path.join(build_folder_x64, 'PVRTexLib.lib'), os.path.join(lib_path_x64, 'Debug/PVRTexLib.lib'))
shutil.copyfile(os.path.join(build_folder_x64, 'PVRTexLib.lib'), os.path.join(lib_path_x64, 'Release/PVRTexLib.lib'))
shutil.copyfile(os.path.join(build_folder_x64, 'PVRTexLib.dll'), os.path.join(lib_path_x64, 'Release/PVRTexLib.dll'))
|
"""Tests for full relationship schema checking."""
import pytest
from open_alchemy.schemas.validation.property_.relationship import full
TESTS = [
pytest.param(
{
"type": "object",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
},
"ref_schemas",
{"type": "array", "items": {"$ref": "#/components/schemas/RefSchema"}},
{
"RefSchema": {
"x-tablename": "ref_schema",
"type": "object",
"x-secondary": "schema_ref_schema",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
}
},
(False, "source schema :: every model must define x-tablename"),
id="many-to-many source no tablename",
),
pytest.param(
{
"x-tablename": "schema",
"type": "object",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
},
"ref_schemas",
{"type": "array", "items": {"$ref": "#/components/schemas/RefSchema"}},
{
"RefSchema": {
"x-secondary": "schema_ref_schema",
"type": "object",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
}
},
(False, "referenced schema :: every model must define x-tablename"),
id="many-to-many referenced no tablename",
),
pytest.param(
{
"x-tablename": "schema",
"type": "object",
"properties": {"id": {"type": "integer"}},
},
"ref_schemas",
{"type": "array", "items": {"$ref": "#/components/schemas/RefSchema"}},
{
"RefSchema": {
"type": "object",
"x-tablename": "ref_schema",
"x-secondary": "schema_ref_schema",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
}
},
(False, "source schema :: schema must have a primary key"),
id="many-to-many source no primary key property",
),
pytest.param(
{
"x-tablename": "schema",
"type": "object",
"properties": {"id": {"x-primary-key": True}},
},
"ref_schemas",
{"type": "array", "items": {"$ref": "#/components/schemas/RefSchema"}},
{
"RefSchema": {
"type": "object",
"x-tablename": "ref_schema",
"x-secondary": "schema_ref_schema",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
}
},
(
False,
"source schema :: id property :: malformed schema :: Every property "
"requires a type. ",
),
id="many-to-many source invalid primary key property",
),
pytest.param(
{
"x-tablename": "schema",
"type": "object",
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string", "x-primary-key": True},
},
},
"ref_schemas",
{"type": "array", "items": {"$ref": "#/components/schemas/RefSchema"}},
{
"RefSchema": {
"x-tablename": "ref_schema",
"type": "object",
"x-secondary": "schema_ref_schema",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
}
},
(
False,
"source schema :: many-to-many relationships currently only support single "
"primary key schemas",
),
id="many-to-many source multiple primary key property",
),
pytest.param(
{
"x-tablename": "schema",
"type": "object",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
},
"ref_schemas",
{"type": "array", "items": {"$ref": "#/components/schemas/RefSchema"}},
{
"RefSchema": {
"x-tablename": "ref_schema",
"type": "object",
"x-secondary": "schema_ref_schema",
"properties": {"id": {"type": "integer"}},
}
},
(False, "referenced schema :: schema must have a primary key"),
id="many-to-many referenced no primary key property",
),
pytest.param(
{
"x-tablename": "schema",
"type": "object",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
},
"ref_schemas",
{"type": "array", "items": {"$ref": "#/components/schemas/RefSchema"}},
{
"RefSchema": {
"x-tablename": "ref_schema",
"type": "object",
"x-secondary": "schema_ref_schema",
"properties": {"id": {"x-primary-key": True}},
}
},
(
False,
"referenced schema :: id property :: malformed schema :: Every property "
"requires a type. ",
),
id="many-to-many referenced invalid primary key property",
),
pytest.param(
{
"x-tablename": "schema",
"type": "object",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
},
"ref_schemas",
{"type": "array", "items": {"$ref": "#/components/schemas/RefSchema"}},
{
"RefSchema": {
"x-tablename": "ref_schema",
"type": "object",
"x-secondary": "schema_ref_schema",
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string", "x-primary-key": True},
},
}
},
(
False,
"referenced schema :: many-to-many relationships currently only support "
"single primary key schemas",
),
id="many-to-many referenced multiple primary key property",
),
pytest.param(
{
"x-tablename": "schema",
"type": "object",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
},
"ref_schemas",
{"type": "array", "items": {"$ref": "#/components/schemas/RefSchema"}},
{
"RefSchema": {
"x-tablename": "ref_schema",
"type": "object",
"x-secondary": "schema_ref_schema",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
}
},
(True, None),
id="many-to-many valid",
),
pytest.param(
{
"x-tablename": "schema",
"type": "object",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
},
"ref_schemas",
{"type": "array", "items": {"$ref": "#/components/schemas/RefSchema"}},
{
"RefSchema": {
"allOf": [
{
"x-tablename": "ref_schema",
"x-inherits": True,
"x-secondary": "schema_ref_schema",
"properties": {"other": {}},
},
{"$ref": "#/components/schemas/ParentSchema"},
]
},
"ParentSchema": {
"type": "object",
"x-tablename": "parent_schema",
"x-secondary": "schema_ref_schema",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
},
},
(False, "referenced schema :: schema must have a primary key"),
id="many-to-many joined table inheritance",
),
pytest.param(
{
"x-tablename": "schema",
"type": "object",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
},
"ref_schemas",
{"type": "array", "items": {"$ref": "#/components/schemas/RefSchema"}},
{
"RefSchema": {
"allOf": [
{
"x-inherits": True,
"x-secondary": "schema_ref_schema",
"properties": {"other": {}},
},
{"$ref": "#/components/schemas/ParentSchema"},
]
},
"ParentSchema": {
"type": "object",
"x-tablename": "parent_schema",
"x-secondary": "schema_ref_schema",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
},
},
(True, None),
id="many-to-many single table inheritance",
),
]
@pytest.mark.parametrize(
"parent_schema, property_name, property_schema, schemas, expected_result",
TESTS,
)
@pytest.mark.schemas
def test_check(parent_schema, property_name, property_schema, schemas, expected_result):
"""
GIVEN schemas, the parent and property schema and the expected result
WHEN check is called with the schemas and parent and property schema
THEN the expected result is returned.
"""
returned_result = full.check(schemas, parent_schema, property_name, property_schema)
assert returned_result == expected_result
|
# -*- coding: utf-8 -*-
"""Module with init command."""
from argparse import Namespace
import six
from termius.account.commands import LoginCommand
from termius.cloud.commands import PullCommand, PushCommand
from termius.core.commands import AbstractCommand
from termius.porting.commands import SSHImportCommand
class InitCommand(AbstractCommand):
"""initialize the Termius CLI"""
# pylint: disable=no-self-use
def prompt_username(self):
"""Ask username prompt."""
self.log.info('Please enter your Termius credentials\n')
return six.moves.input("Username: ")
# pylint: disable=no-self-use
def prompt_authy_token(self):
"""Ask authy token prompt."""
return six.moves.input('Authy token: ')
def extend_parser(self, parser):
"""Add more arguments to parser."""
parser.add_argument('-u', '--username', metavar='USERNAME')
parser.add_argument('-p', '--password', metavar='PASSWORD')
return parser
def init_namespace(self, parsed_args, username, password):
"""Make authenticated Namespace instance."""
return Namespace(
log_file=parsed_args.log_file,
username=username,
password=password
)
def login(self, parsed_args):
"""Wrapper for login command."""
command = LoginCommand(self.app, self.app_args, self.cmd_name)
command.take_action(parsed_args)
def pull(self, parsed_args):
"""Wrapper for pull command."""
command = PullCommand(self.app, self.app_args, self.cmd_name)
command.take_action(parsed_args)
def import_ssh(self, parsed_args):
"""Wrapper for sync command."""
command = SSHImportCommand(self.app, self.app_args, self.cmd_name)
command.take_action(parsed_args)
def push(self, parsed_args):
"""Wrapper for push command."""
command = PushCommand(self.app, self.app_args, self.cmd_name)
command.take_action(parsed_args)
def take_action(self, parsed_args):
"""Process command call."""
self.log.info('Initializing Termius CLI...\n')
username = parsed_args.username or self.prompt_username()
password = parsed_args.password or self.prompt_password()
namespace = self.init_namespace(
parsed_args, username, password
)
self.login(namespace)
self.log.info('\nCollecting data from the Termius Cloud...')
self.pull(namespace)
self.log.info('\nImporting ~/.ssh/config...')
self.import_ssh(namespace)
self.log.info('\nPushing data to the Termius Cloud...')
self.push(namespace)
self.log.info('\nTermius CLI successfully initialized.')
|
#!/usr/bin/env python3
import pytest
import yaml
from romanlengths.roman_conversion import convert_to_numeral
with open("tests/roman-numerals.yml", 'r') as numerals_file:
    numerals = yaml.safe_load(numerals_file)
@pytest.mark.parametrize('value', range(1, 5000))
def test_conversion(value):
assert convert_to_numeral(value) == numerals[value]
|
import torch
import torch.nn as nn
import copy
import time
import numpy as np
import torch.optim as optim
from .meta_pruner import MetaPruner
class Pruner(MetaPruner):
def __init__(self, model, args):
super(Pruner, self).__init__(model, args)
def prune(self):
self._get_kept_wg_L1()
self._prune_and_build_new_model()
|
# Generated by Django 4.0.2 on 2022-03-07 05:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0011_orderitem_complete_orderitem_customer'),
]
operations = [
migrations.AddField(
model_name='shippingaddress',
name='default',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='shippingaddress',
name='payment_option',
field=models.CharField(choices=[('S', 'Stripe'), ('P', 'Paypal')], default='P', max_length=2),
preserve_default=False,
),
migrations.AddField(
model_name='shippingaddress',
name='save_info',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='shippingaddress',
name='use_default',
field=models.BooleanField(default=False),
),
]
|
# -*- coding: utf-8 -*-
from typing import Callable, Awaitable, List
from mypy_extensions import Arg
from aiohttp.web import Request
from mtpylon.income_message import IncomeMessage
from mtpylon.middlewares import MiddleWareFunc
from mtpylon.message_sender import MessageSender
HandleStrategy = Callable[
[
Arg(List[MiddleWareFunc], 'middlewares'), # noqa: F821
Arg(MessageSender, 'sender'), # noqa: F821
Arg(Request, 'request'), # noqa: F821
Arg(IncomeMessage, 'message'), # noqa: F821
],
Awaitable[None]
]
|
from django.conf import settings
from django.utils.safestring import mark_safe
from django.utils.text import slugify
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy as _l
from qatrack.faults import models
from qatrack.qatrack_core.dates import format_datetime
from qatrack.reports import filters
from qatrack.reports.reports import BaseReport
from qatrack.units import models as umodels
def format_fault_types(fault):
return ', '.join(ft.code for ft in fault.fault_types.all())
class FaultReportMixin:
category = _l("Faults")
def get_queryset(self):
return models.Fault.objects.select_related(
"unit",
"unit__site",
"created_by",
"modified_by",
"modality",
).prefetch_related(
"fault_types",
"related_service_events",
"faultreviewinstance_set",
"faultreviewinstance_set__reviewed_by",
"faultreviewinstance_set__fault_review_group",
)
def filter_form_valid(self, filter_form):
nfaults = self.filter_set.qs.count()
if nfaults > self.MAX_FAULTS:
filter_form.add_error(
"__all__", "This report can only be generated with %d or fewer Faults"
" Your filters are including %d. Please reduce the "
"number of Faults." % (self.MAX_FAULTS, nfaults)
)
return filter_form.is_valid()
def get_unit_details(self, val):
units = umodels.Unit.objects.filter(pk__in=val).select_related("site")
return (
"Unit(s)", ', '.join("%s%s" % ("%s - " % unit.site.name if unit.site else "", unit.name) for unit in units)
)
def get_unit__site_details(self, sites):
return ("Site(s)", (', '.join(s.name if s != 'null' else _("Other") for s in sites)).strip(", "))
def get_review_status_details(self, val):
return (_("Review Status"), dict(self.filter_set.form.fields['review_status'].choices)[val] if val else "")
def get_faults_for_site(self, qs, site):
"""Get Test List Instances from filtered queryset for input site"""
faults = qs.filter(unit__site=site)
faults = faults.order_by(
"unit__%s" % settings.ORDER_UNITS_BY,
"occurred",
).select_related(
"created_by",
"modified_by",
).prefetch_related(
"fault_types",
"related_service_events",
"faultreviewinstance_set",
"faultreviewinstance_set__reviewed_by",
"faultreviewinstance_set__fault_review_group",
)
return faults
class FaultSummaryReport(FaultReportMixin, BaseReport):
report_type = "fault_summary"
name = _l("Fault Summary")
filter_class = filters.FaultSummaryFilter
description = mark_safe(_l(
"This report includes a summary of all faults from a given time period for selected units"
))
template = "reports/faults/summary.html"
MAX_FAULTS = 3000
def get_filename(self, report_format):
return "%s.%s" % (slugify(self.name or "faults-summary"), report_format)
def get_context(self):
context = super().get_context()
# since we're grouping by site, we need to handle sites separately
form = self.get_filter_form()
reviewed = form.cleaned_data.get("review_status")
qs = self.filter_set.qs
if reviewed == "unreviewed":
qs = qs.filter(faultreviewinstance=None)
elif reviewed == "reviewed":
qs = qs.exclude(faultreviewinstance=None)
sites = qs.order_by(
"unit__site__name",
).values_list(
"unit__site",
flat=True,
).distinct()
sites_data = []
for site in sites:
if site: # site can be None here since not all units may have a site
site = umodels.Site.objects.get(pk=site)
sites_data.append((site.name if site else "", []))
for fault in self.get_faults_for_site(qs, site):
sites_data[-1][-1].append({
'id': fault.id,
'fault_type': format_fault_types(fault),
'unit_name': fault.unit.name,
'modality': fault.modality.name if fault.modality else _("Not specified"),
'occurred': format_datetime(fault.occurred),
'link': self.make_url(fault.get_absolute_url(), plain=True),
})
context['sites_data'] = sites_data
return context
def to_table(self, context):
rows = super().to_table(context)
rows.append([])
header = [
_("Fault ID"),
_("Occurred"),
_("Site"),
_("Unit"),
_("Fault Type"),
_("Modality"),
_("Link"),
]
rows.append(header)
for site, faults in context['sites_data']:
for fault in faults:
row = [
fault['id'],
fault['occurred'],
site,
fault['unit_name'],
fault['fault_type'],
fault['modality'],
fault['link'],
]
rows.append(row)
return rows
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 10 17:40:37 2017
@author: karja
"""
from math import sqrt
# Euclid's algorithm for the GCD, the standard formula found on Wikipedia.
def gcd(a,b):
while b != 0:
a, b = b, a % b
return a
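# For example, gcd(12, 18) returns 6.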
# Class that builds a dict of Pythagorean triples, i.e. (a, b, c) with
# integer values, up to a given limit.
class Pytha_tri:
def __init__(self,num1):
self.num1 = num1
def pair_em(self):
# This taken from the book
# Learning Python from PACKT by (thanks to) Fabrizio Romano.
legs = [(a,b,sqrt(a**2 + b**2)) for a in range(1,self.num1) for b in range(a,self.num1)]
legs = [(a,b,int(c)) for a, b, c in legs if c.is_integer()]
for m in range(1,int(self.num1**.5)+1):
for n in range(1,m):
# Alternate additional condition (and (m-n)%2 == 1:)
if gcd(m,n) == 1:
                    # Euclid's formula for generating a Pythagorean triple:
                    # (a, b, c) => a = m**2 - n**2, b = 2*m*n, c = m**2 + n**2
                    # where (m, n) have a GCD of 1
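                    # Worked example: m=2, n=1 gives a=3, b=4, c=5, and indeed 3**2 + 4**2 == 5**2.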
if m > n:
a = m**2 - n**2
else:
a = n**2 - m**2
b = 2*m*n
c = m**2 + n**2
legs.append((a,b,c))
        # I made this class to complement the Pythagorean triples approach
        # taught in the book.
check = [((a**2+b**2)==c**2) for a, b, c in legs ]
check = {a:b for a,b in zip(legs,check)}
        return check
# Globals that are accessed within the functions below.
pairs = {}
pytha_num = None
# Function that passes the max value to Pytha_tri and assigns the global variables.
def call_p(nums):
global pairs,pytha_num
pairs = Pytha_tri(nums).pair_em()
pytha_num = Pytha_tri(nums)
import unicodedata as uni
# Function that tests your knowledge of the Pythagorean formula in a fun way:
# the result is True for a valid integer triple and False otherwise.
def gpyt():
# Translating and printing the a² + b² = c²
sup2 = str.maketrans('2',uni.normalize('NFC',u'\u00B2'))
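    # e.g. 'a2 + b2 = c2'.translate(sup2) renders as 'a² + b² = c²'.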
print('Pythagorean triple with total records of', len(pairs),'\nFormula of a2 + b2 = c2'.translate(sup2))
    # Try out a Pythagorean triple.
while True:
try:
triple = tuple([int(b) for b in input('input 3 numbers e.g. 1 2 3 : ').split()])
        except ValueError:
            print('Please input numbers, e.g. 1 2 3')
continue
else:
try:
a, b, c = triple
            except ValueError:
                print('Please input exactly 3 numbers, e.g. 1 2 3')
continue
else:
                # Check whether the answer is True or False
print (triple, triple[0]**2 + triple[1]**2, '=', triple[2]**2, pairs.get(triple,False))
                # If it is true then the record is deleted
if pairs.get(triple,False) == True:
pairs.pop(triple,True)
break
else:
break
# The game begins by asking how many records should be generated,
# then you try to guess them all.
def run_gpyt():
while True:
# Create records
        in_num = input('Create a list of Pythagorean triples, give a number' +
                       '\ngreater than 5 and less than 1000: ')
try:
int(in_num)
        except ValueError:
            print('Please key in an integer number')
continue
else:
if int(in_num) < 5:
                print('Number is too small')
continue
elif int(in_num) > 1000:
                print('Number is too big')
continue
else:
call_p(int(in_num))
break
# Start playing
gpyt()
while True:
try:
# To continue or end
quest = str(input('Continue? [Y]es or Enter: ').upper())
except:
print('Please key in "Y" to continue or Enter to end!')
continue
else:
if quest == 'Y':
# Checking the records
if len(pairs) > 0:
gpyt()
continue
else:
                    # If the record count is 0 then the game is finished.
                    print('Congratulations, you have finished the game.')
input()
break
else:
if len(pairs) == 0:
                    print('Congratulations, you have finished the game.')
input()
break
else:
break
# Start the game automatically when the module is loaded.
run_gpyt()
|
from social_core.backends.mendeley import MendeleyMixin, MendeleyOAuth, \
MendeleyOAuth2
|
import brownie
from brownie import Wei, accounts, Contract, config
import math
def test_cloning(
gov,
token,
vault,
strategist,
whale,
strategy,
keeper,
rewards,
chain,
StrategyFraxUniswapUSDC,
guardian,
amount,
tests_using_tenderly,
):
# tenderly doesn't work for "with brownie.reverts"
if tests_using_tenderly:
## clone our strategy
tx = strategy.cloneFraxUni(
vault,
strategist,
rewards,
keeper,
{"from": gov},
)
newStrategy = StrategyFraxUniswapUSDC.at(tx.return_value)
else:
# Shouldn't be able to call initialize again
with brownie.reverts():
strategy.initialize(
vault,
strategist,
rewards,
keeper,
{"from": gov},
)
## clone our strategy
tx = strategy.cloneFraxUni(
vault,
strategist,
rewards,
keeper,
{"from": gov},
)
newStrategy = StrategyFraxUniswapUSDC.at(tx.return_value)
# Shouldn't be able to call initialize again
with brownie.reverts():
newStrategy.initialize(
vault,
strategist,
rewards,
keeper,
{"from": gov},
)
## shouldn't be able to clone a clone
with brownie.reverts():
newStrategy.cloneFraxUni(
vault,
strategist,
rewards,
keeper,
{"from": gov},
)
# revoke and send all funds back to vault
vault.revokeStrategy(strategy, {"from": gov})
strategy.harvest({"from": gov})
# attach our new strategy and approve it on the proxy
vault.addStrategy(newStrategy, 10_000, 0, 2**256 - 1, 1_000, {"from": gov})
# setup our NFT on our new strategy, IMPORTANT***
token.transfer(newStrategy, 100e6, {"from": whale})
newStrategy.mintNFT({"from": gov})
assert vault.withdrawalQueue(1) == newStrategy
assert vault.strategies(newStrategy)["debtRatio"] == 10_000
assert vault.withdrawalQueue(0) == strategy
assert vault.strategies(strategy)["debtRatio"] == 0
## deposit to the vault after approving; this is basically just our simple_harvest test
before_pps = vault.pricePerShare()
startingWhale = token.balanceOf(whale)
token.approve(vault, 2**256 - 1, {"from": whale})
vault.deposit(amount, {"from": whale})
# harvest, store asset amount
tx = newStrategy.harvest({"from": gov})
old_assets_dai = vault.totalAssets()
assert old_assets_dai > 0
assert newStrategy.estimatedTotalAssets() > 0
print("\nStarting Assets: ", old_assets_dai / (10 ** token.decimals()))
# simulate one day of earnings
chain.sleep(86400)
chain.mine(1)
# harvest after a day, store new asset amount
newStrategy.harvest({"from": gov})
new_assets_dai = vault.totalAssets()
    # we can't use strategy.estimatedTotalAssets() because the profits are sent to the vault
# if we're not making profit, check that we didn't lose too much on conversions
assert new_assets_dai >= old_assets_dai
print("\nAssets after 2 days: ", new_assets_dai / (10 ** token.decimals()))
# Display estimated APR
print(
"\nEstimated APR: ",
"{:.2%}".format(
((new_assets_dai - old_assets_dai) * (365))
/ (newStrategy.estimatedTotalAssets())
),
)
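    # Illustrative arithmetic: if assets grow from 100.0 to 100.1 over one day and
    # estimatedTotalAssets() is ~100.1, this formula gives 0.1 * 365 / 100.1, i.e. roughly 36.5% APR.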
# simulate a day of waiting for share price to bump back up
chain.sleep(86400)
chain.mine(1)
# withdraw and check on our losses (due to slippage on big swaps in/out)
tx = vault.withdraw(amount, whale, 10_000, {"from": whale})
loss = startingWhale - token.balanceOf(whale)
print("Losses from withdrawal slippage:", loss / (10 ** token.decimals()))
assert vault.pricePerShare() > 10 ** token.decimals()
print("Vault share price", vault.pricePerShare() / (10 ** token.decimals()))
|
###############################################################################
# Copyright (c) 2007-2018, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""Tests for polynomial fitters.
:author: Ludwig Schwardt
:license: Modified BSD
"""
from __future__ import division
from builtins import range
import numpy as np
from numpy.testing import (TestCase, assert_equal, assert_almost_equal,
run_module_suite)
from scikits.fitting import (Polynomial1DFit, Polynomial2DFit,
PiecewisePolynomial1DFit, NotFittedError)
from scikits.fitting.poly import _stepwise_interp, _linear_interp
class TestPolynomial1DFit(TestCase):
"""Fit a 1-D polynomial to data from a known polynomial, and compare."""
def setUp(self):
self.poly = np.array([1.0, -2.0, 1.0])
# Zero mean case
self.x = np.arange(-3.0, 4.0, 1.0)
self.y = np.polyval(self.poly, self.x)
# Non-zero mean case
self.x2 = np.arange(0., 10.0, 1.0)
self.y2 = np.polyval(self.poly, self.x2)
self.randx = np.random.randn(100)
self.randp = np.random.randn(4)
def test_fit_eval(self):
"""Polynomial1DFit: Basic function fitting + evaluation (zero-mean)."""
interp = Polynomial1DFit(2)
self.assertRaises(NotFittedError, interp, self.x)
interp.fit(self.x, self.y)
y = interp(self.x)
self.assertAlmostEqual(interp._mean, 0.0, places=10)
assert_almost_equal(interp.poly, self.poly, decimal=10)
assert_almost_equal(y, self.y, decimal=10)
def test_fit_eval2(self):
"""Polynomial1DFit: Basic fitting and evaluation (non-zero-mean)."""
interp = Polynomial1DFit(2)
interp.fit(self.x2, self.y2)
y2 = interp(self.x2)
assert_almost_equal(interp.poly, self.poly, decimal=10)
assert_almost_equal(y2, self.y2, decimal=10)
def test_cov_params(self):
"""Polynomial1DFit: Compare parameter stats to covariance matrix."""
interp = Polynomial1DFit(2)
std_y = 1.3
M = 200
poly_set = np.zeros((len(self.poly), M))
for n in range(M):
yn = self.y2 + std_y * np.random.randn(len(self.y2))
interp.fit(self.x2, yn, std_y)
poly_set[:, n] = interp.poly
mean_poly = poly_set.mean(axis=1)
norm_poly = poly_set - mean_poly[:, np.newaxis]
cov_poly = np.dot(norm_poly, norm_poly.T) / M
std_poly = np.sqrt(np.diag(interp.cov_poly))
self.assertTrue(
(np.abs(mean_poly - self.poly) / std_poly < 0.5).all(),
"Sample mean coefficient vector differs too much from true value")
self.assertTrue(
(np.abs(cov_poly - interp.cov_poly) /
np.abs(interp.cov_poly) < 0.5).all(),
"Sample coefficient covariance matrix differs too much")
def test_vs_numpy(self):
"""Polynomial1DFit: Compare fitter to np.polyfit and np.polyval."""
x, p = self.randx, self.randp
y = p[0] * (x ** 3) + p[1] * (x ** 2) + p[2] * x + p[3]
interp = Polynomial1DFit(3)
interp.fit(x, y)
interp_y = interp(x)
np_poly = np.polyfit(x, y, 3)
np_y = np.polyval(np_poly, x)
self.assertAlmostEqual(interp._mean, self.randx.mean(), places=10)
assert_almost_equal(interp.poly, np_poly, decimal=10)
assert_almost_equal(interp_y, np_y, decimal=10)
# pylint: disable-msg=R0201
def test_reduce_degree(self):
"""Polynomial1DFit: Reduce polynomial degree if too few data points."""
interp = Polynomial1DFit(2)
interp.fit([1.0], [1.0])
assert_almost_equal(interp.poly, [1.0], decimal=10)
class TestPolynomial2DFit(TestCase):
"""Fit a 2-D polynomial to data from a known polynomial, and compare."""
def setUp(self):
self.poly = np.array([0.1, -0.2, 0.3, -0.4, 0.5, -0.6])
self.degrees = (1, 2)
# Zero mean case
x1 = np.arange(-1., 1.1, 0.1)
x2 = np.arange(-1., 1.2, 0.2)
xx1, xx2 = np.meshgrid(x1, x2)
self.x = X = np.vstack((xx1.ravel(), xx2.ravel()))
A = np.c_[X[0] * X[1]**2,
X[0] * X[1],
X[0],
X[1]**2,
X[1],
np.ones(X.shape[1])].T
self.y = np.dot(self.poly, A)
# Non-zero mean (and uneven scale) case
x1 = np.arange(0., 10.)
x2 = np.arange(0., 5.)
xx1, xx2 = np.meshgrid(x1, x2)
self.x2 = X = np.vstack((xx1.ravel(), xx2.ravel()))
A = np.c_[X[0] * X[1]**2,
X[0] * X[1],
X[0],
X[1]**2,
X[1],
np.ones(X.shape[1])].T
self.y2 = np.dot(self.poly, A)
def test_fit_eval(self):
"""Polynomial2DFit: Basic function fitting + evaluation (zero-mean)."""
interp = Polynomial2DFit(self.degrees)
self.assertRaises(NotFittedError, interp, self.x)
interp.fit(self.x, self.y)
y = interp(self.x)
assert_almost_equal(interp._mean, [0.0, 0.0], decimal=10)
assert_almost_equal(interp._scale, [1.0, 1.0], decimal=10)
assert_almost_equal(interp.poly, self.poly, decimal=10)
assert_almost_equal(y, self.y, decimal=10)
def test_fit_eval2(self):
"""Polynomial2DFit: Basic fitting and evaluation (non-zero-mean)."""
interp = Polynomial2DFit(self.degrees)
interp.fit(self.x2, self.y2)
y2 = interp(self.x2)
assert_almost_equal(interp.poly, self.poly, decimal=10)
assert_almost_equal(y2, self.y2, decimal=10)
def test_cov_params(self):
"""Polynomial2DFit: Compare parameter stats to covariance matrix."""
interp = Polynomial2DFit(self.degrees)
std_y = 1.7
M = 200
poly_set = np.zeros((len(self.poly), M))
for n in range(M):
yn = self.y2 + std_y * np.random.randn(len(self.y2))
interp.fit(self.x2, yn, std_y)
poly_set[:, n] = interp.poly
mean_poly = poly_set.mean(axis=1)
norm_poly = poly_set - mean_poly[:, np.newaxis]
cov_poly = np.dot(norm_poly, norm_poly.T) / M
std_poly = np.sqrt(np.diag(interp.cov_poly))
self.assertTrue(
(np.abs(mean_poly - self.poly) / std_poly < 0.5).all(),
"Sample mean coefficient vector differs too much from true value")
self.assertTrue(
(np.abs(cov_poly - interp.cov_poly) /
np.abs(interp.cov_poly) < 1.0).all(),
"Sample coefficient covariance matrix differs too much")
class TestPiecewisePolynomial1DFit(TestCase):
"""Fit a 1-D piecewise polynomial to data from a known polynomial."""
def setUp(self):
self.poly = np.array([1.0, 2.0, 3.0, 4.0])
self.x = np.linspace(-3.0, 2.0, 100)
self.y = np.polyval(self.poly, self.x)
def test_fit_eval(self):
"""PiecewisePolynomial1DFit: Basic function fitting and evaluation."""
interp = PiecewisePolynomial1DFit(max_degree=3)
self.assertRaises(NotFittedError, interp, self.x)
self.assertRaises(ValueError, interp.fit, [0, 0], [1, 2])
interp.fit(self.x[::2], self.y[::2])
y = interp(self.x)
assert_almost_equal(y[5:-5], self.y[5:-5], decimal=10)
# Fit a single data point
interp.fit(self.x[0], self.y[0])
y = interp(self.x)
assert_equal(y, np.tile(self.y[0], self.x.shape))
def test_stepwise_interp(self):
"""PiecewisePolynomial1DFit: Test underlying 0th-order interpolator."""
x = np.sort(np.random.rand(100)) * 4. - 2.5
y = np.random.randn(100)
interp = PiecewisePolynomial1DFit(max_degree=0)
interp.fit(x, y)
assert_almost_equal(interp(x), y, decimal=10)
assert_almost_equal(interp(x + 1e-15), y, decimal=10)
assert_almost_equal(interp(x - 1e-15), y, decimal=10)
assert_almost_equal(_stepwise_interp(x, y, x), y, decimal=10)
assert_almost_equal(interp(self.x), _stepwise_interp(x, y, self.x),
decimal=10)
def test_linear_interp(self):
"""PiecewisePolynomial1DFit: Test underlying 1st-order interpolator."""
x = np.sort(np.random.rand(100)) * 4. - 2.5
y = np.random.randn(100)
interp = PiecewisePolynomial1DFit(max_degree=1)
interp.fit(x, y)
assert_almost_equal(interp(x), y, decimal=10)
assert_almost_equal(_linear_interp(x, y, x), y, decimal=10)
assert_almost_equal(interp(self.x), _linear_interp(x, y, self.x),
decimal=10)
if __name__ == "__main__":
run_module_suite()
|
'''Views and logic for github OAuth.'''
from flask import url_for, request, session, redirect
from flask_oauthlib.client import OAuth
import github
def install_github_oauth(app):
oauth = OAuth(app)
github_auth = oauth.remote_app(
'github',
consumer_key=app.config['GITHUB_CLIENT_ID'],
consumer_secret=app.config['GITHUB_CLIENT_SECRET'],
request_token_params={'scope': 'repo,user'},
base_url='https://api.github.com/',
request_token_url=None,
access_token_method='POST',
access_token_url='https://github.com/login/oauth/access_token',
authorize_url='https://github.com/login/oauth/authorize'
)
@app.route('/login')
def login():
return github_auth.authorize(callback=url_for('authorized', _external=True, next=request.args.get('next') or request.referrer or None))
@app.route('/logout')
def logout():
session.pop('token', None)
return redirect(url_for('index'))
@app.route('/oauth_callback')
@github_auth.authorized_handler
def authorized(resp):
next_url = request.args.get('next') or url_for('index')
if resp is None:
return 'Access denied: reason=%s error=%s' % (
request.args['error_reason'],
request.args['error_description']
)
session['token'] = resp['access_token']
user_info = github.get_current_user_info(session['token'])
if not user_info:
return "Unable to get user info."
session['login'] = user_info['login']
return redirect(next_url)
@github_auth.tokengetter
def get_github_oauth_token():
token = session.get('token')
if token:
return (token, '')
else:
return token
|
#
# WIEN2k.py derived from OpenMX.py
#
# Interface to WIEN2k (http://susi.theochem.tuwien.ac.at/)
#
# Copyright (c) 2001 P. Blaha and K. Schwarz
#
import numpy as np
class Wien2kParser(object):
def __init__(self):
self._prefix = None
self._lattice_vector = None
self._inverse_lattice_vector = None
self._atomic_kinds = None
self._element_list = []
self._element_dict = {}
self._initial_charges = None
self._common_settings = None
self._nat = 0
self._x_fractional = None
self._counter = 1
self._kmesh = None
self._nzerofills = 0
self._disp_conversion_factor = 1.0
self._energy_conversion_factor = 1.0
self._force_conversion_factor = 1.0
self._initial_structure_loaded = False
self._print_disp = True
self._print_force = True
self._print_energy = False
self._print_born = False
self._BOHR_TO_ANGSTROM = 0.5291772108
self._RYDBERG_TO_EV = 13.60569253
        self._kd = []
        self._zkd = []
def load_initial_structure(self, file_original):
#search_target = [
# "atoms.number",
# "atoms.speciesandcoordinates.unit",
# "<atoms.speciesandcoordinates",
# "atoms.speciesandcoordinates>",
# "atoms.unitvectors.unit",
# "<atoms.unitvectors",
# "atoms.unitvectors>",
# "scf.kgrid"
#]
search_target = [
"P",
"MODE OF CALC=RELA unit=",
"ATOM",
]
nat = None
lavec = []
common_settings = []
kd = []
zkd = []
x_frac0 = []
initial_charges = []
kgrid = []
zline = 4
# read original struct file and pull out some information
with open(file_original, 'r') as f:
lines = f.read().splitlines()
for i, line in enumerate(lines):
#if search_target[0] in line.strip().split()[0]:
if i == 1:
nat = int(line.strip().split()[2])
#elif search_target[1] in line:
elif i == 2:
lavec_unit = line.strip().split("=")[2]
coord_unit = "au"
elif i == 3:
lavec.append([float(line.strip().split()[0]), float(0), float(0)])
lavec.append([float(0), float(line.strip().split()[1]), float(0)])
lavec.append([float(0), float(0), float(line.strip().split()[2])])
elif search_target[2] in line:
line_split = line.strip().split()
line_split[2] = line_split[2].strip("X=")
line_split[3] = line_split[3].strip("Y=")
line_split[4] = line_split[4].strip("Z=")
#kd.append(line_split[1])
#x_frac0.append([float(t) for t in line_split[2:5]])
x_frac0.append([float(t) for t in line_split[2:5]])
#initial_charges.append([float(t) for t in line_split[5:7]])
initial_charges.append([float(0),float(0)])
zline = i + 2
elif i == zline:
#kd.append(line.strip().split()[0])
kd.append(line.strip().split()[0])
zkd.append(line.strip().split()[7])
#elif search_target[1] in line.lower():
# coord_unit = line.strip().split()[1].lower()
#elif search_target[2] in line.lower():
# ipos_coord = i + 1
#elif search_target[3] in line.lower():
# fpos_coord = i
#elif search_target[4] in line.lower():
# lavec_unit = line.strip().split()[1].lower()
#elif search_target[5] in line.lower():
# ipos_lavec = i + 1
#elif search_target[6] in line.lower():
# fpos_lavec = i
#elif search_target[7] in line.lower():
# kgrid.extend([int(t) for t in line.strip().split()[1:]])
if nat is None:
raise RuntimeError("Failed to extract the LATTICE,NONEQUIV.ATOMS value from the file.")
#if nat != (fpos_coord - ipos_coord):
# raise RuntimeError("The number of entries in Atoms.SpeciesAndCoordinates does not match"
# "with the Atoms.Number value.")
#for line in lines[ipos_coord:fpos_coord]:
# line_split = line.strip().split()
# kd.append(line_split[1])
# x_frac0.append([float(t) for t in line_split[2:5]])
# initial_charges.append([float(t) for t in line_split[5:7]])
#for line in lines[ipos_lavec:fpos_lavec]:
# lavec.append([float(t) for t in line.strip().split()])
#if ipos_lavec > ipos_coord:
# common_settings.extend(lines[:ipos_coord-1])
# common_settings.extend(lines[fpos_coord+1:ipos_lavec-1])
# common_settings.extend(lines[fpos_lavec+1:])
#else:
# common_settings.extend(lines[:ipos_lavec-1])
# common_settings.extend(lines[fpos_lavec+1:ipos_coord-1])
# common_settings.extend(lines[fpos_coord+1:])
common_settings.extend(lines[:5])
x_frac0 = np.array(x_frac0)
lavec = np.array(lavec).transpose()
lavec_inv = np.linalg.inv(lavec)
#initial_charges = np.array(initial_charges)
#kgrid = np.array(kgrid)
# convert the unit of lattice vectors to angstrom if necessary
if lavec_unit == "au":
lavec *= self._BOHR_TO_ANGSTROM
lavec_inv = np.linalg.inv(lavec)
# convert to frac coordinate
if coord_unit == "ang":
for i in range(nat):
#x_frac0[i] = np.dot(x_frac0[i], lavec_inv)
pass
elif coord_unit == "au":
for i in range(nat):
#x_frac0[i] = np.dot(x_frac0[i], lavec_inv) * self._BOHR_TO_ANGSTROM
pass
kd_uniq = []
for entry in kd:
if entry not in kd_uniq:
kd_uniq.append(entry)
self._element_list = kd_uniq
counter = 0
for entry in kd_uniq:
self._element_dict[entry] = counter
counter += 1
self._lattice_vector = lavec
self._inverse_lattice_vector = lavec_inv
self._nat = nat
self._x_fractional = x_frac0
self._atomic_kinds = [self._element_dict[elem] for elem in kd]
self._initial_charges = initial_charges
self._kmesh = kgrid
self._common_settings = common_settings
self._initial_structure_loaded = True
self._kd = kd
self._zkd = zkd
def generate_structures(self, prefix, header_list, disp_list):
self._set_number_of_zerofill(len(disp_list))
self._prefix = prefix
self._counter = 1
if len(self._initial_charges) < self._nat:
raise RuntimeError("The length of initial_charges is not nat. "
"It should be updated as well.")
for header, disp in zip(header_list, disp_list):
self._generate_input(header, disp)
def parse(self, initial_dat, out_files, out_file_offset, str_unit,
output_flags, filter_emin=None, filter_emax=None):
if not self._initial_structure_loaded:
self.load_initial_structure(initial_dat)
self._set_unit_conversion_factor(str_unit)
self._set_output_flags(output_flags)
if self._print_disp or self._print_force:
self._print_displacements_and_forces(out_files,
out_file_offset,
filter_emin,
filter_emax)
elif self._print_energy:
self._print_energies(out_files, out_file_offset)
def _generate_input(self, header, disp):
filename = self._prefix + str(self._counter).zfill(self._nzerofills) + ".struct"
with open(filename, 'w') as f:
for line in self._common_settings:
if "blebleble" in line:
f.write("blebleble\n")
f.write("P LATTICE,NONEQUIV.ATOMS %3d 1 P1\n" % self._nat)
elif "MODE" in line.strip().split()[0]:
f.write("MODE OF CALC=RELA unit=bohr\n")
f.write(" %9.6f" % (self._lattice_vector[0, 0]))
f.write(" %9.6f" % (self._lattice_vector[1, 1]))
f.write(" %9.6f" % (self._lattice_vector[2, 2]))
f.write(" 90.000000 90.000000 90.000000\n")
#elif "scf.kgrid" in line.lower():
# f.write("scf.Kgrid %d %d %d\n" % (self._kmesh[0], self._kmesh[1], self._kmesh[2]))
elif "ATOM" in line.strip().split()[0]:
for i in range(self._nat):
#f.write("%4d %3s" % (i + 1, self._element_list[self.kd[i]]))
#x_cartesian_disp = np.dot(self._x_fractional[i, :] + disp[i, :],
# self._lattice_vector.transpose())
x_cartesian_disp = self._x_fractional[i, :] + disp[i, :]
#for j in range(3):
# f.write("%21.16f" % x_cartesian_disp[j])
f.write("ATOM %3d:" % -(i + 1))
f.write(" X=%10.8f" % x_cartesian_disp[0])
f.write(" Y=%10.8f" % x_cartesian_disp[1])
f.write(" Z=%10.8f" % x_cartesian_disp[2])
f.write("\n")
f.write(" MULT= 1 ISPLIT=15\n")
#f.write("%2s" % (self._element_list[self._atomic_kinds[i]]))
f.write("%2s" % (self._kd[i]))
f.write(" NPT= 781 R0=.000100000 RMT= 2.00000 Z: %9.5f\n" %(float(self._zkd[i])))
f.write("LOCAL ROT MATRIX: 1.0000000 0.0000000 0.0000000\n")
f.write(" 0.0000000 1.0000000 0.0000000\n")
f.write(" 0.0000000 0.0000000 1.0000000\n")
#for j in range(2):
# f.write("%6.2f" % (self._initial_charges[i, j]))
f.write(" 0 NUMBER OF SYMMETRY OPERATIONS\n")
#elif "atoms.unitvectors.unit" in line.lower():
# f.write("Atoms.UnitVectors.Unit Ang\n")
# f.write("<Atoms.UnitVectors\n")
# for i in range(3):
# for j in range(3):
# f.write("%21.16f" % (self._lattice_vector[j, i]))
# f.write('\n')
# f.write("Atoms.UnitVectors>\n")
#else:
# f.write("%s\n" % line)
self._counter += 1
def _print_displacements_and_forces(self, out_files,
file_offset, filter_emin, filter_emax):
# vec_refold = np.vectorize(refold)
lavec_transpose = self._lattice_vector.transpose()
x0 = np.round(self._x_fractional, 8)
if file_offset is None:
disp_offset = np.zeros((self._nat, 3))
force_offset = np.zeros((self._nat, 3))
epot_offset = 0.0
else:
x0_offset, force_offset = self._get_coordinate_and_force_outfile(file_offset)
try:
x0_offset = np.reshape(x0_offset, (self._nat, 3))
            except ValueError:
                raise RuntimeError("File %s contains too many position entries" % file_offset)
disp_offset = x0_offset - x0
try:
force_offset = np.reshape(force_offset, (self._nat, 3))
            except ValueError:
                raise RuntimeError("File %s contains too many force entries" % file_offset)
epot_offset = self._get_energies_outfile(file_offset)
for search_target in out_files:
x, force = self._get_coordinate_and_force_outfile(search_target)
epot = self._get_energies_outfile(search_target)
epot -= epot_offset
epot *= self._RYDBERG_TO_EV * 2.0
ndata = 1
for idata in range(ndata):
if filter_emin is not None:
if filter_emin > epot[idata]:
continue
if filter_emax is not None:
if filter_emax < epot[idata]:
continue
if self._print_disp:
disp = x - x0 - disp_offset
disp[disp > 0.96] -= 1.0
for i in range(self._nat):
disp[i] = np.dot(disp[i], lavec_transpose)
disp[np.absolute(disp) < 1e-5] = 0.0
disp *= self._disp_conversion_factor
if self._print_force:
f = force - force_offset
f *= self._force_conversion_factor
print("# Filename: %s, Snapshot: %d, E_pot (eV): %s" %
(search_target, idata + 1, epot[idata]))
if self._print_disp and self._print_force:
for i in range(self._nat):
print("%15.7F %15.7F %15.7F %20.8E %15.8E %15.8E" % (disp[i, 0],
disp[i, 1],
disp[i, 2],
f[i, 0],
f[i, 1],
f[i, 2]))
elif self._print_disp:
for i in range(self._nat):
print("%15.7F %15.7F %15.7F" % (disp[i, 0],
disp[i, 1],
disp[i, 2]))
elif self._print_force:
for i in range(self._nat):
print("%15.8E %15.8E %15.8E" % (f[i, 0],
f[i, 1],
f[i, 2]))
def _print_energies(self, out_files, file_offset):
if file_offset is None:
etot_offset = 0.0
else:
etot_offset = self._get_energies_outfile(file_offset)
print("# Etot")
for search_target in out_files:
etot = self._get_energies_outfile(search_target)
for idata in range(len(etot)):
val = etot[idata] - etot_offset
val *= self._energy_conversion_factor
print("%19.11E" % val)
def _set_number_of_zerofill(self, npattern):
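        # Count the decimal digits of npattern; e.g. npattern=100 gives nzero=3,
        # so the generated structure files are numbered 001, 002, ..., 100.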
nzero = 1
while True:
npattern //= 10
if npattern == 0:
break
nzero += 1
self._nzerofills = nzero
def _set_unit_conversion_factor(self, str_unit):
if str_unit == "ev":
disp_conv_factor = 1.0
energy_conv_factor = 2.0 * self._RYDBERG_TO_EV
force_conv_factor = energy_conv_factor / self._BOHR_TO_ANGSTROM
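            # Numerically: ~27.2114 eV per Hartree and ~51.4221 (eV/Angstrom) per
            # (Hartree/Bohr), using the constants defined in __init__.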
elif str_unit == "rydberg":
disp_conv_factor = 1.0 / self._BOHR_TO_ANGSTROM
energy_conv_factor = 2.0
force_conv_factor = 2.0
elif str_unit == "hartree":
disp_conv_factor = 1.0 / self._BOHR_TO_ANGSTROM
energy_conv_factor = 1.0
force_conv_factor = 1.0
else:
raise RuntimeError("This cannot happen")
self._disp_conversion_factor = disp_conv_factor
self._force_conversion_factor = force_conv_factor
self._energy_conversion_factor = energy_conv_factor
def _set_output_flags(self, output_flags):
self._print_disp, self._print_force, \
self._print_energy, self._print_born = output_flags
@property
def nat(self):
return self._nat
@nat.setter
def nat(self, nat):
self._nat = nat
@property
def lattice_vector(self):
return self._lattice_vector
@property
def inverse_lattice_vector(self):
return self._inverse_lattice_vector
@lattice_vector.setter
def lattice_vector(self, lattice_vector):
self._lattice_vector = lattice_vector
self._inverse_lattice_vector = np.linalg.inv(lattice_vector)
@property
def kmesh(self):
return self._kmesh
@kmesh.setter
def kmesh(self, kmesh):
self._kmesh = kmesh
@property
def atomic_kinds(self):
return self._atomic_kinds
@property
def atomic_kinds_in_str(self):
return [self._element_list[i] for i in self._atomic_kinds]
@atomic_kinds.setter
def atomic_kinds(self, kd):
self._atomic_kinds = [self._element_dict[elem] for elem in kd]
@property
def x_fractional(self):
return self._x_fractional
@x_fractional.setter
def x_fractional(self, x_fractional):
self._x_fractional = x_fractional
@property
def initial_charges(self):
return self._initial_charges
@initial_charges.setter
def initial_charges(self, initial_charges):
self._initial_charges = initial_charges
def _get_coordinate_and_force_outfile(self, out_file):
"""
Return fractional coordinates and atomic forces in units of Hartree/Bohr
"""
search_flag = "<coordinates.forces"
f = open(out_file, 'r')
line = f.readline()
found_tag = False
x = np.zeros((self._nat, 3))
force = np.zeros((self._nat, 3))
while line:
if search_flag in line:
found_tag = True
f.readline() # skip one line
for i in range(self._nat):
line = f.readline()
x[i][:] = [float(t) for t in line.rstrip().split()[2:5]]
force[i][:] = [float(t) for t in line.rstrip().split()[5:]]
break
line = f.readline()
if not found_tag:
raise RuntimeError("%s tag not found in %s" % (search_flag, out_file))
x = np.array(x)
for i in range(self._nat):
x[i, :] = np.dot(x[i, :], self._inverse_lattice_vector.transpose())
return x, np.array(force)
@staticmethod
def _get_energies_outfile(out_file):
target = "Utot."
etot = []
f = open(out_file, 'r')
for line in f:
ss = line.strip().split()
if len(ss) > 0 and ss[0] == target:
etot.extend([float(ss[1])])
break
else:
continue
if len(etot) == 0:
raise RuntimeError("Total energy not found.")
        return np.array(etot, dtype=float)
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..core.parameterization import Parameterized, Param
from paramz.transformations import Logexp
import sys
class WarpingFunction(Parameterized):
"""
abstract function for warping
z = f(y)
"""
def __init__(self, name):
super(WarpingFunction, self).__init__(name=name)
self.rate = 0.1
def f(self, y, psi):
"""function transformation
y is a list of values (GP training data) of shape [N, 1]
"""
raise NotImplementedError
def fgrad_y(self, y, psi):
"""gradient of f w.r.t to y"""
raise NotImplementedError
def fgrad_y_psi(self, y, psi):
"""gradient of f w.r.t to y"""
raise NotImplementedError
def f_inv(self, z, max_iterations=250, y=None):
"""
Calculate the numerical inverse of f. This should be
overwritten for specific warping functions where the
inverse can be found in closed form.
:param max_iterations: maximum number of N.R. iterations
"""
z = z.copy()
y = np.ones_like(z)
it = 0
update = np.inf
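        # Damped Newton-Raphson iteration: step y by -rate * (f(y) - z) / f'(y)
        # until the accumulated update is negligible or max_iterations is reached.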
while np.abs(update).sum() > 1e-10 and it < max_iterations:
fy = self.f(y)
fgrady = self.fgrad_y(y)
update = (fy - z) / fgrady
y -= self.rate * update
it += 1
#if it == max_iterations:
# print("WARNING!!! Maximum number of iterations reached in f_inv ")
# print("Sum of roots: %.4f" % np.sum(fy - z))
return y
def plot(self, xmin, xmax):
y = np.arange(xmin, xmax, 0.01)
f_y = self.f(y)
from matplotlib import pyplot as plt
plt.figure()
plt.plot(y, f_y)
plt.xlabel('y')
plt.ylabel('f(y)')
plt.title('warping function')
plt.show()
class TanhFunction(WarpingFunction):
"""
This is the function proposed in Snelson et al.:
A sum of tanh functions with linear trends outside
the range. Notice the term 'd', which scales the
linear trend.
"""
def __init__(self, n_terms=3, initial_y=None):
"""
n_terms specifies the number of tanh terms to be used
"""
self.n_terms = n_terms
self.num_parameters = 3 * self.n_terms + 1
self.psi = np.ones((self.n_terms, 3))
super(TanhFunction, self).__init__(name='warp_tanh')
self.psi = Param('psi', self.psi)
self.psi[:, :2].constrain_positive()
self.d = Param('%s' % ('d'), 1.0, Logexp())
self.link_parameter(self.psi)
self.link_parameter(self.d)
self.initial_y = initial_y
def f(self, y):
"""
Transform y with f using parameter vector psi
psi = [[a,b,c]]
:math:`f = (y * d) + \\sum_{terms} a * tanh(b *(y + c))`
"""
d = self.d
mpsi = self.psi
z = d * y.copy()
for i in range(len(mpsi)):
a, b, c = mpsi[i]
z += a * np.tanh(b * (y + c))
return z
def fgrad_y(self, y, return_precalc=False):
"""
        gradient of f w.r.t. y ([N x 1])
:returns: Nx1 vector of derivatives, unless return_precalc is true,
then it also returns the precomputed stuff
"""
d = self.d
mpsi = self.psi
# vectorized version
S = (mpsi[:,1] * (y[:,:,None] + mpsi[:,2])).T
R = np.tanh(S)
D = 1 - (R ** 2)
GRAD = (d + (mpsi[:,0:1][:,:,None] * mpsi[:,1:2][:,:,None] * D).sum(axis=0)).T
if return_precalc:
return GRAD, S, R, D
return GRAD
def fgrad_y_psi(self, y, return_covar_chain=False):
"""
        gradient of f w.r.t. y and psi
:returns: NxIx4 tensor of partial derivatives
"""
mpsi = self.psi
w, s, r, d = self.fgrad_y(y, return_precalc=True)
gradients = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4))
for i in range(len(mpsi)):
a,b,c = mpsi[i]
gradients[:, :, i, 0] = (b * (1.0/np.cosh(s[i])) ** 2).T
gradients[:, :, i, 1] = a * (d[i] - 2.0 * s[i] * r[i] * (1.0/np.cosh(s[i])) ** 2).T
gradients[:, :, i, 2] = (-2.0 * a * (b ** 2) * r[i] * ((1.0 / np.cosh(s[i])) ** 2)).T
gradients[:, :, 0, 3] = 1.0
if return_covar_chain:
covar_grad_chain = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4))
for i in range(len(mpsi)):
a,b,c = mpsi[i]
covar_grad_chain[:, :, i, 0] = (r[i]).T
covar_grad_chain[:, :, i, 1] = (a * (y + c) * ((1.0 / np.cosh(s[i])) ** 2).T)
covar_grad_chain[:, :, i, 2] = a * b * ((1.0 / np.cosh(s[i])) ** 2).T
covar_grad_chain[:, :, 0, 3] = y
return gradients, covar_grad_chain
return gradients
def update_grads(self, Y_untransformed, Kiy):
grad_y = self.fgrad_y(Y_untransformed)
grad_y_psi, grad_psi = self.fgrad_y_psi(Y_untransformed,
return_covar_chain=True)
djac_dpsi = ((1.0 / grad_y[:, :, None, None]) * grad_y_psi).sum(axis=0).sum(axis=0)
dquad_dpsi = (Kiy[:, None, None, None] * grad_psi).sum(axis=0).sum(axis=0)
warping_grads = -dquad_dpsi + djac_dpsi
self.psi.gradient[:] = warping_grads[:, :-1]
self.d.gradient[:] = warping_grads[0, -1]
class LogFunction(WarpingFunction):
"""
Easy wrapper for applying a fixed log warping function to
positive-only values.
The closed_inverse flag should only be set to False for
debugging and testing purposes.
"""
def __init__(self, closed_inverse=True):
self.num_parameters = 0
super(LogFunction, self).__init__(name='log')
if closed_inverse:
self.f_inv = self._f_inv
def f(self, y):
return np.log(y)
def fgrad_y(self, y):
return 1. / y
def update_grads(self, Y_untransformed, Kiy):
pass
def fgrad_y_psi(self, y, return_covar_chain=False):
if return_covar_chain:
return 0, 0
return 0
def _f_inv(self, z, y=None):
return np.exp(z)
class IdentityFunction(WarpingFunction):
"""
Identity warping function. This is for testing and sanity check purposes
and should not be used in practice.
The closed_inverse flag should only be set to False for
debugging and testing purposes.
"""
def __init__(self, closed_inverse=True):
self.num_parameters = 0
super(IdentityFunction, self).__init__(name='identity')
if closed_inverse:
self.f_inv = self._f_inv
def f(self, y):
return y
def fgrad_y(self, y):
return np.ones(y.shape)
def update_grads(self, Y_untransformed, Kiy):
pass
def fgrad_y_psi(self, y, return_covar_chain=False):
if return_covar_chain:
return 0, 0
return 0
def _f_inv(self, z, y=None):
return z
|
import numpy as np
import scipy.stats as st
# ------------------------------
# functions
# ------------------------------
# make 2D Gaussian array
# this function is adapted from a Stack Overflow answer:
# https://stackoverflow.com/questions/29731726/how-to-calculate-a-gaussian-kernel-matrix-efficiently-in-numpy
def gkern(kernlen=21, nsig=3):
"""Returns a 2D Gaussian kernel array."""
interval = (2*nsig+1.)/(kernlen)
x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1)
kern1d = np.diff(st.norm.cdf(x))
kernel_raw = np.sqrt(np.outer(kern1d, kern1d))
kernel = kernel_raw/kernel_raw.sum()
return kernel
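# Illustrative usage (a sketch, assuming this module is run directly): a 5x5 kernel
# with nsig=2 is symmetric and normalized so that its entries sum to 1.
if __name__ == "__main__":
    k = gkern(kernlen=5, nsig=2)
    print(k.shape)  # (5, 5)
    print(round(k.sum(), 6))  # 1.0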
|
# Import classifiers
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
# Import similarity indices predictors
from linkprediction.prediction_methods.prediction.similarity_indices import (
AdamicAdar, AdjustedRand, CommonNeighbors, Jaccard,
Salton, PreferentialAttachement, Sorensen,
ResourceAllocation, SameCommunity, ShortestPath,
TotalNeighbors, UDegree, VDegree,
HubPromoted, HubDepressed, LeichtHolmeNewman)
# Import social theory predictors
from linkprediction.prediction_methods.prediction.social_theory import (
StructuralHoleTheory, SocialExchangeTheory,
EndogenousBalanceTheory, EndogenousCollectiveActionTheory,
HomophilyTheories, ResourceDependencyTheory,
ExogenousBalanceTheory, ExogenousCollectiveActionTheory)
class LinkPredictorFactory:
def create(self, model, **kwargs):
if self._is_topology(model):
return self._create_topology(model, **kwargs)
elif self._is_socialtheory_endogenous(model):
return self._create_socialtheory_endogenous(model, **kwargs)
elif self._is_socialtheory_exogenous(model):
return self._create_socialtheory_exogenous(model, **kwargs)
elif self._is_classifier(model):
return self._create_classifier(model, **kwargs)
elif self._is_others(model):
return self._create_others(model, **kwargs)
else:
raise ValueError(
"Invalid value for `feature_type` ({0})."
.format(model['feature_type'])
)
def _create_topology(self, model, graph):
name = model['designation']
if name == "AdamicAdar":
return AdamicAdar(graph.to_undirected())
elif name == "AdjustedRand":
return AdjustedRand(graph.to_undirected())
elif name == "CommonNeighbors":
return CommonNeighbors(graph.to_undirected())
elif name == "Jaccard":
return Jaccard(graph.to_undirected())
elif name == "Salton":
return Salton(graph.to_undirected())
elif name == "PreferentialAttachement":
return PreferentialAttachement(graph.to_undirected())
elif name == "ResourceAllocation":
return ResourceAllocation(graph.to_undirected())
elif name == "SameCommunity":
return SameCommunity(graph.to_undirected())
elif name == "ShortestPath":
return ShortestPath(graph.to_undirected())
elif name == "TotalNeighbors":
return TotalNeighbors(graph.to_undirected())
elif name == "UDegree":
return UDegree(graph.to_undirected())
elif name == "VDegree":
return VDegree(graph.to_undirected())
elif name == "Sorensen":
return Sorensen(graph.to_undirected())
elif name == "HubPromoted":
return HubPromoted(graph.to_undirected())
elif name == "HubDepressed":
return HubDepressed(graph.to_undirected())
elif name == "LeichtHolmeNewman":
return LeichtHolmeNewman(graph.to_undirected())
else:
raise ValueError(
"Invalid value for `designation` ({0})."
.format(name)
)
def _create_socialtheory_endogenous(self, model, graph, predicted_graph, threshold):
name = model['designation']
if name == "SocialExchangeTheory":
return SocialExchangeTheory(graph, predicted_graph)
elif name == "BalanceTheory":
return EndogenousBalanceTheory(graph, predicted_graph)
elif name == "CollectiveActionTheory":
return EndogenousCollectiveActionTheory(graph, predicted_graph)
elif name == "StructuralHoleTheory":
return StructuralHoleTheory(graph, predicted_graph)
else:
raise ValueError(
"Invalid value for `designation` ({0})."
.format(name)
)
def _create_socialtheory_exogenous(self, model, graph, predicted_graph, threshold):
name = model['designation']
weightings = {}
for weight in model['parameters']['attribute_weightings']:
weightings[weight['attribute']] = weight['value']
if name == "HomophilyTheories":
return HomophilyTheories(graph, predicted_graph, threshold, weightings)
elif name == "BalanceTheory":
return ExogenousBalanceTheory(graph, predicted_graph, threshold, weightings)
elif name == "ResourceDependenceTheory":
return ResourceDependencyTheory(graph, predicted_graph, threshold, weightings)
elif name == "CollectiveActionTheory":
return ExogenousCollectiveActionTheory(graph, predicted_graph, threshold, weightings)
else:
raise ValueError(
"Invalid value for `designation` ({0})."
.format(name)
)
def _create_classifier(self, model):
name = model['designation']
if name == "DecisionTree":
return DecisionTreeClassifier()
elif name == "SupportVector":
return SVC(probability=True)
elif name == "RandomForest":
return RandomForestClassifier()
elif name == "LogisticRegression":
return LogisticRegression()
elif name == "KNeighbors":
return KNeighborsClassifier()
elif name == "GaussianNB":
return GaussianNB()
elif name == "GradientBoosting":
return GradientBoostingClassifier()
else:
raise ValueError(
"Invalid value for `designation` ({0})."
.format(name)
)
def _create_others(self, model):
raise NotImplementedError()
def _is_topology(self, model):
return model['feature_type'] == 'Topology'
def _is_socialtheory_endogenous(self, model):
return model['feature_type'] == 'Social Theory with endogenous Attributes'
def _is_socialtheory_exogenous(self, model):
return model['feature_type'] == 'Social Theory with exogenous Attributes'
def _is_classifier(self, model):
return model['feature_type'] == 'ML-Classifier'
def _is_others(self, model):
return model['feature_type'] == 'Others'
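# Hedged usage sketch (illustrative only; assumes a networkx-style graph object, as the
# .to_undirected() calls above imply, and example model dicts that are not part of any
# real configuration schema):
#
#     factory = LinkPredictorFactory()
#     jaccard = factory.create({'feature_type': 'Topology', 'designation': 'Jaccard'}, graph=graph)
#     forest = factory.create({'feature_type': 'ML-Classifier', 'designation': 'RandomForest'})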
|
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
from apps.home import blueprint
from flask import render_template, request
from flask_login import login_required
from jinja2 import TemplateNotFound
@blueprint.route('/index')
@login_required
def index():
return render_template('home/index.html', segment='index')
@blueprint.route('/<template>')
@login_required
def route_template(template):
try:
if not template.endswith('.html'):
template += '.html'
# Detect the current page
segment = get_segment(request)
# Serve the file (if exists) from app/templates/home/FILE.html
return render_template("home/" + template, segment=segment)
except TemplateNotFound:
return render_template('home/page-404.html'), 404
except:
return render_template('home/page-500.html'), 500
# Helper - Extract current page name from request
def get_segment(request):
try:
segment = request.path.split('/')[-1]
if segment == '':
segment = 'index'
return segment
except:
return None
|
import numpy as np
import matplotlib.pyplot as plt
import seekr2.modules.common_base as base
import matplotlib.animation as animation
import plotting
title = "Entropy Barrier System"
model_file = "/home/lvotapka/toy_seekr_systems/entropy_barrier/model.xml"
model = base.load_model(model_file)
boundaries = np.array([[-1.0, 1.0], [-1.0, 1.0]])
toy_plot = plotting.Toy_plot(model, title, boundaries)
milestone_cv_functions = ["value"]
plotting.draw_linear_milestones(toy_plot, milestone_cv_functions)
ani = toy_plot.animate_trajs(animating_anchor_indices=[0, 1, 2, 3, 4, 5, 6])
#plt.show()
movie_filename = "entropy_barrier.mp4"
writervideo = animation.FFMpegWriter(fps=60)
ani.save(movie_filename, writer=writervideo)
|
import os,sys
class ScenarioConfig(object): # ADD_TO_CONF_SYSTEM: add to the config search path, do not remove this comment !!!
map_ = '3m'
step_mul = 8
difficulty = '7'
game_version = 'latest'
replay_dir = ''
episode_limit = 60
N_TEAM = 1
N_AGENT_EACH_TEAM = [3,] # because map_ = '3m'
AGENT_ID_EACH_TEAM = [range(0,3),]
TEAM_NAMES = [
'ALGORITHM.Starcraft.star_foundation->StarFoundation',
]
ActAsUnity = False
RewardAsUnity = True
state_provided = True
avail_act_provided = True
def make_sc2_env(env_id, rank):
return Env_Compat_Wrapper(rank)
# One wrapper inside another... this layer exists to align the argument interface
class Env_Compat_Wrapper():
def __init__(self, rank):
from smac.env import StarCraft2Env
self.env = StarCraft2Env(map_name=ScenarioConfig.map_,
step_mul=ScenarioConfig.step_mul,
difficulty=ScenarioConfig.difficulty,
game_version=ScenarioConfig.game_version,
replay_dir=ScenarioConfig.replay_dir)
env_info = self.env.get_env_info()
self.observation_space = { 'state_shape': env_info["state_shape"],
'obs_shape':env_info["obs_shape"]}
self.action_space = { 'n_actions': env_info["n_actions"],
'n_agents': env_info["n_agents"]}
assert env_info["n_agents"] == ScenarioConfig.N_AGENT_EACH_TEAM[0], ('Changed a map? Reconfig ScenarioConfig Above!!')
assert env_info["episode_limit"] == ScenarioConfig.episode_limit, ('Changed a map? Reconfig ScenarioConfig Above!!')
self.id = rank
pass
def step(self, act):
reward, terminated, info = self.env.step(act)
reward = [reward]
done = terminated
ob = self.env.get_obs()
info['state'] = self.env.get_state()
info['avail-act'] = self.env.get_avail_actions()
return (ob, reward, done, info)
def reset(self):
self.env.reset()
ob = self.env.get_obs()
info = {}
info['state'] = self.env.get_state()
info['avail-act'] = self.env.get_avail_actions()
return ob, info
def render(self):
return
# self.env.close()
# self.env.reset()
# state = self.env.get_state()
# obs = self.env.get_obs()
# reward, terminated, info = self.env.step(actions)
# win_tag = True if terminated and 'battle_won' in info and info['battle_won'] else False
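# Hedged usage sketch (illustrative only; assumes the SMAC package and a StarCraft II
# installation with the '3m' map, and that env_id/rank values are chosen by the caller):
#
#     env = make_sc2_env('smac-3m', rank=0)
#     ob, info = env.reset()
#     # pick one available action per agent from info['avail-act'], then:
#     # ob, reward, done, info = env.step(actions)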
|
import time,random,re,requests,json
# It has been a while; not sure anymore whether this file can still be deleted
class Qiass:
def __init__(self) -> None:
self.__createSession()
def __createSession(self):
self.__session = requests.session()
self.__refreshSessionHeaders()
def __refreshSessionHeaders(self):
self.__session.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36"}
    # Time
def __timeLocal(self):
return time.localtime()
def timeLocal(self, mode=0):
date = self.__timeLocal()
year = date.tm_year
month = date.tm_mon
day = date.tm_mday
hour = date.tm_hour
minute = date.tm_min
if mode == 0:
data = {
"year": year,
"month": month,
"day": day,
"hour": hour,
"minute": minute,
}
return data
if mode == 1:
return "%s-%s-%s" % (year, month, day)
if mode == 2:
return "%s-%s-%s(%s:%s)" % (year, month, day, hour, minute)
if mode == 3:
return "%s年%s月%s日" % (year, month, day)
if mode == 4:
return "%s年%s月%s日(%s:%s)" % (year, month, day, hour, minute)
def __timeNetwork(self):pass
def timeNetwork(self):pass
    # Timestamp
def __timeStamp(self) -> int:
return int(time.mktime(time.localtime()))
def timeStamp(self, Mode:int) -> int:
if Mode == 0:
return self.__timeStamp()
if Mode == 1:
return self.__timeStamp() + random.randint(100,900)
if Mode == 2:
            return int(str(self.__timeStamp()) + str(random.randint(100, 999)))
return False
    # Prefix matching (exact prefix, fuzzy, regex)
def __prefix(self, text:str, prifixList:list):
result = False
for i in prifixList:
            # Default statement (no arguments)
if len(text) == len(i) and text == i:
result = "Default"
break
            # Statement with arguments
if text[:len(i)] == i:
result = text[len(i):]
break
        # Strip leading spaces from the argument here
if result != False and result != "Default":
number = 0
for i in result:
if i == " ":
number += 1
continue
break
if number > 0:
result = result[number:]
            # If stripping the spaces left nothing, no argument was actually passed, so fall back to Default
if result == "":
return "Default"
return result
def prefix(self, text: str, prifixList: list) -> str:
if len(prifixList) > 0:
return self.__prefix(text, prifixList)
prifixList = [
'#',
'[CQ:at,qq=1053287283]',
'大晴',
]
        prifixReList = [
            "\[CQ:reply,id=.*?\]",  # to match replies like: [CQ:reply,id=.*?][CQ:at,qq=1053287283] [CQ:at,qq=1053287283] are you there
            "\[CQ:at,qq=1053287283\]"  # to match replies like: [CQ:reply,id=.*?][CQ:at,qq=1053287283] aren't you there
        ]
return self.__prefix(text, prifixList)
    # Word filtering (mask blocked words)
def __shielding(self, text:str, reList:list, shieldingWord=None):
isExecute = False
        if shieldingWord is None:
shieldingWord = "*"
for pattern in reList:
l = re.findall(pattern, text)
if len(l) >= 1:
for j in l:
keyWord = j
if shieldingWord == "*":
text = text.replace(keyWord, shieldingWord * len(keyWord))
if shieldingWord != "*":
text = text.replace(keyWord, shieldingWord)
isExecute = text
return isExecute
def shielding(self, text:str) -> str:
        re0 = [
            "[0-9]{11}", "[0-9]{10}", "jj",
            "\{face:[0-9]\}|\{face:[0-9]{2}\}|\{face:[0-9]{3}\}",
            "狂撸", "狂射", "9000次", "求草",
            "你就不能搞点新创意出来嘛", "face57", "我不是大便,我的粉丝也不是苍蝇",
            "别发这么无聊的信息行不",
        ]
re1 = [
'傻逼', '煞笔', '神经病', '母猪', "tianyu"
]
re2 = [
'菲菲', '浩哥', '小燕', '吴珂', '王雨昕', '茉姐', "杨秀花"
]
text = self.__shielding(text, re0)
text = self.__shielding(text, re1, "小可爱")
text = self.__shielding(text, re2, "小晴")
return text
    # Pagination
def __paging(self, qslb:list, pageRows:int, pageSee:int):
countTotal = len(qslb)
pageTotal = countTotal / pageRows
if not pageTotal.is_integer():
pageTotal = int(pageTotal) + 1
if pageSee > pageTotal:
return False
if pageSee <= 1:
start = 0
end = pageRows
if pageSee > 1:
start = pageRows * pageSee - pageRows
end = pageRows * pageSee
data = {}
for i in list(qslb)[start:end]:
data[i] = qslb[i]
result = {
"pageSee":pageSee,
"pageTotal":pageTotal,
"data":data
}
return result
def paging(self, qslb:list, pageRows=5, pageSee=1):
try:
pageRows = int(pageRows)
pageSee = int(pageSee)
except:
return False
return self.__paging(qslb, pageRows, pageSee)
    # Probability
def __probability(self, number=10, percentage=2):
data = {"True" : 0, "False" : 0}
for i in range(0, number):
            lucky = random.randint(0, 100 * 10)  # probability roll: success when lucky <= percentage * 10, i.e. roughly a percentage% chance
if lucky <= percentage * 10:
data["True"] += 1
else:
data["False"] += 1
return data
def probability(self, number=1, percentage=2):
if number < 1:
return None
data = self.__probability(number, percentage)
if number == 1:
if data["True"] > 0:
return True
else:
return False
return data
    # Chat (local thesaurus, TuLing123 API, QingYunKe API)
def __chatThesaurus(self, text:str):
        # TODO: move this to database storage later
data = {
"早":"早啊,祝你今天愉快!",
"中午好": "中午好,吃午饭没呀",
"晚上好": "晚上好,我去睡觉觉啦",
}
for i in data:
if i == text:
return data[i]
return False
def __chatTuLing123(self, text:str) -> requests.Response:
        # TODO: fill in your own apiKey here
url = "http://openapi.tuling123.com/openapi/api/v2"
        dataAll = {
            "reqType": "0",  # 0 - text (default), 1 - image, 2 - audio
"perception": {
                # text, image, audio, client
"inputText": {
"text": text,
},
"inputImage": {
"url": "",
},
"inputMedia": {
"url": "",
},
"selfInfo": {
                    # city, province, street
"location": {
"city": "",
"province": "",
"street": "",
},
},
},
"userInfo": {
                # bot API key, unique user ID, unique group ID, user nickname in the group
"apiKey": "就是这个地方",
"userId": "",
"groupId": "",
"userIdName": "",
},
}
params = {
"reqType": "0",
"perception": {
"inputText": {
"text": text,
}
},
"userInfo": {
"apiKey": "就是这个地方",
"userId": "5",
},
}
return self.__session.post(url, data=json.dumps(params))
def __chatQingYunKe(self, text:str) -> requests.Response:
url = "http://api.qingyunke.com/api.php"
params = {
"key": "free",
"appid": "0",
"msg": text,
}
return self.__session.get(url, params=params)
def chatThesaurus(self, text:str):
answer = self.__chatThesaurus(text)
return answer
def chatTuLing123(self, text:str):
res = self.__chatTuLing123(text)
data = json.loads(res.text)
answer = data["results"][0]["values"]["text"]
if answer == "请求次数超限制!":
answer = False
return answer
def chatQingYunKe(self, text:str):
res = self.__chatQingYunKe(text)
if res.status_code == 200:
data = json.loads(res.text)
answer = data["content"]
else:
answer = False
return answer
    # Help me choose
def helpMeChoose(self, text:str):
l = text.split("*")
return "已为您选择: " + l[random.randint(0, len(l)-1)]
    # Dice roll
def roll(self, text:str):
if not text.isdigit():
return "以为您投掷骰子(Default)\n结果为: %d" % random.randint(0, 10)
text = int(text)
if text < 0:
return "以为您投掷骰子(Default): %d" % random.randint(0, 10)
return "已为您投掷骰子(0,%d)\n结果为: %d" % (text, random.randint(0, text))
    # Feature list / details
def functionList(self, text:str, mode:int):
        # Data source
start = "使用方法如下:\n"
data = {
"聊天系统":start+"1.发送'大晴 你好'\n2.发送'# 你好'\n3.发送'@大晴 你好'",
"时间系统":start+"1.发送'当前时间'\n2.发送'获取时间戳'",
"随机系统":start+"1.发送'帮我选择 打游戏*学习'\n2.发送'扔骰子 66'\n3.发送'随机食物'\n4.发送'随机表情'",
"娱乐系统":start+"1.发送'戳一戳 @群友'",
"功能系统":start+"1.发送'功能列表'\n2.发送'查看功能 聊天系统'",
"公告系统":start+"1.发送'公告列表'\n2.发送'查看公告 v1.0'",
"翻译系统":start+"暂无介绍...",
"查询系统":start+"暂无介绍...",
"新闻系统":start+"暂无介绍...",
"模板系统":start+"暂无介绍...",
"被动系统":start+"1.有概率自动回复\n2.主动欢迎新人入群\n",
"计算系统":start+"暂无介绍...",
"好感度系统":start+"暂无介绍...",
"模拟器系统":start+"暂无介绍...",
"黑白名单系统":start+"暂无介绍...",
}
        # List mode
if mode == 0:
'''
result = ""
i = 0
for items in data:
result += "%d.%s\n" % (i, items)
i += 1
return result + "Tips: 发送'查看功能 功能名'即可获取功能信息"
'''
result = "[功能列表]\n"
for i in range(0, len(data)):
if i != 0 and i % 2 == 0:
result += "\n"
name = list(data)[i]
if len(name) > 4:
result += "%s%s" % (name, " "*(4+(4-len(name))))
continue
result += "%s%s" % (name, " "*4)
return result + "\nTips: 发送'查看功能 功能名'即可获取功能信息"
        # Detail mode
if mode == 1:
            # First try treating the input as a numeric index (roid)
if text.isdigit():
if int(text) > -1 and int(text) < len(data):
k = list(data)[int(text)]
v = data[k]
return "[%s]\n%s" % (k, v)
            # Then fall back to a key-value lookup
result = data.get(text)
if result == None:
return "未找到[%s]功能的信息" % text
if result != None:
return "[%s]\n%s" % (text, result)
return False
    # Notice list / details
def noticeList(self, text:str, mode:int):
data = {
"测试公告标题":("测试公告内容", "测试公告作者", "测试公告发布时间"),
"v1.0":("暂无内容", "晴兽", "2022年3月3日18:45:47"),
}
        # List mode
if mode == 0:
result = ""
i = 0
for items in data:
result += "%d.%s\n" % (i, items)
i += 1
return result + "Tips: 发送'查看公告 公告名'即可获取公告信息"
        # Detail mode
if mode == 1:
result = data.get(text)
if result == None:
return "未找到[%s]公告的信息" % text
if result != None:
                return "[%s]\n作者:%s\n内容:%s\n发布时间:%s" % (text, result[1], result[0], result[2])  # the stored tuple is (content, author, publish time)
return False
if __name__ == "__main__":
q = Qiass()
|
from sklearn.cluster import DBSCAN, KMeans
import numpy as np
import cv2
from numba import jit
def get_clustering_points(img):
'''
Retrieve points that need to be clustered from a grayscale image
Parameters:
img - grayscale img
Returns:
coordinates of each non-zero pixel in the mask (row, col)
'''
rows, cols = np.nonzero(img)
return list(zip(rows, cols))
def get_number_of_clusters(labels):
'''
From a set of an array where each element is a cluster label
Extract the number of clusters
'''
return len(set(labels)) - (1 if -1 in labels else 0)
def cluster_dbscan(img, eps = 3, min_samples=10):
'''
Apply clustering.
Parameters:
    img -- binary mask whose non-zero pixels are the points to cluster
eps -- distance needed to be included in the cluster
min_samples - how many samples make a cluster
Returns:
X, labels, n_clusters
X - coordinates of each non-zero pixel in the mask (row, col)
labels - cluster labels
n_clusters - total clusters
'''
labels = []
n_clusters = 0
X = get_clustering_points(img)
if len(X) == 0:
return X, labels, n_clusters
algo = DBSCAN(eps = eps, min_samples = min_samples)
try:
labels = algo.fit_predict(X)
n_clusters = get_number_of_clusters(labels)
except:
pass
return X, labels, n_clusters
def cluster_kmeans(img, n_classes=2):
'''
Apply k-means clustering.
Parameters:
    img -- binary mask whose non-zero pixels are the points to cluster
    n_classes -- number of clusters to produce
Returns:
X, labels, n_clusters
X - coordinates of each non-zero pixel in the mask (row, col)
labels - cluster labels
'''
X = get_clustering_points(img)
kmeans = KMeans(n_clusters = n_classes)
labels = kmeans.fit_predict(X)
return X, labels
@jit
def min_max_coords(rect1, pt2):
(l1, t1, r1, b1) = rect1
(l2, t2) = pt2
l = min(l1, l2)
t = min(t1, t2)
r = max(r1, l2)
b = max(b1, t2)
return l, t, r, b
@jit
def map_clusters_to_bounding_boxes(labels, coords, n_clusters = None):
'''
Map each cluster to its bounding box
'''
if n_clusters is None:
n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
buds = [None] * n_clusters
for label, coord in zip(labels, coords):
if label == -1:
continue
t, l = coord
if buds[label] is None:
buds[label] = (l, t, l, t)
else:
buds[label] = min_max_coords(buds[label], (l, t))
return buds
def mask_from_clusters(labels, coords, mask_shape_row_col):
'''
Create a 2D mask where each (row, col) is set to the label of its cluster
or 0 if it is non-clustered
Parameters:
labels - array-like where labels[i] == cluster_number
coords - coordinates of each element in labels[i]
mask_shape_row_col - tuple of (rows, cols) for the output mask
Returns:
A 2D image where each pixel is set to the number of the cluster + 1
and un-clustered pixels are set to 0
'''
mask = np.zeros(mask_shape_row_col)
mask[tuple(zip(*coords))] = labels + 1
return mask
def get_box_center (rect):
l, t, r, b = rect
return ((l + r) // 2, (t + b) // 2)
def get_box_radius(rect):
l, t, r, b = rect
return max((r-l) // 2, (b - t) // 2)
def draw_clusters(img, clusters, color=(255, 255, 0), radius = None):
for bud in clusters:
if radius is None:
rad = get_box_radius(bud)
else:
rad = radius
_ = cv2.circle(img, get_box_center(bud), rad, color, 2)
def get_clusters_from_mask_verbose(mask, eps = 1, min_samples = 1):
'''
Apply DBSCAN clustering to a mask
Parameters:
mask - the mask to compute clusters from
eps - max radius around a given point so points within the circle of this radius are included in the same cluster
min_samples - minimal number of points to define a cluster
Returns:
centers, radii, non-zero pixel coordinates, array of labels, number of clusters, average radius
'''
coords, clusters, n_clusters = cluster_dbscan(mask, eps=eps, min_samples = min_samples)
if n_clusters == 0:
return None, None, None, None, 0, None
clustered_rects = map_clusters_to_bounding_boxes(clusters, coords, n_clusters)
radii = np.array(list(map(get_box_radius, clustered_rects)))
centers = np.array(list(map(get_box_center, clustered_rects)))
avg_radius = np.mean(radii)
return centers, radii, coords, clusters, n_clusters, avg_radius
def get_clusters_from_mask(mask, eps = 1, min_samples = 1):
'''
Apply DBSCAN clustering to a mask
Parameters:
mask - the mask to compute clusters from
eps - max radius around a given point so points within the circle of this radius are included in the same cluster
min_samples - minimal number of points to define a cluster
Returns:
centers, radii, non-zero pixel coordinates, array of labels, number of clusters, average radius
'''
centers, radii, coords, clusters, n_clusters, avg_radius = get_clusters_from_mask_verbose(mask, eps, min_samples)
return centers, radii
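if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): a tiny binary mask with two separate
    # blobs of non-zero pixels; DBSCAN is expected to report one cluster per blob.
    demo_mask = np.zeros((20, 20), dtype=np.uint8)
    demo_mask[2:5, 2:5] = 255
    demo_mask[12:16, 12:16] = 255
    demo_points, demo_labels, demo_n_clusters = cluster_dbscan(demo_mask, eps=1, min_samples=1)
    print("clusters found:", demo_n_clusters)  # expected: 2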
|
import requests
def LookupActress():
url = 'https://jav-rest-api-htpvmrzjet.now.sh/api/actress?name='
actressName = input("Search for an actress: ")
actressURL = url + actressName
# print(actressURL)
actressRequest = requests.get(actressURL).json()
counts = len(actressRequest['result'])
print("{:<3} | {:<7} | {:17} | {}\t".format("#", "ID", "Actress Name", "Japanese Name"))
print("=======================================================")
for i in range(counts):
actress_id = actressRequest['result'][i]['id']
actress_name = actressRequest['result'][i]['name']
actress_japName = actressRequest['result'][i]['japanName']
print("{:<3} | {:<7} | {:17} | {}\t".format(i+1, actress_id, actress_name, actress_japName))
print('Found {} babes named "{}"'.format(counts, actressName))
print()
def LookUpMovies():
video_url = 'https://jav-rest-api-htpvmrzjet.now.sh/api/videos/'
actress_id = input("Enter actress ID: ")
vidURL = video_url + actress_id
videoRequest = requests.get(vidURL).json()
video_counts = len(videoRequest['result'])
actress_name = videoRequest['result'][0]['actress'][0]['name']
print("{:<3} | {} | {}\t| {:<15} | {}".format("#", "Year", "Name", "Code", "Title"))
print("=============================================================================")
for i in range(video_counts):
video_title = videoRequest['result'][i]['name']
if len(videoRequest['result'][i]['name']) > 50:
video_title = video_title.replace(video_title[49:], '...')
else:
video_title = videoRequest['result'][i]['name']
siteUrl = videoRequest['result'][i]['siteUrl']
video_code = siteUrl[(siteUrl.find("cid=") + 4):(len(siteUrl) - 1)].upper()
year = videoRequest['result'][i]['date'][:4]
message = "{:<3} | {} | {}\t| {:<15} | {}".format(i+1, year, actress_name, video_code, video_title)
print(message)
print("Found {} videos for {}".format(video_counts, actress_name))
print()
def programLoop():
while True:
LookupActress()
LookUpMovies()
cont = input("Do you wish to continue?[Y/N]: ")
if cont.lower() == 'n':
print('Enjoy ;)')
break
def SuggestAnActress():
#work in progress
pass
def SuggestAMovie():
    # work in progress
pass
if __name__ == "__main__":
programLoop()
#Thanks to Hoàng (toidicodedao) for the Jav API :v
# if len(actress_name) > 11:
# print("{}\t | {} \t| {}\t".format(actress_id, actress_name, actress_japName))
# else:
# print("{}\t | {}\t\t| {}\t".format(actress_id, actress_name, actress_japName))
|
# -*- coding:utf-8 -*-
from .symbol import Symbol
from .exception import CoilSyntaxError, CoilRuntimeError
class Scheme(object):
""" """
symbol_tables = {keyword: Symbol(keyword) for keyword in ['quote', 'if', 'set!', 'define', 'lambda', 'begin']}
@classmethod
def pause(cls, tokenizer):
""" """
return [cls.__symbolize(token, tokenizer) for token in tokenizer][0]
@classmethod
def evaluate(cls, token, environment):
""" """
if isinstance(token, Symbol):
return environment.find(token)[token]
elif not isinstance(token, list):
return token
elif token[0] is cls.__symbol('quote'):
(_, exp) = token
return exp
elif token[0] is cls.__symbol('if'):
(_, test, conseq, alt) = token
return Scheme.evaluate((conseq if Scheme.evaluate(test, environment) else alt), environment)
elif token[0] is cls.__symbol('set!'):
(_, var, exp) = token
environment.find(var)[var] = Scheme.evaluate(exp, environment)
elif token[0] is cls.__symbol('define'):
(_, var, exp) = token
environment[var] = Scheme.evaluate(exp, environment)
        elif token[0] is cls.__symbol('lambda'):
            (_, var, exp) = token
            # Evaluate the lambda body `exp` in a fresh environment binding `var` to the
            # call arguments (an Environment class is assumed to be provided elsewhere).
            return lambda *args: Scheme.evaluate(exp, Environment(var, args, environment))
elif token[0] is cls.__symbol('begin'):
for exp in token[1:]:
                val = Scheme.evaluate(exp, environment)
return val
else:
exps = [Scheme.evaluate(exp, environment) for exp in token]
proc = exps.pop(0)
return proc(*exps)
@classmethod
def __symbolize(cls, token, tokenizer):
""" """
if token == '(':
lists = []
while True:
next_token = next(tokenizer)
if next_token == ')':
return lists
else:
lists.append(cls.__symbolize(next_token, tokenizer))
elif token == ')':
raise CoilSyntaxError("syntax error, unexpected ')'")
else:
return cls.__atom(token)
@classmethod
def __atom(cls, token):
""" """
if token == '#t' or token == '#f':
return True if token == '#t' else False
if token.isnumeric():
return float(token)
else:
return cls.__symbol(token)
@classmethod
def __format(cls, atom):
""" """
if atom is True or atom is False:
return '#t' if atom is True else '#f'
elif isinstance(atom, Symbol):
return atom
else:
return str(atom)
@classmethod
def __symbol(cls, token):
""" """
if token not in cls.symbol_tables:
cls.symbol_tables[token] = Symbol(token)
return cls.symbol_tables[token]
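# Illustrative usage sketch (assumes a tokenizer exposing next() and iteration, and an
# Environment mapping with a find() method; neither is defined in this module):
#
#     tree = Scheme.pause(tokenizer)      # e.g. the tokens of "(if #t 1 2)"
#     value = Scheme.evaluate(tree, env)  # expected to yield 1.0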
|
import bisect
# Using binary search
class Solution(object):
def suggestedProducts(self, products, searchWord):
"""
:type products: List[str]
:type searchWord: str
:rtype: List[List[str]]
"""
products.sort()
result, prefix, startIdx = [], "", 0
for char in searchWord:
prefix += char
startIdx = bisect.bisect_left(products, prefix, startIdx)
            currentSearchRes = []
            for product in products[startIdx: startIdx + 3]:
                if product.startswith(prefix):
                    currentSearchRes.append(product)
            result.append(currentSearchRes)
return result
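if __name__ == "__main__":
    # Quick illustrative check: for each typed prefix of "mouse", at most three
    # lexicographically smallest matching products are suggested.
    demo = Solution().suggestedProducts(
        ["mobile", "mouse", "moneypot", "monitor", "mousepad"], "mouse")
    print(demo)
    # [['mobile', 'moneypot', 'monitor'], ['mobile', 'moneypot', 'monitor'],
    #  ['mouse', 'mousepad'], ['mouse', 'mousepad'], ['mouse', 'mousepad']]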
|
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.util import orderdict
from slicc.util import PairContainer
from slicc.symbols.Symbol import Symbol
from slicc.symbols.Var import Var
class DataMember(PairContainer):
def __init__(self, ident, type, pairs, init_code):
super(DataMember, self).__init__(pairs)
self.ident = ident
self.type = type
self.init_code = init_code
class Enumeration(PairContainer):
def __init__(self, ident, pairs):
super(Enumeration, self).__init__(pairs)
self.ident = ident
class Type(Symbol):
def __init__(self, table, ident, location, pairs, machine=None):
super(Type, self).__init__(table, ident, location, pairs)
self.c_ident = ident
self.abstract_ident = ""
if machine:
if self.isExternal or self.isPrimitive:
if "external_name" in self:
self.c_ident = self["external_name"]
else:
# Append with machine name
self.c_ident = "%s_%s" % (machine, ident)
        self.pairs.setdefault("desc", "No description available")
# check for interface that this Type implements
if "interface" in self:
interface = self["interface"]
if interface in ("Message", "NetworkMessage"):
self["message"] = "yes"
if interface == "NetworkMessage":
self["networkmessage"] = "yes"
# FIXME - all of the following id comparisons are fragile hacks
        if self.ident == "CacheMemory":
            self["cache"] = "yes"
        if self.ident == "TBETable":
            self["tbe"] = "yes"
if self.ident == "TimerTable":
self["timer"] = "yes"
if self.ident == "DirectoryMemory":
self["dir"] = "yes"
if self.ident == "PersistentTable":
self["persistent"] = "yes"
if self.ident == "Prefetcher":
self["prefetcher"] = "yes"
self.isMachineType = (ident == "MachineType")
self.isStateDecl = ("state_decl" in self)
self.statePermPairs = []
self.data_members = orderdict()
self.methods = {}
self.enums = orderdict()
@property
def isPrimitive(self):
return "primitive" in self
@property
def isNetworkMessage(self):
return "networkmessage" in self
@property
def isMessage(self):
return "message" in self
@property
def isBuffer(self):
return "buffer" in self
@property
def isInPort(self):
return "inport" in self
@property
def isOutPort(self):
return "outport" in self
@property
def isEnumeration(self):
return "enumeration" in self
@property
def isExternal(self):
return "external" in self
@property
def isGlobal(self):
return "global" in self
@property
def isInterface(self):
return "interface" in self
# Return false on error
def addDataMember(self, ident, type, pairs, init_code):
if ident in self.data_members:
return False
member = DataMember(ident, type, pairs, init_code)
self.data_members[ident] = member
var = Var(self.symtab, ident, self.location, type,
"m_%s" % ident, {}, None)
self.symtab.registerSym(ident, var)
return True
def dataMemberType(self, ident):
return self.data_members[ident].type
def methodId(self, name, param_type_vec):
return '_'.join([name] + [ pt.c_ident for pt in param_type_vec ])
def methodIdAbstract(self, name, param_type_vec):
return '_'.join([name] + [ pt.abstract_ident for pt in param_type_vec ])
def statePermPairAdd(self, state_name, perm_name):
self.statePermPairs.append([state_name, perm_name])
def addFunc(self, func):
ident = self.methodId(func.ident, func.param_types)
if ident in self.methods:
return False
self.methods[ident] = func
return True
def addEnum(self, ident, pairs):
if ident in self.enums:
return False
self.enums[ident] = Enumeration(ident, pairs)
# Add default
if "default" not in self:
self["default"] = "%s_NUM" % self.c_ident
return True
def writeCodeFiles(self, path, includes):
if self.isExternal:
# Do nothing
pass
elif self.isEnumeration:
self.printEnumHH(path)
self.printEnumCC(path)
else:
# User defined structs and messages
self.printTypeHH(path)
self.printTypeCC(path)
def printTypeHH(self, path):
code = self.symtab.codeFormatter()
code('''
/** \\file ${{self.c_ident}}.hh
*
*
* Auto generated C++ code started by $__file__:$__line__
*/
#ifndef __${{self.c_ident}}_HH__
#define __${{self.c_ident}}_HH__
#include <iostream>
#include "mem/ruby/slicc_interface/RubySlicc_Util.hh"
''')
for dm in self.data_members.values():
if not dm.type.isPrimitive:
code('#include "mem/protocol/$0.hh"', dm.type.c_ident)
parent = ""
if "interface" in self:
code('#include "mem/protocol/$0.hh"', self["interface"])
parent = " : public %s" % self["interface"]
code('''
$klass ${{self.c_ident}}$parent
{
public:
${{self.c_ident}}
''', klass="class")
if self.isMessage:
code('(Tick curTime) : %s(curTime) {' % self["interface"])
else:
code('()\n\t\t{')
code.indent()
if not self.isGlobal:
code.indent()
for dm in self.data_members.values():
ident = dm.ident
if "default" in dm:
# look for default value
code('m_$ident = ${{dm["default"]}}; // default for this field')
elif "default" in dm.type:
# Look for the type default
tid = dm.type.c_ident
code('m_$ident = ${{dm.type["default"]}}; // default value of $tid')
else:
code('// m_$ident has no default')
code.dedent()
code('}')
# ******** Copy constructor ********
if not self.isGlobal:
code('${{self.c_ident}}(const ${{self.c_ident}}&other)')
# Call superclass constructor
if "interface" in self:
code(' : ${{self["interface"]}}(other)')
code('{')
code.indent()
for dm in self.data_members.values():
code('m_${{dm.ident}} = other.m_${{dm.ident}};')
code.dedent()
code('}')
# ******** Full init constructor ********
if not self.isGlobal:
params = [ 'const %s& local_%s' % (dm.type.c_ident, dm.ident) \
for dm in self.data_members.itervalues() ]
params = ', '.join(params)
if self.isMessage:
params = "const Tick curTime, " + params
code('${{self.c_ident}}($params)')
# Call superclass constructor
if "interface" in self:
if self.isMessage:
code(' : ${{self["interface"]}}(curTime)')
else:
code(' : ${{self["interface"]}}()')
code('{')
code.indent()
for dm in self.data_members.values():
code('m_${{dm.ident}} = local_${{dm.ident}};')
if "nextLineCallHack" in dm:
code('m_${{dm.ident}}${{dm["nextLineCallHack"]}};')
code.dedent()
code('}')
# create a clone member
code('''
${{self.c_ident}}*
clone() const
{
return new ${{self.c_ident}}(*this);
}
''')
if not self.isGlobal:
# const Get methods for each field
code('// Const accessors methods for each field')
for dm in self.data_members.values():
code('''
/** \\brief Const accessor method for ${{dm.ident}} field.
* \\return ${{dm.ident}} field
*/
const ${{dm.type.c_ident}}&
get${{dm.ident}}() const
{
return m_${{dm.ident}};
}
''')
# Non-const Get methods for each field
code('// Non const Accessors methods for each field')
for dm in self.data_members.values():
code('''
/** \\brief Non-const accessor method for ${{dm.ident}} field.
* \\return ${{dm.ident}} field
*/
${{dm.type.c_ident}}&
get${{dm.ident}}()
{
return m_${{dm.ident}};
}
''')
#Set methods for each field
code('// Mutator methods for each field')
for dm in self.data_members.values():
code('''
/** \\brief Mutator method for ${{dm.ident}} field */
void
set${{dm.ident}}(const ${{dm.type.c_ident}}& local_${{dm.ident}})
{
m_${{dm.ident}} = local_${{dm.ident}};
}
''')
code('void print(std::ostream& out) const;')
code.dedent()
code(' //private:')
code.indent()
# Data members for each field
for dm in self.data_members.values():
if "abstract" not in dm:
const = ""
init = ""
# global structure
if self.isGlobal:
const = "static const "
# init value
if dm.init_code:
# only global structure can have init value here
assert self.isGlobal
init = " = %s" % (dm.init_code)
if "desc" in dm:
code('/** ${{dm["desc"]}} */')
code('$const${{dm.type.c_ident}} m_${{dm.ident}}$init;')
# Prototypes for methods defined for the Type
for item in self.methods:
proto = self.methods[item].prototype
if proto:
code('$proto')
code.dedent()
code('};')
code('''
inline std::ostream&
operator<<(std::ostream& out, const ${{self.c_ident}}& obj)
{
obj.print(out);
out << std::flush;
return out;
}
#endif // __${{self.c_ident}}_HH__
''')
code.write(path, "%s.hh" % self.c_ident)
def printTypeCC(self, path):
code = self.symtab.codeFormatter()
code('''
/** \\file ${{self.c_ident}}.cc
*
* Auto generated C++ code started by $__file__:$__line__
*/
#include <iostream>
#include "mem/protocol/${{self.c_ident}}.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/System.hh"
using namespace std;
''')
code('''
/** \\brief Print the state of this object */
void
${{self.c_ident}}::print(ostream& out) const
{
out << "[${{self.c_ident}}: ";
''')
# For each field
code.indent()
for dm in self.data_members.values():
code('out << "${{dm.ident}} = " << m_${{dm.ident}} << " ";''')
if self.isMessage:
code('out << "Time = " << g_system_ptr->clockPeriod() * getTime() << " ";')
code.dedent()
# Trailer
code('''
out << "]";
}''')
# print the code for the methods in the type
for item in self.methods:
code(self.methods[item].generateCode())
code.write(path, "%s.cc" % self.c_ident)
def printEnumHH(self, path):
code = self.symtab.codeFormatter()
code('''
/** \\file ${{self.c_ident}}.hh
*
* Auto generated C++ code started by $__file__:$__line__
*/
#ifndef __${{self.c_ident}}_HH__
#define __${{self.c_ident}}_HH__
#include <iostream>
#include <string>
''')
if self.isStateDecl:
code('#include "mem/protocol/AccessPermission.hh"')
if self.isMachineType:
code('#include "base/misc.hh"')
code('#include "mem/ruby/common/Address.hh"')
code('struct MachineID;')
code('''
// Class definition
/** \\enum ${{self.c_ident}}
* \\brief ${{self.desc}}
*/
enum ${{self.c_ident}} {
${{self.c_ident}}_FIRST,
''')
code.indent()
# For each field
for i,(ident,enum) in enumerate(self.enums.iteritems()):
            desc = enum.get("desc", "No description available")
if i == 0:
init = ' = %s_FIRST' % self.c_ident
else:
init = ''
code('${{self.c_ident}}_${{enum.ident}}$init, /**< $desc */')
code.dedent()
code('''
${{self.c_ident}}_NUM
};
// Code to convert from a string to the enumeration
${{self.c_ident}} string_to_${{self.c_ident}}(const std::string& str);
// Code to convert state to a string
std::string ${{self.c_ident}}_to_string(const ${{self.c_ident}}& obj);
// Code to increment an enumeration type
${{self.c_ident}} &operator++(${{self.c_ident}} &e);
''')
# MachineType hack used to set the base component id for each Machine
if self.isMachineType:
code('''
int ${{self.c_ident}}_base_level(const ${{self.c_ident}}& obj);
MachineType ${{self.c_ident}}_from_base_level(int);
int ${{self.c_ident}}_base_number(const ${{self.c_ident}}& obj);
int ${{self.c_ident}}_base_count(const ${{self.c_ident}}& obj);
''')
for enum in self.enums.itervalues():
if enum.ident == "DMA":
code('''
MachineID map_Address_to_DMA(const Address &addr);
''')
code('''
MachineID get${{enum.ident}}MachineID(NodeID RubyNode);
''')
if self.isStateDecl:
code('''
// Code to convert the current state to an access permission
AccessPermission ${{self.c_ident}}_to_permission(const ${{self.c_ident}}& obj);
''')
# Trailer
code('''
std::ostream& operator<<(std::ostream& out, const ${{self.c_ident}}& obj);
#endif // __${{self.c_ident}}_HH__
''')
code.write(path, "%s.hh" % self.c_ident)
def printEnumCC(self, path):
code = self.symtab.codeFormatter()
code('''
/** \\file ${{self.c_ident}}.hh
*
* Auto generated C++ code started by $__file__:$__line__
*/
#include <cassert>
#include <iostream>
#include <string>
#include "base/misc.hh"
#include "mem/protocol/${{self.c_ident}}.hh"
using namespace std;
''')
if self.isStateDecl:
code('''
// Code to convert the current state to an access permission
AccessPermission ${{self.c_ident}}_to_permission(const ${{self.c_ident}}& obj)
{
switch(obj) {
''')
# For each case
code.indent()
for statePerm in self.statePermPairs:
code(' case ${{self.c_ident}}_${{statePerm[0]}}:')
code(' return AccessPermission_${{statePerm[1]}};')
code.dedent()
code ('''
default:
        panic("Unknown state access permission conversion for ${{self.c_ident}}");
}
}
''')
if self.isMachineType:
for enum in self.enums.itervalues():
if enum.get("Primary"):
code('#include "mem/protocol/${{enum.ident}}_Controller.hh"')
code('#include "mem/ruby/common/MachineID.hh"')
code('''
// Code for output operator
ostream&
operator<<(ostream& out, const ${{self.c_ident}}& obj)
{
out << ${{self.c_ident}}_to_string(obj);
out << flush;
return out;
}
// Code to convert state to a string
string
${{self.c_ident}}_to_string(const ${{self.c_ident}}& obj)
{
switch(obj) {
''')
# For each field
code.indent()
for enum in self.enums.itervalues():
code(' case ${{self.c_ident}}_${{enum.ident}}:')
code(' return "${{enum.ident}}";')
code.dedent()
# Trailer
code('''
default:
panic("Invalid range for type ${{self.c_ident}}");
}
}
// Code to convert from a string to the enumeration
${{self.c_ident}}
string_to_${{self.c_ident}}(const string& str)
{
''')
# For each field
start = ""
code.indent()
for enum in self.enums.itervalues():
code('${start}if (str == "${{enum.ident}}") {')
code(' return ${{self.c_ident}}_${{enum.ident}};')
start = "} else "
code.dedent()
code('''
} else {
panic("Invalid string conversion for %s, type ${{self.c_ident}}", str);
}
}
// Code to increment an enumeration type
${{self.c_ident}}&
operator++(${{self.c_ident}}& e)
{
assert(e < ${{self.c_ident}}_NUM);
return e = ${{self.c_ident}}(e+1);
}
''')
# MachineType hack used to set the base level and number of
# components for each Machine
if self.isMachineType:
code('''
/** \\brief returns the base vector index for each machine type to be
* used by NetDest
*
* \\return the base vector index for each machine type to be used by NetDest
* \\see NetDest.hh
*/
int
${{self.c_ident}}_base_level(const ${{self.c_ident}}& obj)
{
switch(obj) {
''')
# For each field
code.indent()
for i,enum in enumerate(self.enums.itervalues()):
code(' case ${{self.c_ident}}_${{enum.ident}}:')
code(' return $i;')
code.dedent()
# total num
code('''
case ${{self.c_ident}}_NUM:
return ${{len(self.enums)}};
default:
panic("Invalid range for type ${{self.c_ident}}");
}
}
/** \\brief returns the machine type for each base vector index used by NetDest
*
* \\return the MachineType
*/
MachineType
${{self.c_ident}}_from_base_level(int type)
{
switch(type) {
''')
# For each field
code.indent()
for i,enum in enumerate(self.enums.itervalues()):
code(' case $i:')
code(' return ${{self.c_ident}}_${{enum.ident}};')
code.dedent()
# Trailer
code('''
default:
panic("Invalid range for type ${{self.c_ident}}");
}
}
/** \\brief The return value indicates the number of components created
* before a particular machine\'s components
*
* \\return the base number of components for each machine
*/
int
${{self.c_ident}}_base_number(const ${{self.c_ident}}& obj)
{
int base = 0;
switch(obj) {
''')
# For each field
code.indent()
code(' case ${{self.c_ident}}_NUM:')
for enum in reversed(self.enums.values()):
# Check if there is a defined machine with this type
if enum.get("Primary"):
code(' base += ${{enum.ident}}_Controller::getNumControllers();')
else:
code(' base += 0;')
code(' case ${{self.c_ident}}_${{enum.ident}}:')
code(' break;')
code.dedent()
code('''
default:
panic("Invalid range for type ${{self.c_ident}}");
}
return base;
}
/** \\brief returns the total number of components for each machine
* \\return the total number of components for each machine
*/
int
${{self.c_ident}}_base_count(const ${{self.c_ident}}& obj)
{
switch(obj) {
''')
# For each field
for enum in self.enums.itervalues():
code('case ${{self.c_ident}}_${{enum.ident}}:')
if enum.get("Primary"):
code('return ${{enum.ident}}_Controller::getNumControllers();')
else:
code('return 0;')
# total num
code('''
case ${{self.c_ident}}_NUM:
default:
panic("Invalid range for type ${{self.c_ident}}");
}
}
''')
for enum in self.enums.itervalues():
if enum.ident == "DMA":
code('''
MachineID
map_Address_to_DMA(const Address &addr)
{
MachineID dma = {MachineType_DMA, 0};
return dma;
}
''')
code('''
MachineID
get${{enum.ident}}MachineID(NodeID RubyNode)
{
MachineID mach = {MachineType_${{enum.ident}}, RubyNode};
return mach;
}
''')
# Write the file
code.write(path, "%s.cc" % self.c_ident)
__all__ = [ "Type" ]
|
import re
import types
from unittest import TestCase
from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile
class TestFieldWithValidators(TestCase):
def test_all_errors_get_reported(self):
class UserForm(forms.Form):
full_name = forms.CharField(
max_length=50,
validators=[
validators.validate_integer,
validators.validate_email,
],
)
string = forms.CharField(
max_length=50,
validators=[
validators.RegexValidator(
regex="^[a-zA-Z]*$",
message="Letters only.",
)
],
)
ignore_case_string = forms.CharField(
max_length=50,
validators=[
validators.RegexValidator(
regex="^[a-z]*$",
message="Letters only.",
flags=re.IGNORECASE,
)
],
)
form = UserForm(
{
"full_name": "not int nor mail",
"string": "2 is not correct",
"ignore_case_string": "IgnORE Case strIng",
}
)
with self.assertRaises(ValidationError) as e:
form.fields["full_name"].clean("not int nor mail")
self.assertEqual(2, len(e.exception.messages))
self.assertFalse(form.is_valid())
self.assertEqual(form.errors["string"], ["Letters only."])
        self.assertEqual(form.errors["ignore_case_string"], ["Letters only."])
def test_field_validators_can_be_any_iterable(self):
class UserForm(forms.Form):
full_name = forms.CharField(
max_length=50,
validators=(
validators.validate_integer,
validators.validate_email,
),
)
form = UserForm({"full_name": "not int nor mail"})
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors["full_name"],
["Enter a valid integer.", "Enter a valid email address."],
)
class ValidatorCustomMessageTests(TestCase):
def test_value_placeholder_with_char_field(self):
cases = [
(validators.validate_integer, "-42.5", "invalid"),
(validators.validate_email, "a", "invalid"),
(validators.validate_email, "a@b\n.com", "invalid"),
(validators.validate_email, "a\n@b.com", "invalid"),
(validators.validate_slug, "你 好", "invalid"),
(validators.validate_unicode_slug, "你 好", "invalid"),
(validators.validate_ipv4_address, "256.1.1.1", "invalid"),
(validators.validate_ipv6_address, "1:2", "invalid"),
(validators.validate_ipv46_address, "256.1.1.1", "invalid"),
(validators.validate_comma_separated_integer_list, "a,b,c", "invalid"),
(validators.int_list_validator(), "-1,2,3", "invalid"),
(validators.MaxLengthValidator(10), 11 * "x", "max_length"),
(validators.MinLengthValidator(10), 9 * "x", "min_length"),
(validators.URLValidator(), "no_scheme", "invalid"),
(validators.URLValidator(), "http://test[.com", "invalid"),
(validators.URLValidator(), "http://[::1:2::3]/", "invalid"),
(
validators.URLValidator(),
"http://" + ".".join(["a" * 35 for _ in range(9)]),
"invalid",
),
(validators.RegexValidator("[0-9]+"), "xxxxxx", "invalid"),
]
for validator, value, code in cases:
if isinstance(validator, types.FunctionType):
name = validator.__name__
else:
name = type(validator).__name__
with self.subTest(name, value=value):
class MyForm(forms.Form):
field = forms.CharField(
validators=[validator],
error_messages={code: "%(value)s"},
)
form = MyForm({"field": value})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"field": [value]})
def test_value_placeholder_with_null_character(self):
class MyForm(forms.Form):
field = forms.CharField(
error_messages={"null_characters_not_allowed": "%(value)s"},
)
form = MyForm({"field": "a\0b"})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"field": ["a\x00b"]})
def test_value_placeholder_with_integer_field(self):
cases = [
(validators.MaxValueValidator(0), 1, "max_value"),
(validators.MinValueValidator(0), -1, "min_value"),
(validators.URLValidator(), "1", "invalid"),
]
for validator, value, code in cases:
with self.subTest(type(validator).__name__, value=value):
class MyForm(forms.Form):
field = forms.IntegerField(
validators=[validator],
error_messages={code: "%(value)s"},
)
form = MyForm({"field": value})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"field": [str(value)]})
def test_value_placeholder_with_decimal_field(self):
cases = [
("NaN", "invalid"),
("123", "max_digits"),
("0.12", "max_decimal_places"),
("12", "max_whole_digits"),
]
for value, code in cases:
with self.subTest(value=value):
class MyForm(forms.Form):
field = forms.DecimalField(
max_digits=2,
decimal_places=1,
error_messages={code: "%(value)s"},
)
form = MyForm({"field": value})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"field": [value]})
def test_value_placeholder_with_file_field(self):
class MyForm(forms.Form):
field = forms.FileField(
validators=[validators.validate_image_file_extension],
error_messages={"invalid_extension": "%(value)s"},
)
form = MyForm(files={"field": SimpleUploadedFile("myfile.txt", b"abc")})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"field": ["myfile.txt"]})
|
from typing import Union
from .validator import Validator, ValidationError, StopValidation
class CurrentPassword(Validator):
def __init__(self, model, message: Union[str, None] = None, parse: bool = True) -> None:
self.parse = parse
self.message = message or 'The password is incorrect.'
self.model = model
def handler(self, value, field, request):
if not self.model.verify_password(value):
raise ValidationError(self.message)
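# Hedged usage sketch (illustrative only; assumes a user/model object exposing
# verify_password(), as the handler above requires, and that field/request are supplied
# by the surrounding validation framework):
#
#     validator = CurrentPassword(current_user)
#     validator.handler(submitted_password, field, request)  # raises ValidationError on mismatch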
|
# coding: utf-8
class Solution:
"""
@param A : A string includes Upper Case letters
@param B : A string includes Upper Case letters
@return : if string A contains all of the characters in B return True else return False
"""
def compareStrings(self, A, B):
# write your code here
if not B:
return True
count = {}
for i, ch in enumerate(B):
if ch not in count:
count[ch] = 1
else:
count[ch] += 1
for i, ch in enumerate(A):
if ch in count:
count[ch] -= 1
if count[ch] == 0:
del(count[ch])
return not count
# easy: http://lintcode.com/zh-cn/problem/compare-strings/
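if __name__ == "__main__":
    # Illustrative checks: "ABCD" covers every character of "ACD", but it lacks the
    # second 'A' required by "AABC".
    s = Solution()
    print(s.compareStrings("ABCD", "ACD"))   # True
    print(s.compareStrings("ABCD", "AABC"))  # False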
|
'''You are given two lists, rat_1 and rat_2, that contain the daily weights of two rats over a period of ten days. Assume the rats never have exactly the same weight. Write statements to do the following:'''
rat_1 = [2,4]
rat_2 = [1,3]
""" We are assuming the rats never have the same weight"""
#1 If the weight of rat 1 is greater than that of rat 2 on day 1, print "Rat 1 weighed more than rat 2 on day 1."; otherwise, print "Rat 1 weighed less than rat 2 on day 1."
if rat_1[0] > rat_2[0]:
    print("Rat 1 weighed more than rat 2 on day 1.")
else:
    print("Rat 1 weighed less than rat 2 on day 1.")
#2 If rat 1 weighed more than rat 2 on day 1 and if rat 1 weighs more than rat 2 on the last day, print "Rat 1 remained heavier than Rat 2."; otherwise, print "Rat 2 became heavier than Rat 1." (nested version)
if rat_1[0] > rat_2[0]:
    if rat_1[-1] > rat_2[-1]:
        print("Rat 1 remained heavier than Rat 2.")
    else:
        print("Rat 2 became heavier than Rat 1.")
#3 If your solution to the previous exercise used nested if statements, then do it without nesting, or vice versa. (non-nested version)
if rat_1[0] > rat_2[0] and rat_1[-1] > rat_2[-1]:
    print("Rat 1 remained heavier than Rat 2.")
if rat_1[0] > rat_2[0] and rat_1[-1] < rat_2[-1]:
    print("Rat 2 became heavier than Rat 1.")
|
"""This module sets up a base list of configuration entries."""
__revision__ = '$Revision$'
import copy
import lxml.etree
import sys
# py3k compatibility
if sys.hexversion >= 0x03000000:
from functools import reduce
import Bcfg2.Server.Plugin
class Base(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Structure,
Bcfg2.Server.Plugin.XMLDirectoryBacked):
"""This Structure is good for the pile of independent configs
needed for most actual systems.
"""
name = 'Base'
__version__ = '$Id$'
__author__ = 'bcfg-dev@mcs.anl.gov'
__child__ = Bcfg2.Server.Plugin.StructFile
deprecated = True
"""Base creates independent clauses based on client metadata."""
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Structure.__init__(self)
try:
Bcfg2.Server.Plugin.XMLDirectoryBacked.__init__(self,
self.data,
self.core.fam)
except OSError:
self.logger.error("Failed to load Base repository")
raise Bcfg2.Server.Plugin.PluginInitError
def BuildStructures(self, metadata):
"""Build structures for client described by metadata."""
ret = lxml.etree.Element("Independent", version='2.0')
fragments = reduce(lambda x, y: x + y,
[base.Match(metadata) for base
in list(self.entries.values())], [])
[ret.append(copy.copy(frag)) for frag in fragments]
return [ret]
|
icons = "♠ ♥ ♦"
result = ""
r1 = " _____\n|♠ |\n| |\n| A|\n ¯¯¯¯¯ "
r2 = " _____\n|♠ |\n| |\n| K|\n ¯¯¯¯¯ "
r3 = " _____\n|♠ |\n| |\n| Q|\n ¯¯¯¯¯ "
r4 = " _____\n|♠ |\n| |\n| J|\n ¯¯¯¯¯ "
r5 = " _____\n|♠ |\n| |\n| 2|\n ¯¯¯¯¯ "
r6 = " _____\n|♠ |\n| |\n| 3|\n ¯¯¯¯¯ "
r7 = " _____\n|♠ |\n| |\n| 4|\n ¯¯¯¯¯ "
r8 = " _____\n|♦ |\n| |\n| A|\n ¯¯¯¯¯ "
r9 = " _____\n|♦ |\n| |\n| K|\n ¯¯¯¯¯ "
r10 = " _____\n|♦ |\n| |\n| Q|\n ¯¯¯¯¯ "
r11 = " _____\n|♦ |\n| |\n| J|\n ¯¯¯¯¯ "
r12 = " _____\n|♦ |\n| |\n| 2|\n ¯¯¯¯¯ "
r13 = " _____\n|♦ |\n| |\n| 3|\n ¯¯¯¯¯ "
r14 = " _____\n|♦ |\n| |\n| 4|\n ¯¯¯¯¯ "
r15 = " _____\n|♥ |\n| |\n| A|\n ¯¯¯¯¯ "
r16 = " _____\n|♥ |\n| |\n| K|\n ¯¯¯¯¯ "
r17 = " _____\n|♥ |\n| |\n| Q|\n ¯¯¯¯¯ "
r18 = " _____\n|♥ |\n| |\n| J|\n ¯¯¯¯¯ "
r19 = " _____\n|♥ |\n| |\n| 2|\n ¯¯¯¯¯ "
r20 = " _____\n|♥ |\n| |\n| 3|\n ¯¯¯¯¯ "
r21 = " _____\n|♥ |\n| |\n| 4|\n ¯¯¯¯¯ "
#==========================================================================================================
print("Welcome to the 21 Card Game")
inp1 = input("Press Enter to continue...")
if inp1 == "":
print("Here are 21 cards. Choose any card you like:")
print()
print(" _____ _____ _____ _____ _____ _____ _____")
print("|♠ | |♠ | |♠ | |♠ | |♠ | |♠ | |♠ |")
print("| | | | | | | | | | | | | |")
print("| A| | K| | Q| | J| | 2| | 3| | 4|")
print(" ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ")
print(" _____ _____ _____ _____ _____ _____ _____")
print("|♦ | |♦ | |♦ | |♦ | |♦ | |♦ | |♦ |")
print("| | | | | | | | | | | | | |")
print("| A| | K| | Q| | J| | 2| | 3| | 4|")
print(" ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ")
print(" _____ _____ _____ _____ _____ _____ _____")
print("|♥ | |♥ | |♥ | |♥ | |♥ | |♥ | |♥ |")
print("| | | | | | | | | | | | | |")
print("| A| | K| | Q| | J| | 2| | 3| | 4|")
print(" ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ")
print()
    print("If you are done choosing a card, help me find your card with some info.")
number1 = input("Enter the Row number(1-3), where your card is located: ")
result += number1
print("Let's do this again")
print()
print(" _____ _____ _____ _____ _____ _____ _____")
print("|♠ | |♠ | |♠ | |♦ | |♦ | |♥ | |♥ |")
print("| | | | | | | | | | | | | |")
print("| A| | J| | 4| | Q| | 3| | K| | 2|")
print(" ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ")
print(" _____ _____ _____ _____ _____ _____ _____")
print("|♠ | |♠ | |♦ | |♦ | |♦ | |♥ | |♥ |")
print("| | | | | | | | | | | | | |")
print("| K| | 2| | A| | J| | 4| | Q| | 3|")
print(" ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ")
print(" _____ _____ _____ _____ _____ _____ _____")
print("|♠ | |♠ | |♦ | |♦ | |♥ | |♥ | |♥ |")
print("| | | | | | | | | | | | | |")
print("| Q| | 3| | K| | 2| | A| | J| | 4|")
print(" ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ")
number2 = input("Again look at these cards and Enter the row number of your card: ")
result += number2
print()
print("One last step.")
print(" _____ _____ _____ _____ _____ _____ _____")
print("|♠ | |♦ | |♥ | |♠ | |♥ | |♠ | |♥ |")
print("| | | | | | | | | | | | | |")
print("| K| | J| | 3| | 4| | K| | 3| | A|")
print(" ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ")
print(" _____ _____ _____ _____ _____ _____ _____")
print("|♠ | |♦ | |♠ | |♦ | |♥ | |♦ | |♥ |")
print("| | | | | | | | | | | | | |")
print("| 2| | 4| | A| | Q| | 2| | K| | J|")
print(" ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ")
print(" _____ _____ _____ _____ _____ _____ _____")
print("|♦ | |♥ | |♠ | |♦ | |♠ | |♦ | |♥ |")
print("| | | | | | | | | | | | | |")
print("| A| | Q| | J| | 3| | Q| | 2| | 4|")
print(" ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ¯¯¯¯¯ ")
number3 = input("Look at the following cards and Enter the row number of your card: ")
result += number3
else:
print("Restart the game to play again")
#==================================================================================================
if result == "112":
print(r1)
elif result == "121":
print(r2)
elif result == "133":
print(r3)
elif result == "113":
print(r4)
elif result == "122":
print(r5)
elif result == "131":
print(r6)
elif result == "111":
print(r7)
elif result == "223":
print(r8)
elif result == "232":
print(r9)
elif result == "212":
print(r10)
elif result == "221":
print(r11)
elif result == "233":
print(r12)
elif result == "213":
print(r13)
elif result == "222":
print(r14)
elif result == "331":
print(r15)
elif result == "311":
print(r16)
elif result == "323":
print(r17)
elif result == "332":
print(r18)
elif result == "312":
print(r19)
elif result == "321":
print(r20)
elif result == "333":
print(r21)
else:
print('Wrong input')
print()
LAST = input("PRESS ENTER TO CLOSE...")
|
# VerTeX | Copyright (c) 2010-2021 Steve Kieffer | MIT license
# SPDX-License-Identifier: MIT
"""
Translating math mode snippets from VerTeX to TeX.
"""
import re
from vertex2tex.config import *
###############
# Node classes
class Node:
"""
The basic Node class, which contains the basic translation method (`xlat`).
Other node types should subclass Node and simply override the three methods `buildArg`,
`addcomma`, and `build`, as needed.
"""
def __init__(self, token_stream):
self.toks = token_stream
self.output = ''
# Subclasses should override self.commawords, if necessary.
self.commawords = [] # Basic Node considers nothing to be a commaword.
###########################################################
# If possible, subclasses should use 'Node's basic xlat
# method below, and customize it simply by overriding
# the following three methods.
def buildArg(self, s):
self.output += s+' '
def addcomma(self, T):
"""
Should return True iff it is time to quit (i.e. the final comma word has been received).
"""
self.buildArg(T)
return False
def build(self):
return self.output.strip()
###########################################################
def xlat(self):
T = self.toks.next()
while T:
# First check if the token is escaped with a \.
if T[0] == '\\':
# Tokens beginning with double backslash and having at least one character beyond
# that are passed through after lopping off the backslashes.
if len(T) >= 3 and T[1] == '\\':
s = T[2:]
# Otherwise, the token passes through unaltered. So this includes both the case of a token
# that begins only with a single backslash, and the case of a double backslash followed
# by nothing else, as is used in TeX arrays, for example.
else:
s = T
self.buildArg(s)
# Let HTML codes like > and < pass through unaltered.
elif T in html_exceptions:
s = T
self.buildArg(s)
elif T in unarynodes:
code = unarynodes[T]
lefty = UnaryNode(code, self.toks)
s = lefty.xlat()
self.buildArg(s)
elif T in binarynodes:
w, c, o = binarynodes[T]
lefty = BinaryNode(w, c, o, self.toks)
s = lefty.xlat()
self.buildArg(s)
elif T in tertiarynodes:
w, c, o = tertiarynodes[T]
lefty = TertiaryNode(w, c, o, self.toks)
s = lefty.xlat()
self.buildArg(s)
elif T in rangenodes:
symbol = rangenodes[T]
lefty = RangeNode(symbol,self.toks)
s = lefty.xlat()
self.buildArg(s)
elif T in specialnodes:
klass = specialnodes[T]
lefty = klass(self.toks)
s = lefty.xlat()
self.buildArg(s)
elif T in self.commawords:
tobreak = self.addcomma(T)
if tobreak: break
elif T in builtins:
s = builtins[T]
self.buildArg(s)
elif T in bsmes:
self.buildArg('\\'+T)
elif fontword(T):
s = fontword(T)
self.buildArg(s)
#Automatic subscripting
elif (len(T) >= 2 and
re.match('[A-Za-z]', T) and
T.find(' ') == -1):
s = autosub(T)
self.buildArg(s)
#Anything else just passes through.
else:
self.buildArg(T)
#Get the next token.
T = self.toks.next()
s = self.build()
return s
class UnaryNode(Node):
def __init__(self, wrappers, ts):
self.toks = ts
self.wrappers = wrappers
self.stuff = ''
self.commawords = [';']
def buildArg(self, s):
self.stuff += s+' '
def addcomma(self, T):
return True
def build(self):
a = self.stuff
w = self.wrappers
s = w[0]+a+w[1]
return s
class BinaryNode(Node):
def __init__(self, wrappers, comma, order, ts):
self.toks = ts
self.wrappers = wrappers
self.commawords = [comma,';']
self.order = order
self.args = ['','']
self.argptr = 0
def buildArg(self, s):
self.args[self.argptr] += s+' '
def addcomma(self, T):
self.argptr += 1
return T == ';'
def build(self):
a, b = [self.args[i] for i in self.order]
w = self.wrappers
s = w[0]+a+w[1]+b+w[2]
return s
class TertiaryNode(Node):
def __init__(self, wrappers, comma, order, ts):
self.toks = ts
self.wrappers = wrappers
self.commawords = list(comma)+[';']
self.order = order
self.args = ['','','']
self.argptr = 0
self.commaseq = []
def buildArg(self, s):
self.args[self.argptr] += s+' '
def addcomma(self, T):
self.argptr += 1
self.commaseq.append(T)
return T == ';'
def build(self):
a, b, c = [self.args[i] for i in self.order]
w = self.wrappers
s = w[0]+a+w[1]+b+w[2]+c+w[3]
return s
class RangeNode(Node):
def __init__(self, symbol, ts):
self.toks = ts
self.symbol = symbol
self.commawords = ['over', 'from', 'to', ';']
self.args = ['', '', '']
self.argptr = -1
self.commaseq = []
def buildArg(self, s):
self.args[self.argptr] += s+' '
def addcomma(self, T):
self.argptr += 1
self.commaseq.append(T)
return T == ';'
def build(self):
if self.commaseq == [';']:
s = self.symbol+' '
elif self.commaseq == ['over', ';']:
c = self.symbol
r = self.args[0]
s = c+'_{'+r+'} '
elif self.commaseq == ['over', 'from', 'to', ';']:
c = self.symbol
v, a, b = self.args
s = c+'_{'+v+'='+a+'}^{'+b+'} '
else:
s = '--error in range operator--'
return s
class Matrixnode(Node):
"""
To make a matrix of N columns (we don't care how
many rows you make), start with the keyword
'matrix', then write out the number N, then the
keyword 'cols', and then write the entries, separated
by semicolons. You don't have to start a new line; we
handle that automatically based on the number of columns.
Thus, you can't start a new line early; we need to see
N semicolons in order to complete each line.
When you've written the last entry, you should not follow
it by a semicolon. Just write the keyword 'endmatrix'.
(A semicolon will cause a new row to begin, giving you
one blank row at the end of your matrix.)
"""
def __init__(self, ts):
self.toks = ts
self.commawords = ['cols', ';', 'endmatrix']
self.args = []
self.stuff = ''
self.cols = 0
self.err = False
def buildArg(self, s):
self.stuff += s+' '
def addcomma(self, c):
if c == 'endmatrix':
if self.stuff: self.args.append(self.stuff)
done = True
elif c == 'cols':
            try: self.cols = int(self.stuff)
            except ValueError: self.err = True
self.stuff = ''
done = False
else: # c == ';'
self.args.append(self.stuff)
self.stuff = ''
done = False
return done
def build(self):
if self.err: s = '-- error in matrix node --'
else:
C = self.cols
s = '\\begin{array}{'
s += 'c'*C
s += '}'
A = self.args
for k in range(len(A)):
r = k%C
if r > 0: s += ' & '
s += A[k]
if r == C-1: s += '\\\\'
if s[-1] != '\n': s += '\n'
s += '\\end{array}'
return s
class Padspnode(Node):
"""
This is intended for p-adic numbers with extra spaces
between the digits. Probably you can think of other uses
for it too.
The name could mean 'p-adic space node' or 'padded space node'.
When you've written the last entry, you can follow
it by a semicolon, but you don't have to. To end,
write the keyword 'end'.
"""
def __init__(self, ts):
self.toks = ts
self.commawords = [';', 'end']
self.args = []
self.stuff = ''
self.spacer = '\\: '
self.err = False
def buildArg(self, s):
self.stuff += s+' '
def addcomma(self, c):
if c == 'end':
if self.stuff: self.args.append(self.stuff)
done = True
else: # c == ';'
self.args.append(self.stuff)
self.stuff = ''
done = False
return done
def build(self):
if self.err: s = '-- error in padsp node --'
else:
s = self.spacer.join(self.args)
return s
specialnodes = {
'matrix': Matrixnode,
'padsp': Padspnode
}
##########
# Auto-subscripting
# Regular expressions to match letter sets and variations:
any_letter = all_letter_re_component+"|[A-Za-z]"
letter_matcher = re.compile(any_letter)
opt_font_any_letter = '(%s)?(%s)' % (
font_prefix_re_component, any_letter
)
letter_matcher_fonts = re.compile(opt_font_any_letter)
r = '(%s|[0-9,+-])' % opt_font_any_letter
sub_matcher = re.compile(r)
r = '(UU|uu|vv|%s|[0-9,+-])' % opt_font_any_letter
uusub_matcher = re.compile(r)
def autosub(s):
r"""
Autosubscripting (and superscripting).
If s is a string starting with some [A-Za-z], and having no
spaces in it, and has already failed
to match anything else (so, it was not escaped with a \, it
was not a leftword, commaword, userdef, built-in def, bsme word,
or font-letter), then we do automatic subscripting.
First we match the longest letter name we can, at the beginning
of s. Call this L, and call the remainder R. We then split R
into letter names, digits, and the characters [+-,], translate
this as needed, and put them all together into a subscript.
The exceptions are the codes 'uu' and 'vv'. If we find 'uu'
anywhere in the subscript, this switches us from subscript to
superscript. You may use 'uu' without any foregoing subscript.
The code 'vv' initiates a subscript. If used in the midst of
another subscript, you will get a double subscript, as in
$a_{i_1}$. If used in the midst of a superscript, you will get a
subscript on the superscript, as in $2^{alpha_i}$. If used as
the very first thing in an automatic subscript, it will have no
effect, but is useful in creating a subscript which would
otherwise have triggered a keyword. For example, 'pvvie' will
give $p_{i e}$ instead of $\pi$.
"""
#Make sure s starts with a letter, and has no spaces.
#If it is not so, then return the empty string.
if not re.match('[A-Za-z]', s) or s.find(' ') >= 0: return ''
#translator class
class Xlator:
def __init__(self):
self.vvcount = 0
self.state = 0
def reset(self):
self.vvcount = 0; self.state = 0
def xlat(self,t):
s = ''
if t == 'uu': s = '}^{'
elif t == 'UU':
if self.vvcount == 0:
s = t
else:
self.vvcount -= 1
s = '}}^{'
elif t == 'vv':
# If 'vv' is the very first token, do not raise vvcount.
if self.state > 0: self.vvcount += 1
s = '_{'
else: s = Node(TokenStream([t])).xlat()
if self.state == 0: self.state = 1
return s
def getvvcount(self):
return self.vvcount
#create translator instance
X = Xlator()
#Match the longest initial vertex letter.
M = letter_matcher_fonts.match(s)
L = X.xlat(M.group())
s = s[M.end():]
#Parse and translate the subscript.
X.reset()
A = uusub_matcher.findall(s)
sub = ' '.join([X.xlat(x[0]) for x in A])
sub = '_{'+sub+'}'
sub += '}'*X.getvvcount()
if sub[:3] == '_{}': sub = sub[3:] # in case started with uu
elif sub[:3] == '_{_': sub = sub[2:] # in case started with vv
return L+sub
##########
def fontword(T):
"""
Check whether T is of the form <font><letter>.
If so, return the string that should be passed to
the parsing node's buildArg method. If not, return
an empty string.
"""
#translation function
def xlat(t):
return Node(TokenStream([t])).xlat()
s = ''
if len(T) > 0 and letter_matcher.split(T)[-1] == '':
letter = letter_matcher.findall(T)[-1]
prefix = T[:-len(letter)]
if prefix in fonts:
font = fonts[prefix]
letter = xlat(letter)
s = '\\%s{%s}'%(font,letter)
return s
class TokenStream:
def __init__(self,token_list):
self.token_list = token_list
self.k = 0
self.N = len(token_list)
def next(self):
"""
Return the next token, or None if none remain.
"""
if self.k < self.N:
T = self.token_list[self.k]
self.k += 1
return T
else:
return None
def showRest(self):
"""
Return a list containing a copy of the remaining
tokens.
"""
return self.token_list[self.k:]
def getPtr(self):
"""
Get the current pointer value.
"""
return self.k
def getSlice(self,a,b):
"""
Return the tokens in the clopen interval from a to b.
Thus, you get token a, but not b.
In other words, it's just the [a:b] slice.
"""
return self.token_list[a:b]
TOKEN_RE = re.compile(
(r'\\{' +
r'|\\[^{\s]*' +
r'|[A-Za-z][A-Za-z0-9+\-,]*[A-Za-z0-9]' +
r'|#\d+' +
"|"+"|".join(html_exceptions) +
r'|\S' +
r'|\s+')
)
def tokenize(text):
return re.findall(TOKEN_RE, text)
def compress(text):
"""
Delete all whitespace characters except those followed by an upper or lowercase letter.
:param text: The text to be compressed.
:return: The compressed text.
"""
return re.sub(r'\s+(?![a-zA-Z])', '', text)
def translate_snippet(text):
"""
Translate a single "snippet" (i.e. the contents of a TeX math mode) from VerTeX into plain TeX.
:param text: The text of the snippet.
:return: The translated text.
"""
tokens = tokenize(text)
ts = TokenStream(tokens)
root = Node(ts)
out = root.xlat()
out = compress(out)
return out
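# Illustrative usage sketch (not part of the library): translate a couple of
# VerTeX snippets to TeX. The exact output depends on the tables defined in
# vertex2tex.config (builtins, fonts, bsmes, ...), so nothing is asserted here.
if __name__ == '__main__':
    for snippet in ['xij + yij', 'matrix 2 cols a; b; c; d endmatrix']:
        print(snippet, '-->', translate_snippet(snippet))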
|
# Importing the base class
from co_simulation_solvers.gauss_seidel_strong_coupling_solver import GaussSeidelStrongCouplingSolver
def CreateSolver(cosim_solver_settings, level):
return SimpleSteadyCouplingSolver(cosim_solver_settings, level)
class SimpleSteadyCouplingSolver(GaussSeidelStrongCouplingSolver):
def _SynchronizeInputData(self, solver, solver_name):
input_data_list = self.cosim_solver_details[solver_name]["input_data_list"]
for input_data in input_data_list:
from_solver = self.solvers[input_data["from_solver"]]
data_name = input_data["data_name"]
data_definition = from_solver.GetDataDefinition(data_name)
data_settings = { "data_format" : data_definition["data_format"],
"data_name" : data_name,
"io_settings" : input_data["io_settings"] }
solver.ImportData(data_settings, from_solver)
def _SynchronizeOutputData(self, solver, solver_name):
output_data_list = self.cosim_solver_details[solver_name]["output_data_list"]
for output_data in output_data_list:
to_solver = self.solvers[output_data["to_solver"]]
data_name = output_data["data_name"]
data_definition = to_solver.GetDataDefinition(data_name)
data_settings = { "data_format" : data_definition["data_format"],
"data_name" : data_name,
"io_settings" : output_data["io_settings"] }
solver.ExportData(data_settings, to_solver)
|
import pathlib
class _BaseSaver:
"""
Base class to save figures or data
"""
def __init__(self, *args, **kwargs):
allowed_kwargs = {
'out_dir',
'modes',
'clip',
'alpha',
'version',
'vmin',
'vmax',
'cmap',
'super_precision',
'n_precision_enhancers',
'norm_dict',
'station_positions',
'num_stations',
'num_vars',
}
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
                raise TypeError('Keyword argument not understood: ' + kwarg)
self.out_dir = kwargs.get('out_dir')
self.out_dir = pathlib.Path(self.out_dir)
self.modes = kwargs.get('modes', ['train', 'val', 'test'])
# Make required directories
sub_out_dirs = [self.out_dir / mode for mode in self.modes]
for sub_out_dir in sub_out_dirs:
if not sub_out_dir.exists():
sub_out_dir.mkdir(parents=True)
def save(self, *args, **kwargs):
self._save(*args, **kwargs)
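# Minimal usage sketch (illustrative only): subclasses are expected to provide a
# _save() implementation; the base class resolves out_dir and creates one
# sub-directory per mode. The 'demo_output' path and _PrintSaver are made up.
if __name__ == '__main__':
    class _PrintSaver(_BaseSaver):
        def _save(self, name, payload=None):
            print('would save', name, 'under', self.out_dir)
    saver = _PrintSaver(out_dir='demo_output', modes=['train', 'val'])
    saver.save('example_figure')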
|
import torch
from torch.nn import functional as F
from torch.nn import Dropout, Sequential, Linear, Softmax
class GradientReversalFunction(torch.autograd.Function):
"""Revert gradient without any further input modification."""
@staticmethod
def forward(ctx, x, l, c):
ctx.l = l
ctx.c = c
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
grad_output = grad_output.clamp(-ctx.c, ctx.c)
return ctx.l * grad_output.neg(), None, None
class GradientClippingFunction(torch.autograd.Function):
"""Clip gradient without any further input modification."""
@staticmethod
def forward(ctx, x, c):
ctx.c = c
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
grad_output = grad_output.clamp(-ctx.c, ctx.c)
return grad_output, None
class ReversalClassifier(torch.nn.Module):
"""Adversarial classifier (with two FC layers) with a gradient reversal layer.
Arguments:
        input_dim -- size of the input layer (should typically match the output size of the encoder)
        hidden_dim -- size of the hidden layer
        output_dim -- number of output channels (should typically match the number of speakers/languages)
        gradient_clipping_bounds (float) -- maximal absolute value of the gradient which flows from this module
    Keyword arguments:
        scale_factor (float, default: 1.0) -- scale multiplier of the reversed gradients
"""
def __init__(self, input_dim, hidden_dim, output_dim, gradient_clipping_bounds, scale_factor=1.0):
super(ReversalClassifier, self).__init__()
self._lambda = scale_factor
self._clipping = gradient_clipping_bounds
self._output_dim = output_dim
self._classifier = Sequential(
Linear(input_dim, hidden_dim),
Linear(hidden_dim, output_dim)
)
def forward(self, x):
x = GradientReversalFunction.apply(x, self._lambda, self._clipping)
x = self._classifier(x)
return x
@staticmethod
def loss(input_lengths, speakers, prediction, embeddings=None):
ignore_index = -100
ml = torch.max(input_lengths)
input_mask = torch.arange(ml, device=input_lengths.device)[None, :] < input_lengths[:, None]
target = speakers.repeat(ml, 1).transpose(0,1)
target[~input_mask] = ignore_index
return F.cross_entropy(prediction.transpose(1,2), target, ignore_index=ignore_index)
class CosineSimilarityClassifier(torch.nn.Module):
"""Cosine similarity-based adversarial classifier.
Arguments:
        input_dim -- size of the input layer (should typically match the output size of the encoder)
        output_dim -- number of output channels (should typically match the number of speakers/languages)
        gradient_clipping_bounds (float) -- maximal absolute value of the gradient which flows from this module
"""
def __init__(self, input_dim, output_dim, gradient_clipping_bounds):
super(CosineSimilarityClassifier, self).__init__()
self._classifier = Linear(input_dim, output_dim)
self._clipping = gradient_clipping_bounds
def forward(self, x):
x = GradientClippingFunction.apply(x, self._clipping)
return self._classifier(x)
@staticmethod
def loss(input_lengths, speakers, prediction, embeddings, instance):
l = ReversalClassifier.loss(input_lengths, speakers, prediction)
        w = instance._classifier.weight.T  # (input_dim, output_dim) after transposing the (output_dim, input_dim) weight
dot = embeddings @ w
norm_e = torch.norm(embeddings, 2, 2).unsqueeze(-1)
cosine_loss = torch.div(dot, norm_e)
norm_w = torch.norm(w, 2, 0).view(1, 1, -1)
cosine_loss = torch.div(cosine_loss, norm_w)
cosine_loss = torch.abs(cosine_loss)
cosine_loss = torch.sum(cosine_loss, dim=2)
l += torch.mean(cosine_loss)
return l
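# Minimal usage sketch (illustrative only, not part of the original module): run
# features through the adversarial classifier, compute the length-masked
# cross-entropy loss, and backpropagate through the gradient reversal layer.
# All dimensions below are made up for demonstration.
if __name__ == '__main__':
    batch, time_steps, feat_dim, n_speakers = 2, 5, 8, 4
    clf = ReversalClassifier(input_dim=feat_dim, hidden_dim=16, output_dim=n_speakers,
                             gradient_clipping_bounds=0.25, scale_factor=1.0)
    features = torch.randn(batch, time_steps, feat_dim, requires_grad=True)
    prediction = clf(features)  # (batch, time, n_speakers)
    lengths = torch.tensor([5, 3])
    speakers = torch.tensor([0, 2])
    loss = ReversalClassifier.loss(lengths, speakers, prediction)
    loss.backward()  # gradients reach `features` reversed and clipped
    print(loss.item(), features.grad.shape)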
|
from django.db import models
from django.utils.text import slugify
from django.shortcuts import reverse
class Actor(models.Model):
name = models.CharField(max_length=30)
photo = models.ImageField(upload_to="actor_photo/", blank=True)
def __str__(self):
return self.name
class Movie(models.Model):
title = models.CharField(max_length=50)
price = models.DecimalField(max_digits=9, decimal_places=2)
director = models.CharField(max_length=30)
actors = models.ManyToManyField(Actor)
genres = models.CharField(max_length=100)
country = models.CharField(max_length=5)
year = models.CharField(max_length=4)
imdb_score = models.DecimalField(max_digits=9, decimal_places=2)
imdb_link = models.URLField(default="")
trailer = models.URLField(default="", null=True)
description = models.TextField()
slug = models.SlugField(default="", unique=True)
is_active = models.BooleanField(default=True)
poster = models.ImageField(upload_to="movie_poster/", blank=True)
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
super(Movie, self).save(*args, **kwargs)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("movie-detail", kwargs={"slug": self.slug})
def get_add_to_cart_url(self):
return reverse("cart:add-to-cart", kwargs={"slug": self.slug})
def get_remove_from_cart_url(self):
return reverse("cart:remove-from-cart", kwargs={"slug": self.slug})
|
# Generated by Django 2.2.1 on 2019-07-27 12:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0010_auto_20190724_1051'),
]
operations = [
migrations.CreateModel(
name='OpenTime',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start', models.IntegerField(default=0)),
('end', models.IntegerField(default=24)),
],
),
]
|
#!/usr/bin/env python
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
Verify that a failed build action with -j works as expected.
"""
import TestSCons
python = TestSCons.python
test = TestSCons.TestSCons()
try:
import psutil # noqa: F401
except ImportError:
test.skip_test("Failed to import psutil required for test, skipping.")
test.dir_fixture('fixture')
# We want to verify that -j 2 starts precisely two jobs, the first of
# which fails and the second of which succeeds, and then stops processing
# due to the first build failure - the second build job does not just
# continue processing tasks. To try to control the timing, the two
# task scripts will be managed by a server script which regulates the
# state of the test.
#
# The failure script waits until the server responds that the
# copy script has, in fact, gotten started. If we don't wait, then SCons
# could detect our script failure early (typically if a high system load
# happens to delay SCons' ability to start the next script) and then not
# start the successful script at all.
#
# The successful script waits until the server responds that the
# failure script has finished (the server checks that the task pid does not
# exist). If we don't wait for that, then SCons could detect our successful
# exit first (typically if a high system load happens to delay the failure
# script) and start another job before it sees the failure from the first
# script.
#
# Both scripts are set to bail if they had to wait too long for what
# they expected to see.
test.run(arguments='-j 2 .',
status=2,
stderr="scons: *** [f3] Error 1\n")
test.must_not_exist(test.workpath('f3'))
test.must_match(test.workpath('f4'), 'f4.in')
test.must_not_exist(test.workpath('f5'))
test.must_not_exist(test.workpath('f6'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
"""Basic timing tools."""
from functools import wraps
import time
import numpy as np
def timed(f):
"""Wraps a function so it returns results and run time."""
@wraps(f)
def wrapper(*args, **kwargs):
start = time.monotonic()
result = f(*args, **kwargs)
end = time.monotonic()
return result, end - start
return wrapper
def time_repeat(f, repeats=3):
"""Time a function multiple times and return results and statistics.
Expects a function without arguments.
"""
results = []
times = np.zeros(repeats, dtype=float)
for i in range(repeats):
start = time.monotonic()
res = f()
results.append(res)
end = time.monotonic()
times[i] = end - start
return (
results,
{
"times": times.tolist(),
"mean": np.mean(times).item(),
"min": np.min(times).item(),
"max": np.max(times).item(),
},
)
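# Quick usage sketch (illustrative): wrap a function with @timed to get
# (result, elapsed_seconds), and use time_repeat for simple repeat statistics.
if __name__ == "__main__":
    @timed
    def square(x):
        return x * x
    value, elapsed = square(12)
    results, stats = time_repeat(lambda: sum(range(100000)), repeats=3)
    print(value, elapsed, stats["mean"])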
|
#!/usr/bin/env python3
#
# usage:
# python3 -m venv ve3
# ve3/bin/pip install markdown2
# ve3/bin/python misc/release.py
import argparse
import base64
import io
import logging
import os
import re
import shlex
import subprocess
import tarfile
import time
try:
import grp
def groupname(gid):
return grp.getgrgid(gid)[0]
except ImportError:
def groupname(gid):
return str(gid)
import markdown2
logger = logging.getLogger(__name__)
#GOOS GOARCH DEB_HOST_ARCH
osArchArch = [
('linux', 'amd64', 'amd64'),
('linux', 'arm', 'armhf'),
('linux', 'arm64', 'arm64'),
('darwin', 'amd64', None),
]
channel = 'indexer'
filespec = [
# [files], source path, deb path, tar path
[
['algorand-indexer.service', 'algorand-indexer@.service'],
'misc/systemd',
'lib/systemd/system',
'',
],
[
['algorand-indexer'],
'cmd/algorand-indexer',
'usr/bin',
'',
],
[
['LICENSE'],
'',
None,
'',
],
]
debian_copyright_top = (
'''Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: Algorand Indexer
Upstream-Contact: Algorand developers <dev@algorand.com>
Source: https://github.com/algorand/indexer
Files: *
Copyright: Algorand developers <dev@algorand.com>
License: MIT
''')
def debian_copyright(outpath):
with open(outpath, 'wt') as fout:
fout.write(debian_copyright_top)
with open('LICENSE') as fin:
for line in fin:
line = line.strip()
if not line:
line = ' .\n'
else:
line = ' ' + line + '\n'
fout.write(line)
def arch_ver(outpath, inpath, debarch, version):
with open(outpath, 'wt') as fout:
with open(inpath) as fin:
for line in fin:
line = line.replace('@ARCH@', debarch)
line = line.replace('@VER@', version)
line = line.replace('@CHANNEL@', channel)
fout.write(line)
def link(sourcepath, destpath):
if os.path.exists(destpath):
if (os.path.getmtime(destpath) >= os.path.getmtime(sourcepath)):
return # nothing to do
os.remove(destpath)
os.link(sourcepath, destpath)
_tagpat = re.compile(r'tag:\s+([^,]+)')
def compile_version_opts(release_version=None, allow_mismatch=False):
result = subprocess.run(['git', 'log', '-n', '1', '--pretty=%H %D'], stdout=subprocess.PIPE)
result.check_returncode()
so = result.stdout.decode()
githash, desc = so.split(None, 1)
tags = []
tag = None
for m in _tagpat.finditer(desc):
tag = m.group(1)
tags.append(tag)
if tag == release_version:
break
if tag != release_version:
if not allow_mismatch:
raise Exception('.version is {!r} but tags {!r}'.format(release_version, tags))
else:
logger.warning('.version is %r but tags %r', release_version, tags)
now = time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime()) + '+0000'
result = subprocess.run(['git', 'status', '--porcelain'], stdout=subprocess.PIPE)
result.check_returncode()
if len(result.stdout) > 2:
dirty = "true"
else:
dirty = ""
# Note: keep these in sync with Makefile
ldflags = '-ldflags=-X github.com/algorand/indexer/version.Hash={}'.format(githash)
ldflags += ' -X github.com/algorand/indexer/version.Dirty={}'.format(dirty)
ldflags += ' -X github.com/algorand/indexer/version.CompileTime={}'.format(now)
ldflags += ' -X github.com/algorand/indexer/version.GitDecorateBase64={}'.format(base64.b64encode(desc.encode()).decode())
if release_version:
ldflags += ' -X github.com/algorand/indexer/version.ReleaseVersion={}'.format(release_version)
logger.debug('Hash=%r Dirty=%r CompileTime=%r decorate=%r ReleaseVersion=%r', githash, dirty, now, desc, release_version)
logger.debug('%s', ldflags)
return ldflags
def compile(goos=None, goarch=None, ldflags=None):
env = dict(os.environ)
env['CGO_ENABLED'] = '0'
if goos is not None:
env['GOOS'] = goos
if goarch is not None:
env['GOARCH'] = goarch
cmd = ['go', 'build']
if ldflags is not None:
cmd.append(ldflags)
subprocess.run(cmd, cwd='cmd/algorand-indexer', env=env).check_returncode()
def build_deb(debarch, version, outdir):
os.makedirs('.deb_tmp/DEBIAN', exist_ok=True)
debian_copyright('.deb_tmp/DEBIAN/copyright')
arch_ver('.deb_tmp/DEBIAN/control', 'misc/debian/control', debarch, version)
os.makedirs('.deb_tmp/etc/apt/apt.conf.d', exist_ok=True)
arch_ver('.deb_tmp/etc/apt/apt.conf.d/52algorand-indexer-upgrades', 'misc/debian/52algorand-indexer-upgrades', debarch, version)
for files, source_path, deb_path, _ in filespec:
if deb_path is None:
continue
for fname in files:
if deb_path:
os.makedirs(os.path.join('.deb_tmp', deb_path), exist_ok=True)
link(os.path.join(source_path, fname), os.path.join('.deb_tmp', deb_path, fname))
debname = 'algorand-indexer_{}_{}.deb'.format(version, debarch)
debpath = os.path.join(outdir, debname)
subprocess.run(
['dpkg-deb', '--build', '.deb_tmp', debpath])
return debpath
def extract_usage():
usage = False
usageBuffer = ""
with open('README.md') as infile:
for line in infile:
if "USAGE_START_MARKER" in line:
usage = True
continue
elif "USAGE_END_MARKER" in line:
usage = False
continue
elif usage:
usageBuffer += line
return usageBuffer
_usage_html = None
def usage_html():
global _usage_html
if _usage_html is not None:
return _usage_html
md = extract_usage()
_usage_html = markdown2.markdown(md, extras=["tables", "fenced-code-blocks"])
return _usage_html
def build_tar(goos, goarch, version, outdir):
rootdir = 'algorand-indexer_{}_{}_{}'.format(goos, goarch, version)
tarname = os.path.join(outdir, rootdir) + '.tar.bz2'
tf = tarfile.open(tarname, 'w:bz2')
for files, source_path, _, tar_path in filespec:
if tar_path is None:
continue
for fname in files:
tf.add(os.path.join(source_path, fname), os.path.join(rootdir, tar_path, fname))
ti = tarfile.TarInfo(os.path.join(rootdir, "usage.html"))
ti.mtime = time.time()
ti.mode = 0o444
ti.type = tarfile.REGTYPE
ti.uid = os.getuid()
ti.uname = os.getenv('USER')
ti.gid = os.getgid()
ti.gname = groupname(os.getgid())
uhtml = usage_html().encode('utf-8')
ti.size=len(uhtml)
tf.addfile(ti, io.BytesIO(uhtml))
tf.close()
return tarname
def hostOsArch():
result = subprocess.run(['go', 'env'], stdout=subprocess.PIPE)
result.check_returncode()
goenv = {}
for line in result.stdout.decode().splitlines():
line = line.strip()
k,v = line.split('=', 1)
goenv[k] = shlex.split(v)[0]
return goenv['GOHOSTOS'], goenv['GOHOSTARCH']
def main():
start = time.time()
ap = argparse.ArgumentParser()
ap.add_argument('-o', '--outdir', help='The output directory for the build assets', type=str, default='.')
ap.add_argument('--no-deb', action='store_true', default=False, help='disable debian package building')
ap.add_argument('--host-only', action='store_true', default=False, help='only build for host OS and CPU')
ap.add_argument('--build-only', action='store_true', default=False, help="don't make tar or deb release")
ap.add_argument('--fake-release', action='store_true', default=False, help='relax some checks during release script development')
ap.add_argument('--verbose', action='store_true', default=False)
args = ap.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
outdir = args.outdir
if args.host_only:
hostos, hostarch = hostOsArch()
logger.info('will only run %s %s', hostos, hostarch)
with open('.version') as fin:
version = fin.read().strip()
ldflags = compile_version_opts(version, allow_mismatch=args.fake_release)
for goos, goarch, debarch in osArchArch:
if args.host_only and (goos != hostos or goarch != hostarch):
logger.debug('skip %s %s', goos, goarch)
continue
logger.info('GOOS=%s GOARCH=%s DEB_HOST_ARCH=%s', goos, goarch, debarch)
compile(goos, goarch, ldflags)
if args.build_only:
logger.debug('skip packaging')
continue
tarname = build_tar(goos, goarch, version, outdir)
logger.info('\t%s', tarname)
if (not args.no_deb) and (debarch is not None):
debname = build_deb(debarch, version, outdir)
logger.info('\t%s', debname)
dt = time.time() - start
logger.info('done %0.1fs', dt)
return
if __name__ == '__main__':
main()
|
import math
import numpy as np
class Utils():
@staticmethod
def normal_distr(x, avg, std):
return 1/(std*((2*math.pi)**0.5))*math.exp(-0.5*((x - avg)/std)**2)
@staticmethod
def get_freq_hist(data):
freq = np.zeros(data.shape[0])
unique = np.unique(data)
for val in unique:
val_filter = (data == val)
freq[val_filter] = np.arange(freq[val_filter].shape[0])
return freq
@staticmethod
def calc_err(x_data, y_data, r, t_min, y=0):
curve_fit = Utils.softplus_func(x_data, r, t_min, y)
return np.sum(np.abs(y_data - curve_fit))
@staticmethod
def softplus_func(t, r, t_min, y=0):
lin = r*(t - t_min)
lin[lin < 100] = np.log(np.exp(lin[lin < 100]) + np.exp(y))
return lin
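# Illustrative check of the helpers above (values are made up): the standard
# normal pdf at its mean is ~0.3989, and softplus_func smoothly bends the line
# r*(t - t_min) towards the floor y.
if __name__ == '__main__':
    print(Utils.normal_distr(0.0, avg=0.0, std=1.0))
    t = np.linspace(0.0, 10.0, 5)
    print(Utils.softplus_func(t, r=1.0, t_min=2.0, y=0.0))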
|
import argparse
import sys
from termy.constants import TERMY_INTRO_MESSAGE, VERSION
from termy.service.flow_handler.handle_flows import configure_termy, search_and_execute, update_termy, \
resolve_command_from_GPT3, display_current_configs, periodic_update_prompt
DESCRIPTION = TERMY_INTRO_MESSAGE
def init_cli_app():
parser = argparse.ArgumentParser(add_help=False,
description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter,
usage=argparse.SUPPRESS)
parser.add_argument('search', help='Input query in natural language', nargs='*')
parser.add_argument("-c", "--configure", action='store_true',
help="Configure your termy with the google sheet containing commands")
parser.add_argument("--show-config", action='store_true', help="Shows the current configurations")
parser.add_argument("--gpt3", action='store_true',
help="Use GPT-3 API to convert any natural language query into a terminal command")
parser.add_argument("-u", "--update", action='store_true',
help="Update termy and sync the latest commands from the google sheet")
parser.add_argument("-v", "--version", action='store_true',
help="Version Info")
parser.add_argument("-h", "--help", action='store_true',
help="How to use Termy")
args = parser.parse_args()
if args.gpt3: # gpt3 search query
if args.search:
query = ' '.join(args.search)
else:
query = ''
resolve_command_from_GPT3(query)
elif args.search: # regular search query
query = ' '.join(args.search)
periodic_update_prompt()
search_and_execute(query)
elif args.configure:
configure_termy()
elif args.update:
update_termy()
elif args.version:
print(VERSION)
elif args.show_config:
display_current_configs()
else:
parser.print_help(sys.stdout)
if __name__ == '__main__':
init_cli_app()
|
#!/usr/bin/env python
'''CPS Helpers for the DHCP IO Library'''
# Copyright (c) 2018 Inocybe Technologies.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT
# LIMITATION ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS
# FOR A PARTICULAR PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
import cps_utils
import cps
import nas_os_utils
import nas_acl
IFNAME = "if/interfaces/interface/name"
IFOBJ = "dell-base-if-cmn/if/interfaces/interface"
PORTS = "dell-if/if/interfaces/interface/untagged-ports"
TBL_ID = 1 # we may decide to use our own table - check
UDP = 17
#E_STG = {'INGRESS': 1, 'EGRESS': 2}
#E_FTYPE = {'IN_PORTS': 7, 'IN_PORT': 9, 'L4_SRC_PORT':17, 'L4_DST_PORT':18, 'IP_PROTOCOL': 20}
#E_ATYPE = {'PACKET_ACTION': 3}
#E_PTYPE = {'DROP': 1, 'COPY_TO_CPU': 3, 'TRAP_TO_CPU': 5, 'COPY_TO_CPU_CANCEL_AND_DROP': 6}
def get_ports(bridge):
'''A simple function to return the openswitch perception of the port
list in a vlan'''
cps_obj = cps_utils.CPSObject(IFOBJ, data={IFNAME:bridge.encode('ascii')})
cps_result = []
if not cps.get([cps_obj.get()], cps_result):
return None
result = []
for value in cps_result[0]['data'][PORTS]:
result.append(cps_utils.cps_attr_types_map.from_data(PORTS, value))
return result
def _add_filter(ports, udp_port, prio, entry_name, exclude=None):
'''Add a filter for a port which drops the packet to CPU so that
the DHCP agent can work on it'''
ifaces = []
for port in ports:
        # Collect interface indexes, skipping any explicitly excluded port.
        if exclude is None or port not in exclude:
            ifaces.append(nas_os_utils.if_nametoindex(port))
entry = nas_acl.EntryCPSObj(table_id=TBL_ID, entry_id=entry_name, priority=prio)
entry.add_match_filter(filter_type="IP_PROTOCOL", filter_val=UDP)
entry.add_match_filter(filter_type="L4_DST_PORT", filter_val=udp_port)
entry.add_match_filter(filter_type="IN_PORTS", filter_val=ifaces)
entry.add_action(action_type="PACKET_ACTION", action_val="TRAP_TO_CPU")
cps_upd = ({'operation':'create', 'change':entry.data()})
return cps.transaction([cps_upd])
def add_filter(bridge, udp_port, prio, entry_name, exclude=None):
'''Add a filter for a port which drops the packet to CPU so that
the DHCP agent can work on it'''
return _add_filter(get_ports(bridge), udp_port, prio, entry_name, exclude=exclude)
def del_filter(name):
'''Delete a filter based on its unique ID returned by set_filters'''
cps_obj = cps_utils.CPSObject(module='base-acl/entry', data={'table-id': TBL_ID, 'name':name})
cps_upd = ('delete', cps_obj.get())
cps_trans = cps_utils.CPSTransaction([cps_upd])
return cps_trans.commit()
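# Illustrative usage of this module's helpers (comments only, since the
# OpenSwitch CPS/NAS stack must be present; the bridge/port values are made up):
#   ports = get_ports('br100')                                   # untagged member ports of the vlan bridge
#   add_filter('br100', udp_port=67, prio=100, entry_name='dhcp-trap')
#   del_filter('dhcp-trap')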
|
def main():
a = float(input())
b = float(input())
c = float(input())
pi = 3.14159
d = ((a*c)/2)
e = (pi*c*c)
f = (((a+b)*c)/2)
g = (b*b)
h = (a*b)
print("TRIANGULO: %.3f" %d)
print("CIRCULO: %.3f" %e)
print("TRAPEZIO: %.3f" %f)
print("QUADRADO: %.3f" %g)
print("RETANGULO: %.3f" %h)
main()
|
import tensorflow as tf
from stepcovnet.config.TrainingConfig import TrainingConfig
from stepcovnet.inputs.AbstractInput import AbstractInput
from stepcovnet.training.TrainingFeatureGenerator import TrainingFeatureGenerator
class TrainingInput(AbstractInput):
def __init__(self, training_config: TrainingConfig):
super(TrainingInput, self).__init__(config=training_config)
self.output_types = (
{"arrow_input": tf.dtypes.int32,
"arrow_mask": tf.dtypes.int32,
"audio_input": tf.dtypes.float64},
tf.dtypes.int8, # labels
tf.dtypes.float16 # sample weights
)
self.output_shape = (
{"arrow_input": tf.TensorShape((None,) + self.config.arrow_input_shape),
"arrow_mask": tf.TensorShape((None,) + self.config.arrow_mask_shape),
"audio_input": tf.TensorShape((None,) + self.config.audio_input_shape)},
tf.TensorShape((None,) + self.config.label_shape), # labels
tf.TensorShape((None,)) # sample weights
)
self.train_feature_generator = TrainingFeatureGenerator(dataset_path=self.config.dataset_path,
dataset_type=self.config.dataset_type,
lookback=self.config.lookback,
batch_size=self.config.hyperparameters.batch_size,
indexes=self.config.train_indexes,
num_samples=self.config.num_train_samples,
scalers=self.config.train_scalers,
difficulty=self.config.difficulty,
warmup=True,
tokenizer_name=self.config.tokenizer_name)
self.val_feature_generator = TrainingFeatureGenerator(dataset_path=self.config.dataset_path,
dataset_type=self.config.dataset_type,
lookback=self.config.lookback,
batch_size=self.config.hyperparameters.batch_size,
indexes=self.config.val_indexes,
num_samples=self.config.num_val_samples,
scalers=self.config.train_scalers,
difficulty=self.config.difficulty,
shuffle=False,
tokenizer_name=self.config.tokenizer_name)
self.all_feature_generator = TrainingFeatureGenerator(dataset_path=self.config.dataset_path,
dataset_type=self.config.dataset_type,
lookback=self.config.lookback,
batch_size=self.config.hyperparameters.batch_size,
indexes=self.config.all_indexes,
num_samples=self.config.num_samples,
scalers=self.config.all_scalers,
difficulty=self.config.difficulty,
warmup=True,
tokenizer_name=self.config.tokenizer_name)
def get_tf_dataset(self, generator):
return tf.data.Dataset.from_generator(
generator,
output_types=self.output_types,
output_shapes=self.output_shape,
).prefetch(tf.data.experimental.AUTOTUNE)
@property
def train_generator(self):
return self.get_tf_dataset(self.train_feature_generator)
@property
def val_generator(self):
return self.get_tf_dataset(self.val_feature_generator)
@property
def all_generator(self):
return self.get_tf_dataset(self.all_feature_generator)
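# Minimal sketch of the tf.data pattern used above, kept independent of the
# project-specific generators (the toy generator, dtypes and shapes below are
# made up for illustration).
if __name__ == "__main__":
    def _toy_generator():
        for _ in range(3):
            yield ({"audio_input": tf.zeros((2, 4), dtype=tf.float64)},
                   tf.zeros((2, 1), dtype=tf.int8),
                   tf.ones((2,), dtype=tf.float16))
    toy_dataset = tf.data.Dataset.from_generator(
        _toy_generator,
        output_types=({"audio_input": tf.dtypes.float64}, tf.dtypes.int8, tf.dtypes.float16),
        output_shapes=({"audio_input": tf.TensorShape((None, 4))},
                       tf.TensorShape((None, 1)),
                       tf.TensorShape((None,))),
    ).prefetch(tf.data.experimental.AUTOTUNE)
    for features, labels, weights in toy_dataset:
        print(features["audio_input"].shape, labels.shape, weights.shape)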
|
"""demo_register URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf.urls import include, url
from accounts import views
from django.conf.urls.static import static
from django.views.generic import TemplateView
from django.conf import settings
admin.autodiscover()
urlpatterns = [
path('accounts/', include('accounts.urls')),
path('', include('pages.urls')),
path('admin/', admin.site.urls),
url(r'^index/', views.index, name = 'index'),
#url(r'^fullcalendar/', views.fullcalendar, name = 'fullcalendar'),
url(r'^$', views.index, name = 'index'),
url(r'^schedule/', include('schedule.urls')),
url(r'^fullcalendar/', TemplateView.as_view(template_name="fullcalendar.html"), name='fullcalendar'),
url(r'^mymeetings/', TemplateView.as_view(template_name="mymeetings.html"), name='mymeetings'),
url(r'^doctor/', TemplateView.as_view(template_name="doctor.html"), name='doctor'),
url(r'^admin/', admin.site.urls),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
|
from city_scrapers_core.constants import COMMISSION
from city_scrapers_core.spiders import CityScrapersSpider
from city_scrapers.mixins import DetCityMixin
class DetEntertainmentCommissionSpider(DetCityMixin, CityScrapersSpider):
name = "det_entertainment_commission"
agency = "Detroit Entertainment Commission"
agency_cal_id = "1616"
agency_doc_id = ["1616", "7066", "7071"]
def _parse_title(self, response):
title = super()._parse_title(response)
if "commission" not in title.lower():
return title
return "Entertainment Commission"
def _parse_description(self, response):
return ""
def _parse_classification(self, response):
return COMMISSION
|
"""Find translation keys that are in Lokalise but no longer defined in source."""
import argparse
import json
from .const import CORE_PROJECT_ID, FRONTEND_DIR, FRONTEND_PROJECT_ID, INTEGRATIONS_DIR
from .error import ExitApp
from .lokalise import get_api
from .util import get_base_arg_parser
def get_arguments() -> argparse.Namespace:
"""Get parsed passed in arguments."""
parser = get_base_arg_parser()
parser.add_argument(
"--target", type=str, default="core", choices=["core", "frontend"],
)
return parser.parse_args()
def find_extra(base, translations, path_prefix, missing_keys):
"""Find all keys that are in translations but not in base."""
for key, value in translations.items():
cur_path = f"{path_prefix}::{key}" if path_prefix else key
# Value is either a dict or a string
if isinstance(value, dict):
base_search = None if base is None else base.get(key)
find_extra(base_search, value, cur_path, missing_keys)
elif base is None or key not in base:
missing_keys.append(cur_path)
def find_core():
"""Find all missing keys in core."""
missing_keys = []
for int_dir in INTEGRATIONS_DIR.iterdir():
strings = int_dir / "strings.json"
if not strings.is_file():
continue
translations = int_dir / "translations" / "en.json"
strings_json = json.loads(strings.read_text())
translations_json = json.loads(translations.read_text())
find_extra(
strings_json, translations_json, f"component::{int_dir.name}", missing_keys
)
return missing_keys
def find_frontend():
"""Find all missing keys in frontend."""
if not FRONTEND_DIR.is_dir():
raise ExitApp(f"Unable to find frontend at {FRONTEND_DIR}")
source = FRONTEND_DIR / "src/translations/en.json"
translated = FRONTEND_DIR / "translations/en.json"
missing_keys = []
find_extra(
json.loads(source.read_text()),
json.loads(translated.read_text()),
"",
missing_keys,
)
return missing_keys
def run():
"""Clean translations."""
args = get_arguments()
if args.target == "frontend":
missing_keys = find_frontend()
lokalise = get_api(FRONTEND_PROJECT_ID)
else:
missing_keys = find_core()
lokalise = get_api(CORE_PROJECT_ID)
if not missing_keys:
print("No missing translations!")
return 0
key_data = lokalise.keys_list(
{"filter_keys": ",".join(missing_keys), "limit": 1000}
)
if len(key_data) != len(missing_keys):
print(
f"Lookin up key in Lokalise returns {len(key_data)} results, expected {len(missing_keys)}"
)
return 1
print(f"Deleting {len(missing_keys)} keys:")
for key in missing_keys:
print(" -", key)
print()
while input("Type YES to delete these keys: ") != "YES":
pass
print(lokalise.keys_delete_multiple([key["key_id"] for key in key_data]))
return 0
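# Illustrative behaviour of find_extra (comments only, since this module uses
# relative imports and is meant to be run via its package entry point):
#   extra = []
#   find_extra({"a": {"b": "x"}}, {"a": {"b": "y", "c": "z"}, "d": "w"}, "", extra)
#   # extra == ["a::c", "d"]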
|
import csv
import json
import time
from datetime import datetime
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http import JsonResponse, HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from accounts.models import UserUploadedImage
from astrometry.models import AstrometrySubmission
from astrometry.process import process_astrometry_online
from corrections import get_jd_for_analysis
from imageflow.models import ImageAnalysis, ImageAnalysisPair, ImageFilter, Reduction
from lightcurve.alcdef import AlcdefWriter
from lightcurve.models import LightCurve
from lightcurve.util import ordered_analysis_status
from reduction.util import find_point_by_id, find_star_by_designation
def edit_lightcurve(request, lightcurve_id):
lc = LightCurve.objects.get(id=lightcurve_id)
images = UserUploadedImage.objects.filter(lightcurve=lc).order_by('analysis__image_datetime')
# Always add 5 extra empty image pairs to the list.
image_pairs = list(ImageAnalysisPair.objects.filter(lightcurve=lc)) + ([None] * 5)
context = {
'lightcurve': lc,
'reduction': lc.get_or_create_reduction(),
'images': images,
'image_filters': ImageFilter.objects.all(),
'magband_filters': ImageFilter.objects.all().exclude(band='C'),
'image_pairs': image_pairs,
'ci_bands': ImageFilter.objects.get_ci_bands(),
}
return render_to_response('lightcurve.html', context,
context_instance=RequestContext(request))
def images(request, lightcurve_id):
'''List images in lightcurve.
'''
lc = LightCurve.objects.get(id=lightcurve_id)
images = UserUploadedImage.objects.filter(lightcurve=lc).order_by('analysis__image_datetime')
sort = request.GET.get('sort')
if sort == 'filename':
images = images.order_by('original_filename')
elif sort == 'timestamp':
images = images.order_by('analysis__image_datetime')
elif sort == 'status':
images = images.annotate(status_sort = ordered_analysis_status()).order_by('status_sort')
context = {
'lightcurve': lc,
'images': images,
}
return render_to_response('images.html', context,
context_instance=RequestContext(request))
def plot_lightcurve(request, lightcurve_id):
lc = LightCurve.objects.get(id=lightcurve_id)
context = {
'lightcurve': lc,
}
return render_to_response('lightcurve_plot.html', context,
context_instance=RequestContext(request))
def plot_lightcurve_json(request, lightcurve_id):
lc = LightCurve.objects.get(id=lightcurve_id)
ret = []
if request.GET.get('type') == 'instrumental':
analyses = ImageAnalysis.objects.filter(useruploadedimage__lightcurve=lc) \
.exclude(status=ImageAnalysis.ASTROMETRY_PENDING)
for analysis in analyses:
result = find_point_by_id(analysis.annotated_point_sources, analysis.target_id)
if not result:
continue
ret.append({
'analysisId': analysis.id,
'timestamp': analysis.image_datetime,
'timestampJd': get_jd_for_analysis(analysis),
'result': result,
})
else:
# type == 'standard' or type == 'error'
reductions = Reduction.objects.filter(analysis__useruploadedimage__lightcurve=lc,
analysis__status=ImageAnalysis.ADDED_TO_LIGHT_CURVE,
status=Reduction.COMPLETE)
for reduction in reductions:
result = find_point_by_id(reduction.reduced_stars, reduction.analysis.target_id)
if not result:
# Reduction not complete.
continue
ret.append({
'analysisId': reduction.analysis.id,
'reductionId': reduction.id,
'timestamp': reduction.analysis.image_datetime,
'timestampJd': get_jd_for_analysis(reduction.analysis),
# TODO(ian): Maybe one day we can bake the target id into the URL.
# That way you can compare your target light curve to any light
# curve from a known object!
'result': result,
})
return JsonResponse({
'success': True,
'results': ret,
})
@transaction.atomic
def save_observation_default(request, lightcurve_id):
lc = get_object_or_404(LightCurve, id=lightcurve_id, user=request.user.id)
images = lc.imageanalysis_set.all()
lat = request.POST.get('lat')
lng = request.POST.get('lng')
elevation = request.POST.get('elevation')
extinction = request.POST.get('extinction')
target_name = request.POST.get('target')
magband = request.POST.get('magband')
filter = request.POST.get('filter')
for image in images:
if lat:
image.image_latitude= float(lat)
if lng:
image.image_longitude = float(lng)
if elevation:
image.image_elevation = float(elevation)
if extinction:
reduction = lc.get_or_create_reduction()
reduction.second_order_extinction = float(extinction)
reduction.save()
if magband:
lc.magband = ImageFilter.objects.get(band=magband)
if filter:
lc.filter = ImageFilter.objects.get(band=filter)
# Unconditionally set (None if blank)
lc.target_name = target_name
lc.save()
image.save()
return JsonResponse({
'success': True,
})
@transaction.atomic
def apply_photometry_settings(request, lightcurve_id):
lc = get_object_or_404(LightCurve, id=lightcurve_id, user=request.user.id)
template_analysis = ImageAnalysis.objects.get(pk=request.POST.get('analysisId'))
template_settings = template_analysis.get_or_create_photometry_settings()
# Apply the settings from this analysis to every analysis.
count = 0
print 'template', template_settings
for analysis in lc.imageanalysis_set.all():
settings = analysis.get_or_create_photometry_settings()
changed = settings.sigma_psf != template_settings.sigma_psf or \
settings.crit_separation != template_settings.crit_separation or \
settings.threshold != template_settings.threshold or \
settings.box_size != template_settings.box_size or \
settings.iters != template_settings.iters
if changed or True:
settings.sigma_psf = template_settings.sigma_psf
settings.crit_separation = template_settings.crit_separation
            settings.threshold = template_settings.threshold
settings.box_size = template_settings.box_size
settings.iters = template_settings.iters
settings.save()
analysis.status = ImageAnalysis.PHOTOMETRY_PENDING
analysis.save()
count += 1
return JsonResponse({
'success': True,
'numUpdated': count,
})
@transaction.atomic
def save_image_pairs(request, lightcurve_id):
lc = get_object_or_404(LightCurve, id=lightcurve_id, user=request.user.id)
reduction = lc.get_or_create_reduction()
color_index_manual = request.POST.get('manual_color_index')
if color_index_manual:
reduction.color_index_manual = float(color_index_manual)
else:
reduction.color_index_manual = None
reduction.save()
images = lc.imageanalysis_set.all()
ciband = request.POST.get('ciband')
pairs = json.loads(request.POST.get('pairs'))
if ciband:
lc.ciband = ciband
lc.save()
if pairs:
# Clear existing ImageAnalysisPairs
ImageAnalysisPair.objects.filter(lightcurve=lc).delete()
# Rebuild them
for pair in pairs:
# Exclude Nones
if all(pair):
analysis1 = ImageAnalysis.objects.get(pk=pair[0], user=request.user.id)
analysis2 = ImageAnalysis.objects.get(pk=pair[1], user=request.user.id)
ImageAnalysisPair.objects.create(lightcurve=lc, analysis1=analysis1, analysis2=analysis2)
return JsonResponse({
'success': True,
})
@transaction.atomic
def add_images(request, lightcurve_id):
# Add all images that are currently eligible to be in the lightcurve.
lc = get_object_or_404(LightCurve, id=lightcurve_id, user=request.user.id)
analyses = lc.imageanalysis_set.filter(status=ImageAnalysis.REDUCTION_COMPLETE)
for analysis in analyses:
analysis.status = ImageAnalysis.ADDED_TO_LIGHT_CURVE
analysis.save()
return JsonResponse({
'success': True,
'count': len(analyses),
})
def add_image_toggle(request, lightcurve_id):
analysis_id = request.POST.get('analysisId')
lc = get_object_or_404(LightCurve, id=lightcurve_id, user=request.user.id)
image = lc.imageanalysis_set.get(id=analysis_id)
if image.status == ImageAnalysis.ADDED_TO_LIGHT_CURVE:
image.status = ImageAnalysis.REDUCTION_COMPLETE
elif image.status == ImageAnalysis.REDUCTION_COMPLETE:
image.status = ImageAnalysis.ADDED_TO_LIGHT_CURVE
image.save()
return JsonResponse({
'added': image.status == ImageAnalysis.ADDED_TO_LIGHT_CURVE,
'success': True,
})
def edit_lightcurve_name(request, lightcurve_id):
name = request.POST.get('name')
lc = get_object_or_404(LightCurve, id=lightcurve_id, user=request.user.id)
lc.name = name
lc.save()
return JsonResponse({
'success': True,
})
def status(request, lightcurve_id):
lc = get_object_or_404(LightCurve, pk=lightcurve_id, user=request.user.id)
if request.method == 'POST':
val = request.POST.get('status')
if val == 'REDUCTION_PENDING':
lc.status = LightCurve.REDUCTION_PENDING
elif val == 'PHOTOMETRY_PENDING':
lc.status = LightCurve.PHOTOMETRY_PENDING
else:
return JsonResponse({
'success': False,
'message': 'Did not recognize status %s' % val
})
lc.save()
return JsonResponse({
'success': True,
})
else:
images = lc.useruploadedimage_set.all()
pairs = ImageAnalysisPair.objects.filter(lightcurve=lc)
num_processed = sum([image.submission.is_done() for image in images if image.submission])
num_photometry = sum([image.analysis.is_photometry_complete() for image in images if image.analysis])
num_target = sum([image.analysis.target_id > 0 for image in images if image.analysis])
num_reviewed = sum([image.analysis.is_reviewed() for image in images if image.analysis])
num_lightcurve = sum([image.analysis.status == ImageAnalysis.ADDED_TO_LIGHT_CURVE for image in images if image.analysis])
num_reduction_complete = sum([image.analysis.status == ImageAnalysis.REDUCTION_COMPLETE for image in images if image.analysis])
return JsonResponse({
'success': True,
'status': lc.status,
'numProcessed': num_processed,
'numPhotometry': num_photometry,
'numPairs': len(pairs),
'numComparisonStars': len(lc.comparison_stars),
'numTarget': num_target,
'numReductionComplete': num_reduction_complete + num_lightcurve,
'numReviewed': num_reviewed,
'numLightcurve': num_lightcurve,
'numImages': len(images),
})
def comparison_desigs(request, lightcurve_id):
lc = get_object_or_404(LightCurve, pk=lightcurve_id, user=request.user.id)
if request.method == 'POST':
desigs = set(json.loads(request.POST.get('desigs')))
lc.comparison_stars = [star for star in lc.common_stars if star['designation'] in desigs]
lc.save()
return JsonResponse({
'success': True,
})
return JsonResponse({
'success': True,
        'desigs': [star['designation'] for star in lc.comparison_stars],
})
@transaction.atomic
def run_image_reductions(request, lightcurve_id):
lc = get_object_or_404(LightCurve, pk=lightcurve_id, user=request.user.id)
analyses = lc.imageanalysis_set.all()
count = 0
for analysis in analyses:
if analysis.target_id and analysis.target_id > 0:
analysis.status = ImageAnalysis.REVIEW_PENDING
analysis.save()
reduction = analysis.get_or_create_reduction()
reduction.status = Reduction.PENDING
reduction.save()
count += 1
return JsonResponse({
'success': True,
'numTriggered': count,
})
def my_lightcurve(request):
lc_list = LightCurve.objects.filter(user=request.user.id)
context_list = []
for lc in lc_list:
images = UserUploadedImage.objects.filter(lightcurve=lc)
context_list.append({
'lightcurve': lc,
'images': images,
})
return render_to_response('lightcurve_list.html', {'contexts': context_list},
context_instance=RequestContext(request))
def all_lightcurve(request):
lc_list = LightCurve.objects.all()
context_list = []
for lc in lc_list:
images = UserUploadedImage.objects.filter(lightcurve=lc)
context_list.append({
'lightcurve': lc,
'images': images,
})
return render_to_response('lightcurve_list.html', {'contexts': context_list, 'request_all': True},
context_instance=RequestContext(request))
def download(request, lightcurve_id):
file_type = request.GET.get('file_type')
lc = LightCurve.objects.get(id=lightcurve_id)
analyses = ImageAnalysis.objects.filter(useruploadedimage__lightcurve=lc) \
.exclude(status=ImageAnalysis.ASTROMETRY_PENDING)
if file_type == 'csv':
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="LightCurve%s.csv"' % (lightcurve_id)
writer = csv.writer(response)
writer.writerow(['Datetime', 'JD', 'Mag instrumental', 'Mag standard', 'Mag std'])
for analysis in analyses:
if analysis.annotated_point_sources != []:
result = find_point_by_id(analysis.annotated_point_sources, analysis.target_id)
if not result:
continue
writer.writerow([analysis.image_datetime, get_jd_for_analysis(analysis), result.get('mag_instrumental', None), result.get('mag_standard', None), result.get('mag_std', None)])
elif file_type == 'alcdef':
myalcdef = AlcdefWriter()
myalcdef.set('CIBAND', lc.ciband)
myalcdef.set('CONTACTNAME', lc.user)
myalcdef.set('CONTACTINFO', lc.user.email)
myalcdef.set('DELIMITER', 'PIPE')
myalcdef.set('FILTER', 'C')
myalcdef.set('OBSERVERS', lc.user)
myalcdef.set('OBJECTNAME', lc.target_name)
myalcdef.set('OBJECTNUMBER', lc.target_name)
myalcdef.add_comment(lc.notes)
total_time = 0
for analysis in analyses:
total_time += time.mktime(analysis.image_datetime.timetuple())
if analysis.annotated_point_sources != []:
result = find_point_by_id(analysis.annotated_point_sources, analysis.target_id)
if not result:
continue
myalcdef.add_data(get_jd_for_analysis(analysis), result.get('mag_instrumental', None), result.get('mag_instrumental_unc', None), result.get('airmass', None))
mid_time = datetime.utcfromtimestamp(total_time / len(analyses))
myalcdef.set('SESSIONDATE', mid_time.strftime("%Y-%m-%d"))
myalcdef.set('SESSIONTIME', mid_time.strftime("%H:%M:%S"))
content = myalcdef.tostring()
if isinstance(content, set):
messages.error(request, ', '.join(content))
            return HttpResponseRedirect(reverse('edit_lightcurve', args=[lightcurve_id]))
response = HttpResponse(content, content_type='text/plain; charset=us-ascii')
response['Content-Disposition'] = 'attachment; filename="LightCurve%s.alcdef"' % (lightcurve_id)
return response
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import sys
import os
import random
import time
import json
import uuid
import Queue
import multiprocessing
import traceback
from mock import Mock
from twisted.internet import reactor
from calvin.utilities import calvinlogger
from calvin.utilities.calvin_callback import CalvinCB, CalvinCBClass
from calvin.runtime.south.transports.calvinip import calvinip_transport
_log = calvinlogger.get_logger(__name__)
"""
@pytest.fixture(scope="session", autouse=True)
def cleanup(request):
def fin():
reactor.callFromThread(reactor.stop)
request.addfinalizer(fin)
print "hejsan"
"""
def slay(plist):
import signal
for p in plist:
if p.is_alive():
p.terminate()
p.join(timeout=.2)
if p.is_alive():
print "Warning: process %s still alive slay!!" % p._name
os.kill(p.pid, signal.SIGKILL)
time.sleep(.1)
if len(multiprocessing.active_children()) > 1:
print "Error: children is still alive", multiprocessing.active_children()
for a in multiprocessing.active_children():
a.terminate()
class BaseTHandler(multiprocessing.Process):
def __init__(self, uri, outqueue, inqueue, timeout=5):
multiprocessing.Process.__init__(self)
self._timeout = timeout
self._item = None
self._uri = uri
self._outqueue = outqueue
self._inqueue = inqueue
self._running = False
def set_ttf(self, ttf):
self._ttf = ttf
def _return(self, test=False, variables={}, stack=None):
if stack is None:
stack = traceback.format_stack(limit=15)[:-1]
else:
stack = []
self._outqueue.put([test, stack, variables])
def _stop_reactor(self, timeout=False):
if timeout:
self.__timeout()
if self._item:
# Server not stopped fail
self._return(False, {'self._item': repr(self._item)})
self._running = False
print(reactor, reactor.running)
if reactor.running:
reactor.callLater(.1, reactor.stop)
def _read_thread(self):
print("%s - Read thread started" % self._name)
while self._running:
try:
cmd = self._inqueue.get(timeout=.1)
except:
continue
func = getattr(self, cmd[0])
print("Running: %s(%s, %s)" % (func.__name__, cmd[1], cmd[2]))
reactor.callFromThread(func, *cmd[1], **cmd[2])
print("%s - Read thread died" % self._name)
def start(self):
self._running = True
self.daemon = True
multiprocessing.Process.start(self)
def __timeout(self, command=None, *args):
print("Timeout in", self)
self._return("timeout", {command: args})
def _base_run(self):
# make it work with twisted py.test plugin also
reactor._started = False
print "timeout %s", self._timeout
reactor.callLater(self._timeout, self._stop_reactor, timeout=True)
reactor.callInThread(self._read_thread)
reactor.run()
def run(self):
self._base_run()
class TransportTestServerHandler(BaseTHandler):
def __init__(self, *args, **kwargs):
self._name = "TestServerHandler"
BaseTHandler.__init__(self, *args, **kwargs)
self._tp = None
def get_callbacks(self):
return {'server_started': [CalvinCB(self._server_started)],
'server_stopped': [CalvinCB(self._server_stopped)],
'join_failed': [CalvinCB(self._join_failed)],
'peer_disconnected': [CalvinCB(self._peer_disconnected)],
'peer_connected': [CalvinCB(self._peer_connected)]}
def _data_received(self, *args):
print("server_data_received", args)
def _peer_connected(self, transport, uri):
print("server_peer_connected", transport)
transport.callback_register('join_finished', CalvinCB(self._join_finished))
transport.callback_register('data_received', CalvinCB(self._data_received))
def _join_failed(self, transport, _id, uri, is_orginator, reason):
_log.debug("Server join failed on uri %s, reason %s", uri, reason)
self._return('server_join_failed', {'transport': repr(transport), 'uri': uri, 'reason': reason})
def _join_finished(self, transport, _id, uri, is_orginator):
print("server_join_finished", transport, _id, uri)
self._return(transport._coder is not None and _id and uri, {'transport._coder': transport._coder , 'id': _id, 'uri': uri})
self._return('server_join_finished', {'transport': repr(transport), '_id': _id, 'uri': uri})
pass
def _peer_disconnected(self, *args):
print("server peer disconnected", args)
def _server_stopped(self, *args):
print("Server stopped", args)
self._item = None
self._outqueue.put(["server_stopped", repr(args)])
# Die here ?
self._stop_reactor()
def _stop_server(self):
print("_stop_server")
self._item.stop()
self._return(not self._item.is_listening())
def stop(self):
print("server_stop", self._item)
if self._item:
self._stop_server()
# Timeout
reactor.callLater(1, self._stop_reactor)
def _server_started(self, server, port):
print("Server started", server, port)
self._item = server
# put in queue
self._return(port > 0 and port < 65536, {'port': port})
self._return('server_started', port)
def _start_server(self):
self._tp = self._ttf.listen(self._uri)
def run(self):
print("start server")
reactor.callLater(0, self._start_server)
self._base_run()
print("server finished")
def _run_command(self, command, *args):
command(*args)
reactor.callLater(0, self._start_server)
class TransportTestClientHandler(BaseTHandler):
def __init__(self, *args, **kwargs):
self._name = "TestClientHandler"
self._port = None
self._stop = False
self._tp = None
BaseTHandler.__init__(self, *args, **kwargs)
def set_ttf(self, ttf):
self._ttf = ttf
def set_port(self, port):
print("set_port", port)
self._port = port
def get_callbacks(self):
return {'peer_disconnected': [CalvinCB(self._peer_disconnected)],
'peer_connection_failed': [CalvinCB(self._connection_failed)],
'join_failed': [CalvinCB(self._join_failed)],
'peer_connected': [CalvinCB(self._peer_connected)]}
def _data_received(self, data):
print("client_data_received", data)
self._return('client_data_received', {'data': data})
def _peer_connected(self, transport, uri):
print("client_peer_connected", transport)
transport.callback_register('join_finished', CalvinCB(self._join_finished))
transport.callback_register('data_received', CalvinCB(self._data_received))
self._return('client_connected', {'transport': repr(transport), 'uri': uri})
self._item = transport
def _connection_failed(self, tp_link, uri, reason):
_log.debug("Client connection failed on uri %s, reason %s", uri, reason)
self._return('client_connection_failed', {'link': repr(tp_link), 'uri': uri, 'reason': reason})
def _join_failed(self, transport, _id, uri, is_orginator, reason):
_log.debug("Client join failed on uri %s, reason %s", uri, reason)
self._return('client_join_failed', {'transport': repr(transport), 'uri': uri, 'reason': reason})
def _join_finished(self, transport, _id, uri, is_orginator):
print("client_join_finished", transport, _id, uri)
self._return(transport._coder is not None and _id and uri, {'transport._coder': transport._coder , 'id': _id, 'uri': uri})
self._return('client_join_finished', {'transport': repr(transport), '_id': _id, 'uri': uri})
def _peer_disconnected(self, transport, uri, reason):
print("client_peer_disconnected", transport, uri, reason)
#self._return(not self._item.is_connected(), variables={'is_connected': self._item.is_connected()})
self._return('client_disconnected', {'transport': repr(transport), 'reason': reason, 'uri': uri})
# If we have been told to stop, stop everything
if self._stop:
self._item = None
self._stop_reactor()
def _stop_client(self):
print("_stop_client(disconnect)")
self._stop = True
self._item.disconnect()
def stop(self):
print("client_stop", self._item)
if self._item:
self._stop_client()
# Timeout
reactor.callLater(1, self._stop_reactor)
def _join(self):
self._tp = self._ttf.join(self._uri)
def run(self):
print("start client")
self._uri = "%s:%s" % (self._uri, self._port)
reactor.callLater(0, self._join)
self._base_run()
print("client finished")
class ConnectionFailed(Exception):
pass
class ServerJoinFailed(Exception):
pass
class ClientJoinFailed(Exception):
pass
# @pytest.mark.interactive
class TestTransportServer(object):
@pytest.mark.essential
def test_start_stop(self, monkeypatch):
_mmanager = multiprocessing.Manager()
shqs = [_mmanager.Queue(), _mmanager.Queue()]
sh = TransportTestServerHandler("calvinip://localhost", shqs[0], shqs[1], timeout=2)
ttf_uuid = str(uuid.uuid4())
ttf = calvinip_transport.CalvinTransportFactory(ttf_uuid, ttf_uuid, sh.get_callbacks())
sh.set_ttf(ttf)
sh.start()
error = None
try:
while sh.is_alive():
try:
mess = shqs[0].get(timeout=.3)
# print(mess)
except:
continue
if mess[0] == 'timeout':
print(mess[1])
raise Exception("Timeout: %s" % "\n".join(mess[1][11:]))
elif mess[0] == 'server_started':
pass
shqs[1].put(['stop', [], {}])
elif mess[0] == 'server_stopped':
break
else:
# print mess
if not mess[0]:
for a in mess[1]:
print a,
for k,v in mess[2].items():
print "%s = %s" % (k, repr(v))
raise Exception("\n".join(mess[1][11:]))
except Exception as e:
import traceback
traceback.print_exc()
error = e
shqs[1].put(['stop', [], {}])
sh.join(timeout=.2)
slay([sh])
if error:
pytest.fail(error)
def test_callbacks(self, monkeypatch):
#self.test_start_stop(monkeypatch)
pass
def test_peer_connected(self, monkeypatch):
pass
class TestTransportClient(object):
test_nodes = 2
@pytest.mark.essential
def test_connect(self, monkeypatch):
queues = []
_mmanager = multiprocessing.Manager()
shqs = [_mmanager.Queue(), _mmanager.Queue()]
chqs = [_mmanager.Queue(), _mmanager.Queue()]
sh = TransportTestServerHandler("calvinip://127.0.0.1", shqs[0], shqs[1], timeout=2)
ch = TransportTestClientHandler("calvinip://127.0.0.1", chqs[0], chqs[1], timeout=2)
ttfs_uuid = str(uuid.uuid4())
ttfs = calvinip_transport.CalvinTransportFactory(ttfs_uuid, ttfs_uuid, sh.get_callbacks())
ttfc_uuid = str(uuid.uuid4())
ttfc = calvinip_transport.CalvinTransportFactory(ttfc_uuid, ttfc_uuid, ch.get_callbacks())
sh.set_ttf(ttfs)
ch.set_ttf(ttfc)
sh.start()
#ch.start()
queues = [shqs, chqs]
cstop = sstop = False
stop = False
error = None
try:
while not stop:
for q in queues:
try:
mess = q[0].get(timeout=.1)
#print(mess[0])
except:
continue
if mess[0] == 'timeout':
print(mess[1])
# TODO: terminate
raise Exception("Timeout: %s" % "\n".join(mess[1][11:]))
elif mess[0] == 'server_stopped':
print "Server stopped"
sstop = True
stop = (sstop and cstop)
elif mess[0] == 'server_started':
ch.set_port(mess[2])
ch.start()
elif mess[0] == 'client_disconnected':
if mess[2]['reason'] != "OK":
raise Exception("Did not disconnect cleanly")
cstop = True
stop = (sstop and cstop)
elif mess[0] == 'client_join_finished':
stop = True
elif mess[0] == 'client_join_failed':
raise ClientJoinFailed(str(mess[2]))
elif mess[0] == 'server_join_failed':
raise ServerJoinFailed(str(mess[2]))
elif mess[0] == 'client_connection_failed':
raise ConnectionFailed(str(mess[1:]))
else:
# print mess
if not mess[0]:
for a in mess[1][11:-1]:
print a,
for k,v in mess[2].items():
print "%s = %s" % (k, repr(v))
raise Exception("\n".join(mess[1][11:]))
except Exception as e:
error = e
for tq in queues:
print(repr(tq))
tq[1].put(['stop', [], {}])
time.sleep(.2)
slay([sh, ch])
if error:
pytest.fail(error)
def test_connect_client_join_fail(self, monkeypatch):
_mmanager = multiprocessing.Manager()
queues = []
shqs = [_mmanager.Queue(), _mmanager.Queue()]
chqs = [_mmanager.Queue(), _mmanager.Queue()]
sh = TransportTestServerHandler("calvinip://127.0.0.1", shqs[0], shqs[1])
ch = TransportTestClientHandler("calvinip://127.0.0.1", chqs[0], chqs[1])
ttfs_uuid = str(uuid.uuid4())
ttfs = calvinip_transport.CalvinTransportFactory(ttfs_uuid, ttfs_uuid, sh.get_callbacks())
ttfc_uuid = str(uuid.uuid4())
ttfc = calvinip_transport.CalvinTransportFactory(ttfc_uuid, ttfc_uuid, ch.get_callbacks())
sh.set_ttf(ttfs)
ch.set_ttf(ttfc)
monkeypatch.setattr(ttfc, "_client_validator", lambda x: False)
sh.start()
queues = [shqs, chqs]
cstop = sstop = False
stop = False
error = None
try:
while not stop:
for q in queues:
try:
mess = q[0].get(timeout=.1)
#print(mess[0])
except:
continue
if mess[0] == 'timeout':
print(mess[1])
# TODO: terminate
raise Exception("Timeout: %s" % "\n".join(mess[1][11:]))
elif mess[0] == 'server_stopped':
print "Server stopped"
sstop = True
stop = (sstop and cstop)
elif mess[0] == 'server_started':
ch.set_port(mess[2])
ch.start()
elif mess[0] == 'client_disconnected':
cstop = True
stop = (sstop and cstop)
elif mess[0] == 'client_join_finished':
stop = True
elif mess[0] == 'client_join_failed':
raise ClientJoinFailed(str(mess[2]))
elif mess[0] == 'server_join_failed':
raise ServerJoinFailed(str(mess[2]))
elif mess[0] == 'client_connection_failed':
raise ConnectionFailed(str(mess[2]))
else:
# print mess
if not mess[0]:
for a in mess[1][11:-1]:
print a,
for k,v in mess[2].items():
print "%s = %s" % (k, repr(v))
raise Exception("\n".join(mess[1][11:]))
except Exception as e:
error = e
for tq in queues:
print(repr(tq))
tq[1].put(['stop', [], {}])
slay([sh, ch])
if error:
with pytest.raises(ClientJoinFailed):
import traceback
traceback.print_exc()
raise error
else:
pytest.fail("No exception")
def test_connect_server_join_fail(self, monkeypatch):
_mmanager = multiprocessing.Manager()
queues = []
shqs = [_mmanager.Queue(), _mmanager.Queue()]
chqs = [_mmanager.Queue(), _mmanager.Queue()]
sh = TransportTestServerHandler("calvinip://127.0.0.1", shqs[0], shqs[1])
ch = TransportTestClientHandler("calvinip://127.0.0.1", chqs[0], chqs[1])
ttfs_uuid = str(uuid.uuid4())
ttfs = calvinip_transport.CalvinTransportFactory(ttfs_uuid, ttfs_uuid, sh.get_callbacks())
ttfc_uuid = str(uuid.uuid4())
ttfc = calvinip_transport.CalvinTransportFactory(ttfc_uuid, ttfc_uuid, ch.get_callbacks())
sh.set_ttf(ttfs)
ch.set_ttf(ttfc)
monkeypatch.setattr(ttfs, "_client_validator", lambda x: False)
sh.start()
queues = [shqs, chqs]
cstop = sstop = False
stop = False
error = None
try:
while not stop:
for q in queues:
try:
mess = q[0].get(timeout=.1)
#print(mess[0])
except:
continue
if mess[0] == 'timeout':
print(mess[1])
# TODO: terminate
raise Exception("Timeout: %s" % "\n".join(mess[1][11:]))
elif mess[0] == 'server_stopped':
print "Server stopped"
sstop = True
stop = (sstop and cstop)
elif mess[0] == 'server_started':
ch.set_port(mess[2])
ch.start()
elif mess[0] == 'client_disconnected':
cstop = True
stop = (sstop and cstop)
elif mess[0] == 'client_join_finished':
stop = True
elif mess[0] == 'client_join_failed':
raise ClientJoinFailed(str(mess[2]))
elif mess[0] == 'server_join_failed':
raise ServerJoinFailed(str(mess[2]))
elif mess[0] == 'client_connection_failed':
raise ConnectionFailed(str(mess[2]))
else:
# print mess
if not mess[0]:
for a in mess[1][11:-1]:
print a,
for k,v in mess[2].items():
print "%s = %s" % (k, repr(v))
raise Exception("\n".join(mess[1][11:]))
except Exception as e:
error = e
for tq in queues:
print(repr(tq))
tq[1].put(['stop', [], {}])
slay([sh, ch])
if error:
with pytest.raises(ServerJoinFailed):
import traceback
traceback.print_exc()
raise error
else:
pytest.fail("No exception")
def test_connect_fail(self, monkeypatch):
_mmanager = multiprocessing.Manager()
queues = []
shqs = [_mmanager.Queue(), _mmanager.Queue()]
chqs = [_mmanager.Queue(), _mmanager.Queue()]
sh = TransportTestServerHandler("calvinip://127.0.0.1", shqs[0], shqs[1])
ch = TransportTestClientHandler("calvinip://127.0.0.1", chqs[0], chqs[1])
ttfs_uuid = str(uuid.uuid4())
ttfs = calvinip_transport.CalvinTransportFactory(ttfs_uuid, ttfs_uuid, sh.get_callbacks())
ttfc_uuid = str(uuid.uuid4())
ttfc = calvinip_transport.CalvinTransportFactory(ttfc_uuid, ttfc_uuid, ch.get_callbacks())
sh.set_ttf(ttfs)
ch.set_ttf(ttfc)
sh.start()
#ch.start()
queues = [shqs, chqs]
cstop = sstop = False
stop = False
error = None
try:
while not stop:
for q in queues:
try:
mess = q[0].get(timeout=.1)
#print(mess[0])
except:
continue
if mess[0] == 'timeout':
print(mess[1])
# TODO: terminate
raise Exception("Timeout: %s" % "\n".join(mess[1][11:]))
elif mess[0] == 'server_stopped':
print "Server stopped"
sstop = True
stop = (sstop and cstop)
elif mess[0] == 'server_started':
ch.set_port(str(int(mess[2])+1))
ch.start()
elif mess[0] == 'client_disconnected':
cstop = True
stop = (sstop and cstop)
elif mess[0] == 'client_join_finished':
stop = True
elif mess[0] == 'client_join_failed':
raise ClientJoinFailed(str(mess[2]))
elif mess[0] == 'server_join_failed':
raise ServerJoinFailed(str(mess[2]))
elif mess[0] == 'client_connection_failed':
raise ConnectionFailed(str(mess[2]))
else:
# print mess
if not mess[0]:
for a in mess[1][11:-1]:
print a,
for k,v in mess[2].items():
print "%s = %s" % (k, repr(v))
raise Exception("\n".join(mess[1][11:]))
except Exception as e:
error = e
for tq in queues:
print "queue", repr(tq)
tq[1].put(['stop', [], {}])
print sh, ch
slay([sh, ch])
if error:
with pytest.raises(ConnectionFailed):
import traceback
traceback.print_exc()
raise error
else:
pytest.fail("No exception")
def test_data(self, monkeypatch):
pass
def test_callback(self, monkeypatch):
pass
|
"""Step placement functions."""
from genologics.entities import Process, StepActions
def finish_protocol_complete(lims, process_id):
"""Finish next step after current step is finished (Dx Mark protocol complete)."""
process = Process(lims, id=process_id)
inputs = ''
actions = []
# Check all analytes
for analyte in process.analytes()[0]:
analyte_workflow_stages_and_statuses = analyte.workflow_stages_and_statuses
if analyte_workflow_stages_and_statuses[-1][2] == 'Dx Mark protocol complete' and analyte_workflow_stages_and_statuses[-1][1] == 'QUEUED':
actions.append({'action': 'complete', 'artifact': analyte})
step_uri = analyte_workflow_stages_and_statuses[-1][0].step.uri
inputs += '<input uri="{0}" replicates="1"/>'.format(analyte.uri)
# Generate start step XML
xml = '''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<tmp:step-creation xmlns:tmp="http://genologics.com/ri/step">
<configuration uri="{0}"/>
<inputs>
{1}
</inputs>
</tmp:step-creation>
'''.format(step_uri, inputs)
# Start step
output = lims.post(
uri=lims.get_uri('steps'),
data=xml
)
# Get started step uri
step_action_uri = output.find('actions').get('uri')
step_actions = StepActions(lims, uri=step_action_uri)
# Advance to next step screen
step = step_actions.step
step.advance() # Next step
# Mark everything complete
step_actions.set_next_actions(actions)
step_actions.put()
# Finish step
step.advance()
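# Usage sketch (illustrative only): `lims` is assumed to be an already
# authenticated genologics Lims connection and '24-1234' a hypothetical
# process id; the connection settings below are placeholders, not part of
# this module.
#
#     from genologics.lims import Lims
#     lims = Lims(BASEURI, USERNAME, PASSWORD)
#     finish_protocol_complete(lims, '24-1234')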
|
# -*- coding: utf-8 -*-
"""
Test 'pymonzo.monzo_api' file
"""
from __future__ import unicode_literals
import codecs
import json
import os
import tempfile
import pytest
from six.moves.urllib.parse import urljoin
from pymonzo import MonzoAPI
from pymonzo import config
from pymonzo.api_objects import MonzoAccount, MonzoBalance, MonzoPot, MonzoTransaction
class TestMonzoAPI:
"""
Test `monzo_api.MonzoAPI` class.
"""
@pytest.fixture(scope='session')
def monzo(self):
"""Helper fixture that returns a `MonzoAPI` instance"""
return MonzoAPI(access_token='explicit_access_token')
@pytest.fixture
def mocked_monzo(self, mocker):
"""Helper fixture that returns a mocked `MonzoAPI` instance"""
mocker.patch('pymonzo.monzo_api.OAuth2Session')
mocker.patch('pymonzo.monzo_api.MonzoAPI._save_token_on_disk')
client_id = 'explicit_client_id'
client_secret = 'explicit_client_secret'
auth_code = 'explicit_auth_code'
monzo = MonzoAPI(
client_id=client_id,
client_secret=client_secret,
auth_code=auth_code,
)
return monzo
def test_class_initialization(self, monkeypatch, mocker):
"""
Test class `__init__` method.
Quite long and complicated because of the number of possible
scenarios. Possibly worth revisiting in the future.
"""
access_token = 'explicit_access_token'
client_id = 'explicit_client_id'
client_secret = 'explicit_client_secret'
auth_code = 'explicit_auth_code'
monkeypatch.setenv(config.MONZO_ACCESS_TOKEN_ENV, 'env_access_token')
monkeypatch.setenv(config.MONZO_CLIENT_ID_ENV, 'env_client_id')
monkeypatch.setenv(config.MONZO_CLIENT_SECRET_ENV, 'env_client_secret')
monkeypatch.setenv(config.MONZO_AUTH_CODE_ENV, 'env_auth_code')
# When we provide all variables both explicitly and via environment
# variables, the explicit 'access token' should take precedence
mocker.patch('os.path.isfile', return_value=True)
mocked_oauth2_session = mocker.patch('pymonzo.monzo_api.OAuth2Session')
expected_token = {
'access_token': 'explicit_access_token',
'token_type': 'Bearer',
}
monzo = MonzoAPI(
access_token=access_token, client_id=client_id,
client_secret=client_secret, auth_code=auth_code,
)
assert monzo._access_token == 'explicit_access_token'
assert monzo._client_id is None
assert monzo._client_secret is None
assert monzo._auth_code is None
assert monzo._token == expected_token
mocked_oauth2_session.assert_called_once_with(
client_id=None,
token=expected_token,
)
# Don't pass 'access_token' explicitly
mocked_oauth2_session = mocker.patch('pymonzo.monzo_api.OAuth2Session')
mocked_get_oauth_token = mocker.patch(
'pymonzo.monzo_api.MonzoAPI._get_oauth_token'
)
mocked_save_token_on_disk = mocker.patch(
'pymonzo.monzo_api.MonzoAPI._save_token_on_disk'
)
expected_token = mocked_get_oauth_token.return_value
monzo = MonzoAPI(
client_id=client_id,
client_secret=client_secret,
auth_code=auth_code,
)
assert monzo._access_token is None
assert monzo._client_id == 'explicit_client_id'
assert monzo._client_secret == 'explicit_client_secret'
assert monzo._auth_code == 'explicit_auth_code'
assert monzo._token == expected_token
mocked_get_oauth_token.assert_called_once_with()
mocked_save_token_on_disk.assert_called_once_with()
mocked_oauth2_session.assert_called_once_with(
client_id='explicit_client_id',
token=expected_token,
)
# Don't pass anything explicitly and the token file exists
mocked_oauth2_session = mocker.patch('pymonzo.monzo_api.OAuth2Session')
mocker.patch('os.path.isfile', return_value=True)
mocked_open = mocker.patch('codecs.open', mocker.mock_open())
mocked_json_load = mocker.patch('json.load')
expected_token = mocked_json_load.return_value
monzo = MonzoAPI()
assert monzo._access_token is None
assert monzo._client_id is expected_token['client_id']
assert monzo._client_secret is expected_token['client_secret']
assert monzo._auth_code is None
assert monzo._token == expected_token
mocked_open.assert_called_once_with(
config.TOKEN_FILE_PATH, 'r', 'utf-8',
)
mocked_json_load.assert_called_once_with(mocked_open.return_value)
mocked_get_oauth_token.assert_called_once_with()
mocked_oauth2_session.assert_called_once_with(
client_id=expected_token['client_id'],
token=expected_token,
)
# Don't pass anything explicitly, the token file doesn't exist
# and 'access_token' environment variable exists
mocked_oauth2_session = mocker.patch('pymonzo.monzo_api.OAuth2Session')
mocker.patch('os.path.isfile', return_value=False)
expected_token = {
'access_token': 'env_access_token',
'token_type': 'Bearer',
}
monzo = MonzoAPI()
assert monzo._access_token == 'env_access_token'
assert monzo._client_id is None
assert monzo._client_secret is None
assert monzo._auth_code is None
assert monzo._token == expected_token
mocked_oauth2_session.assert_called_once_with(
client_id=None,
token=expected_token,
)
# Don't pass anything explicitly, the token file doesn't exist
# and 'access_token' environment variable doesn't exist
monkeypatch.delenv(config.MONZO_ACCESS_TOKEN_ENV)
mocked_oauth2_session = mocker.patch('pymonzo.monzo_api.OAuth2Session')
mocked_get_oauth_token = mocker.patch(
'pymonzo.monzo_api.MonzoAPI._get_oauth_token'
)
mocked_save_token_on_disk = mocker.patch(
'pymonzo.monzo_api.MonzoAPI._save_token_on_disk'
)
expected_token = mocked_get_oauth_token.return_value
monzo = MonzoAPI()
assert monzo._access_token is None
assert monzo._client_id == 'env_client_id'
assert monzo._client_secret == 'env_client_secret'
assert monzo._auth_code == 'env_auth_code'
assert monzo._token == expected_token
mocked_get_oauth_token.assert_called_once_with()
mocked_save_token_on_disk.assert_called_once_with()
mocked_oauth2_session.assert_called_once_with(
client_id='env_client_id',
token=expected_token,
)
# None of the above
monkeypatch.delenv(config.MONZO_CLIENT_ID_ENV)
with pytest.raises(ValueError):
MonzoAPI(
auth_code=auth_code, client_id=client_id,
)
def test_class_save_token_on_disk_method(self, monzo):
"""Test class `_save_token_on_disk` method"""
config.TOKEN_FILE_PATH = os.path.join(
tempfile.gettempdir(), 'pymonzo_test',
)
monzo._token = {
'foo': u'UNICODE',
'bar': 1,
'baz': False,
}
expected_token = monzo._token.copy()
expected_token.update(client_secret=monzo._client_secret)
monzo._save_token_on_disk()
with codecs.open(config.TOKEN_FILE_PATH, 'r', 'utf-8') as f:
assert json.load(f) == expected_token
def test_class_get_oauth_token_method(self, mocker, mocked_monzo):
"""Test class `_get_oauth_token` method"""
mocked_fetch_token = mocker.MagicMock()
mocked_oauth2_session = mocker.patch('pymonzo.monzo_api.OAuth2Session')
mocked_oauth2_session.return_value.fetch_token = mocked_fetch_token
token = mocked_monzo._get_oauth_token()
assert token == mocked_fetch_token.return_value
mocked_oauth2_session.assert_called_once_with(
client_id=mocked_monzo._client_id,
redirect_uri=config.REDIRECT_URI,
)
mocked_fetch_token.assert_called_once_with(
token_url=urljoin(mocked_monzo.api_url, '/oauth2/token'),
code=mocked_monzo._auth_code,
client_secret=mocked_monzo._client_secret,
)
def test_class_refresh_oath_token_method(self, mocker, mocked_monzo):
"""Test class `_refresh_oath_token` method"""
mocked_requests_post_json = mocker.MagicMock()
mocked_requests_post = mocker.patch('pymonzo.monzo_api.requests.post')
mocked_requests_post.return_value.json = mocked_requests_post_json
mocked_save_token_on_disk = mocker.patch(
'pymonzo.monzo_api.MonzoAPI._save_token_on_disk'
)
expected_data = {
'grant_type': 'refresh_token',
'client_id': mocked_monzo._client_id,
'client_secret': mocked_monzo._client_secret,
'refresh_token': mocked_monzo._token['refresh_token'],
}
mocked_monzo._refresh_oath_token()
assert mocked_monzo._token == mocked_requests_post_json.return_value
mocked_requests_post.assert_called_once_with(
urljoin(mocked_monzo.api_url, '/oauth2/token'),
data=expected_data,
)
mocked_requests_post_json.assert_called_once_with()
mocked_save_token_on_disk.assert_called_once_with()
def test_class_whoami_method(self, mocker, mocked_monzo):
"""Test class `whoami` method"""
mocked_get_response = mocker.patch(
'pymonzo.monzo_api.MonzoAPI._get_response',
)
result = mocked_monzo.whoami()
mocked_get_response.assert_called_once_with(
method='get', endpoint='/ping/whoami',
)
expected_result = mocked_get_response.return_value.json.return_value
assert result == expected_result
def test_class_accounts_method(self, mocker, mocked_monzo, accounts_api_response):
"""Test class `accounts` method"""
mocked_get_response = mocker.patch(
'pymonzo.monzo_api.MonzoAPI._get_response',
)
mocked_get_response.return_value.json.return_value = accounts_api_response
assert mocked_monzo._cached_accounts is None
result = mocked_monzo.accounts()
mocked_get_response.assert_called_once_with(
method='get', endpoint='/accounts',
)
accounts_json = accounts_api_response['accounts']
expected_result = [
MonzoAccount(data=account) for account in accounts_json
]
assert result == expected_result
assert mocked_monzo._cached_accounts == expected_result
# Calling it again should fetch '_cached_accounts'
mocked_get_response = mocker.patch(
'pymonzo.monzo_api.MonzoAPI._get_response',
)
mocked_get_response.return_value.json.return_value = accounts_api_response
result = mocked_monzo.accounts()
assert mocked_get_response.call_count == 0
assert result == mocked_monzo._cached_accounts
# But calling it with 'refresh=True' should do an API request
mocked_get_response = mocker.patch(
'pymonzo.monzo_api.MonzoAPI._get_response',
)
mocked_get_response.return_value.json.return_value = accounts_api_response
assert mocked_monzo._cached_accounts is not None
result = mocked_monzo.accounts(refresh=True)
mocked_get_response.assert_called_once_with(
method='get', endpoint='/accounts',
)
accounts_json = accounts_api_response['accounts']
expected_result = [
MonzoAccount(data=account) for account in accounts_json
]
assert result == expected_result
assert mocked_monzo._cached_accounts == expected_result
def test_class_balance_method(self, mocker, mocked_monzo,
balance_api_response, accounts_api_response):
"""Test class `balance` method"""
mocked_get_response = mocker.patch(
'pymonzo.monzo_api.MonzoAPI._get_response',
)
mocked_get_response.return_value.json.return_value = balance_api_response
accounts_json = accounts_api_response['accounts']
mocked_monzo._cached_accounts = [
MonzoAccount(data=account) for account in accounts_json
]
result = mocked_monzo.balance()
mocked_get_response.assert_called_once_with(
method='get',
endpoint='/balance',
params={
'account_id': mocked_monzo._cached_accounts[0].id,
},
)
expected_result = MonzoBalance(balance_api_response)
assert result == expected_result
# It should raise a 'ValueError' if there is more (or less) than 1 account
mocked_monzo._cached_accounts = mocked_monzo._cached_accounts * 2
with pytest.raises(ValueError):
mocked_monzo.balance()
def test_class_pots_method(self, mocker, mocked_monzo, pots_api_response):
"""Test class `pots` method"""
mocked_get_response = mocker.patch(
'pymonzo.monzo_api.MonzoAPI._get_response',
)
mocked_get_response.return_value.json.return_value = pots_api_response
assert mocked_monzo._cached_pots is None
result = mocked_monzo.pots()
mocked_get_response.assert_called_once_with(
method='get', endpoint='/pots/listV1',
)
pots_json = pots_api_response['pots']
expected_result = [MonzoPot(data=pot) for pot in pots_json]
assert result == expected_result
assert mocked_monzo._cached_pots == expected_result
# Calling it again should fetch '_cached_pots'
mocked_get_response = mocker.patch(
'pymonzo.monzo_api.MonzoAPI._get_response',
)
mocked_get_response.return_value.json.return_value = pots_api_response
result = mocked_monzo.pots()
assert mocked_get_response.call_count == 0
assert result == mocked_monzo._cached_pots
# But calling it with 'refresh=True' should do an API request
mocked_get_response = mocker.patch(
'pymonzo.monzo_api.MonzoAPI._get_response',
)
mocked_get_response.return_value.json.return_value = pots_api_response
assert mocked_monzo._cached_pots is not None
result = mocked_monzo.pots(refresh=True)
mocked_get_response.assert_called_once_with(
method='get', endpoint='/pots/listV1',
)
pots_json = pots_api_response['pots']
expected_result = [MonzoPot(data=pot) for pot in pots_json]
assert result == expected_result
assert mocked_monzo._cached_pots == expected_result
def test_class_transactions_method(self, mocker, mocked_monzo,
transactions_api_response, accounts_api_response):
"""Test class `transactions` method"""
mocked_get_response = mocker.patch(
'pymonzo.monzo_api.MonzoAPI._get_response',
)
mocked_get_response.return_value.json.return_value = transactions_api_response
accounts_json = accounts_api_response['accounts']
mocked_monzo._cached_accounts = [
MonzoAccount(data=account) for account in accounts_json
]
result = mocked_monzo.transactions()
mocked_get_response.assert_called_once_with(
method='get',
endpoint='/transactions',
params={
'account_id': mocked_monzo._cached_accounts[0].id,
},
)
transactions_json = transactions_api_response['transactions']
expected_result = [
MonzoTransaction(data=transaction) for transaction in transactions_json
]
assert result == expected_result
# It should raise a 'ValueError' if there is more (or less) than 1 account
mocked_monzo._cached_accounts = mocked_monzo._cached_accounts * 2
with pytest.raises(ValueError):
mocked_monzo.transactions()
def test_class_transaction_method(self, mocker, mocked_monzo, transaction_api_response):
"""Test class `transaction` method"""
mocked_get_response = mocker.patch(
'pymonzo.monzo_api.MonzoAPI._get_response',
)
mocked_get_response.return_value.json.return_value = transaction_api_response
result = mocked_monzo.transaction('foobar')
mocked_get_response.assert_called_once_with(
method='get',
endpoint='/transactions/foobar',
params={},
)
expected_result = MonzoTransaction(transaction_api_response['transaction'])
assert result == expected_result
# With expanded merchant info
mocked_get_response = mocker.patch(
'pymonzo.monzo_api.MonzoAPI._get_response',
)
mocked_get_response.return_value.json.return_value = transaction_api_response
result = mocked_monzo.transaction('foobar', expand_merchant=True)
mocked_get_response.assert_called_once_with(
method='get',
endpoint='/transactions/foobar',
params={
'expand[]': 'merchant',
},
)
expected_result = MonzoTransaction(transaction_api_response['transaction'])
assert result == expected_result
|
import vtk
def getSliderObjects(sliderRep, sliderWidget, titleText, iren, initVal, minVal, maxVal, posXLeft, posY, callBackFunc):
sliderRep.SetMinimumValue(minVal)
sliderRep.SetMaximumValue(maxVal)
sliderRep.SetValue(initVal)
sliderRep.SetTitleText(titleText)
sliderRep.GetPoint1Coordinate().SetCoordinateSystemToDisplay()
sliderRep.GetPoint1Coordinate().SetValue(posXLeft, posY, 0)
sliderRep.GetPoint2Coordinate().SetCoordinateSystemToDisplay()
sliderRep.GetPoint2Coordinate().SetValue(posXLeft+300, posY, 0)
sliderRep.SetSliderLength(0.025)
sliderRep.SetSliderWidth(0.025)
sliderRep.SetEndCapLength(0.0125)
sliderWidget.SetInteractor(iren)
sliderWidget.SetRepresentation(sliderRep)
sliderWidget.KeyPressActivationOff()
sliderWidget.SetAnimationModeToAnimate()
sliderWidget.SetEnabled(True)
sliderWidget.AddObserver("InteractionEvent", callBackFunc)
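# Usage sketch (hedged): typical wiring for this helper. `iren` is assumed to be
# an existing vtkRenderWindowInteractor created elsewhere, and `on_opacity` is a
# hypothetical callback; the title, value range and screen position are illustrative.
#
#     rep = vtk.vtkSliderRepresentation2D()
#     widget = vtk.vtkSliderWidget()
#     def on_opacity(obj, event):
#         value = obj.GetRepresentation().GetValue()
#         print("slider value:", value)
#     getSliderObjects(rep, widget, "Opacity", iren, 0.5, 0.0, 1.0, 40, 40, on_opacity)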
|
from typing import IO
import javaproperties
from .._FileWriter import FileWriter
from ._Report import Report
from . import constants
class ReportFileWriter(FileWriter[str, Report, "ReportFileWriter"]):
"""
Writer for report files.
"""
def _dump(self, obj: Report, file: IO[str]):
# Create a Java properties dictionary
properties = {constants.PROPERTY_PARENTID: str(obj.database_id)}
# Add the fields and their types as properties
for field in obj:
name = str(field)
properties[name] = str(obj.get_value(field))
properties[name + constants.DATATYPE_SUFFIX] = field.datatype.to_display()
print(javaproperties.to_comment(javaproperties.java_timestamp()), file=file)
print(javaproperties.to_comment("Simple report format (= Java properties file format)"), file=file)
javaproperties.dump(properties, file, timestamp=False, sort_keys=True)
|
import os
import tensorflow as tf
import numpy as np
from tensorflow import keras
import time
from matplotlib import pyplot as plt
from gd import Discriminator, Generator
tf.random.set_seed(22)
np.random.seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
assert tf.__version__.startswith('2.')
batch_size = 1
IMG_WIDTH = 256
IMG_HEIGHT = 256
path_to_zip = keras.utils.get_file('facades.tar.gz',
cache_subdir=os.path.abspath('.'),
origin='https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/facades.tar.gz',
extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'facades/')
print('dataset path:', PATH)
def load_image(image_file, is_train):
"""
load and preprocess images
:param image_file:
:param is_train:
:return:
"""
image = tf.io.read_file(image_file)
image = tf.image.decode_jpeg(image)
w = image.shape[1]
w = w // 2
real_image = image[:, :w, :]
input_image = image[:, w:, :]
input_image = tf.cast(input_image, tf.float32)
real_image = tf.cast(real_image, tf.float32)
if is_train:
# random jittering
# resizing to 286 x 286 x 3
input_image = tf.image.resize(input_image, [286, 286])
real_image = tf.image.resize(real_image, [286, 286])
# randomly cropping to 256 x 256 x 3
stacked_image = tf.stack([input_image, real_image], axis=0)
cropped_image = tf.image.random_crop(stacked_image, size=[2, IMG_HEIGHT, IMG_WIDTH, 3])
input_image, real_image = cropped_image[0], cropped_image[1]
if np.random.random() > 0.5:
# random mirroring
input_image = tf.image.flip_left_right(input_image)
real_image = tf.image.flip_left_right(real_image)
else:
input_image = tf.image.resize(input_image, size=[IMG_HEIGHT, IMG_WIDTH])
real_image = tf.image.resize(real_image, size=[IMG_HEIGHT, IMG_WIDTH])
# normalizing the images to [-1, 1]
input_image = (input_image / 127.5) - 1
real_image = (real_image / 127.5) - 1
# [256, 256, 3], [256, 256, 3]
# print(input_image.shape, real_image.shape)
# => [256, 256, 6]
out = tf.concat([input_image, real_image], axis=2)
return out
train_dataset = tf.data.Dataset.list_files(PATH+'/train/*.jpg')
# The following snippet does not work here, so load the images manually instead.
# train_dataset = train_dataset.map(lambda x: load_image(x, True)).batch(1)
train_iter = iter(train_dataset)
train_data = []
for x in train_iter:
train_data.append(load_image(x, True))
train_data = tf.stack(train_data, axis=0)
# [N, 256, 256, 6]
print('train:', train_data.shape)
train_dataset = tf.data.Dataset.from_tensor_slices(train_data)
train_dataset = train_dataset.shuffle(400).batch(1)
test_dataset = tf.data.Dataset.list_files(PATH+'test/*.jpg')
# test_dataset = test_dataset.map(lambda x: load_image(x, False)).batch(1)
test_iter = iter(test_dataset)
test_data = []
for x in test_iter:
test_data.append(load_image(x, False))
test_data = tf.stack(test_data, axis=0)
# [N, 256, 256, 6]
print('test:', test_data.shape)
test_dataset = tf.data.Dataset.from_tensor_slices(test_data)
test_dataset = test_dataset.shuffle(400).batch(1)
generator = Generator()
generator.build(input_shape=(batch_size, 256, 256, 3))
generator.summary()
discriminator = Discriminator()
discriminator.build(input_shape=[(batch_size, 256, 256, 3), (batch_size, 256, 256, 3)])
discriminator.summary()
g_optimizer = keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)
d_optimizer = keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)
def discriminator_loss(disc_real_output, disc_generated_output):
# [1, 30, 30, 1] with [1, 30, 30, 1]
# print(disc_real_output.shape, disc_generated_output.shape)
real_loss = keras.losses.binary_crossentropy(
tf.ones_like(disc_real_output), disc_real_output, from_logits=True)
generated_loss = keras.losses.binary_crossentropy(
tf.zeros_like(disc_generated_output), disc_generated_output, from_logits=True)
real_loss = tf.reduce_mean(real_loss)
generated_loss = tf.reduce_mean(generated_loss)
total_disc_loss = real_loss + generated_loss
return total_disc_loss
def generator_loss(disc_generated_output, gen_output, target):
LAMBDA = 100
gan_loss = keras.losses.binary_crossentropy(
tf.ones_like(disc_generated_output), disc_generated_output, from_logits=True)
# mean absolute error
l1_loss = tf.reduce_mean(tf.abs(target - gen_output))
gan_loss = tf.reduce_mean(gan_loss)
total_gen_loss = gan_loss + (LAMBDA * l1_loss)
return total_gen_loss
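# Note (for clarity): the two loss functions above implement the standard
# pix2pix objective,
#   L_D = BCE(1, D(x, y)) + BCE(0, D(x, G(x)))
#   L_G = BCE(1, D(x, G(x))) + LAMBDA * ||y - G(x)||_1,  with LAMBDA = 100,
# so the generator is pushed both towards fooling the discriminator and towards
# pixel-level (L1) fidelity to the target image.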
def generate_images(model, test_input, tar, epoch):
# the training=True is intentional here since
# we want the batch statistics while running the model
# on the test dataset. If we use training=False, we will get
# the accumulated statistics learned from the training dataset
# (which we don't want)
prediction = model(test_input, training=True)
plt.figure(figsize=(15,15))
display_list = [test_input[0], tar[0], prediction[0]]
title = ['Input Image', 'Ground Truth', 'Predicted Image']
for i in range(3):
plt.subplot(1, 3, i+1)
plt.title(title[i])
# getting the pixel values between [0, 1] to plot it.
plt.imshow(display_list[i] * 0.5 + 0.5)
plt.axis('off')
plt.savefig('images/epoch%d.png'%epoch)
print('saved images.')
# plt.show()
def main():
epochs = 1000
for epoch in range(epochs):
start = time.time()
for step, inputs in enumerate(train_dataset):
input_image, target = tf.split(inputs, num_or_size_splits=[3, 3], axis=3)
# print(input_image.shape, target.shape)
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
# get generated pixel2pixel image
gen_output = generator(input_image, training=True)
# feed the real pix2pix target image together with the original input image
disc_real_output = discriminator([input_image, target], training=True)
# feed the generated/fake pix2pix image together with the original input image
disc_generated_output = discriminator([input_image, gen_output], training=True)
gen_loss = generator_loss(disc_generated_output, gen_output, target)
disc_loss = discriminator_loss(disc_real_output, disc_generated_output)
generator_gradients = gen_tape.gradient(gen_loss, generator.trainable_variables)
g_optimizer.apply_gradients(zip(generator_gradients, generator.trainable_variables))
discriminator_gradients = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
d_optimizer.apply_gradients(zip(discriminator_gradients, discriminator.trainable_variables))
if step % 100 == 0:
# print(disc_loss.shape, gen_loss.shape)
print(epoch, step, float(disc_loss), float(gen_loss))
if epoch % 1 == 0:
for inputs in test_dataset:
input_image, target = tf.split(inputs, num_or_size_splits=[3, 3], axis=3)
generate_images(generator, input_image, target, epoch)
break
print('Time taken for epoch {} is {} sec\n'.format(epoch + 1, time.time() - start))
for inputs in test_dataset:
input_image, target = tf.split(inputs, num_or_size_splits=[3, 3], axis=3)
generate_images(generator, input_image, target, 99999)
break
if __name__ == '__main__':
main()
|
from models.Playerlist import *
import time
def main():
with open("players.txt", 'r') as f:
players = f.readlines()
PlayerList = Playerlist()
# users that were not found in the Riot database
errorUsers = list()
j = 0
for player in players:
# make sure we don't exceed the request limit of the temporary API
if(j<5):
j += 1
else:
j = 0
time.sleep(15)
# try and catch any errors requesting info from RIOT API
try:
PlayerList.addPlayer(player.strip('\n'))
except Exception as e:
errorUsers.append(e)
print(str(e))
if len(errorUsers) > 0:
print("Players not found in Riot Database for North America:")
with open("ErrorUsers.txt",'w') as errUsr:
for p in errorUsers:
print(p)
errUsr.write(str(p) + '\n')
#Create the Teams
PlayerList.createTeams()
print("Teams:")
print("-----------------------")
#print out all the teams
PlayerList.printTeams()
#convert output to csv
PlayerList.toCsv()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from neodroid.utilities.unity_specifications import Motion, Reaction, ReactionParameters
__author__ = "Christian Heider Nielsen"
import neodroid.wrappers.formal_wrapper as neo
def construct_reactions(env):
parameters = ReactionParameters(
terminable=True,
step=True,
reset=False,
configure=False,
describe=False,
episode_count=True,
)
action1, action2 = env.action_space.sample()
motions = [
Motion("ActorActor", "ActorTransformX_", action1),
Motion("ActorActor", "ActorTransformZ_", action2),
]
reactions = [
Reaction(
environment_name=f"EnvironmentPrototypingEnvironment",
parameters=parameters,
motions=motions,
)
]
for i in range(19):
action1, action2 = env.action_space.sample()
motions = [
Motion("ActorActor", "ActorTransformX_", action1),
Motion("ActorActor", "ActorTransformZ_", action2),
]
reaction = Reaction(
environment_name=f"Environment(Clone){i}PrototypingEnvironment",
parameters=parameters,
motions=motions,
)
reactions.append(reaction)
return reactions
def main():
_environments = neo.NeodroidEnvironment(name="multienv", connect_to_running=True)
while _environments.is_connected:
reactions = construct_reactions(_environments)
states = _environments.react(reactions)
if __name__ == "__main__":
main()
|
import time
import numpy as np
from extra_foam.algorithms import (
nanmean, nansum
)
def benchmark_nan_without_axis(f_cpp, f_py, shape, dtype):
data = np.random.randn(*shape).astype(dtype) + 1. # shift to avoid very small mean
data[:, :3, ::3] = np.nan
t0 = time.perf_counter()
ret_cpp = f_cpp(data)
dt_cpp = time.perf_counter() - t0
t0 = time.perf_counter()
ret_py = f_py(data)
dt_py = time.perf_counter() - t0
np.testing.assert_allclose(ret_cpp, ret_py, rtol=1e-4)
print(f"\nwithout axis, dtype = {dtype} - \n"
f"dt (cpp): {dt_cpp:.4f}, "
f"dt (numpy): {dt_py:.4f}")
def benchmark_nan_keep_zero_axis(f_cpp, f_py, shape, dtype):
data = np.random.randn(*shape).astype(dtype=dtype) + 1. # shift to avoid very small mean
data[:, :3, ::3] = np.nan
t0 = time.perf_counter()
ret_cpp = f_cpp(data, axis=(-2, -1))
dt_cpp = time.perf_counter() - t0
t0 = time.perf_counter()
ret_py = f_py(data, axis=(-2, -1))
dt_py = time.perf_counter() - t0
np.testing.assert_allclose(ret_cpp, ret_py, rtol=1e-4)
print(f"\nkeep zero axis, dtype = {dtype} - \n"
f"dt (cpp): {dt_cpp:.4f}, "
f"dt (numpy): {dt_py:.4f}")
if __name__ == "__main__":
print("*" * 80)
print("Benchmark statistics functions")
print("*" * 80)
s = (16, 1096, 1120)
for f_cpp, f_py in [(nansum, np.nansum), (nanmean, np.nanmean)]:
print(f"\n----- {f_cpp.__name__} ------")
benchmark_nan_without_axis(f_cpp, f_py, s, np.float32)
benchmark_nan_without_axis(f_cpp, f_py, s, np.float64)
benchmark_nan_keep_zero_axis(f_cpp, f_py, s, np.float32)
benchmark_nan_keep_zero_axis(f_cpp, f_py, s, np.float64)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2016-2020, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
""" NumPy is the fundamental package for array computing with Python.
It provides:
- a powerful N-dimensional array object
- sophisticated (broadcasting) functions
- tools for integrating C/C++ and Fortran code
- useful linear algebra, Fourier transform, and random number capabilities
- and much more
"""
import importlib.machinery as imm # Python 3 is required
import sys
import os
import dpctl
import numpy
from setuptools import setup, Extension
from Cython.Build import cythonize
from Cython.Compiler import Options as cython_options
from utils.command_style import source_style
from utils.command_clean import source_clean
from utils.command_build_clib import custom_build_clib, dpnp_backend_c_description, _project_backend_dir, _sdl_cflags, _project_extra_link_args, IS_WIN
from utils.command_build_cmake_clib import custom_build_cmake_clib
"""
Python version check
"""
if sys.version_info[:2] < (3, 6):
raise RuntimeError("DPNP: Python version >= 3.6 required.")
"""
Get the project version
"""
thefile_path = os.path.abspath(os.path.dirname(__file__))
version_mod = imm.SourceFileLoader('version', os.path.join(thefile_path, 'dpnp', 'version.py')).load_module()
__version__ = version_mod.__version__
"""
Set project auxiliary data like readme and license files
"""
with open('README.md') as f:
__readme_file__ = f.read()
CLASSIFIERS = """\
Development Status :: 4 - Beta
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
"""
Extra defined commands for the build system
>$ python ./setup.py --help-commands
>$ python ./setup.py style
>$ python ./setup.py style -a
>$ python ./setup.py clean
TODO: spell check, valgrind, code coverage
"""
# TODO: refactor/fix
# on Win we need a specific build_clib definition to prevent using cmake during build_ext execution
if IS_WIN:
dpnp_build_commands = {'style': source_style,
'build_clib_setuptools': custom_build_clib,
'build_clib': custom_build_clib,
'clean': source_clean
}
else:
dpnp_build_commands = {'style': source_style,
'build_clib_setuptools': custom_build_clib,
'build_clib': custom_build_cmake_clib,
'clean': source_clean
}
if IS_WIN:
'''
This variable controls setuptools execution on windows
to avoid automatically searching for and validating the compiler.
If not set, the error "Microsoft Visual C++ 14.0 or greater is required." appears
'''
os.environ["DISTUTILS_USE_SDK"] = "1"
"""
The project modules description
"""
kwargs_common = {
"include_dirs": [numpy.get_include(), dpctl.get_include()] + _project_backend_dir,
"library_dirs": [os.path.dirname(dpctl.get_include()),],
"libraries": ["DPCTLSyclInterface"],
"extra_compile_args": _sdl_cflags,
"extra_link_args": _project_extra_link_args,
"define_macros": [("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")],
"language": "c++"
}
dpnp_algo = Extension(
name="dpnp.dpnp_algo.dpnp_algo",
sources=[os.path.join("dpnp", "dpnp_algo", "dpnp_algo.pyx")],
**kwargs_common)
dpnp_dparray = Extension(
name="dpnp.dparray",
sources=[os.path.join("dpnp", "dparray.pyx")],
**kwargs_common)
dpnp_random = Extension(
name="dpnp.random.dpnp_algo_random",
sources=[os.path.join("dpnp", "random", "dpnp_algo_random.pyx")],
**kwargs_common)
dpnp_linalg = Extension(
name="dpnp.linalg.dpnp_algo_linalg",
sources=[os.path.join("dpnp", "linalg", "dpnp_algo_linalg.pyx")],
**kwargs_common)
dpnp_fft = Extension(
name="dpnp.fft.dpnp_algo_fft",
sources=[os.path.join("dpnp", "fft", "dpnp_algo_fft.pyx")],
**kwargs_common)
dpnp_utils = Extension(
name="dpnp.dpnp_utils.dpnp_algo_utils",
sources=[os.path.join("dpnp", "dpnp_utils", "dpnp_algo_utils.pyx")],
**kwargs_common)
cython_options.docstrings = True
cython_options.warning_errors = True
dpnp_cython_mods = cythonize([dpnp_algo, dpnp_dparray, dpnp_random, dpnp_utils, dpnp_linalg, dpnp_fft],
compiler_directives={"language_level": sys.version_info[0],
"warn.unused": False,
"warn.unused_result": False,
"warn.maybe_uninitialized": False,
"warn.undeclared": False,
"boundscheck": True,
"linetrace": True
},
gdb_debug=False,
build_dir="build_cython",
annotate=False,
quiet=False)
setup(name="dpnp",
version=__version__,
description="NumPy-like API accelerated with SYCL",
long_description=__readme_file__,
long_description_content_type="text/markdown",
author="Intel Corporation",
maintainer="Intel Corp.",
maintainer_email="scripting@intel.com",
url="https://intelpython.github.io/dpnp/",
download_url="https://github.com/IntelPython/dpnp",
license='BSD',
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
keywords="sycl numpy python3 intel mkl oneapi gpu dpcpp pstl",
platforms=["Linux", "Windows"],
test_suite="pytest",
python_requires=">=3.6",
install_requires=["numpy>=1.15"],
setup_requires=["numpy>=1.15"],
tests_require=["numpy>=1.15"],
ext_modules=dpnp_cython_mods,
cmdclass=dpnp_build_commands,
packages=['dpnp',
'dpnp.dpnp_algo',
'dpnp.dpnp_utils',
'dpnp.fft',
'dpnp.linalg',
'dpnp.random'
],
package_data={'dpnp': ['libdpnp_backend_c.so', 'dpnp_backend_c.lib', 'dpnp_backend_c.dll']},
include_package_data=True,
# this is needed for the 'build' command to automatically call 'build_clib';
# it attaches the library to all extensions (which is not needed)
libraries=dpnp_backend_c_description
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any
from omegaconf import DictConfig
import hydra
def add(app_cfg: DictConfig, key1: str, key2: str) -> Any:
num1 = app_cfg[key1]
num2 = app_cfg[key2]
ret = num1 + num2
print(f"Hello {app_cfg.user}, {num1} + {num2} = {ret}")
return ret
@hydra.main(config_path="conf", config_name="config")
def main(cfg: DictConfig) -> None:
add(cfg.app, "num1", "num2")
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from webtest import http
from doctest import SKIP
from tests.apps import input_app
PY3 = sys.version_info >= (3,)
def setup_test(test):
for example in test.examples:
# urlopen has moved in py3
if PY3:
example.options.setdefault(SKIP, 1)
if not PY3:
server = http.StopableWSGIServer.create(input_app)
server.wait()
path_to_html_file = os.path.join('tests', 'test.html')
test.globs.update(
input_app=input_app,
server=server,
your_url=server.application_url.rstrip('/') + '/html',
path_to_html_file=path_to_html_file,
)
setup_test.__test__ = False
def teardown_test(test):
if 'server' in test.globs:
test.globs['server'].shutdown()
teardown_test.__test__ = False
|
import json
class nsfc:
@staticmethod
def load_discipline():
dic = {}
with open('data/nsfc.jl', 'r', encoding='utf-8') as f:
for line in f:
th = json.loads(line)
dic[th['_id']] = th
return dic
discipline = load_discipline.__func__()
|
import utility
from .UnivariateLocalEstimator import UnivariateLocalEstimator
from .VineKernelDensity import VineKernelDensity
|
import sys
import datetime
from pyspark import SparkContext
from csv import reader
from operator import add
def week(datetimestr):
# get the ISO week number of the year
date, time = datetimestr.split(' ')
yearstr, monthstr, daystr = date.split('-')
year = int(yearstr)
month = int(monthstr)
day = int(daystr)
isoyear, isoweeknum, isoweekdaynum = datetime.date(year, month, day).isocalendar()
if isoyear == year:
return isoweeknum
else:
return 0
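# Example: datetime.date(2013, 1, 1).isocalendar() == (2013, 1, 2), so
# week('2013-01-01 00:00:00') returns 1, while '2012-01-01 00:00:00' falls in
# ISO year 2011 (week 52) and therefore returns 0.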
if __name__ == "__main__":
sc = SparkContext()
rddbike = sc.textFile('BikeResult.txt')
lines = rddbike.mapPartitions(lambda x: reader(x))
neighbor_count = lines.filter(lambda x: int(x[1]) >= 0).map(lambda x: (int(x[1]), 1)).reduceByKey(add).sortBy(lambda x: x[0]).map(lambda x: "%d,%d" % (x[0], x[1]))
neighbor_count.saveAsTextFile("bike_n_count.txt")
week_count = lines.filter(lambda x: int(x[1]) >= 0).map(lambda x:(week(x[0]), 1)).reduceByKey(add).sortBy(lambda x: x[0]).map(lambda x: "%d,%d" % (x[0], x[1]))
week_count.saveAsTextFile("bike_week_count.txt")
|
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Software License Agreement (BSD License 2.0)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# This file is originally from:
# https://github.com/ros/ros_comm/blob/6e5016f4b2266d8a60c9a1e163c4928b8fc7115e/tools/rostopic/src/rostopic/__init__.py
from argparse import ArgumentTypeError
from collections import defaultdict
import functools
import math
import threading
import rclpy
from rclpy.clock import Clock
from rclpy.clock import ClockType
from rclpy.qos import qos_profile_sensor_data
from ros2cli.node.direct import DirectNode
from ros2topic.api import get_msg_class
from ros2topic.api import TopicNameCompleter
from ros2topic.verb import VerbExtension
DEFAULT_WINDOW_SIZE = 10000
def unsigned_int(string):
try:
value = int(string)
except ValueError:
value = -1
if value < 0:
raise ArgumentTypeError('value must be non-negative integer')
return value
class HzVerb(VerbExtension):
"""Print the average publishing rate to screen."""
def add_arguments(self, parser, cli_name):
arg = parser.add_argument(
'topic_name',
help="Name of the ROS topic to listen to (e.g. '/chatter')")
arg.completer = TopicNameCompleter(
include_hidden_topics_key='include_hidden_topics')
parser.add_argument(
'--window', '-w',
dest='window_size', type=unsigned_int, default=DEFAULT_WINDOW_SIZE,
help='window size, in # of messages, for calculating rate '
'(default: %d)' % DEFAULT_WINDOW_SIZE, metavar='WINDOW')
parser.add_argument(
'--filter',
dest='filter_expr', default=None,
help='only measure messages matching the specified Python expression', metavar='EXPR')
parser.add_argument(
'--wall-time',
dest='use_wtime', default=False, action='store_true',
help='calculates rate using wall time which can be helpful'
' when clock is not published during simulation')
def main(self, *, args):
return main(args)
def main(args):
topic = args.topic_name
if args.filter_expr:
def expr_eval(expr):
def eval_fn(m):
return eval(expr)
return eval_fn
filter_expr = expr_eval(args.filter_expr)
else:
filter_expr = None
with DirectNode(args) as node:
_rostopic_hz(node.node, topic, window_size=args.window_size, filter_expr=filter_expr,
use_wtime=args.use_wtime)
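# Example (hedged): this verb is normally reached through the ros2 CLI, e.g.
#   ros2 topic hz /chatter --window 100 --filter "m.data != ''"
# where the filter expression is evaluated with `m` bound to each incoming
# message (see expr_eval above) and --window bounds the number of samples used
# to compute the rate.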
class ROSTopicHz(object):
"""ROSTopicHz receives messages for a topic and computes frequency."""
def __init__(self, node, window_size, filter_expr=None, use_wtime=False):
self.lock = threading.Lock()
self.last_printed_tn = 0
self.msg_t0 = -1
self.msg_tn = 0
self.times = []
self._last_printed_tn = defaultdict(int)
self._msg_t0 = defaultdict(lambda: -1)
self._msg_tn = defaultdict(int)
self._times = defaultdict(list)
self.filter_expr = filter_expr
self.use_wtime = use_wtime
self.window_size = window_size
# Clock that has support for ROS time.
self._clock = node.get_clock()
def get_last_printed_tn(self, topic=None):
if topic is None:
return self.last_printed_tn
return self._last_printed_tn[topic]
def set_last_printed_tn(self, value, topic=None):
if topic is None:
self.last_printed_tn = value
self._last_printed_tn[topic] = value
def get_msg_t0(self, topic=None):
if topic is None:
return self.msg_t0
return self._msg_t0[topic]
def set_msg_t0(self, value, topic=None):
if topic is None:
self.msg_t0 = value
self._msg_t0[topic] = value
def get_msg_tn(self, topic=None):
if topic is None:
return self.msg_tn
return self._msg_tn[topic]
def set_msg_tn(self, value, topic=None):
if topic is None:
self.msg_tn = value
self._msg_tn[topic] = value
def get_times(self, topic=None):
if topic is None:
return self.times
return self._times[topic]
def set_times(self, value, topic=None):
if topic is None:
self.times = value
self._times[topic] = value
def callback_hz(self, m, topic=None):
"""
Calculate interval time.
:param m: Message instance
:param topic: Topic name
"""
# ignore messages that don't match filter
if self.filter_expr is not None and not self.filter_expr(m):
return
with self.lock:
            # Use ROS time as the default time source and wall time only if requested
curr_rostime = self._clock.now() if not self.use_wtime else \
Clock(clock_type=ClockType.SYSTEM_TIME).now()
# time reset
if curr_rostime.nanoseconds == 0:
if len(self.get_times(topic=topic)) > 0:
print('time has reset, resetting counters')
self.set_times([], topic=topic)
return
curr = curr_rostime.nanoseconds
msg_t0 = self.get_msg_t0(topic=topic)
if msg_t0 < 0 or msg_t0 > curr:
self.set_msg_t0(curr, topic=topic)
self.set_msg_tn(curr, topic=topic)
self.set_times([], topic=topic)
else:
self.get_times(topic=topic).append(curr - self.get_msg_tn(topic=topic))
self.set_msg_tn(curr, topic=topic)
if len(self.get_times(topic=topic)) > self.window_size:
self.get_times(topic=topic).pop(0)
def get_hz(self, topic=None):
"""
        Calculate the average publishing rate.
        :param topic: topic name, ``str``
:returns: tuple of stat results
(rate, min_delta, max_delta, standard deviation, window number)
None when waiting for the first message or there is no new one
"""
if not self.get_times(topic=topic):
return
elif self.get_last_printed_tn(topic=topic) == 0:
self.set_last_printed_tn(self.get_msg_tn(topic=topic), topic=topic)
return
elif self.get_msg_tn(topic=topic) < self.get_last_printed_tn(topic=topic) + 1e9:
return
with self.lock:
            # compute rate statistics over the current window
times = self.get_times(topic=topic)
n = len(times)
mean = sum(times) / n
rate = 1. / mean if mean > 0. else 0
# std dev
std_dev = math.sqrt(sum((x - mean)**2 for x in times) / n)
# min and max
max_delta = max(times)
min_delta = min(times)
self.set_last_printed_tn(self.get_msg_tn(topic=topic), topic=topic)
return rate, min_delta, max_delta, std_dev, n
def print_hz(self, topic=None):
"""Print the average publishing rate to screen."""
ret = self.get_hz(topic)
if ret is None:
return
rate, min_delta, max_delta, std_dev, window = ret
print('average rate: %.3f\n\tmin: %.3fs max: %.3fs std dev: %.5fs window: %s'
% (rate * 1e9, min_delta * 1e-9, max_delta * 1e-9, std_dev * 1e-9, window))
return
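    # Hedged worked example of the unit handling above (values are illustrative):
    # deltas are stored in nanoseconds, so for times = [1e8, 1e8] ns the mean is
    # 1e8 ns and get_hz() returns rate = 1e-8 (per ns); print_hz() then reports
    # rate * 1e9 = 10.0 Hz, with min/max/std dev scaled by 1e-9 into seconds.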
def _rostopic_hz(node, topic, window_size=DEFAULT_WINDOW_SIZE, filter_expr=None, use_wtime=False):
"""
Periodically print the publishing rate of a topic to console until shutdown.
    :param topic: topic name, ``str``
    :param window_size: number of messages to average over, ``int``
:param filter_expr: Python filter expression that is called with m, the message instance
"""
# pause hz until topic is published
msg_class = get_msg_class(node, topic, blocking=True)
if msg_class is None:
node.destroy_node()
return
rt = ROSTopicHz(node, window_size, filter_expr=filter_expr, use_wtime=use_wtime)
node.create_subscription(
msg_class,
topic,
functools.partial(rt.callback_hz, topic=topic),
qos_profile=qos_profile_sensor_data)
while rclpy.ok():
rclpy.spin_once(node)
rt.print_hz(topic)
node.destroy_node()
rclpy.shutdown()
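# Hedged CLI usage sketch (topic name and filter expression are illustrative):
#   ros2 topic hz /chatter --window 100 --filter "len(m.data) > 0"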
|
#!/usr/bin/env python
##################################################################
# Copyright (c) 2012, Sergej Srepfler <sergej.srepfler@gmail.com>
# February 2012 -
# Version 0.3.1, Last change on Nov 15, 2012
# This software is distributed under the terms of BSD license.
##################################################################
# Decode SMPP packet
from libSmpp import *
import logging
import sys
if __name__ == "__main__":
# level for decoding are: DEBUG, INFO, WARNING, ERROR, CRITICAL
logging.basicConfig(level=logging.DEBUG)
LoadDictionary("dictSMPP.xml")
msg=sys.argv[1]
    print(msg)
    print("=" * 30)
    H=HDRItem()
    stripHdr(H,msg)
    print("Len=", H.len, "Code=", H.operation, "Status=", H.result, "Sequence=", H.sequence, "Message=", H.msg)
    splitMsgAVPs(H)
    print("Mandatory:", H.mandatory)
    print("Optional:", H.optional)
######################################################
# History
# 0.3.1 - Nov 15, 2012 - SMPP decode initial version
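######################################################
# Hedged usage sketch (script name and payload are placeholders):
#   python <this-script>.py <hex-encoded SMPP PDU>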
|
from typing import Tuple, Union, Iterable, List, Callable, Dict, Optional
from nnuncert.models._network import MakeNet
from nnuncert.models.dnnc import DNNCModel, DNNCRidge, DNNCHorseshoe, DNNCPred
from nnuncert.models.mc_dropout import DropoutTF, MCDropout, MCDropoutPred
from nnuncert.models.ensemble import Ensemble, PNNEnsemble, NLMEnsemble, EnsPredGauss
from nnuncert.models.gp import GPModel, GPPred
from nnuncert.models.nlm import NLM, NLMPred
from nnuncert.models.pbp import PBPModel, PBPPred
from nnuncert.models.pnn import PNN, PNNPred
STR2TYPE = {
"DNNC-R" : DNNCRidge,
"DNNC-HS" : DNNCHorseshoe,
"MCDropout" : MCDropout,
"MC Dropout" : MCDropout,
"MC dropout" : MCDropout,
"PNN" : PNN,
"Deep emsemble" : PNNEnsemble,
"GP" : GPModel,
"GP-ReLU" : GPModel,
"PNN-E" : PNNEnsemble,
"NLM" : NLM,
"NLM-E" : NLMEnsemble,
"PBP" : PBPModel,
}
def make_network(model_type: Union[type, str],
input_shape: Tuple,
architecture: List[Tuple[int, str, float]],
*args, **kwargs) -> MakeNet:
"""Generate network with 'architecture' for given 'model_type'.
Parameters
----------
model_type : Union[type, str]
Model to generate network for.
input_shape : Tuple
Shape of inputs for neural network.
architecture : List[Tuple[int, str, float]]
Network architecture, per hidden layer:
[Number of hidden units, activation function in layer, dropout rate]
Returns
-------
MakeNet
        Network to be used as input for model initialization.
"""
if isinstance(model_type, str):
model_type = STR2TYPE[model_type]
MakeNetDict = {
DNNCModel : MakeNet.mean_only(input_shape, architecture, *args, **kwargs),
DNNCRidge : MakeNet.mean_only(input_shape, architecture, *args, **kwargs),
DNNCHorseshoe : MakeNet.mean_only(input_shape, architecture, *args, **kwargs),
MCDropout : MakeNet.joint(input_shape, architecture, dropout_type=DropoutTF, *args, **kwargs),
PNN : MakeNet.joint(input_shape, architecture, *args, **kwargs),
PNNEnsemble : MakeNet.joint(input_shape, architecture, *args, **kwargs),
NLM : MakeNet.joint(input_shape, architecture, *args, **kwargs),
NLMEnsemble : MakeNet.joint(input_shape, architecture, *args, **kwargs),
PBPModel : MakeNet.joint(input_shape, architecture, *args, **kwargs),
GPModel : MakeNet.mean_only(input_shape, architecture, *args, **kwargs),
}
return MakeNetDict[model_type]
def make_model(model_type: Union[type, str],
input_shape: Tuple,
architecture: List[Tuple[int, str, float]],
net_kwargs: Optional[Dict] = {},
*args, **kwargs):
"""Initialize model with given architecture.
Parameters
----------
model_type : Union[type, str]
Model to generate network for.
input_shape : Tuple
Shape of inputs for neural network.
architecture : List[Tuple[int, str, float]]
Network architecture, per hidden layer:
[Number of hidden units, activation function in layer, dropout rate]
net_kwargs : Optional[Dict]
Arguments to be passed to MakeNet creator function.
"""
if isinstance(model_type, str):
model_type = STR2TYPE[model_type]
# generate network
net = make_network(model_type, input_shape, architecture, **net_kwargs)
# init model
model = model_type(net, *args, **kwargs)
return model
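# Hedged usage sketch; the model name, input shape and architecture triples
# below are illustrative values, not defaults taken from this package:
#   net = make_network("PNN", input_shape=(8,),
#                      architecture=[(50, "relu", 0.1), (50, "relu", 0.1)])
#   model = make_model("PNN", input_shape=(8,),
#                      architecture=[(50, "relu", 0.1), (50, "relu", 0.1)])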
|
class UndergroundSystem(object):
def __init__(self):
# record the customer's starting trip
# card ID: [station, t]
self.customer_trip = {}
# record the average travel time from start station to end station
# (start, end): [t, times]
self.trips = {}
def checkIn(self, id, stationName, t):
self.customer_trip[id] = [stationName, t]
def checkOut(self, id, stationName, t):
# get the check in information of the customer
start_station, start_t = self.customer_trip[id]
del self.customer_trip[id]
# the trip information
# stationName => end_station
# t => end_t
trip = (start_station, stationName)
travel_time = t - start_t
# store / update the trip information
if trip not in self.trips:
self.trips[trip] = [travel_time, 1]
else:
            # alternative: store the total travel time and the trip count,
            # so the average does not have to be recomputed on every check-out
avg_t, times = self.trips[trip]
self.trips[trip] = [
(avg_t * times + travel_time) / (times + 1.0), times + 1]
def getAverageTime(self, startStation, endStation):
return self.trips[(startStation, endStation)][0]
# Your UndergroundSystem object will be instantiated and called as such:
# obj = UndergroundSystem()
# obj.checkIn(id,stationName,t)
# obj.checkOut(id,stationName,t)
# param_3 = obj.getAverageTime(startStation,endStation)
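# Hedged worked example of the running average above (station names and times
# are illustrative): after two trips of 10 and 14 time units from "A" to "B",
# self.trips[("A", "B")] holds [12.0, 2], so getAverageTime("A", "B") returns 12.0.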
|
from .interface import Capreole
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains decorators for Qt
"""
from __future__ import print_function, division, absolute_import
from functools import wraps
from Qt.QtCore import Qt
from Qt.QtWidgets import QApplication
from Qt.QtGui import QCursor
def show_wait_cursor(fn):
"""
Decorator that shows wait cursor during function execution
:param fn:
"""
@wraps(fn)
def wrapper(*args, **kwargs):
cursor = QCursor(Qt.WaitCursor)
QApplication.setOverrideCursor(cursor)
try:
return fn(*args, **kwargs)
finally:
QApplication.restoreOverrideCursor()
return wrapper
def show_arrow_cursor(fn):
"""
Decorator that shows arrow cursor during function execution
:param fn:
"""
@wraps(fn)
def wrapper(*args, **kwargs):
cursor = QCursor(Qt.ArrowCursor)
QApplication.setOverrideCursor(cursor)
try:
return fn(*args, **kwargs)
finally:
QApplication.restoreOverrideCursor()
return wrapper
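# Hedged usage sketch (the decorated function is illustrative):
#   @show_wait_cursor
#   def load_heavy_scene():
#       ...  # long-running work; the cursor is restored even if this raises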
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 26 13:44:11 2021
@author: user24
"""
import tkinter as tk
import tkinter.filedialog as fd
root = tk.Tk()
root.withdraw()
file = fd.askopenfilename(
title="ファイルを選んでください。",
filetypes=[("TEXT", ".txt"), ("TEXT", ".py"), ("HTML", ".html")]
)
if file:
with open(file, "r", encoding="utf_8") as fileobj:
text = fileobj.read()
print(text)
|
import pandas as pd
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
plt.style.use('../acmart.mplrc')
### latency vs stages
print("loading csv")
d = pd.read_csv('perf-data/results.csv')
print("filtering data")
d = d[d['max_copy'] == 512]
d = d[d['pipes'] == 6]
d = d[d['samples'] == 20000000]
d = d[['sdr', 'scheduler', 'stages', 'run', 'time', 'event', 'block', 'items']]
def gr_block_index(d):
if d['sdr'] == 'gr' and d['event'] == 'rx':
return d['block'] - d['stages'] * 2 - 2
else:
return d['block']
print("applying block index change for GR...", end='')
d['block'] = d.apply(gr_block_index, axis=1)
print("done")
g = d.groupby(['sdr', 'scheduler', 'stages', 'run'])
r = pd.DataFrame({'sdr': pd.Series(dtype='str'),
'scheduler': pd.Series(dtype='str'),
'stages': pd.Series(dtype='int64'),
'latency': pd.Series(dtype='int64')})
for (i, x) in g:
a = x[['time', 'event', 'block', 'items']]
rx = a[a['event'] == 'rx'].set_index(['block', 'items'])
tx = a[a['event'] == 'tx'].set_index(['block', 'items'])
lat = rx.join(tx, lsuffix='_rx', how='inner')
foo = rx.join(tx, lsuffix='_rx', how='outer')
diff = foo.shape[0] - lat.shape[0]
if diff > 6:
print(f"{i} item lost {diff} of {lat.shape[0]}")
lat = lat['time_rx'] - lat['time']
assert np.all(lat > 0)
t = pd.DataFrame(lat, columns=['latency'])
t['sdr'] = i[0]
t['scheduler'] = i[1]
t['stages'] = i[2]
r = pd.concat([r, t], axis=0)
r['latency'] = r['latency']/1e6
r.to_pickle("latency.data")
##############################################################
##############################################################
##############################################################
##############################################################
r = pd.read_pickle("latency.data")
def percentile(n):
def percentile_(x):
return np.percentile(x, n)
percentile_.__name__ = 'percentile_%s' % n
return percentile_
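# Hedged example of the percentile factory above: percentile(5) returns a
# callable named 'percentile_5' that can be passed to DataFrame.agg, and
# percentile(5)([1, 2, 3, 4]) gives np.percentile([1, 2, 3, 4], 5) ~= 1.15.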
d = r.groupby(['sdr', 'scheduler', 'stages']).agg({'latency': [np.mean, np.std, percentile(5), percentile(95)]})
fig, ax = plt.subplots(1, 1)
fig.subplots_adjust(bottom=.192, left=.11, top=.99, right=.97)
t = d.loc[('gr')].reset_index()
t[('latency', 'percentile_5')] = t[('latency', 'mean')] - t[('latency', 'percentile_5')]
t[('latency', 'percentile_95')] = t[('latency', 'percentile_95')] - t[('latency', 'mean')]
ax.errorbar(t['stages'], t[('latency', 'mean')], yerr=[t[('latency', 'percentile_5')], t[('latency', 'percentile_95')]], label=r'GNU\,Radio')
t = d.loc[('fs', 'smoln')].reset_index()
t[('latency', 'percentile_5')] = t[('latency', 'mean')] - t[('latency', 'percentile_5')]
t[('latency', 'percentile_95')] = t[('latency', 'percentile_95')] - t[('latency', 'mean')]
ax.errorbar(t['stages']-0.3, t[('latency', 'mean')], yerr=[t[('latency', 'percentile_5')], t[('latency', 'percentile_95')]], label='Smol-N')
t = d.loc[('fs', 'flow')].reset_index()
t[('latency', 'percentile_5')] = t[('latency', 'mean')] - t[('latency', 'percentile_5')]
t[('latency', 'percentile_95')] = t[('latency', 'percentile_95')] - t[('latency', 'mean')]
ax.errorbar(t['stages']+0.3, t[('latency', 'mean')], yerr=[t[('latency', 'percentile_5')], t[('latency', 'percentile_95')]], label='Flow')
plt.setp(ax.get_yticklabels(), rotation=90, va="center")
ax.set_xlabel(r'\#\,Stages')
ax.set_ylabel('Latency (in ms)')
# ax.set_ylim(0, 99)
handles, labels = ax.get_legend_handles_labels()
handles = [x[0] for x in handles]
ax.legend(handles, labels, handlelength=2.95)
plt.savefig('fir_rand_latency.pdf')
plt.close('all')
|
"""Treadmill trace CLI.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import sys
import click
from six.moves import urllib_parse
from treadmill import cli
from treadmill import context
from treadmill import restclient
from treadmill.websocket import client as ws_client
from treadmill.apptrace import (events, printer)
_LOGGER = logging.getLogger(__name__)
_RC_DEFAULT_EXIT = 100
_RC_ABORTED = 101
_RC_KILLED = 102
_RC_NO_TRACES = 103
def _trace_loop(ctx, app, snapshot):
"""Instance trace loop."""
trace_printer = printer.AppTracePrinter()
rc = {'rc': _RC_DEFAULT_EXIT}
if not snapshot:
click.echo(
'# No trace information yet, waiting...\r', nl=False, err=True
)
def on_message(result):
"""Callback to process trace message."""
_LOGGER.debug('result: %r', result)
event = events.AppTraceEvent.from_dict(result['event'])
if event is None:
return False
trace_printer.process(event)
if isinstance(event, events.FinishedTraceEvent):
rc['rc'] = event.rc
if isinstance(event, events.KilledTraceEvent):
rc['rc'] = _RC_KILLED
if isinstance(event, events.AbortedTraceEvent):
rc['rc'] = _RC_ABORTED
if isinstance(event, events.DeletedTraceEvent):
return False
return True
def on_error(result):
"""Callback to process errors."""
click.echo('Error: %s' % result['_error'], err=True)
try:
ws_client.ws_loop(
ctx['wsapi'],
{'topic': '/trace',
'filter': app},
snapshot,
on_message,
on_error
)
sys.exit(rc['rc'])
except ws_client.WSConnectionError:
click.echo('Could not connect to any Websocket APIs', err=True)
sys.exit(-1)
def init():
"""Return top level command handler."""
ctx = {}
@click.command()
@click.option('--cell', required=True,
envvar='TREADMILL_CELL',
callback=cli.handle_context_opt,
expose_value=False)
@click.option('--api', required=False, help='REST API url to use.',
metavar='URL',
envvar='TREADMILL_RESTAPI')
@click.option('--wsapi', required=False, help='WebSocket API url to use.',
metavar='URL',
envvar='TREADMILL_WSAPI')
@click.option('--last', is_flag=True, default=False)
@click.option('--snapshot', is_flag=True, default=False)
@click.argument('app')
def trace(api, wsapi, last, snapshot, app):
"""Trace application events.
        Invoking treadmill_trace with a non-existent application instance will
        cause the utility to wait for the specified instance to be started.
        Specifying an already finished instance of the application will display
        historical trace information and the exit status.
        Specifying only an application name will list all the instance IDs with
        trace information available.
        The trace will exit with the exit code of the container service that
        caused the container to finish (reached its retry count).
        Special error codes if the service did not exit gracefully and it is
        not possible to capture the return code:
        101 - container was aborted.
        102 - container was killed (possibly out of memory)
        103 - no trace information
        100 - everything else.
"""
# Disable too many branches.
#
# pylint: disable=R0912
ctx['api'] = api
ctx['wsapi'] = wsapi
if '#' not in app:
apis = context.GLOBAL.state_api(ctx['api'])
url = '/state/?finished=1&match={app}'.format(
app=urllib_parse.quote(app)
)
try:
response = restclient.get(apis, url)
app_states = response.json()
except restclient.NotFoundError:
app_states = []
if not app_states:
click.echo('# Trace information does not exist.', err=True)
sys.exit(_RC_NO_TRACES)
elif not last:
for name in [app['name'] for app in app_states]:
cli.out(name)
return
else:
app = app_states[-1]['name']
return _trace_loop(ctx, app, snapshot)
return trace
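# Hedged CLI usage sketch (cell and instance names are illustrative):
#   treadmill trace --cell mycell 'proid.myapp#0000000123'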
|
import sys
from typing import Any, Dict, Iterable, Optional
import mlflow
import pandas as pd
import pytest
import yaml
from kedro.config import ConfigLoader
from kedro.extras.datasets.pickle import PickleDataSet
from kedro.framework.hooks import hook_impl
from kedro.framework.project import Validator, _ProjectPipelines, _ProjectSettings
from kedro.framework.session import KedroSession
from kedro.framework.startup import bootstrap_project
from kedro.io import DataCatalog, MemoryDataSet
from kedro.pipeline import Pipeline, node
from kedro.runner import SequentialRunner
from kedro.versioning import Journal
from mlflow.entities import RunStatus
from mlflow.models import infer_signature
from mlflow.tracking import MlflowClient
from kedro_mlflow.framework.context import get_mlflow_config
from kedro_mlflow.framework.hooks.pipeline_hook import (
MlflowPipelineHook,
_format_conda_env,
_generate_kedro_command,
)
from kedro_mlflow.io.metrics import (
MlflowMetricDataSet,
MlflowMetricHistoryDataSet,
MlflowMetricsDataSet,
)
from kedro_mlflow.pipeline import pipeline_ml_factory
from kedro_mlflow.pipeline.pipeline_ml import PipelineML
@pytest.fixture
def python_version():
python_version = ".".join(
[
str(sys.version_info.major),
str(sys.version_info.minor),
str(sys.version_info.micro),
]
)
return python_version
@pytest.fixture
def requirements_path(tmp_path):
return tmp_path / "requirements.txt"
@pytest.fixture
def requirements_path_str(requirements_path):
return requirements_path.as_posix()
@pytest.fixture
def environment_path(tmp_path):
return tmp_path / "environment.yml"
@pytest.fixture
def environment_path_str(environment_path):
return environment_path.as_posix()
@pytest.fixture
def env_from_none(python_version):
return dict(python=python_version)
@pytest.fixture
def env_from_requirements(requirements_path, python_version):
requirements_data = ["pandas>=1.0.0,<2.0.0", "kedro==0.15.9"]
with open(requirements_path, mode="w") as file_handler:
for item in requirements_data:
file_handler.write(f"{item}\n")
return dict(python=python_version, dependencies=requirements_data)
@pytest.fixture
def env_from_dict(python_version):
env_from_dict = dict(python=python_version, dependencies=["pandas>=1.0.0,<2.0.0"])
return env_from_dict
@pytest.fixture
def env_from_environment(environment_path, env_from_dict):
with open(environment_path, mode="w") as file_handler:
yaml.dump(env_from_dict, file_handler)
env_from_environment = env_from_dict
return env_from_environment
class DummyProjectHooks:
@hook_impl
def register_config_loader(self, conf_paths: Iterable[str]) -> ConfigLoader:
return ConfigLoader(conf_paths)
@hook_impl
def register_catalog(
self,
catalog: Optional[Dict[str, Dict[str, Any]]],
credentials: Dict[str, Dict[str, Any]],
load_versions: Dict[str, str],
save_version: str,
journal: Journal,
) -> DataCatalog:
return DataCatalog.from_config(
catalog, credentials, load_versions, save_version, journal
)
def _mock_imported_settings_paths(mocker, mock_settings):
for path in [
"kedro.framework.context.context.settings",
"kedro.framework.session.session.settings",
"kedro.framework.project.settings",
]:
mocker.patch(path, mock_settings)
return mock_settings
def _mock_settings_with_hooks(mocker, hooks):
class MockSettings(_ProjectSettings):
_HOOKS = Validator("HOOKS", default=hooks)
return _mock_imported_settings_paths(mocker, MockSettings())
@pytest.fixture
def mock_settings_with_mlflow_hooks(mocker):
return _mock_settings_with_hooks(
mocker,
hooks=(
DummyProjectHooks(),
MlflowPipelineHook(),
# MlflowNodeHook(),
),
)
@pytest.fixture(autouse=True)
def mocked_logging(mocker):
# Disable logging.config.dictConfig in KedroSession._setup_logging as
# it changes logging.config and affects other unit tests
return mocker.patch("logging.config.dictConfig")
@pytest.fixture
def mock_failing_pipelines(mocker):
def failing_node():
mlflow.start_run(nested=True)
raise ValueError("Let's make this pipeline fail")
def mocked_register_pipelines():
failing_pipeline = Pipeline(
[
node(
func=failing_node,
inputs=None,
outputs="fake_output",
)
]
)
return {"__default__": failing_pipeline, "pipeline_off": failing_pipeline}
mocker.patch.object(
_ProjectPipelines,
"_get_pipelines_registry_callable",
return_value=mocked_register_pipelines,
)
@pytest.mark.parametrize(
"conda_env,expected",
(
[None, pytest.lazy_fixture("env_from_none")],
[pytest.lazy_fixture("env_from_dict"), pytest.lazy_fixture("env_from_dict")],
[
pytest.lazy_fixture("requirements_path"),
pytest.lazy_fixture("env_from_requirements"),
],
[
pytest.lazy_fixture("requirements_path_str"),
pytest.lazy_fixture("env_from_requirements"),
],
[
pytest.lazy_fixture("environment_path"),
pytest.lazy_fixture("env_from_environment"),
],
[
pytest.lazy_fixture("environment_path_str"),
pytest.lazy_fixture("env_from_environment"),
],
),
)
def test_format_conda_env(conda_env, expected):
conda_env = _format_conda_env(conda_env)
assert conda_env == expected
def test_format_conda_env_error():
with pytest.raises(ValueError, match="Invalid conda_env"):
_format_conda_env(["invalid_list"])
@pytest.fixture
def dummy_pipeline():
def preprocess_fun(data):
return data
def train_fun(data, param):
return 2
def metrics_fun(data, model):
return {"metric_key": {"value": 1.1, "step": 0}}
def metric_fun(data, model):
return 1.1
def metric_history_fun(data, model):
return [0.1, 0.2]
def predict_fun(model, data):
return data * model
dummy_pipeline = Pipeline(
[
node(
func=preprocess_fun,
inputs="raw_data",
outputs="data",
tags=["training", "inference"],
),
node(
func=train_fun,
inputs=["data", "params:unused_param"],
outputs="model",
tags=["training"],
),
node(
func=metrics_fun,
inputs=["model", "data"],
outputs="my_metrics",
tags=["training"],
),
node(
func=metrics_fun,
inputs=["model", "data"],
outputs="another_metrics",
tags=["training"],
),
node(
func=metric_fun,
inputs=["model", "data"],
outputs="my_metric",
tags=["training"],
),
node(
func=metric_fun,
inputs=["model", "data"],
outputs="another_metric",
tags=["training"],
),
node(
func=metric_history_fun,
inputs=["model", "data"],
outputs="my_metric_history",
tags=["training"],
),
node(
func=metric_history_fun,
inputs=["model", "data"],
outputs="another_metric_history",
tags=["training"],
),
node(
func=predict_fun,
inputs=["model", "data"],
outputs="predictions",
tags=["inference"],
),
]
)
return dummy_pipeline
@pytest.fixture
def dummy_pipeline_ml(dummy_pipeline, env_from_dict):
dummy_pipeline_ml = pipeline_ml_factory(
training=dummy_pipeline.only_nodes_with_tags("training"),
inference=dummy_pipeline.only_nodes_with_tags("inference"),
input_name="raw_data",
conda_env=env_from_dict,
model_name="model",
)
return dummy_pipeline_ml
@pytest.fixture
def dummy_catalog(tmp_path):
dummy_catalog = DataCatalog(
{
"raw_data": MemoryDataSet(pd.DataFrame(data=[1], columns=["a"])),
"params:unused_param": MemoryDataSet("blah"),
"data": MemoryDataSet(),
"model": PickleDataSet((tmp_path / "model.csv").as_posix()),
"my_metrics": MlflowMetricsDataSet(),
"another_metrics": MlflowMetricsDataSet(prefix="foo"),
"my_metric": MlflowMetricDataSet(),
"another_metric": MlflowMetricDataSet(key="foo"),
"my_metric_history": MlflowMetricHistoryDataSet(),
"another_metric_history": MlflowMetricHistoryDataSet(key="bar"),
}
)
return dummy_catalog
@pytest.fixture
def pipeline_ml_with_parameters():
def remove_stopwords(data, stopwords):
return data
def train_fun_hyperparam(data, hyperparam):
return 2
def predict_fun(model, data):
return data * model
def convert_probs_to_pred(data, threshold):
return (data > threshold) * 1
full_pipeline = Pipeline(
[
            # almost the same as previously, but the stopwords are parameters
            # shared between the inference and training pipelines
node(
func=remove_stopwords,
inputs=dict(data="data", stopwords="params:stopwords"),
outputs="cleaned_data",
tags=["training", "inference"],
),
# parameters in training pipeline, should not be persisted
node(
func=train_fun_hyperparam,
inputs=["cleaned_data", "params:penalty"],
outputs="model",
tags=["training"],
),
node(
func=predict_fun,
inputs=["model", "cleaned_data"],
outputs="predicted_probs",
tags=["inference"],
),
# this time, there is a parameter only for the inference pipeline
node(
func=convert_probs_to_pred,
inputs=["predicted_probs", "params:threshold"],
outputs="predictions",
tags=["inference"],
),
]
)
pipeline_ml_with_parameters = pipeline_ml_factory(
training=full_pipeline.only_nodes_with_tags("training"),
inference=full_pipeline.only_nodes_with_tags("inference"),
input_name="data",
conda_env={"python": "3.7.0", "dependencies": ["kedro==0.16.5"]},
)
return pipeline_ml_with_parameters
@pytest.fixture
def dummy_signature(dummy_catalog, dummy_pipeline_ml):
input_data = dummy_catalog.load(dummy_pipeline_ml.input_name)
dummy_signature = infer_signature(input_data)
return dummy_signature
@pytest.fixture
def dummy_run_params(tmp_path):
dummy_run_params = {
"run_id": "abcdef",
"project_path": tmp_path.as_posix(),
"env": "local",
"kedro_version": "0.16.0",
"tags": [],
"from_nodes": [],
"to_nodes": [],
"node_names": [],
"from_inputs": [],
"load_versions": [],
"pipeline_name": "my_cool_pipeline",
"extra_params": [],
}
return dummy_run_params
@pytest.mark.parametrize(
"pipeline_to_run",
[
(pytest.lazy_fixture("dummy_pipeline")),
(pytest.lazy_fixture("dummy_pipeline_ml")),
],
)
def test_mlflow_pipeline_hook_with_different_pipeline_types(
kedro_project_with_mlflow_conf,
env_from_dict,
pipeline_to_run,
dummy_catalog,
dummy_run_params,
):
bootstrap_project(kedro_project_with_mlflow_conf)
with KedroSession.create(project_path=kedro_project_with_mlflow_conf):
# config_with_base_mlflow_conf is a conftest fixture
pipeline_hook = MlflowPipelineHook()
runner = SequentialRunner()
pipeline_hook.after_catalog_created(
catalog=dummy_catalog,
            # `after_catalog_created` does not use any of the arguments below,
            # so we set them to empty values.
conf_catalog={},
conf_creds={},
feed_dict={},
save_version="",
load_versions="",
run_id=dummy_run_params["run_id"],
)
pipeline_hook.before_pipeline_run(
run_params=dummy_run_params, pipeline=pipeline_to_run, catalog=dummy_catalog
)
runner.run(pipeline_to_run, dummy_catalog)
run_id = mlflow.active_run().info.run_id
pipeline_hook.after_pipeline_run(
run_params=dummy_run_params, pipeline=pipeline_to_run, catalog=dummy_catalog
)
# test : parameters should have been logged
mlflow_conf = get_mlflow_config()
mlflow_client = MlflowClient(mlflow_conf.mlflow_tracking_uri)
run_data = mlflow_client.get_run(run_id).data
# all run_params are recorded as tags
for k, v in dummy_run_params.items():
if v:
assert run_data.tags[k] == str(v)
# params are not recorded because we don't have MlflowNodeHook here
# and the model should not be logged when it is not a PipelineML
nb_artifacts = len(mlflow_client.list_artifacts(run_id))
if isinstance(pipeline_to_run, PipelineML):
assert nb_artifacts == 1
else:
assert nb_artifacts == 0
        # Check that the metrics datasets are prefixed with their names
assert dummy_catalog._data_sets["my_metrics"]._prefix == "my_metrics"
assert dummy_catalog._data_sets["another_metrics"]._prefix == "foo"
assert dummy_catalog._data_sets["my_metric"].key == "my_metric"
assert dummy_catalog._data_sets["another_metric"].key == "foo"
if isinstance(pipeline_to_run, PipelineML):
trained_model = mlflow.pyfunc.load_model(f"runs:/{run_id}/model")
assert trained_model.metadata.signature.to_dict() == {
"inputs": '[{"name": "a", "type": "long"}]',
"outputs": None,
}
@pytest.mark.parametrize(
"copy_mode,expected",
[
(None, {"raw_data": None, "data": None, "model": None}),
("assign", {"raw_data": "assign", "data": "assign", "model": "assign"}),
("deepcopy", {"raw_data": "deepcopy", "data": "deepcopy", "model": "deepcopy"}),
({"model": "assign"}, {"raw_data": None, "data": None, "model": "assign"}),
],
)
def test_mlflow_pipeline_hook_with_copy_mode(
kedro_project_with_mlflow_conf,
dummy_pipeline_ml,
dummy_catalog,
dummy_run_params,
copy_mode,
expected,
):
# config_with_base_mlflow_conf is a conftest fixture
bootstrap_project(kedro_project_with_mlflow_conf)
with KedroSession.create(project_path=kedro_project_with_mlflow_conf):
pipeline_hook = MlflowPipelineHook()
runner = SequentialRunner()
pipeline_hook.after_catalog_created(
catalog=dummy_catalog,
            # `after_catalog_created` does not use any of the arguments below,
            # so we set them to empty values.
conf_catalog={},
conf_creds={},
feed_dict={},
save_version="",
load_versions="",
run_id=dummy_run_params["run_id"],
)
pipeline_to_run = pipeline_ml_factory(
training=dummy_pipeline_ml.training,
inference=dummy_pipeline_ml.inference,
input_name=dummy_pipeline_ml.input_name,
conda_env={"python": "3.7.0", "dependencies": ["kedro==0.16.5"]},
model_name=dummy_pipeline_ml.model_name,
copy_mode=copy_mode,
)
pipeline_hook.before_pipeline_run(
run_params=dummy_run_params, pipeline=pipeline_to_run, catalog=dummy_catalog
)
runner.run(pipeline_to_run, dummy_catalog)
run_id = mlflow.active_run().info.run_id
pipeline_hook.after_pipeline_run(
run_params=dummy_run_params, pipeline=pipeline_to_run, catalog=dummy_catalog
)
mlflow_tracking_uri = (kedro_project_with_mlflow_conf / "mlruns").as_uri()
mlflow.set_tracking_uri(mlflow_tracking_uri)
loaded_model = mlflow.pyfunc.load_model(model_uri=f"runs:/{run_id}/model")
actual_copy_mode = {
name: ds._copy_mode
for name, ds in loaded_model._model_impl.python_model.loaded_catalog._data_sets.items()
}
assert actual_copy_mode == expected
def test_mlflow_pipeline_hook_metric_metrics_with_run_id(
kedro_project_with_mlflow_conf, dummy_pipeline_ml, dummy_run_params
):
bootstrap_project(kedro_project_with_mlflow_conf)
with KedroSession.create(project_path=kedro_project_with_mlflow_conf):
mlflow_conf = get_mlflow_config()
mlflow.set_tracking_uri(mlflow_conf.mlflow_tracking_uri)
with mlflow.start_run():
existing_run_id = mlflow.active_run().info.run_id
dummy_catalog_with_run_id = DataCatalog(
{
"raw_data": MemoryDataSet(pd.DataFrame(data=[1], columns=["a"])),
"params:unused_param": MemoryDataSet("blah"),
"data": MemoryDataSet(),
"model": PickleDataSet(
(kedro_project_with_mlflow_conf / "data" / "model.csv").as_posix()
),
"my_metrics": MlflowMetricsDataSet(run_id=existing_run_id),
"another_metrics": MlflowMetricsDataSet(
run_id=existing_run_id, prefix="foo"
),
"my_metric": MlflowMetricDataSet(run_id=existing_run_id),
"another_metric": MlflowMetricDataSet(
run_id=existing_run_id, key="foo"
),
"my_metric_history": MlflowMetricHistoryDataSet(run_id=existing_run_id),
"another_metric_history": MlflowMetricHistoryDataSet(
run_id=existing_run_id, key="bar"
),
}
)
pipeline_hook = MlflowPipelineHook()
runner = SequentialRunner()
pipeline_hook.after_catalog_created(
catalog=dummy_catalog_with_run_id,
            # `after_catalog_created` does not use any of the arguments below,
            # so we set them to empty values.
conf_catalog={},
conf_creds={},
feed_dict={},
save_version="",
load_versions="",
run_id=dummy_run_params["run_id"],
)
pipeline_hook.before_pipeline_run(
run_params=dummy_run_params,
pipeline=dummy_pipeline_ml,
catalog=dummy_catalog_with_run_id,
)
runner.run(dummy_pipeline_ml, dummy_catalog_with_run_id)
current_run_id = mlflow.active_run().info.run_id
pipeline_hook.after_pipeline_run(
run_params=dummy_run_params,
pipeline=dummy_pipeline_ml,
catalog=dummy_catalog_with_run_id,
)
mlflow_client = MlflowClient(mlflow_conf.mlflow_tracking_uri)
        # the first run is created in the Default experiment (id 0),
        # but the one initialised in before_pipeline_run
        # is created in the kedro_project experiment (id 1)
all_runs_id = set(
[
run.run_id
for k in range(2)
for run in mlflow_client.list_run_infos(experiment_id=f"{k}")
]
)
# the metrics are supposed to have been logged inside existing_run_id
run_data = mlflow_client.get_run(existing_run_id).data
        # Check that the metrics datasets are prefixed with their names
assert all_runs_id == {current_run_id, existing_run_id}
assert run_data.metrics["my_metrics.metric_key"] == 1.1
assert run_data.metrics["foo.metric_key"] == 1.1
assert run_data.metrics["my_metric"] == 1.1
assert run_data.metrics["foo"] == 1.1
assert (
run_data.metrics["my_metric_history"] == 0.2
        )  # the full history is stored, but only the last value is retrieved
assert (
run_data.metrics["bar"] == 0.2
        )  # the full history is stored, but only the last value is retrieved
def test_mlflow_pipeline_hook_save_pipeline_ml_with_parameters(
kedro_project_with_mlflow_conf, # a fixture to be in a kedro project
tmp_path,
pipeline_ml_with_parameters,
dummy_run_params,
):
# config_with_base_mlflow_conf is a conftest fixture
bootstrap_project(kedro_project_with_mlflow_conf)
with KedroSession.create(project_path=kedro_project_with_mlflow_conf):
mlflow_conf = get_mlflow_config()
mlflow.set_tracking_uri(mlflow_conf.mlflow_tracking_uri)
catalog_with_parameters = DataCatalog(
{
"data": MemoryDataSet(pd.DataFrame(data=[1], columns=["a"])),
"cleaned_data": MemoryDataSet(),
"params:stopwords": MemoryDataSet(["Hello", "Hi"]),
"params:penalty": MemoryDataSet(0.1),
"model": PickleDataSet(
(kedro_project_with_mlflow_conf / "data" / "model.csv").as_posix()
),
"params:threshold": MemoryDataSet(0.5),
}
)
pipeline_hook = MlflowPipelineHook()
runner = SequentialRunner()
pipeline_hook.after_catalog_created(
catalog=catalog_with_parameters,
            # `after_catalog_created` does not use any of the arguments below,
            # so we set them to empty values.
conf_catalog={},
conf_creds={},
feed_dict={},
save_version="",
load_versions="",
run_id=dummy_run_params["run_id"],
)
pipeline_hook.before_pipeline_run(
run_params=dummy_run_params,
pipeline=pipeline_ml_with_parameters,
catalog=catalog_with_parameters,
)
runner.run(pipeline_ml_with_parameters, catalog_with_parameters)
current_run_id = mlflow.active_run().info.run_id
# This is what we want to test: model must be saved and the parameters automatically persisted on disk
pipeline_hook.after_pipeline_run(
run_params=dummy_run_params,
pipeline=pipeline_ml_with_parameters,
catalog=catalog_with_parameters,
)
# the 2 parameters which are inputs of inference pipeline
# must have been persisted and logged inside the model's artifacts
model = mlflow.pyfunc.load_model(f"runs:/{current_run_id}/model")
assert set(
model.metadata.to_dict()["flavors"]["python_function"]["artifacts"].keys()
) == {"model", "params:stopwords", "params:threshold"}
# the model should be loadable and predict() should work (this tests KedroPipelineModel)
assert model.predict(pd.DataFrame(data=[1], columns=["a"])).values[0][0] == 1
@pytest.mark.parametrize(
"model_signature,expected_signature",
(
[None, None],
["auto", pytest.lazy_fixture("dummy_signature")],
[
pytest.lazy_fixture("dummy_signature"),
pytest.lazy_fixture("dummy_signature"),
],
),
)
def test_mlflow_pipeline_hook_with_pipeline_ml_signature(
kedro_project_with_mlflow_conf,
env_from_dict,
dummy_pipeline,
dummy_catalog,
dummy_run_params,
model_signature,
expected_signature,
):
# config_with_base_mlflow_conf is a conftest fixture
bootstrap_project(kedro_project_with_mlflow_conf)
with KedroSession.create(project_path=kedro_project_with_mlflow_conf):
pipeline_hook = MlflowPipelineHook()
runner = SequentialRunner()
pipeline_to_run = pipeline_ml_factory(
training=dummy_pipeline.only_nodes_with_tags("training"),
inference=dummy_pipeline.only_nodes_with_tags("inference"),
input_name="raw_data",
conda_env=env_from_dict,
model_name="model",
model_signature=model_signature,
)
pipeline_hook.after_catalog_created(
catalog=dummy_catalog,
            # `after_catalog_created` does not use any of the arguments below,
            # so we set them to empty values.
conf_catalog={},
conf_creds={},
feed_dict={},
save_version="",
load_versions="",
run_id=dummy_run_params["run_id"],
)
pipeline_hook.before_pipeline_run(
run_params=dummy_run_params, pipeline=pipeline_to_run, catalog=dummy_catalog
)
runner.run(pipeline_to_run, dummy_catalog)
run_id = mlflow.active_run().info.run_id
pipeline_hook.after_pipeline_run(
run_params=dummy_run_params, pipeline=pipeline_to_run, catalog=dummy_catalog
)
# test : parameters should have been logged
trained_model = mlflow.pyfunc.load_model(f"runs:/{run_id}/model")
assert trained_model.metadata.signature == expected_signature
def test_generate_kedro_commands():
# TODO : add a better test because the formatting of record_data is subject to change
    # We could check that the command is recorded and then rerun properly
record_data = {
"tags": ["tag1", "tag2"],
"from_nodes": ["node1"],
"to_nodes": ["node3"],
"node_names": ["node1", "node2", "node1"],
"from_inputs": ["data_in"],
"load_versions": {"data_inter": "01:23:45"},
"pipeline_name": "fake_pl",
}
expected = "kedro run --from-inputs=data_in --from-nodes=node1 --to-nodes=node3 --node=node1,node2,node1 --pipeline=fake_pl --tag=tag1,tag2 --load-version=data_inter:01:23:45"
assert _generate_kedro_command(**record_data) == expected
@pytest.mark.parametrize("default_value", [None, []])
def test_generate_default_kedro_commands(default_value):
"""This test ensures that the _generate_kedro_comands accepts both
`None` and empty `list` as default value, because CLI and interactive
`Journal` do not use the same default.
Args:
default_value ([type]): [description]
"""
record_data = {
"tags": default_value,
"from_nodes": default_value,
"to_nodes": default_value,
"node_names": default_value,
"from_inputs": default_value,
"load_versions": default_value,
"pipeline_name": "fake_pl",
}
expected = "kedro run --pipeline=fake_pl"
assert _generate_kedro_command(**record_data) == expected
def test_on_pipeline_error(
kedro_project_with_mlflow_conf,
mock_settings_with_mlflow_hooks,
mock_failing_pipelines,
):
tracking_uri = (kedro_project_with_mlflow_conf / "mlruns").as_uri()
bootstrap_project(kedro_project_with_mlflow_conf)
with KedroSession.create(project_path=kedro_project_with_mlflow_conf) as session:
with pytest.raises(ValueError):
session.run()
# the run we want is the last one in Default experiment
failing_run_info = MlflowClient(tracking_uri).list_run_infos("0")[0]
assert mlflow.active_run() is None # the run must have been closed
assert failing_run_info.status == RunStatus.to_string(
RunStatus.FAILED
) # it must be marked as failed
|
import logging
import numpy as np
from scipy.stats import ks_2samp, describe
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from test_harness.experiments.baseline_experiment import BaselineExperiment
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
file_handler = logging.FileHandler("../logs/app.log")
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
class UncertaintyKSExperiment(BaselineExperiment):
def __init__(self, model, dataset, k, significance_thresh, param_grid=None):
super().__init__(model, dataset, param_grid)
self.name = "Method 2 (Uncertainty-KS)"
self.k = k
self.significance_thresh = significance_thresh
self.ref_distributions = []
self.det_distributions = []
self.p_vals = []
@staticmethod
def make_kfold_predictions(X, y, model, dataset, k):
"""A KFold version of LeaveOneOut predictions.
Rather than performing exhaustive leave-one-out methodology to get predictions
for each observation, we use a less exhaustive KFold approach.
When k == len(X), this is equivalent to LeaveOneOut: expensive, but robust. Reducing k
saves computation, but reduces robustness of model.
Args:
            X (pd.DataFrame) - features in evaluation window
            y (pd.Series) - labels in evaluation window
            model - classifier fitted inside each fold's pipeline
            dataset - dataset providing the column_mapping used by the ColumnTransformer
            k (int) - number of folds
        Returns:
            preds (np.array) - an array of predictions for each X in the input (NOT IN ORDER OF INPUT)
            split_ACCs (np.array) - accuracy on each fold's held-out split
"""
# NOTE - need to think through if this should be a pipeline with MinMaxScaler...???
splitter = StratifiedKFold(n_splits=k, random_state=42, shuffle=True)
# splitter = LeaveOneOut()
preds = np.array([])
split_ACCs = np.array([])
for train_indicies, test_indicies in splitter.split(X, y):
# create column transformer
column_transformer = ColumnTransformer(
[
(
"continuous",
StandardScaler(),
dataset.column_mapping["numerical_features"],
),
(
"categorical",
"passthrough",
dataset.column_mapping["categorical_features"],
),
]
)
# instantiate training pipeline
pipe = Pipeline(
steps=[
("scaler", column_transformer),
("clf", model),
]
)
# fit it
pipe.fit(X.iloc[train_indicies], y.iloc[train_indicies])
# score it on this Kfold's test data
y_preds_split = pipe.predict_proba(X.iloc[test_indicies])
y_preds_split_posclass_proba = y_preds_split[:, 1]
preds = np.append(preds, y_preds_split_posclass_proba)
# get accuracy for split
split_ACC = pipe.score(X.iloc[test_indicies], y.iloc[test_indicies])
split_ACCs = np.append(split_ACCs, split_ACC)
logger.info(f"FINAL SHAPE kfold preds: {preds.shape}")
return preds, split_ACCs
def get_reference_response_distribution(self):
# get data in reference window
window_idx = self.reference_window_idx
logger.info(f"GETTING REFERENCE DISTRIBUTION FOR WINDOW: {window_idx}")
X_train, y_train = self.dataset.get_window_data(window_idx, split_labels=True)
# perform kfoldsplits to get predictions
preds, split_ACCs = self.make_kfold_predictions(
X_train, y_train, self.model, self.dataset, self.k
)
ref_ACC = np.mean(split_ACCs)
ref_ACC_SD = np.std(split_ACCs)
return preds, ref_ACC, ref_ACC_SD
def get_detection_response_distribution(self):
# get data in prediction window
window_idx = self.detection_window_idx
logger.info(f"GETTING DETECTION DISTRIBUTION FOR WINDOW: {window_idx}")
X_test, y_test = self.dataset.get_window_data(window_idx, split_labels=True)
# use trained model to get response distribution
preds = self.trained_model.predict_proba(X_test)[:, 1]
# get accuracy for detection window
det_ACC = self.evaluate_model_aggregate(window="detection")
return preds, det_ACC
@staticmethod
def perform_ks_test(dist1, dist2):
return ks_2samp(dist1, dist2)
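    # Hedged example of the KS decision used in run() below (inputs are
    # illustrative): ks_2samp returns a result whose .pvalue is compared
    # against significance_thresh, e.g.
    #   ks_2samp([0.1, 0.2, 0.3, 0.4], [0.6, 0.7, 0.8, 0.9]).pvalue
    # is small, which would be treated as a significant distribution change.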
def calculate_errors(self):
self.false_positives = [
True if self.drift_signals[i] and not self.drift_occurences[i] else False
for i in range(len(self.drift_signals))
]
self.false_negatives = [
True if not self.drift_signals[i] and self.drift_occurences[i] else False
for i in range(len(self.drift_signals))
]
def run(self):
"""Response Uncertainty Experiment
This experiment uses a KS test to detect changes in the target/response distribution between
the reference and detection windows.
Logic flow:
- Train on initial reference window
- Perform Stratified KFold to obtain prediction distribution on reference window
- Use trained model to generate predictions on detection window
- Perform statistical test (KS) between reference and detection window response distributions
- If different, retrain and update both windows
- If from same distribution, update detection window and repeat
"""
        logger.info(
            f"-------------------- Started {self.name} Run --------------------"
        )
self.train_model_gscv(window="reference", gscv=True)
CALC_REF_RESPONSE = True
for i, split in enumerate(self.dataset.splits):
if i > self.reference_window_idx:
logger.info(f"Dataset index of split end: {self.dataset.splits[i]}")
logger.info(
f"Need to calculate Reference response distribution? - {CALC_REF_RESPONSE}"
)
# log actual score on detection window
self.experiment_metrics["scores"].extend(
self.evaluate_model_incremental(n=10)
)
# get reference window response distribution with kfold + detection response distribution
if CALC_REF_RESPONSE:
(
ref_response_dist,
ref_ACC,
ref_ACC_SD,
) = self.get_reference_response_distribution()
det_response_dist, det_ACC = self.get_detection_response_distribution()
logger.info(f"REFERENCE STATS: {describe(ref_response_dist)}")
logger.info(f"DETECTION STATS: {describe(det_response_dist)}")
logger.info(f"Dataset Split: {i}")
logger.info(f"REFERENCE STATS: {describe(ref_response_dist)}")
logger.info(f"DETECTION STATS: {describe(det_response_dist)}")
self.ref_distributions.append(ref_response_dist)
self.det_distributions.append(det_response_dist)
# compare distributions
ks_result = self.perform_ks_test(
dist1=ref_response_dist, dist2=det_response_dist
)
self.p_vals.append(ks_result.pvalue)
logger.info(f"KS Test: {ks_result}")
                significant_change = ks_result.pvalue < self.significance_thresh
self.drift_signals.append(significant_change)
# compare accuracies to see if detection was false alarm
# i.e. check if change in accuracy is significant
delta_ACC = np.absolute(det_ACC - ref_ACC)
threshold_ACC = 3 * ref_ACC_SD # considering outside 3 SD significant
                significant_ACC_change = delta_ACC > threshold_ACC
self.drift_occurences.append(significant_ACC_change)
if significant_change:
# reject null hyp, distributions are NOT identical --> retrain
self.train_model_gscv(window="detection", gscv=True)
self.update_reference_window()
CALC_REF_RESPONSE = True
_ks_result_report = "FAILED"
else:
CALC_REF_RESPONSE = False
_ks_result_report = "PASSED"
self.update_detection_window()
logger.info(f"KS Test Result: {_ks_result_report} | {ks_result}")
self.calculate_label_expense()
self.calculate_train_expense()
self.calculate_errors()
|
import sys
import unittest
import six
from conans.errors import ConanException
from conans.model.options import Options, OptionsValues, PackageOptionValues, PackageOptions, \
option_undefined_msg
from conans.model.ref import ConanFileReference
class OptionsTest(unittest.TestCase):
def setUp(self):
package_options = PackageOptions.loads("""{static: [True, False],
optimized: [2, 3, 4],
path: ANY}""")
values = PackageOptionValues()
values.add_option("static", True)
values.add_option("optimized", 3)
values.add_option("path", "NOTDEF")
package_options.values = values
self.sut = Options(package_options)
def test_int(self):
self.assertEqual(3, int(self.sut.optimized))
def test_in(self):
package_options = PackageOptions.loads("{static: [True, False]}")
sut = Options(package_options)
self.assertTrue("static" in sut)
self.assertFalse("shared" in sut)
self.assertTrue("shared" not in sut)
self.assertFalse("static" not in sut)
def test_undefined_value(self):
""" Not assigning a value to options will raise an error at validate() step
"""
package_options = PackageOptions.loads("""{
path: ANY}""")
with six.assertRaisesRegex(self, ConanException, option_undefined_msg("path")):
package_options.validate()
package_options.path = "Something"
package_options.validate()
def test_undefined_value_none(self):
""" The value None is allowed as default, not necessary to default to it
"""
package_options = PackageOptions.loads('{path: [None, "Other"]}')
package_options.validate()
package_options = PackageOptions.loads('{path: ["None", "Other"]}')
package_options.validate()
def test_items(self):
self.assertEqual(self.sut.items(), [("optimized", "3"), ("path", "NOTDEF"),
("static", "True")])
self.assertEqual(self.sut.items(), [("optimized", "3"), ("path", "NOTDEF"),
("static", "True")])
def test_change(self):
self.sut.path = "C:/MyPath"
self.assertEqual(self.sut.items(), [("optimized", "3"), ("path", "C:/MyPath"),
("static", "True")])
self.assertEqual(self.sut.items(), [("optimized", "3"), ("path", "C:/MyPath"),
("static", "True")])
with six.assertRaisesRegex(self, ConanException,
"'5' is not a valid 'options.optimized' value"):
self.sut.optimized = 5
def test_boolean(self):
self.sut.static = False
self.assertFalse(self.sut.static)
self.assertTrue(not self.sut.static)
self.assertTrue(self.sut.static == False)
self.assertFalse(self.sut.static == True)
self.assertFalse(self.sut.static != False)
self.assertTrue(self.sut.static != True)
self.assertTrue(self.sut.static == "False")
self.assertTrue(self.sut.static != "True")
def test_basic(self):
boost_values = PackageOptionValues()
boost_values.add_option("static", False)
boost_values.add_option("thread", True)
boost_values.add_option("thread.multi", "off")
poco_values = PackageOptionValues()
poco_values.add_option("deps_bundled", True)
hello1_values = PackageOptionValues()
hello1_values.add_option("static", False)
hello1_values.add_option("optimized", 4)
options = {"Boost": boost_values,
"Poco": poco_values,
"Hello1": hello1_values}
down_ref = ConanFileReference.loads("Hello0/0.1@diego/testing")
own_ref = ConanFileReference.loads("Hello1/0.1@diego/testing")
self.sut.propagate_upstream(options, down_ref, own_ref)
self.assertEqual(self.sut.values.as_list(), [("optimized", "4"),
("path", "NOTDEF"),
("static", "False"),
("Boost:static", "False"),
("Boost:thread", "True"),
("Boost:thread.multi", "off"),
("Poco:deps_bundled", "True")])
boost_values = PackageOptionValues()
boost_values.add_option("static", 2)
boost_values.add_option("thread", "Any")
boost_values.add_option("thread.multi", "on")
poco_values = PackageOptionValues()
poco_values.add_option("deps_bundled", "What")
hello1_values = PackageOptionValues()
hello1_values.add_option("static", True)
hello1_values.add_option("optimized", "2")
options2 = {"Boost": boost_values,
"Poco": poco_values,
"Hello1": hello1_values}
down_ref = ConanFileReference.loads("Hello2/0.1@diego/testing")
with six.assertRaisesRegex(self, ConanException, "Hello2/0.1@diego/testing tried to change "
"Hello1/0.1@diego/testing option optimized to 2"):
self.sut.propagate_upstream(options2, down_ref, own_ref)
self.assertEqual(self.sut.values.dumps(),
"""optimized=4
path=NOTDEF
static=False
Boost:static=False
Boost:thread=True
Boost:thread.multi=off
Poco:deps_bundled=True""")
def test_pattern_positive(self):
boost_values = PackageOptionValues()
boost_values.add_option("static", False)
boost_values.add_option("path", "FuzzBuzz")
options = {"Boost.*": boost_values}
own_ref = ConanFileReference.loads("Boost.Assert/0.1@diego/testing")
down_ref = ConanFileReference.loads("Consumer/0.1@diego/testing")
self.sut.propagate_upstream(options, down_ref, own_ref)
self.assertEqual(self.sut.values.as_list(), [("optimized", "3"),
("path", "FuzzBuzz"),
("static", "False"),
("Boost.*:path", "FuzzBuzz"),
("Boost.*:static", "False"),
])
def test_multi_pattern(self):
boost_values = PackageOptionValues()
boost_values.add_option("static", False)
boost_values.add_option("path", "FuzzBuzz")
boost_values2 = PackageOptionValues()
boost_values2.add_option("optimized", 2)
options = {"Boost.*": boost_values,
"*": boost_values2}
own_ref = ConanFileReference.loads("Boost.Assert/0.1@diego/testing")
down_ref = ConanFileReference.loads("Consumer/0.1@diego/testing")
self.sut.propagate_upstream(options, down_ref, own_ref)
self.assertEqual(self.sut.values.as_list(), [("optimized", "2"),
("path", "FuzzBuzz"),
("static", "False"),
('*:optimized', '2'),
("Boost.*:path", "FuzzBuzz"),
("Boost.*:static", "False"),
])
def test_multi_pattern_error(self):
boost_values = PackageOptionValues()
boost_values.add_option("optimized", 4)
boost_values2 = PackageOptionValues()
boost_values2.add_option("optimized", 2)
options = {"Boost.*": boost_values,
"*": boost_values2}
own_ref = ConanFileReference.loads("Boost.Assert/0.1@diego/testing")
down_ref = ConanFileReference.loads("Consumer/0.1@diego/testing")
self.sut.propagate_upstream(options, down_ref, own_ref)
self.assertEqual(self.sut.values.as_list(), [('optimized', '4'),
('path', 'NOTDEF'),
('static', 'True'),
('*:optimized', '2'),
('Boost.*:optimized', '4')])
def test_all_positive(self):
boost_values = PackageOptionValues()
boost_values.add_option("static", False)
boost_values.add_option("path", "FuzzBuzz")
options = {"*": boost_values}
own_ref = ConanFileReference.loads("Boost.Assert/0.1@diego/testing")
down_ref = ConanFileReference.loads("Consumer/0.1@diego/testing")
self.sut.propagate_upstream(options, down_ref, own_ref)
self.assertEqual(self.sut.values.as_list(), [("optimized", "3"),
("path", "FuzzBuzz"),
("static", "False"),
("*:path", "FuzzBuzz"),
("*:static", "False"),
])
def test_pattern_ignore(self):
boost_values = PackageOptionValues()
boost_values.add_option("fake_option", "FuzzBuzz")
options = {"Boost.*": boost_values}
down_ref = ConanFileReference.loads("Consumer/0.1@diego/testing")
own_ref = ConanFileReference.loads("Boost.Assert/0.1@diego/testing")
self.sut.propagate_upstream(options, down_ref, own_ref)
self.assertEqual(self.sut.values.as_list(), [("optimized", "3"),
("path", "NOTDEF"),
("static", "True"),
("Boost.*:fake_option", "FuzzBuzz"),
])
def test_pattern_unmatch(self):
boost_values = PackageOptionValues()
boost_values.add_option("fake_option", "FuzzBuzz")
options = {"OpenSSL.*": boost_values}
down_ref = ConanFileReference.loads("Boost.Assert/0.1@diego/testing")
own_ref = ConanFileReference.loads("Boost.Assert/0.1@diego/testing")
self.sut.propagate_upstream(options, down_ref, own_ref)
self.assertEqual(self.sut.values.as_list(), [("optimized", "3"),
("path", "NOTDEF"),
("static", "True"),
("OpenSSL.*:fake_option", "FuzzBuzz"),
])
def test_get_safe_options(self):
self.assertEqual(True, self.sut.get_safe("static"))
self.assertEqual(3, self.sut.get_safe("optimized"))
self.assertEqual("NOTDEF", self.sut.get_safe("path"))
self.assertEqual(None, self.sut.get_safe("unknown"))
self.sut.path = "None"
self.sut.static = False
self.assertEqual(False, self.sut.get_safe("static"))
self.assertEqual("None", self.sut.get_safe("path"))
self.assertEqual(False, self.sut.get_safe("static", True))
self.assertEqual("None", self.sut.get_safe("path", True))
self.assertEqual(True, self.sut.get_safe("unknown", True))
class OptionsValuesPropagationUpstreamNone(unittest.TestCase):
def test_propagate_in_options(self):
package_options = PackageOptions.loads("""{opt: [None, "a", "b"],}""")
values = PackageOptionValues()
values.add_option("opt", "a")
package_options.values = values
sut = Options(package_options)
other_options = PackageOptionValues()
other_options.add_option("opt", None)
options = {"whatever.*": other_options}
down_ref = ConanFileReference.loads("Boost.Assert/0.1@diego/testing")
own_ref = ConanFileReference.loads("Boost.Assert/0.1@diego/testing")
sut.propagate_upstream(options, down_ref, own_ref)
self.assertEqual(sut.values.as_list(), [("opt", "a"),
("whatever.*:opt", "None"),
])
    def test_propagate_in_package_options(self):
package_options = PackageOptions.loads("""{opt: [None, "a", "b"],}""")
values = PackageOptionValues()
package_options.values = values
package_options.propagate_upstream({'opt': None}, None, None, [])
self.assertEqual(package_options.values.items(), [('opt', 'None'), ])
class OptionsValuesTest(unittest.TestCase):
def setUp(self):
self.sut = OptionsValues.loads("""static=True
optimized=3
Poco:deps_bundled=True
Boost:static=False
Boost:thread=True
Boost:thread.multi=off
""")
def test_from_list(self):
option_values = OptionsValues(self.sut.as_list())
self.assertEqual(option_values.dumps(), self.sut.dumps())
def test_from_dict(self):
options_as_dict = dict([item.split('=') for item in self.sut.dumps().splitlines()])
option_values = OptionsValues(options_as_dict)
self.assertEqual(option_values.dumps(), self.sut.dumps())
def test_consistency(self):
def _check_equal(hs1, hs2, hs3, hs4):
opt_values1 = OptionsValues(hs1)
opt_values2 = OptionsValues(hs2)
opt_values3 = OptionsValues(hs3)
opt_values4 = OptionsValues(hs4)
self.assertEqual(opt_values1.dumps(), opt_values2.dumps())
self.assertEqual(opt_values1.dumps(), opt_values3.dumps())
self.assertEqual(opt_values1.dumps(), opt_values4.dumps())
# Check that all possible input options give the same result
_check_equal([('opt', 3)], [('opt', '3'), ], ('opt=3', ), {'opt': 3})
_check_equal([('opt', True)], [('opt', 'True'), ], ('opt=True', ), {'opt': True})
_check_equal([('opt', False)], [('opt', 'False'), ], ('opt=False', ), {'opt': False})
_check_equal([('opt', None)], [('opt', 'None'), ], ('opt=None', ), {'opt': None})
_check_equal([('opt', 0)], [('opt', '0'), ], ('opt=0', ), {'opt': 0})
_check_equal([('opt', '')], [('opt', ''), ], ('opt=', ), {'opt': ''})
# Check for leading and trailing spaces
_check_equal([(' opt ', 3)], [(' opt ', '3'), ], (' opt =3', ), {' opt ': 3})
_check_equal([('opt', ' value ')], [('opt', ' value '), ], ('opt= value ', ),
{'opt': ' value '})
# This is expected behaviour:
self.assertNotEqual(OptionsValues([('opt', ''), ]).dumps(),
OptionsValues(('opt=""', )).dumps())
def test_dumps(self):
self.assertEqual(self.sut.dumps(), "\n".join(["optimized=3",
"static=True",
"Boost:static=False",
"Boost:thread=True",
"Boost:thread.multi=off",
"Poco:deps_bundled=True"]))
def test_sha_constant(self):
self.assertEqual(self.sut.sha,
"2442d43f1d558621069a15ff5968535f818939b5")
self.sut.new_option = False
self.sut["Boost"].new_option = "off"
self.sut["Poco"].new_option = 0
self.assertEqual(self.sut.dumps(), "\n".join(["new_option=False",
"optimized=3",
"static=True",
"Boost:new_option=off",
"Boost:static=False",
"Boost:thread=True",
"Boost:thread.multi=off",
"Poco:deps_bundled=True",
"Poco:new_option=0"]))
self.assertEqual(self.sut.sha,
"2442d43f1d558621069a15ff5968535f818939b5")
def test_loads_exceptions(self):
emsg = "not enough values to unpack" if six.PY3 and sys.version_info.minor > 4 \
else "need more than 1 value to unpack"
with six.assertRaisesRegex(self, ValueError, emsg):
OptionsValues.loads("a=2\nconfig\nb=3")
with six.assertRaisesRegex(self, ValueError, emsg):
OptionsValues.loads("config\na=2\ncommit\nb=3")
def test_exceptions_empty_value(self):
emsg = "not enough values to unpack" if six.PY3 and sys.version_info.minor > 4 \
else "need more than 1 value to unpack"
with six.assertRaisesRegex(self, ValueError, emsg):
OptionsValues("a=2\nconfig\nb=3")
with six.assertRaisesRegex(self, ValueError, emsg):
OptionsValues(("a=2", "config"))
with six.assertRaisesRegex(self, ValueError, emsg):
OptionsValues([('a', 2), ('config', ), ])
def test_exceptions_repeated_value(self):
try:
OptionsValues.loads("a=2\na=12\nb=3").dumps()
OptionsValues(("a=2", "b=23", "a=12"))
OptionsValues([('a', 2), ('b', True), ('a', '12')])
except Exception as e:
self.fail("Not expected exception: {}".format(e))
def test_package_with_spaces(self):
self.assertEqual(OptionsValues([('pck2:opt', 50), ]).dumps(),
OptionsValues([('pck2 :opt', 50), ]).dumps())
def test_validate_any_as_list():
package_options = PackageOptions.loads("""{
path: ["ANY", "kk"]}""")
values = PackageOptionValues()
values.add_option("path", "FOO")
package_options.values = values
sut = Options(package_options)
assert sut.path == "FOO"
package_options = PackageOptions.loads("""{
path: "ANY"}""")
values = PackageOptionValues()
values.add_option("path", "WHATEVER")
package_options.values = values
sut = Options(package_options)
assert sut.path == "WHATEVER"
|
import hashlib
import hmac
import logging
import os
import tornado.escape
import tornado.httpserver
import tornado.gen
import tornado.ioloop
import tornado.log
import tornado.web
from . import update_pr
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.set_status(404)
self.write_error(404)
class WebhookHandler(tornado.web.RequestHandler):
@tornado.gen.coroutine
def post(self):
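        """Verify the GitHub webhook HMAC signature, then handle 'ping' and 'pull_request' events."""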
headers = self.request.headers
event = headers.get('X-GitHub-Event', None)
hmac_digest = headers.get('X-Hub-Signature', None)
webhook_secret = os.environ['WEBHOOK_SECRET'].encode()
# Compute the payload's hmac digest.
expected_hmac = hmac.new(
webhook_secret, self.request.body, hashlib.sha1).hexdigest()
expected_digest = 'sha1={}'.format(expected_hmac)
        if hmac_digest != expected_digest:
            logging.warning('HMAC FAIL: expected: {}; got: {};'
                            ''.format(expected_digest, hmac_digest))
            self.set_status(403)
            return  # do not process the event when the signature check fails
if event == 'ping':
self.write('pong')
elif event == 'pull_request':
body = tornado.escape.json_decode(self.request.body)
repo_name = body['repository']['name']
owner = body['repository']['owner']['login']
pr_id = int(body['pull_request']['number'])
is_open = body['pull_request']['state'] == 'open'
            # Do some sanity checking
if is_open and owner.lower() in ['scitools', 'scitools-incubator']:
yield update_pr.check_pr('{}/{}'.format(owner, repo_name),
pr_id)
else:
self.write('Unhandled event "{}".'.format(event))
self.set_status(404)
def main():
tornado.log.enable_pretty_logging()
application = tornado.web.Application([
(r"/", MainHandler),
(r"/webhook", WebhookHandler),
])
http_server = tornado.httpserver.HTTPServer(application)
PORT = os.environ.get('PORT', 8080)
http_server.listen(PORT)
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
|
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QGridLayout, QPushButton
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
def window():
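    """Build a 5x5 grid of QPushButtons labelled with their row/column index and show the window."""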
app = QApplication(sys.argv)
win = QWidget()
grid = QGridLayout()
for i in range(0,5):
for j in range(0,5):
grid.addWidget(QPushButton(str(i)+str(j)),i,j)
win.setLayout(grid)
win.setWindowTitle("PyQt Grid Example")
win.setGeometry(50,50,200,200)
win.show()
sys.exit(app.exec_())
if __name__ == '__main__':
window()
|
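# Global node registry: CLS_node stores its children as indices into nodeList
# rather than direct object references.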
nodeList = []
class CLS_node(object):
def __init__(self,v,i,l,r):
self.left = l
self.right = r
self.ind = i
self.value = v
def __str__(self):
return str(nodeList[self.left])+str(self.value)+str(nodeList[self.right])
|
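# Count pairs of six-digit cube face sets that together can display every two-digit
# square 01..81, with 6 and 9 treated as interchangeable (9 is mapped to 6) -
# a Project Euler problem 90 style search.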
from itertools import combinations,product
comb = list(combinations([0,1,2,3,4,5,6,7,8,9],6))
comb = [[6 if y==9 else y for y in x ] for x in comb]
valid = [tuple(map(int, list(str(s*s).zfill(2)))) for s in range(1,10)]
valid.remove((0,9))
valid.remove((4,9))
valid.append((0,6))
valid.append((4,6))
def isvalid(comb1,comb2):
combx = list(product(comb1, comb2))+list(product(comb2,comb1))
for i in valid:
if i not in combx:
return False
return True
res=0
for i in range(len(comb)):
for j in range(i+1,len(comb)):
if isvalid(comb[i],comb[j])==True:
res+=1
'''
a = [0,5,6,7,8,6]
b = [1,2,3,4,8,6]
print(isvalid(a,b))
'''
print(res)
|
from .main import greet
if __name__ == '__main__':
greet()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from copy import deepcopy
from math import isfinite, isnan
import numpy as np
from ax.core.metric import Metric
from ax.core.objective import Objective
from ax.core.observation import ObservationData
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import OutcomeConstraint, ScalarizedOutcomeConstraint
from ax.core.types import ComparisonOp
from ax.modelbridge.transforms.power_transform_y import (
PowerTransformY,
_compute_power_transforms,
_compute_inverse_bounds,
)
from ax.modelbridge.transforms.utils import get_data, match_ci_width_truncated
from ax.utils.common.testutils import TestCase
from sklearn.preprocessing import PowerTransformer
def get_constraint(metric, bound, relative):
return [
OutcomeConstraint(
metric=metric, op=ComparisonOp.GEQ, bound=bound, relative=relative
)
]
class PowerTransformYTest(TestCase):
def setUp(self):
self.obsd1 = ObservationData(
metric_names=["m1", "m2"],
means=np.array([0.5, 0.9]),
covariance=np.array([[0.03, 0.0], [0.0, 0.001]]),
)
self.obsd2 = ObservationData(
metric_names=["m1", "m2"],
means=np.array([0.1, 0.4]),
covariance=np.array([[0.005, 0.0], [0.0, 0.05]]),
)
self.obsd3 = ObservationData(
metric_names=["m1", "m2"],
means=np.array([0.9, 0.8]),
covariance=np.array([[0.02, 0.0], [0.0, 0.01]]),
)
self.obsd_nan = ObservationData(
metric_names=["m1", "m2"],
means=np.array([0.3, 0.2]),
covariance=np.array([[float("nan"), 0.0], [0.0, float("nan")]]),
)
def testInit(self):
shared_init_args = {
"search_space": None,
"observation_features": None,
"observation_data": [self.obsd1, self.obsd2],
}
# Test error for not specifying a config
with self.assertRaises(ValueError):
PowerTransformY(**shared_init_args)
# Test error for not specifying at least one metric
with self.assertRaises(ValueError):
PowerTransformY(**shared_init_args, config={})
# Test default init
for m in ["m1", "m2"]:
tf = PowerTransformY(**shared_init_args, config={"metrics": [m]})
# tf.power_transforms should only exist for m and be a PowerTransformer
self.assertIsInstance(tf.power_transforms, dict)
self.assertEqual([*tf.power_transforms], [m]) # Check keys
self.assertIsInstance(tf.power_transforms[m], PowerTransformer)
# tf.inv_bounds should only exist for m and be a tuple of length 2
self.assertIsInstance(tf.inv_bounds, dict)
self.assertEqual([*tf.inv_bounds], [m]) # Check keys
self.assertIsInstance(tf.inv_bounds[m], tuple)
self.assertTrue(len(tf.inv_bounds[m]) == 2)
def testGetData(self):
for m in ["m1", "m2"]:
Ys = get_data([self.obsd1, self.obsd2, self.obsd3], m)
self.assertIsInstance(Ys, dict)
self.assertEqual([*Ys], [m])
if m == "m1":
self.assertEqual(Ys[m], [0.5, 0.1, 0.9])
else:
self.assertEqual(Ys[m], [0.9, 0.4, 0.8])
def testComputePowerTransform(self):
Ys = get_data([self.obsd1, self.obsd2, self.obsd3], ["m2"])
pts = _compute_power_transforms(Ys)
self.assertEqual(pts["m2"].method, "yeo-johnson")
self.assertIsInstance(pts["m2"].lambdas_, np.ndarray)
self.assertEqual(pts["m2"].lambdas_.shape, (1,))
Y_np = np.array(Ys["m2"])[:, None]
Y_trans = pts["m2"].transform(Y_np)
# Output should be standardized
self.assertAlmostEqual(Y_trans.mean(), 0.0)
self.assertAlmostEqual(Y_trans.std(), 1.0)
# Transform back
Y_np2 = pts["m2"].inverse_transform(Y_trans)
self.assertAlmostEqual(np.max(np.abs(Y_np - Y_np2)), 0.0)
def testComputeInverseBounds(self):
Ys = get_data([self.obsd1, self.obsd2, self.obsd3], ["m2"])
pt = _compute_power_transforms(Ys)["m2"]
# lambda < 0: im(f) = (-inf, -1/lambda) without standardization
pt.lambdas_.fill(-2.5)
bounds = _compute_inverse_bounds({"m2": pt})["m2"]
self.assertEqual(bounds[0], -np.inf)
# Make sure we got the boundary right
left = pt.inverse_transform(np.array(bounds[1] - 0.01, ndmin=2))
right = pt.inverse_transform(np.array(bounds[1] + 0.01, ndmin=2))
self.assertTrue(isnan(right) and not isnan(left))
# 0 <= lambda <= 2: im(f) = R
pt.lambdas_.fill(1.0)
bounds = _compute_inverse_bounds({"m2": pt})["m2"]
self.assertTrue(bounds == (-np.inf, np.inf))
# lambda > 2: im(f) = (1 / (2 - lambda), inf) without standardization
pt.lambdas_.fill(3.5)
bounds = _compute_inverse_bounds({"m2": pt})["m2"]
self.assertEqual(bounds[1], np.inf)
# Make sure we got the boundary right
left = pt.inverse_transform(np.array(bounds[0] - 0.01, ndmin=2))
right = pt.inverse_transform(np.array(bounds[0] + 0.01, ndmin=2))
self.assertTrue(not isnan(right) and isnan(left))
def testMatchCIWidth(self):
Ys = get_data([self.obsd1, self.obsd2, self.obsd3], ["m2"])
pt = _compute_power_transforms(Ys)
pt["m2"].lambdas_.fill(-3.0)
bounds = _compute_inverse_bounds(pt)["m2"]
# Both will be NaN since we are far outside the bounds
new_mean_1, new_var_1 = match_ci_width_truncated(
mean=bounds[1] + 2.0,
variance=0.1,
transform=lambda y: pt["m2"].inverse_transform(np.array(y, ndmin=2)),
lower_bound=bounds[0],
upper_bound=bounds[1],
margin=0.001,
clip_mean=False,
)
# This will be finite since we clip
new_mean_2, new_var_2 = match_ci_width_truncated(
mean=bounds[1] + 2.0,
variance=0.1,
transform=lambda y: pt["m2"].inverse_transform(np.array(y, ndmin=2)),
lower_bound=bounds[0],
upper_bound=bounds[1],
margin=0.001,
clip_mean=True,
)
self.assertTrue(isnan(new_mean_1) and isnan(new_var_1))
self.assertTrue(isfinite(new_mean_2) and isfinite(new_var_2))
def testTransformAndUntransformOneMetric(self):
observation_data = [deepcopy(self.obsd1), deepcopy(self.obsd2)]
pt = PowerTransformY(
search_space=None,
observation_features=None,
observation_data=observation_data,
config={"metrics": ["m1"]},
)
        # Transform the data and make sure we don't touch m2
observation_data_tf = pt.transform_observation_data(observation_data, [])
for obsd, obsd_orig in zip(observation_data_tf, [self.obsd1, self.obsd2]):
self.assertNotAlmostEqual(obsd.means[0], obsd_orig.means[0])
self.assertNotAlmostEqual(obsd.covariance[0][0], obsd_orig.covariance[0][0])
self.assertAlmostEqual(obsd.means[1], obsd_orig.means[1])
self.assertAlmostEqual(obsd.covariance[1][1], obsd_orig.covariance[1][1])
# Untransform the data and make sure the means are the same
observation_data_untf = pt.untransform_observation_data(observation_data_tf, [])
for obsd, obsd_orig in zip(observation_data_untf, [self.obsd1, self.obsd2]):
self.assertAlmostEqual(obsd.means[0], obsd_orig.means[0], places=4)
self.assertAlmostEqual(obsd.means[1], obsd_orig.means[1], places=4)
# NaN covar values remain as NaNs
transformed_obsd_nan = pt.transform_observation_data(
[deepcopy(self.obsd_nan)], []
)[0]
cov_results = np.array(transformed_obsd_nan.covariance)
self.assertTrue(np.all(np.isnan(np.diag(cov_results))))
def testTransformAndUntransformAllMetrics(self):
observation_data = [deepcopy(self.obsd1), deepcopy(self.obsd2)]
pt = PowerTransformY(
search_space=None,
observation_features=None,
observation_data=observation_data,
config={"metrics": ["m1", "m2"]},
)
observation_data_tf = pt.transform_observation_data(observation_data, [])
for obsd, obsd_orig in zip(observation_data_tf, [self.obsd1, self.obsd2]):
for i in range(2): # Both metrics should be transformed
self.assertNotAlmostEqual(obsd.means[i], obsd_orig.means[i])
self.assertNotAlmostEqual(
obsd.covariance[i][i], obsd_orig.covariance[i][i]
)
# Untransform the data and make sure the means are the same
observation_data_untf = pt.untransform_observation_data(observation_data_tf, [])
for obsd, obsd_orig in zip(observation_data_untf, [self.obsd1, self.obsd2]):
for i in range(2): # Both metrics should be transformed
self.assertAlmostEqual(obsd.means[i], obsd_orig.means[i])
# NaN covar values remain as NaNs
transformed_obsd_nan = pt.transform_observation_data(
[deepcopy(self.obsd_nan)], []
)[0]
cov_results = np.array(transformed_obsd_nan.covariance)
self.assertTrue(np.all(np.isnan(np.diag(cov_results))))
def testCompareToSklearn(self):
# Make sure the transformed values agree with Sklearn
observation_data = [self.obsd1, self.obsd2, self.obsd3]
y_orig = np.array([data.means[0] for data in observation_data])[:, None]
y1 = PowerTransformer("yeo-johnson").fit(y_orig).transform(y_orig).ravel()
pt = PowerTransformY(
search_space=None,
observation_features=None,
observation_data=deepcopy(observation_data),
config={"metrics": ["m1"]},
)
observation_data_tf = pt.transform_observation_data(observation_data, [])
y2 = [data.means[0] for data in observation_data_tf]
for y1_, y2_ in zip(y1, y2):
self.assertAlmostEqual(y1_, y2_)
def testTransformOptimizationConfig(self):
# basic test
m1 = Metric(name="m1")
objective_m1 = Objective(metric=m1, minimize=False)
oc = OptimizationConfig(objective=objective_m1, outcome_constraints=[])
tf = PowerTransformY(
search_space=None,
observation_features=None,
observation_data=[self.obsd1, self.obsd2],
config={"metrics": ["m1"]},
)
oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
self.assertEqual(oc_tf, oc)
# Output constraint on a different metric should not transform the bound
m2 = Metric(name="m2")
for bound in [-1.234, 0, 2.345]:
oc = OptimizationConfig(
objective=objective_m1,
outcome_constraints=get_constraint(
metric=m2, bound=bound, relative=False
),
)
oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
self.assertEqual(oc_tf, oc)
# Output constraint on the same metric should transform the bound
objective_m2 = Objective(metric=m2, minimize=False)
for bound in [-1.234, 0, 2.345]:
oc = OptimizationConfig(
objective=objective_m2,
outcome_constraints=get_constraint(
metric=m1, bound=bound, relative=False
),
)
oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
oc_true = deepcopy(oc)
tf_bound = (
tf.power_transforms["m1"].transform(np.array(bound, ndmin=2)).item()
)
oc_true.outcome_constraints[0].bound = tf_bound
self.assertEqual(oc_tf, oc_true)
# Relative constraints aren't supported
oc = OptimizationConfig(
objective=objective_m2,
outcome_constraints=get_constraint(metric=m1, bound=2.345, relative=True),
)
with self.assertRaisesRegex(
ValueError,
"PowerTransformY cannot be applied to metric m1 since it is "
"subject to a relative constraint.",
):
tf.transform_optimization_config(oc, None, None)
# Support for scalarized outcome constraints isn't implemented
m3 = Metric(name="m3")
oc = OptimizationConfig(
objective=objective_m2,
outcome_constraints=[
ScalarizedOutcomeConstraint(
metrics=[m1, m3], op=ComparisonOp.GEQ, bound=2.345, relative=False
)
],
)
with self.assertRaises(NotImplementedError) as cm:
tf.transform_optimization_config(oc, None, None)
self.assertEqual(
"PowerTransformY cannot be used for metric(s) {'m1'} "
"that are part of a ScalarizedOutcomeConstraint.",
str(cm.exception),
)
|
"""
Overall these views are awful.
- Add back option between steps
- Make use of GenericViews
- ViewClass
- between each step, post to self, validate data and use next to go to next step
"""
from django.shortcuts import render, redirect
from collections import defaultdict, namedtuple
from .forms import SetupForm, score_form_factory, ObservationsForm, TemplateForm, skip_form_factory
from .models import Template, Question, Score, Interview
from base.models import Candidate, Position, Seniority, Subarea, Area
import uuid, csv, xlrd, io
def setup(request):
"""setup screen render function"""
form = SetupForm()
return render(request, 'interview/setup.html', {'form': form})
def evaluation(request):
"""
Evaluation screen render function.
Receive data to create interview from query params.
Store the initial setup data for the interview on the current session.
"""
candidate_name = request.GET['candidate']
candidate, _ = Candidate.objects.get_or_create(name=candidate_name)
request.session['template_id'] = request.GET['template']
request.session['candidate_id'] = candidate.id
request.session['position_id'] = request.GET['position']
template_id = request.GET['template']
template = Template.objects.get(pk=template_id)
questions = template.questions.all()
formatter = lambda q: f'score_{q.id}'
score_field_names = [formatter(question) for question in questions]
ScoreForm = score_form_factory(score_field_names)
score_form = ScoreForm()
checkbox_formatter = lambda q: f'skip_{q.id}'
skip_field_names = [checkbox_formatter(question) for question in questions]
SkipForm = skip_form_factory(skip_field_names)
skip_form = SkipForm()
areas = defaultdict(list)
for question in questions:
area = question.subarea.area
areas[area.area].append(question)
areas = {area: [(i + 1, question, score_form[formatter(question)], skip_form[checkbox_formatter(question)])
for i, question in enumerate(questions)]
for area, questions in areas.items()}
return render(request, 'interview/evaluation.html', {'areas': areas})
def observations(request):
store_scores(request)
return render(request, 'interview/observations.html', {
'form': ObservationsForm()
})
def review(request):
comments = request.POST['comments']
interviewer_score = request.POST['interviewer_score']
request.session['comments'] = comments
request.session['interviewer_score'] = interviewer_score
template_id = request.session['template_id']
candidate_id = request.session['candidate_id']
position_id = request.session['position_id']
position = Position.objects.get(pk=position_id)
candidate = Candidate.objects.get(pk=candidate_id)
template = Template.objects.get(pk=template_id)
question_scores = [(Question.objects.get(pk=pk), score)
for pk, score in request.session['scores']]
return render(request, 'interview/review.html', {
'question_scores': question_scores,
'comments': comments,
'position': position,
'candidate': candidate,
'template': template,
'interviewer_score': interviewer_score,
})
def conclusion(request):
template_id = request.session['template_id']
candidate_id = request.session['candidate_id']
position_id = request.session['position_id']
interview = Interview()
interview.position = Position.objects.get(pk=position_id)
interview.candidate = Candidate.objects.get(pk=candidate_id)
interview.interviewer = request.user
interview.comments = request.session['comments']
interview.interviewer_score = request.session['interviewer_score']
interview.save()
question_scores = [(Question.objects.get(pk=pk), score)
for pk, score in request.session['scores']]
for question, score in question_scores:
Score(question=question, score=score, interview=interview).save()
return redirect('viz:detail', interview.id)
def template_upload(request):
if request.POST:
f = TemplateForm(request.POST, request.FILES)
f.is_valid()
template = Template()
template.name = f.cleaned_data['name']
template.description = f.cleaned_data['description']
template.owner = request.user
uploaded_file = f.cleaned_data['csv']
file_name = f'/tmp/{str(uuid.uuid4())}.xlsx'
with open(file_name, 'wb+') as f:
for chunk in uploaded_file.chunks():
f.write(chunk)
rows = excel_to_dicts(file_name)
questions = [add_question_from_template(row) for row in rows]
template.save()
template.questions.set(questions)
template.save()
return redirect('viz:list')
else:
form = TemplateForm()
return render(request, 'interview/template_upload.html', {
'form': form
})
def excel_to_dicts(file):
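    """Read the first sheet of an Excel workbook and return its rows as dicts keyed by the header row."""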
book = xlrd.open_workbook(file)
sheet = book.sheet_by_index(0)
labels = [cell.value for cell in sheet.row(0)]
rows = [{label: cell.value for label, cell in zip(labels, row)}
for row in list(sheet.get_rows())[1:]]
return rows
def add_question_from_template(row):
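    """Create or update a Question (plus its Seniority, Subarea and Area) from one spreadsheet row."""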
#TODO should be a get as bands are static
seniority, _ = Seniority.objects.get_or_create(seniority=row['Senioridade'])
subarea, _ = Subarea.objects.get_or_create(subarea=row['Subarea'])
area, _ = Area.objects.get_or_create(area=row['Area'])
if subarea.area != area:
# TODO assert subarea.area = area, if it isn't throw error
subarea.area = area
subarea.save()
question_body = row['Questao']
answer = row['Resposta']
weight = row['Peso']
question, created = Question.objects.update_or_create(question=question_body, defaults={
'answer': answer,
'weight': weight,
'subarea': subarea,
'seniority': seniority
})
return question
def store_scores(request):
"""
Store score for each question in session object, does not store score
for skipped questions
"""
remove_prefix = lambda key: int(key.split('_')[1])
skip_pks = [remove_prefix(key) for key, _ in request.POST.items()
if 'skip_' in key]
scores = {remove_prefix(key): int(score)
for key, score in request.POST.items() if 'score_' in key}
request.session['scores'] = [(pk, score) for pk, score in scores.items()
if pk not in skip_pks]
|
# Generated by Django 2.2 on 2021-07-01 02:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_auto_20210701_0707'),
]
operations = [
migrations.AlterField(
model_name='category',
name='category_name',
field=models.CharField(db_column='Name', max_length=100, unique=True),
),
migrations.AlterField(
model_name='owner',
name='is_staff',
field=models.BooleanField(default=True),
),
]
|
from argparse import Namespace
from typing import Tuple, Callable
import torch.nn as nn
import torch
from torch import Tensor
from .learnable import Scale, Balance
from .registry import register
@register("linear_residual")
class LinearResidual(nn.Module):
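    """Residual wrapper: scales either the shortcut or the residual branch (see
    `weighting_type`) and combines the two branches through a learnable Balance."""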
def __init__(self, weight: float,
learnable_weight: bool,
weighting_type: str,
layer: Callable):
super().__init__()
assert weighting_type in ['shortcut', 'residual'], "weighting type must be one of 'shortcut' or 'residual'"
self.weighting_type = weighting_type
self.layer = layer
self.scale = Scale(weight)
self.balance = Balance()
def forward(self, x: Tensor) -> Tensor:
if self.weighting_type == 'shortcut':
return self.balance(self.layer(x),
self.scale(x))
elif self.weighting_type == 'residual':
return self.balance(self.scale(self.layer(x)), x)
else:
raise ValueError
@register("mlp")
class MLP(nn.Module):
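    """Two-layer feed-forward block: Linear -> activation -> dropout -> Linear -> dropout."""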
def __init__(self, in_features: int,
hidden_features: int=None,
out_features: int=None,
act_layer: Callable=nn.GELU,
drop: float=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x: Tensor) -> Tensor:
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
|
import os
from flask import Flask, render_template, redirect, request
from flask_dropzone import Dropzone
from convert import convert_to_line
app = Flask(__name__)
dropzone = Dropzone(app)
filename = None
dir_path = os.path.dirname(os.path.realpath(__file__))
input_path = os.path.join(dir_path, 'static/input')
app.config.update(
UPLOADED_PATH=input_path,
# Flask-Dropzone config:
DROPZONE_ALLOWED_FILE_TYPE='image',
DROPZONE_MAX_FILE_SIZE=3,
DROPZONE_MAX_FILES=1
)
app.config['DROPZONE_REDIRECT_VIEW'] = 'coloring'
app.secret_key = 'development key'
@app.route('/')
@app.route('/main', methods=['POST', 'GET'])
def main():
global filename
file = None
if request.method == 'POST':
f = request.files.get('file')
file = f.save(os.path.join(app.config['UPLOADED_PATH'], f.filename))
filename = f.filename
convert_to_line(filename)
return render_template('main.html')
@app.route('/github')
def github():
return redirect('https://github.com/7-B/yoco')
@app.route('/coloring', methods=['POST', 'GET'])
def coloring():
global filename
try:
filename = filename.split('/')[-1]
filename = filename.split('.')[0] + '.svg'
filename = os.path.join('output/',filename)
    except Exception as ex:  # catch any error type
print(ex)
return render_template('coloring.html', file_name = filename)
@app.after_request
def add_header(r):
"""
    Function that clears the cache.
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
return r
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000, debug=True)
|
import os
import sys
from helperFunctions import *
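# Driver script: builds fast-simulation correction-factor parameters, writes MC correction
# files for the efficiency/resolution variations, runs the systematic-error estimation per
# jet type and finally subtracts the underlying event from the jet spectra.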
foldersToCreate = ['SummedSystematics', 'SingleSystematicResults', 'UEsubtractedJetResults', 'Efficiencycorrected', 'Efficiencycorrected/PDF']
folderEffCorrection = 'Efficiencycorrected'
if len(sys.argv) < 3:
    print('Provide name of analysis folder and systematic day')
    exit()
analysisFolder=sys.argv[1]
systematicDay=sys.argv[2]
#### Read config ###
config = getConfig(analysisFolder)
config['analysisFolder'] = analysisFolder
#### Produce correction factor parameters ###
parameters = []
for spec in ('Electron', 'Muon', 'Pion', 'Kaon', 'Proton'):
    for charge in ('neg', 'pos'):
        configName = 'FS_Parameters_' + spec + '_' + charge
        parameters.append('-'.join(config[configName]))
# Join the per-species parameter strings; the separator is an assumption, the original
# referenced an undefined name 'parametersString'.
parametersString = ';'.join(parameters)
arguments = {
    'effFile' : config['analysisFolder'] + "/Data/MCData/" + config['efficiencyFileNamePattern'].format('Jets_Inclusive'),
    'outputfile' : config['analysisFolder'] + "/Data/MCData/fastSimulationParameters" + "_" + config['MCRunName'] + ".root",
    'parameters' : parametersString
}
callRootMacro("FitCorrFactors", arguments)
#### Produce Corrections factors of full MC run
arguments = {
'pathNameEfficiency' : config['analysisFolder'] + "/Data/MCData/" + config['efficiencyFileNamePattern'].format('Jets'),
'outfileName' : config['pathMCCorrectionFile'],
}
callRootMacro("createFileForBbBCorrections", arguments) #TODO: Give observables, jetPts as argument, check if it can be merged with writeOutCorrectionFiles
#### Write correction files ###
for eff in config['MCVariationsEff']:
varFolder="Eff" + eff + "_Res100/"
arguments = {
'effFilePath': config['analysisFolder'] + "/Data/MCData/" + varFolder + config['efficiencyFileNamePattern'].format('Jets'),
'outFilePath' : config['analysisFolder'] + "/Data/MCData/",
'addToFile' : varFolder
}
callRootMacro("writeOutCorrectionFiles", arguments) #TODO: Give observables, jetPts as argument (also below). Check if this triple call can be simplified
for res in config['MCVariationsRes']:
varFolder="Eff100" + "_Res" + res + "/"
arguments = {
'effFilePath': config['analysisFolder'] + "/Data/MCData/" + varFolder + config['efficiencyFileNamePattern'].format('Jets'),
'outFilePath' : config['analysisFolder'] + "/Data/MCData/",
'addToFile' : varFolder
}
callRootMacro("writeOutCorrectionFiles", arguments)
for varFolder in config['MCVariationsLowPt']:
arguments = {
'effFilePath': config['analysisFolder'] + "/Data/MCData/" + varFolder + config['efficiencyFileNamePattern'].format('Jets'),
'outFilePath' : config['analysisFolder'] + "/Data/MCData/",
'addToFile' : varFolder
}
callRootMacro("writeOutCorrectionFiles", arguments)
#### Produce correction files for fast simulation ###
arguments = {
'effFilePath': config['analysisFolder'],
'outFilePath' : config['analysisFolder'] + "/MCSystematicsFiles",
'addToFile' : varFolder
}
callRootMacro("sysErrorsPythiaFastJet_new", arguments) #TODO: Give jetPtString etc.
#### Read systematic variables ###
systematics = getSystematicConfig(analysisFolder)
#### Make result folders ###
for resFolder in (foldersToCreate):
os.makedirs(analysisFolder + '/' + resFolder, exist_ok = True)
#### Run individual systematic Error Estimation
if config['doInclusive'] == 1:
print('Do inclusive analysis')
jetString = 'Jets_Inclusive'
runSystematicProcess(config['systematicsInclusive'], systematics, config, jetString, systematicDay)
runCalculateEfficiency(jetString, config, systematicDay, systematicDay)
if config['doJets'] == 1:
print('Do jet analysis')
jetString = 'Jets'
runSystematicProcess(config['systematicsJets'], systematics, config, jetString, systematicDay)
runCalculateEfficiency(jetString, config, systematicDay, systematicDay)
if config['doUE'] == 1:
print('Do UE analysis')
jetString = 'Jets_UE'
runSystematicProcess(config['systematicsUE'], systematics, config, jetString, systematicDay)
runCalculateEfficiency(jetString, config, systematicDay, systematicDay)
#### Subtract Underlying event ###
arguments = {
'jetFilePattern' : analysisFolder + "/Efficiencycorrected/output_EfficiencyCorrection_outputSystematicsTotal_SummedSystematicErrors_Jets_%s__%s%s__" + systematicDay + "__" + systematicDay + ".root",
'ueFilePattern' : analysisFolder + "/Efficiencycorrected/output_EfficiencyCorrection_outputSystematicsTotal_SummedSystematicErrors_Jets_UE_%s__%s%s__" + systematicDay + "__" + systematicDay + ".root",
'jetPtStepsString' : config['jetPtString'],
'centStepsString' : config['centString'],
'modesInputString' : config['modesJetsString'],
'outputFilePattern' : analysisFolder + "/UEsubtractedJetResults/output_EfficiencyCorrection_outputSystematicsTotal_SummedSystematicErrors_Jets_%s__%s%s__" + systematicDay + "__" + systematicDay + "_UEsubtractedJetResults.root",
}
callRootMacro("SubtractUnderlyingEvent", arguments)
|
"""
MIT License
Copyright (c) 2021 Matthias Konrath
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from timeit import default_timer as timer
def KSA(key_hex, key_length):
"""
Key-scheduling algorithm
"""
# Initialize the variables
j = 0
array_s = []
# Initialize the array
for i in range(256):
array_s.append(i)
# Run the algorithm
for i in range(256):
j = (j + array_s[i] + ord(key_hex[i % key_length])) % 256
tmp = array_s[i]
array_s[i] = array_s[j]
array_s[j] = tmp
return array_s
def PRGA(array_s, payload_length):
"""
Pseudo-random generation algorithm
"""
# Initialize the variables
j = 0
i = 0
k = 0
keystream = []
# Run the algorithm
for _ in range(payload_length):
i = (i + 1) % 256
j = (j + array_s[i]) % 256
tmp = array_s[i]
array_s[i] = array_s[j]
array_s[j] = tmp
k = array_s[(array_s[i] + array_s[j]) % 256]
keystream.append(k)
return keystream
#### #### #### ####
# MAIN FUNCTION #
#### #### #### ####
print '---- ---- ---- ---- ---- ---- ---- ----'
print ' RC4 Python Implementation '
print '---- ---- ---- ---- ---- ---- ---- ----'
print 'Author: Matthias Konrath'
print 'Email: matthas AT inet-sec.at'
print '\n'
key_str = "ae6c3c41884d35df3ab5adf30f5b2d360938c658341886b0ba510b421e5ab405"
key_hex = key_str.decode("hex")
key_length = len(key_str)/2
payload_str = "3ae280d0d5cd70d8e0f81300dc9031a2e0f8512cb35a7579fd79575cf287c595"
payload_str_hex = payload_str.decode("hex")
payload_str_length = len(payload_str)/2
cyphertext = []
print '[*] KEY:'
print key_str
print ''
print '[*] Payload:'
print payload_str
print ''
array_s = KSA(key_hex, key_length)
print '[*] ARRAY_S:'
print ''.join('{:02x}'.format(x) for x in array_s)
print ''
keystream = PRGA(array_s, payload_str_length)
print '[*] KEYSTREAM:'
print ''.join('{:02x}'.format(x) for x in keystream)
print ''
print '[*] CIPHERTEXT:'
for i in range(payload_str_length):
cyphertext.append(ord(payload_str_hex[i]) ^ keystream[i])
print ''.join('{:02x}'.format(x) for x in cyphertext)
print ''
print '[*] SPEED TEST'
test_size_bytes = 1024 * 1000 * 50 # 50 Megabyte
payload_str = "61" * test_size_bytes
payload_str_hex = payload_str.decode("hex")
payload_str_length = len(payload_str)/2
cyphertext = []
start = timer()
array_s = KSA(key_hex, key_length)
keystream = PRGA(array_s, payload_str_length)
for i in range(payload_str_length):
cyphertext.append(ord(payload_str_hex[i]) ^ keystream[i])
stop = timer()
# For 50 MB --> 658b79745390f3ccd8242c9d0178a018add82ba8d0058adf9dfb3a2b02d188a3
#last_bytes = ''.join('{:02x}'.format(x) for x in cyphertext[-32:])
#print "[*] LAST 32 bytes: " + last_bytes
print '[+] Encrypted {} MB in {:.2f} seconds ({:.2f} MB/s)'.format((test_size_bytes / (1024 * 1000)), (stop - start), float((float(test_size_bytes) / (1024 * 1000)) / float(stop - start)))
|
import numpy as np
from scipy.signal import savgol_filter
class SavitzkyGolov(object):
def __init__(self, window_length:int, poly:int):
self.size=window_length
self.poly=poly
def set_window_size(self, size):
self.size=size
if self.size%2==0:
self.size = self.size+1
def process(self, traj:np.ndarray)->np.ndarray:
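        """Smooth the x/y columns of traj with a Savitzky-Golay filter and return
        derivatives of order 0-3 for both coordinates, stacked along the last axis."""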
x = traj[:,1]
y = traj[:,2]
result = []
for i in range(0, 4):
_x = savgol_filter(x=x,
window_length=self.size,
polyorder=self.poly,
deriv=i,
delta=0.1)
_y = savgol_filter(x=y,
window_length=self.size,
polyorder=self.poly,
deriv=i,
delta=0.1)
result.append(_x)
result.append(_y)
return np.dstack(result)
def filter(self, vector:np.ndarray)->np.ndarray:
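        """Accept an (N, 2) array of x/y points, prepend a zero column as a placeholder
        first column and delegate to process()."""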
vector = np.squeeze(vector)
x = vector[:,0]
y = vector[:,1]
p = np.dstack((np.zeros(len(x)), x, y))
p = np.squeeze(p)
return self.process(p)
def filter2(self, vector:np.ndarray)->np.ndarray:
vector = np.squeeze(vector)
result = [savgol_filter(x=vector[:, i],
window_length=self.size,
polyorder=self.poly)\
for i in range(0, vector.shape[1])]
return np.dstack(result)
|
from pathlib import Path
from typing import Union
__all__ = ("ScreenshotPath",)
class ScreenshotPath:
def __init__(self, dir_: Path) -> None:
self.dir = dir_
self.rerun: Union[int, None] = None
self.timestamp: Union[int, None] = None
self.scenario_path: Union[Path, None] = None
self.scenario_subject: Union[str, None] = None
self.step_name: Union[str, None] = None
self.tab_index: Union[int, None] = None
def resolve(self) -> Path:
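        """Assemble the screenshot path from the configured parts (scenario path, subject,
        rerun, timestamp, step name, tab index)."""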
dir_path = self.dir
if self.scenario_path is not None:
cwd = Path().resolve()
rel_path = self.scenario_path.relative_to(cwd)
dir_path = self.dir.joinpath(rel_path.with_suffix(""))
file_path = "screenshot"
if self.scenario_subject is not None:
file_path = self.scenario_subject
if self.rerun is not None:
file_path = f"[{self.rerun}]{file_path}"
if self.timestamp is not None:
file_path += f"__{self.timestamp}"
if self.step_name is not None:
file_path += f"__{self.step_name}"
if self.tab_index is not None:
file_path = f"tab{self.tab_index}__{file_path}"
return dir_path / (file_path + ".png")
def __repr__(self) -> str:
path = self.resolve()
return f"{self.__class__.__name__}<{path}>"
|
from django.shortcuts import render
from django.http import HttpResponse
import telebot
from .models import *
from django.views.decorators.csrf import csrf_exempt
import json
import re
from block_io import BlockIo
from datetime import datetime
import requests
from time import sleep
from django.utils.translation import ugettext as _
from django.utils.translation import activate
def is_digit(string):
if string.isdigit():
return True
else:
try:
float(string)
return True
except ValueError:
return False
def isint(s):
try:
int(s)
return True
except ValueError:
return False
@csrf_exempt
def bot(request):
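    """Telegram webhook entry point: parses the incoming update, routes on message text or
    callback data and drives the BTC wallet flows (balance, deposit, history, withdraw) via Block.io."""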
token = '434214801:AAH67lvsi1k3vFnElT8OlCIBhrXpwDMuE5k'
try:
bot = telebot.TeleBot(token)
data = json.loads(request.body.decode('utf-8'))
if 'callback_query' in data.keys():
chat_id = data['callback_query']['from']['id']
callback_data = data['callback_query']['data']
text = ''
file_id = ''
phone_number = ''
elif 'message' in data.keys():
if 'contact' in data['message'].keys():
phone_number = data['message']['contact']['phone_number']
chat_id = data["message"]["chat"]["id"]
callback_data = ''
text = ''
file_id = ""
if 'text' in data['message'].keys():
text = data['message']['text']
chat_id = data['message']['chat']['id']
callback_data = ''
file_id = ''
phone_number = ''
else:
mes = 'Error'
bot.send_message(chat_id, mes)
return HttpResponse('OK')
isOld = BotUser.objects.filter(chat_id=chat_id).count()
settings = Settings.objects.all().first()
blockio_api_keys_btc = settings.blockio_api_keys_btc
blockio_sercret_pin = settings.blockio_sercret_pin
version = 2 # API version
block_io = BlockIo(blockio_api_keys_btc, blockio_sercret_pin, version)
if isOld != 0:
user = BotUser.objects.get(chat_id=chat_id)
if text == '/start':
try:
username = data["message"]["from"]["username"]
except:
username = ""
try:
fio1 = data["message"]["from"]["first_name"]
except:
fio1 = ""
try:
fio2 = data["message"]["from"]["last_name"]
except:
fio2 = ""
fio = fio1 + ' ' + fio2
if isOld == 0:
try:
response = block_io.get_new_address(label=str(chat_id))
if response["status"] == "success":
btc_wallet = response["data"]["address"]
else:
btc_wallet = None
except: # response["status"] == "fail":
try:
response2 = block_io.get_address_by(label=str(chat_id))
if response2["status"] == "success":
btc_wallet = response2["data"]["address"]
else:
btc_wallet = None
except:
bot.send_message(354691583, 'Не выдается кошелек BTC для пополнения')
btc_wallet = None
return HttpResponse('OK')
user = BotUser.objects.create(chat_id=chat_id, fio=fio, username=username, step="home",
btc_wallet=btc_wallet)
else:
BotUser.objects.filter(chat_id=chat_id).update(step="home")
mes = 'Choose language / Выберите язык'
button = telebot.types.ReplyKeyboardMarkup(True, False)
button.row('Русский')
button.row('English')
bot.send_message(chat_id, mes, reply_markup=button)
user.step = 'home'
user.save()
return HttpResponse('OK')
if text == 'Русский' or text == 'English':
if text == 'Русский':
user.lang = 'ru'
user.save()
elif text == 'English':
user.lang = 'en'
user.save()
activate(user.lang)
mes = _('BeeWallet является самым защищенным и самым быстрым кошельком. Отправляя средства внутри BeeWallet вы мгновенно получаете доступ к средствам без необходимости дожидаться подтверждения сети. Пригласите своих друзей в BeeWallet и экономьте до 40% на плате за транзакцию.')
button = telebot.types.ReplyKeyboardMarkup(True, False)
button.row(_('Баланс'), _('История'))
button.row(_('Отправить'), _('Получить'))
button.row(_('Настройки'), _('Поддержка'))
bot.send_message(chat_id, mes, reply_markup=button)
user.step = 'menu'
user.save()
return HttpResponse('OK')
###################
        # Set the user's language #
###################
activate(user.lang)
###################
if text == _('Главное меню') or callback_data == 'to_home':
mes = _('BeeWallet является самым защищенным и самым быстрым кошельком. Отправляя средства внутри BeeWallet вы мгновенно получаете доступ к средствам без необходимости дожидаться подтверждения сети. Пригласите своих друзей в BeeWallet и экономьте до 40% на плате за транзакцию.')
button = telebot.types.ReplyKeyboardMarkup(True, False)
button.row(_('Баланс'), _('История'))
button.row(_('Отправить'), _('Получить'))
button.row(_('Настройки'), _('Поддержка'))
bot.send_message(chat_id, mes, reply_markup=button)
user.step = 'menu'
user.save()
return HttpResponse('OK')
if user.btc_wallet is None:
            is_Error = False  # make sure the flag exists even if the call below raises
            try:
                response = block_io.get_new_address(label=str(chat_id))
if response["status"] == "success":
btc_wallet = response["data"]["address"]
else:
is_Error = True
except: # response["status"] == "fail":
try:
response2 = block_io.get_address_by(label=str(chat_id))
if response2["status"] == "success":
btc_wallet = response2["data"]["address"]
else:
is_Error = True
                except Exception as e:
bot.send_message(354691583, 'Не выдается кошелек BTC для пополнения')
Log.objects.create(bot_user=user, type_log="error_btc", text=str(e))
mes = _('Возникла ошибка. Обратитесь в службу поддержки @beeqb')
bot.send_message(user.chat_id, mes)
return HttpResponse('OK')
if is_Error:
bot.send_message(354691583, 'Не выдается кошелек BTC для пополнения')
Log.objects.create(bot_user=user, type_log="error_btc", text=str(e))
mes = _('Возникла ошибка. Обратитесь в службу поддержки @beeqb')
bot.send_message(user.chat_id, mes)
return HttpResponse('OK')
user.btc_wallet = btc_wallet
user.save()
if phone_number != "":
mes = _('Теперь вам могут переводить средства внутри BeeWallet, просто указав ваш номер телефона.')
            user.phone_number = phone_number.replace('+', '').replace(')', '').replace('(', '').replace('-', '').replace(' ', '')
user.save()
bot.send_message(chat_id, mes)
return HttpResponse('OK')
if text == _('Баланс'):
try:
response = block_io.get_address_balance(labels=str(chat_id))
print(response)
if response["status"] == "success":
available_balance = float(response["data"]["available_balance"])
pending_received_balance = float(response["data"]["pending_received_balance"])
else:
Log.objects.create(bot_user=user, type_log="error_btc", text=str(e))
mes = _('Возникла ошибка. Обратитесь в службу поддержки @beeqb')
bot.send_message(user.chat_id, mes)
except Exception as e:
Log.objects.create(bot_user=user, type_log="error_btc", text=str(e))
mes = _('Возникла ошибка. Обратитесь в службу поддержки @beeqb')
bot.send_message(user.chat_id, mes)
try:
kurs = json.loads(
requests.get('https://api.coinmarketcap.com/v1/ticker/bitcoin/?convert=%s' % user.currency).text)[
0]['price_' + user.currency.lower()]
s1 = '~ %.2f %s' % (round(float(kurs) * available_balance, 2), user.currency)
s2 = '~ %.2f %s' % (round(float(kurs) * pending_received_balance, 2), user.currency)
except:
s1 = ''
s2 = ''
mes = _('Ваш баланс:\n%.8fBTC\n%s\n\n') % (available_balance, s1)
mes += _('Ожидает подтверждения:\n%.8fBTC\n%s') % (pending_received_balance, s2)
bot.send_message(chat_id, mes)
return HttpResponse('OK')
if text == _('Получить'):
mes = _('Адрес вашего кошелька:\n%s\n\nQR код вашего кошелька:') % user.btc_wallet
bot.send_message(chat_id, mes)
bot.send_photo(chat_id, 'http://www.btcfrog.com/qr/bitcoinPNG.php?address=%s' % user.btc_wallet)
return HttpResponse('OK')
if text == _('Настройки'):
mes = _(
'Здесь вы можете:\n-изменить язык интерфейса\n-привязать телефон, чтобы Вам могли совершать переводы по телефону\n-изменить валюту эквивалента для биткоина')
button = telebot.types.ReplyKeyboardMarkup(True, False)
b1 = telebot.types.KeyboardButton(text=_('Изменить язык'))
b2 = telebot.types.KeyboardButton(text=_('Привязать телефон'), request_contact=True)
b3 = telebot.types.KeyboardButton(text=_('Изменить валюту'))
b4 = telebot.types.KeyboardButton(text=_('Главное меню'))
button.add(b1, b2, b3, b4)
bot.send_message(chat_id, mes, reply_markup=button)
return HttpResponse('OK')
if text == _('Изменить язык'):
mes = 'Choose language / Выберите язык'
button = telebot.types.ReplyKeyboardMarkup(True, False)
button.row('Русский')
button.row('English')
bot.send_message(chat_id, mes, reply_markup=button)
return HttpResponse('OK')
if text == _('Изменить валюту'):
mes = _('Выберите валюту')
keyboard = telebot.types.InlineKeyboardMarkup()
keyboard.row_width = 5
b1 = telebot.types.InlineKeyboardButton(text='USD', callback_data='currencyUSD')
b2 = telebot.types.InlineKeyboardButton(text='BRL', callback_data='currencyBRL')
b3 = telebot.types.InlineKeyboardButton(text='CAD', callback_data='currencyCAD')
b4 = telebot.types.InlineKeyboardButton(text='CHF', callback_data='currencyCHF')
b5 = telebot.types.InlineKeyboardButton(text='AUD', callback_data='currencyAUD')
keyboard.add(b1, b2, b3, b4, b5)
b1 = telebot.types.InlineKeyboardButton(text='CNY', callback_data='currencyCNY')
b2 = telebot.types.InlineKeyboardButton(text='CZK', callback_data='currencyCZK')
b3 = telebot.types.InlineKeyboardButton(text='DKK', callback_data='currencyDKK')
b4 = telebot.types.InlineKeyboardButton(text='EUR', callback_data='currencyEUR')
b5 = telebot.types.InlineKeyboardButton(text='GBP', callback_data='currencyGBP')
keyboard.add(b1, b2, b3, b4, b5)
b1 = telebot.types.InlineKeyboardButton(text='HKD', callback_data='currencyHKD')
b2 = telebot.types.InlineKeyboardButton(text='HUF', callback_data='currencyHUF')
b3 = telebot.types.InlineKeyboardButton(text='IDR', callback_data='currencyIDR')
b4 = telebot.types.InlineKeyboardButton(text='ILS', callback_data='currencyILS')
b5 = telebot.types.InlineKeyboardButton(text='INR', callback_data='currencyINR')
keyboard.add(b1, b2, b3, b4, b5)
b1 = telebot.types.InlineKeyboardButton(text='JPY', callback_data='currencyJPY')
b2 = telebot.types.InlineKeyboardButton(text='KRW', callback_data='currencyKRW')
b3 = telebot.types.InlineKeyboardButton(text='MXN', callback_data='currencyMXN')
b4 = telebot.types.InlineKeyboardButton(text='MYR', callback_data='currencyMYR')
b5 = telebot.types.InlineKeyboardButton(text='NOK', callback_data='currencyNOK')
keyboard.add(b1, b2, b3, b4, b5)
b1 = telebot.types.InlineKeyboardButton(text='NZD', callback_data='currencyNZD')
b2 = telebot.types.InlineKeyboardButton(text='PHP', callback_data='currencyPHP')
b3 = telebot.types.InlineKeyboardButton(text='RUB', callback_data='currencyRUB')
b4 = telebot.types.InlineKeyboardButton(text='SEK', callback_data='currencySEK')
b5 = telebot.types.InlineKeyboardButton(text='CLP', callback_data='currencyCLP')
keyboard.add(b1, b2, b3, b4, b5)
b1 = telebot.types.InlineKeyboardButton(text='SGD', callback_data='currencySGD')
b2 = telebot.types.InlineKeyboardButton(text='THB', callback_data='currencyTHB')
b3 = telebot.types.InlineKeyboardButton(text='TRY', callback_data='currencyTRY')
b4 = telebot.types.InlineKeyboardButton(text='TWD', callback_data='currencyTWD')
b5 = telebot.types.InlineKeyboardButton(text='ZAR', callback_data='currencyZAR')
keyboard.add(b1, b2, b3, b4, b5)
bot.send_message(chat_id, mes, reply_markup=keyboard)
return HttpResponse('OK')
if callback_data.find('currency') != -1:
currency = callback_data[8:]
user.currency = currency
user.save()
mes = _('Вы изменили валюту эквивалента на %s') % currency
bot.edit_message_text(chat_id=data["callback_query"]["message"]["chat"]["id"],
message_id=data["callback_query"]["message"]["message_id"], text=mes)
return HttpResponse('OK')
if text == _('Поддержка'):
mes = _('Если у вас возникли вопросы или трудности при использовании кошелька, обратитесь в официальный чат @beeqb')
bot.send_message(user.chat_id, mes)
return HttpResponse('OK')
if text == _('История'):
try:
response = block_io.get_transactions(type='sent', addresses=user.btc_wallet)
if response["status"] == "success":
# print(response)
mes = _("Исходящие:\n")
txs = response["data"]["txs"]
if txs == []:
mes += _('Пусто')
for tx in txs:
# print(tx)
mes += _('Дата: %s\n') % datetime.fromtimestamp(int(tx['time'])).strftime('%Y-%m-%d %H:%M:%S')
mes += _('Сумма: %sBTC\n') % tx['amounts_sent'][0]['amount']
# print(mes)
# from_wallet = tx['senders'][0]
# try:
# from_wallet = BotUser.objects.get(btc_wallet = from_wallet).fio #.decode("utf-8")
# except:
# pass
# mes += 'От: %s\n' % from_wallet
to_wallet = tx['amounts_sent'][0]['recipient']
# print(to_wallet)
try:
u = BotUser.objects.get(btc_wallet=to_wallet)
print(u)
to_wallet = u.fio
if u.username is not None or u.username == '':
to_wallet += ' (@%s)' % u.username
if u.phone_number is not None or u.phone_number == '':
to_wallet += ' %s' % u.phone_number
except Exception as e:
print(str(e))
pass
mes += _('Кому: %s\n\n') % to_wallet
bot.send_message(user.chat_id, mes)
else:
Log.objects.create(bot_user=user, type_log="error_btc", text=json.dumps(response))
mes = _('Возникла ошибка. Обратитесь в службу поддержки @beeqb')
bot.send_message(user.chat_id, mes)
except Exception as e:
try:
Log.objects.create(bot_user=user, type_log="error_btc", text=str(e))
mes = _('Возникла ошибка. Обратитесь в службу поддержки @beeqb')
bot.send_message(user.chat_id, mes)
except:
pass
sleep(0.5)
try:
response = block_io.get_transactions(type='received', addresses=user.btc_wallet)
# print(response)
if response["status"] == "success":
mes = _("Входящие:\n")
txs = response["data"]["txs"]
# print(txs)
if txs == []:
mes += _('Пусто')
for tx in txs:
# print(tx)
mes += _('Дата: %s\n') % datetime.fromtimestamp(tx['time']).strftime('%Y-%m-%d %H:%M:%S')
# mes += 'Подтверждений: %s' % tx['confirmations']
mes += _('Сумма: %sBTC\n') % tx['amounts_received'][0]['amount']
from_wallet = tx['senders'][0]
try:
u = BotUser.objects.get(btc_wallet=from_wallet)
print(u)
from_wallet = u.fio
if u.username is not None or u.username == '':
from_wallet += ' (@%s)' % u.username
if u.phone_number is not None or u.phone_number == '':
from_wallet += ' %s' % u.phone_number
except Exception as e:
print(str(e))
pass
mes += _('От: %s\n\n') % from_wallet
# to_wallet = tx['amounts_received'][0]['recipient']
# try:
# to_wallet = BotUser.objects.get(btc_wallet = to_wallet).fio #.decode("utf-8")
# except:
# pass
# mes += 'Кому: %s\n\n' % to_wallet
bot.send_message(chat_id, mes)
else:
Log.objects.create(bot_user=user, type_log="error_btc", text=json.dumps(response))
mes = _('Возникла ошибка. Обратитесь в службу поддержки @beeqb')
bot.send_message(user.chat_id, mes)
except Exception as e:
try:
Log.objects.create(bot_user=user, type_log="error_btc", text=str(e))
mes = _('Возникла ошибка. Обратитесь в службу поддержки @beeqb')
bot.send_message(user.chat_id, mes)
except:
pass
if text == _('Отправить'):
try:
response = block_io.get_address_balance(labels=str(chat_id))
print(response)
if response["status"] == "success":
available_balance = float(response["data"]["available_balance"])
else:
Log.objects.create(bot_user=user, type_log="error_btc", text=str(e))
mes = _('Возникла ошибка. Обратитесь в службу поддержки @beeqb')
bot.send_message(user.chat_id, mes)
except Exception as e:
Log.objects.create(bot_user=user, type_log="error_btc", text=str(e))
mes = _('Возникла ошибка. Обратитесь в службу поддержки @beeqb')
bot.send_message(user.chat_id, mes)
mes = _('Ваш баланс %.8fBTC.\nВведите адрес кошелька или номер телефона в формате “+12223334455” или имя пользователя в формате "@username" кому хотите перевести деньги.') % available_balance
# mes = 'Введите сумму BTC, которую хотите перевести:'
button = telebot.types.ReplyKeyboardMarkup(True, False)
button.row(_('Главное меню'))
bot.send_message(chat_id, mes, reply_markup=button)
user.step = 'input_whom_btc_withdraw'
user.save()
return HttpResponse('OK')
if user.step == 'input_whom_btc_withdraw':
text = text.replace('+', '').replace(')', '').replace('(', '').replace('-', '').replace(' ', '')
match = re.search(r'^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$', text)
if match is not None:
mes = _('Введите сумму BTC, которую хотите перевести:')
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row(_('Главное меню'))
bot.send_message(chat_id, mes, reply_markup=user_markup)
# user.wallet_btc = text
user.step = 'input_amount_btc_withdraw'
user.wallet_btc_withdraw = text
user.save()
return HttpResponse('OK')
else:
if text[0] == '@':
try:
u = BotUser.objects.get(username__iexact=text[1:].lower())
mes = _('Введите сумму BTC, которую хотите перевести:')
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row(_('Главное меню'))
bot.send_message(chat_id, mes, reply_markup=user_markup)
# user.wallet_btc = text
user.step = 'input_amount_btc_withdraw'
user.wallet_btc_withdraw = str(u.chat_id)
user.save()
except:
mes = _('Ошибка! Пользователь с таким именем не зарегистрирован.')
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row(_('Главное меню'))
bot.send_message(chat_id, mes, reply_markup=user_markup)
else:
match = re.search(r'^[0-9]{5,14}$', text)
if match is not None:
try:
u = BotUser.objects.get(phone_number=text)
mes = _('Введите сумму BTC, которую хотите перевести:')
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row(_('Главное меню'))
bot.send_message(chat_id, mes, reply_markup=user_markup)
# user.wallet_btc = text
user.step = 'input_amount_btc_withdraw'
user.wallet_btc_withdraw = str(u.chat_id)
user.save()
except:
mes = _('Ошибка! Пользователь с таким телефоном не зарегистрирован.')
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row(_('Главное меню'))
bot.send_message(chat_id, mes, reply_markup=user_markup)
else:
mes = _('Ошибка! Некорректное значение.')
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row(_('Главное меню'))
bot.send_message(chat_id, mes, reply_markup=user_markup)
return HttpResponse('OK')
if user.step == 'input_amount_btc_withdraw':
text = text.replace(',', '.').replace(' ', '')
if not is_digit(text):
mes = _('Ошибка: Значение должно быть числовым. Введите другую сумму.')
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row(_('Главное меню'))
bot.send_message(chat_id, mes, reply_markup=user_markup)
return HttpResponse('OK')
try:
response = block_io.get_address_balance(labels=str(chat_id))
print(response)
if response["status"] == "success":
available_balance = float(response["data"]["available_balance"])
else:
Log.objects.create(bot_user=user, type_log="error_btc", text=str(e))
mes = _('Возникла ошибка. Обратитесь в службу поддержки @beeqb')
bot.send_message(user.chat_id, mes)
return HttpResponse('OK')
except Exception as e:
Log.objects.create(bot_user=user, type_log="error_btc", text=str(e))
mes = _('Возникла ошибка. Обратитесь в службу поддержки @beeqb')
bot.send_message(user.chat_id, mes)
summa = float(text)
if summa < 0.00001:
mes = _('Ошибка! Минимальная сумма - 0.00001BTC. Введите другую сумму.')
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row(_('Главное меню'))
bot.send_message(chat_id, mes, reply_markup=user_markup)
return HttpResponse('OK')
# available_balance = 1
if summa > available_balance:
mes = _('Ошибка! У вас недостаточно средств на счету')
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row(_('Главное меню'))
bot.send_message(chat_id, mes, reply_markup=user_markup)
return HttpResponse('OK')
bot.send_message(chat_id, _('Ожидайте...'))
if isint(user.wallet_btc_withdraw):
u = BotUser.objects.get(chat_id=int(user.wallet_btc_withdraw))
mes = _('Вы уверены, что хотите перевести %.8f BTC пользователю %s?\n') % (summa, str(u.fio))
to_wallet = u.btc_wallet
else:
mes = _('Вы уверены, что хотите перевести %.8f BTC на кошелек %s?\n') % (summa, user.wallet_btc_withdraw)
u = None
to_wallet = user.wallet_btc_withdraw
procent_fee = settings.procent_fee
procent_amount = summa * procent_fee
if procent_amount < 0.00001:
procent_amount = 0.00001
procent_wallet = settings.wallet_for_fee
amounts_str = '%.8f' % summa + ',' + '%.8f' % procent_amount
print(amounts_str)
#print(wallet_str)
wallet_str = to_wallet + ',' + procent_wallet
print(wallet_str)
try:
response = block_io.get_network_fee_estimate(amounts='%.8f' % summa, to_addresses=to_wallet,
priority='low')
print(response)
lowfee = response["data"]["estimated_network_fee"]
except Exception as e:
bot.send_message(chat_id, str(e))
return HttpResponse('OK')
if BotUser.objects.filter(btc_wallet=to_wallet).exists():
medfee = None
highfee = None
else:
try:
sleep(0.5)
response = block_io.get_network_fee_estimate(amounts='%.8f' % summa, to_addresses=to_wallet,
priority='medium')
medfee = response["data"]["estimated_network_fee"]
except Exception as e:
medfee = None
try:
sleep(0.5)
response = block_io.get_network_fee_estimate(amounts='%.8f' % summa, to_addresses=to_wallet,
priority='high')
highfee = response["data"]["estimated_network_fee"]
except Exception as e:
highfee = None
keyboard = telebot.types.InlineKeyboardMarkup()
keyboard.add(telebot.types.InlineKeyboardButton(text=_('Перевести за %sBTC') % lowfee,
callback_data='yes_btc_withdraw_lowfee'))
if medfee is not None:
keyboard.add(telebot.types.InlineKeyboardButton(text=_('Transfer for %sBTC') % medfee,
callback_data='yes_btc_withdraw_medfee'))
if highfee is not None:
keyboard.add(telebot.types.InlineKeyboardButton(text=_('Transfer for %sBTC') % highfee,
callback_data='yes_btc_withdraw_highfee'))
# keyboard.add(telebot.types.InlineKeyboardButton(text=_('Yes, transfer!'), callback_data='yes_btc_withdraw'))
keyboard.add(telebot.types.InlineKeyboardButton(text=_('Cancel'), callback_data='to_home'))
bot.send_message(chat_id, mes, reply_markup=keyboard)
user.step = 'home'
user.amount_btc_withdraw = summa
user.save()
return HttpResponse('OK')
if callback_data.find('yes_btc_withdraw') != -1:
try:
if isint(user.wallet_btc_withdraw):
u = BotUser.objects.get(chat_id=int(user.wallet_btc_withdraw))
to_wallet = u.btc_wallet
else:
u = None
to_wallet = user.wallet_btc_withdraw
procent_fee = settings.procent_fee
amount = user.amount_btc_withdraw
procent_amount = amount * procent_fee
if procent_amount < 0.00001:
procent_amount = 0.00001
procent_wallet = settings.wallet_for_fee
amounts_str = '%.8f' % amount + ',' + '%.8f' % procent_amount
wallet_str = to_wallet + ',' + procent_wallet
print(amounts_str)
print(wallet_str)
if callback_data.find('lowfee') != -1:
priority = 'low'
elif callback_data.find('medfee') != -1:
priority = 'medium'
elif callback_data.find('highfee') != -1:
priority = 'high'
try:
#response = block_io.withdraw(amounts=amounts_str, to_addresses=wallet_str, priority=priority)
response = block_io.withdraw(amounts='%.8f' % amount, to_addresses=to_wallet, priority=priority)
except Exception as e:
bot.send_message(chat_id, str(e))
return HttpResponse('OK')
print(response)
if response["status"] == "success":
mes = _('You have successfully sent: %sBTC') % user.amount_btc_withdraw
bot.edit_message_text(chat_id=data["callback_query"]["message"]["chat"]["id"],
message_id=data["callback_query"]["message"]["message_id"], text=mes,
reply_markup=keyboard)
if u is not None:
bot.send_message(u.chat_id, _("%s has transferred %.8fBTC to you.") % (user.fio, amount))
# History.objects.create(bot_user = user, trans_id = response["data"]["txid"], trans_type = 'withdraw', wallet_type = 'BTC', summa = summa)
else:
Log.objects.create(bot_user=user, type_log="error_btc", text=json.dumps(response))
mes = _('An error occurred. Contact support @beeqb')
bot.send_message(user.chat_id, mes)
except Exception as e:
try:
Log.objects.create(bot_user=user, type_log="error_btc", text=str(e))
mes = _('An error occurred. Contact support @beeqb')
bot.send_message(user.chat_id, mes)
except:
pass
user.wallet_btc_withdraw = ''
user.save()
return HttpResponse('OK')
return HttpResponse('OK')
except Exception as e:
print(e)
bot.send_message(354691583, str(e))
return HttpResponse('OK')
|
from pycsp3 import *
n, m, points1, points2 = data
nPairs = len(points1)
# x[i][j] is the value at row i and column j (a boundary is put around the board).
x = VarArray(size=[n + 2, m + 2], dom=lambda i, j: {0} if i in {0, n + 1} or j in {0, m + 1} else range(nPairs + 1))
table = ({(0, ANY, ANY, ANY, ANY)}
| {tuple(ne(v) if k in (i, j) else v for k in range(5)) for i, j in combinations(range(1, 5), 2) for v in range(1, nPairs + 1)})
satisfy(
# putting two occurrences of each value on the board
[x[i, j] == v + 1 for v in range(nPairs) for (i, j) in [points1[v], points2[v]]],
# each cell with a fixed value has exactly one neighbour with the same value
[Count([x[i - 1][j], x[i + 1][j], x[i][j - 1], x[i][j + 1]], value=v + 1) == 1 for v in range(nPairs) for (i, j) in [points1[v], points2[v]]],
# each empty cell either contains 0 or has exactly two neighbours with the same value
[(x[i][j], x[i - 1][j], x[i + 1][j], x[i][j - 1], x[i][j + 1]) in table for i in range(1, n + 1) for j in range(1, m + 1) if
[i, j] not in points1 + points2]
)
minimize(
Sum(x)
)
# Note that the table uses (smart) conditions, which makes the code more compact than:
# table = ({(0, ANY, ANY, ANY, ANY)}
# | {(v, v, v, ne(v), ne(v)) for v in range(1, nPairs + 1)}
# | {(v, v, ne(v), v, ne(v)) for v in range(1, nPairs + 1)}
# | {(v, v, ne(v), ne(v), v) for v in range(1, nPairs + 1)}
# | {(v, ne(v), v, v, ne(v)) for v in range(1, nPairs + 1)}
# | {(v, ne(v), v, ne(v), v) for v in range(1, nPairs + 1)}
# | {(v, ne(v), ne(v), v, v) for v in range(1, nPairs + 1)})
|
AVATAR_AUTO_GENERATE_SIZES = 150
# Control the forms that django-allauth uses
ACCOUNT_FORMS = {
"login": "allauth.account.forms.LoginForm",
"add_email": "allauth.account.forms.AddEmailForm",
"change_password": "allauth.account.forms.ChangePasswordForm",
"set_password": "allauth.account.forms.SetPasswordForm",
"reset_password": "allauth.account.forms.ResetPasswordForm",
"reset_password_from_key": "allauth.account.forms.ResetPasswordKeyForm",
"disconnect": "allauth.socialaccount.forms.DisconnectForm",
# Use our custom signup form
"signup": "ool.users.forms.UserCreationFormX",
}
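# A hedged sketch, not part of this settings file: one minimal shape the
# referenced "ool.users.forms.UserCreationFormX" could take if it extends
# allauth's SignupForm (the extra "name" field is purely illustrative):
#
#     from allauth.account.forms import SignupForm
#     from django import forms
#
#     class UserCreationFormX(SignupForm):
#         name = forms.CharField(max_length=255, required=False)
#
#         def save(self, request):
#             user = super().save(request)
#             user.name = self.cleaned_data.get("name", "")
#             user.save()
#             return user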
|
from typing import List
"""
Rotating the array is equivalent to reversing it three times:
first, reverse the whole array;
second, reverse the first k elements;
third, reverse the remaining elements.
Time complexity: O(N)
Space complexity: O(1)
"""
class Solution:
def rotate(self, nums: List[int], k: int) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
if k == 0:
return
else:
k = k % len(nums) # if k is greater than length of array.
self.flip(nums, 0, len(nums)-1)
self.flip(nums,0, k-1)
self.flip(nums, k, len(nums)-1)
return
def flip(self, nums, l, r):
if l > r or l < 0 or r > len(nums)-1:
return
while l <= r:
nums[l], nums[r] = nums[r], nums[l]
l += 1
r -= 1
return
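# A small usage sketch (not part of the original solution) to sanity-check the
# three-reversal rotation on a concrete example:
if __name__ == "__main__":
    nums = [1, 2, 3, 4, 5, 6, 7]
    Solution().rotate(nums, 3)
    print(nums)  # expected: [5, 6, 7, 1, 2, 3, 4]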
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
def _tensors(ts, name):
if isinstance(ts, (list, tuple)):
assert len(ts) > 0, "{} cannot be empty".format(name)
for each_t in ts:
assert isinstance(
each_t, paddle.Tensor
) or each_t is None, "Elements of {} must be paddle.Tensor or None".format(
name)
return list(ts)
else:
assert isinstance(ts, paddle.Tensor), "{} must be Tensor".format(name)
return [ts]
def _stack_tensor_or_return_none(origin_list):
assert len(origin_list) > 0, "Cannot stack an empty list"
return paddle.stack(
origin_list, axis=0) if isinstance(origin_list[0],
paddle.Tensor) else None
def _replace_none_with_zero_tensor(t, spec_t):
if t is None:
zero_t = paddle.zeros(shape=spec_t.shape, dtype=spec_t.dtype)
zero_t.stop_gradient = spec_t.stop_gradient
return zero_t
else:
return t
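# An illustrative usage sketch, not part of the original helpers (assumes a
# working paddle installation): normalize mixed inputs into a list, replace a
# missing entry with zeros shaped like a reference tensor, then stack.
if __name__ == "__main__":
    x = paddle.to_tensor([1.0, 2.0, 3.0])
    xs = _tensors([x, None], "inputs")                      # -> [Tensor, None]
    filled = [_replace_none_with_zero_tensor(t, x) for t in xs]
    print(_stack_tensor_or_return_none(filled).shape)       # [2, 3]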
|
from django.apps import AppConfig
class CoderDojoChiConfig(AppConfig):
name = "coderdojochi"
verbose_name = "CoderDojoChi"
def ready(self):
import coderdojochi.signals_handlers
|
print("How many primes do you want?")
N = int(input())
count = 0
a = 1
while count < N:
    a = a + 1
    j = 2
    while j < a:
        if a % j == 0:
            # a is not prime, divisible by j
            break
        j = j + 1
    else:
        print(a)  # a is prime, divisible only by 1 and itself
        count += 1
|
#
# demo_kmeans.py
#
import kmeans
import numpy as np
import matplotlib.pyplot as plt
data = []
number_of_clusters = 5
points_per_cluster = 25
np.random.seed(4)
'''
Generate random clusters.
'''
for _ in range(number_of_clusters):
ages_centroid = np.random.uniform(20.0, 70.0)
income_centroid = np.random.uniform(100000.0, 500000.0)
age = np.random.normal(ages_centroid, 2.0, points_per_cluster)
income = np.random.normal(income_centroid, 10000.0, points_per_cluster)
points = [[age[i], income[i]] for i in range(len(age))]
data.extend(points)
'''
Use k-means to identify the clusters and compute their centroids.
'''
centroids, assignments, data = kmeans.centroids(k=number_of_clusters, data=data, max_iterations=100000)
'''
Assign each data point a color associated to their respective cluster.
'''
colors = ['blue', 'green', 'purple', 'orange', 'pink']
for i in range(len(data)):
data[i].append(colors[assignments[i]])
'''
Plot the data points colored to represent the cluster they belong to.
'''
ages = [x[0] for x in data]
incomes = [x[1] for x in data]
colors = [x[2] for x in data]
centroids_x = [x[0] for x in centroids]
centroids_y = [y[1] for y in centroids]
plt.xlabel('age')
plt.ylabel('income')
plt.scatter(ages, incomes, c=colors)
plt.scatter(centroids_x, centroids_y, c='red')
plt.show()
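# Hedged sketch, not the actual kmeans module imported above: a minimal Lloyd's
# algorithm with the interface this demo assumes, i.e. kmeans.centroids(k, data,
# max_iterations) returning (centroids, assignments, data).
#
#     def centroids(k, data, max_iterations):
#         pts = np.array(data, dtype=float)
#         centers = pts[np.random.choice(len(pts), k, replace=False)]
#         for _ in range(max_iterations):
#             dists = np.linalg.norm(pts[:, None, :] - centers[None, :, :], axis=2)
#             labels = dists.argmin(axis=1)
#             new_centers = np.array([pts[labels == i].mean(axis=0) if np.any(labels == i)
#                                     else centers[i] for i in range(k)])
#             if np.allclose(new_centers, centers):
#                 break
#             centers = new_centers
#         return centers.tolist(), labels.tolist(), [list(p) for p in pts]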
|