content
stringlengths 5
1.05M
|
|---|
# Read three lines from stdin and print the length of the shortest one.
print(min(len(input()) for _ in range(3)))
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
class Inception_v3_version2(nn.Module):
    """Inception-v3 backbone truncated after Mixed_6e.

    Reuses torchvision's Inception-v3 stem and inception blocks up to
    Mixed_6e (the deeper Mixed_7* blocks and the classifier head are
    dropped).  forward() returns both the deepest feature map and a fused
    feature map built FPN-style from Mixed_5d and upsampled Mixed_6e.
    """

    def __init__(self, config):
        super(Inception_v3_version2, self).__init__()
        # Load ImageNet weights either from a local checkpoint path
        # (config.pretrained) or via torchvision's pretrained download.
        if config.pretrained is not None:
            inception = models.inception_v3(pretrained=False)
            state_dict = torch.load(config.pretrained)
            inception.load_state_dict(state_dict)
        else:
            inception = models.inception_v3(pretrained=True)
        # Borrow the stem convolutions and inception blocks up to Mixed_6e.
        self.Conv2d_1a_3x3 = inception.Conv2d_1a_3x3
        self.Conv2d_2a_3x3 = inception.Conv2d_2a_3x3
        self.Conv2d_2b_3x3 = inception.Conv2d_2b_3x3
        self.Conv2d_3b_1x1 = inception.Conv2d_3b_1x1
        self.Conv2d_4a_3x3 = inception.Conv2d_4a_3x3
        self.Mixed_5b = inception.Mixed_5b
        self.Mixed_5c = inception.Mixed_5c
        self.Mixed_5d = inception.Mixed_5d
        self.Mixed_6a = inception.Mixed_6a
        self.Mixed_6b = inception.Mixed_6b
        self.Mixed_6c = inception.Mixed_6c
        self.Mixed_6d = inception.Mixed_6d
        self.Mixed_6e = inception.Mixed_6e
        # 1x1 projection of the concatenated features:
        # 288 channels (Mixed_5d) + 768 channels (Mixed_6e) = 1056 in.
        self.lateral_conv = nn.Conv2d(1056, 256, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        """Runs the truncated backbone.

        Returns:
            (x, outputs): x is the Mixed_6e feature map (768 channels);
            outputs is the 256-channel fused projection of Mixed_5d and
            the upsampled Mixed_6e features.
        """
        outputs = []
        # The spatial-size comments below assume a 299 x 299 x 3 input.
        # 299 x 299 x 3
        x = self.Conv2d_1a_3x3(x)
        # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x)
        # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x)
        # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x)
        # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x)
        # 71 x 71 x 192
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 35 x 35 x 192
        x = self.Mixed_5b(x)
        # 35 x 35 x 256
        x = self.Mixed_5c(x)
        # 35 x 35 x 288
        x = self.Mixed_5d(x)
        # 35 x 35 x 288
        # (87,157): presumably the spatial size for the project's actual
        # (non-square) input resolution — TODO confirm against callers.
        prev_shape = x.shape[-2:]
        outputs.append(x)
        x = self.Mixed_6a(x)
        # 17 x 17 x 768
        x = self.Mixed_6b(x)
        # 17 x 17 x 768
        x = self.Mixed_6c(x)
        # 17 x 17 x 768
        x = self.Mixed_6d(x)
        # 17 x 17 x 768
        x = self.Mixed_6e(x)
        # 17 x 17 x 768
        # Upsample the deep features back to Mixed_5d's spatial size so the
        # two maps can be concatenated channel-wise.
        out = F.interpolate(x, size=prev_shape, mode='bilinear', align_corners=True)
        outputs.append(out)
        outputs = torch.cat(outputs, dim=1)
        outputs = self.lateral_conv(outputs)
        return x,outputs
|
#!/usr/bin/env python
import click
import pycomplete
@click.group()
@click.version_option()
def cli():
    """Top-level command group; subcommands are attached via @cli.command()."""
    pass
@cli.command()
# NOTE: click arguments do not accept a `help` keyword (click raises a
# TypeError at import time for it); argument documentation belongs in the
# command docstring instead.
@click.argument("shell", default=None, required=False)
@click.pass_context
def completion(ctx, shell=None):
    """Show completion script for given shell.

    SHELL is the shell to generate the script for (e.g. bash, zsh, fish);
    it is auto-detected when omitted.
    """
    completer = pycomplete.Completer(ctx)
    print(completer.render(shell))
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkunimkt.endpoint import endpoint_data
class ScanCodeNotificationRequest(RpcRequest):
    """RPC request for the UniMkt 'ScanCodeNotification' API (2018-12-12).

    Generated-style request object: every query parameter has a
    get_X/set_X accessor pair that reads from / writes to the request's
    query-parameter dict.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'UniMkt', '2018-12-12', 'ScanCodeNotification')
        self.set_protocol_type('https')
        self.set_method('POST')
        # Endpoint resolution data is only attached when the installed core
        # SDK version supports these attributes on the request object.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    # --- Generated query-parameter accessors ---------------------------------

    def get_RealCostAmount(self):
        return self.get_query_params().get('RealCostAmount')

    def set_RealCostAmount(self,RealCostAmount):
        self.add_query_param('RealCostAmount',RealCostAmount)

    def get_SalePrice(self):
        return self.get_query_params().get('SalePrice')

    def set_SalePrice(self,SalePrice):
        self.add_query_param('SalePrice',SalePrice)

    def get_CommodityId(self):
        return self.get_query_params().get('CommodityId')

    def set_CommodityId(self,CommodityId):
        self.add_query_param('CommodityId',CommodityId)

    def get_HolderId(self):
        return self.get_query_params().get('HolderId')

    def set_HolderId(self,HolderId):
        self.add_query_param('HolderId',HolderId)

    def get_DeviceType(self):
        return self.get_query_params().get('DeviceType')

    def set_DeviceType(self,DeviceType):
        self.add_query_param('DeviceType',DeviceType)

    def get_DeviceCode(self):
        return self.get_query_params().get('DeviceCode')

    def set_DeviceCode(self,DeviceCode):
        self.add_query_param('DeviceCode',DeviceCode)

    def get_ApplyPrice(self):
        return self.get_query_params().get('ApplyPrice')

    def set_ApplyPrice(self,ApplyPrice):
        self.add_query_param('ApplyPrice',ApplyPrice)

    def get_TaskId(self):
        return self.get_query_params().get('TaskId')

    def set_TaskId(self,TaskId):
        self.add_query_param('TaskId',TaskId)

    def get_OuterCode(self):
        return self.get_query_params().get('OuterCode')

    def set_OuterCode(self,OuterCode):
        self.add_query_param('OuterCode',OuterCode)

    def get_QueryStr(self):
        return self.get_query_params().get('QueryStr')

    def set_QueryStr(self,QueryStr):
        self.add_query_param('QueryStr',QueryStr)

    def get_Phase(self):
        return self.get_query_params().get('Phase')

    def set_Phase(self,Phase):
        self.add_query_param('Phase',Phase)

    def get_BizResult(self):
        return self.get_query_params().get('BizResult')

    def set_BizResult(self,BizResult):
        self.add_query_param('BizResult',BizResult)

    def get_TaskType(self):
        return self.get_query_params().get('TaskType')

    def set_TaskType(self,TaskType):
        self.add_query_param('TaskType',TaskType)

    def get_BrandUserId(self):
        return self.get_query_params().get('BrandUserId')

    def set_BrandUserId(self,BrandUserId):
        self.add_query_param('BrandUserId',BrandUserId)

    def get_Sex(self):
        return self.get_query_params().get('Sex')

    def set_Sex(self,Sex):
        self.add_query_param('Sex',Sex)

    def get_CostDetail(self):
        return self.get_query_params().get('CostDetail')

    def set_CostDetail(self,CostDetail):
        self.add_query_param('CostDetail',CostDetail)

    def get_ProxyUserId(self):
        return self.get_query_params().get('ProxyUserId')

    def set_ProxyUserId(self,ProxyUserId):
        self.add_query_param('ProxyUserId',ProxyUserId)

    def get_AlipayOpenId(self):
        return self.get_query_params().get('AlipayOpenId')

    def set_AlipayOpenId(self,AlipayOpenId):
        self.add_query_param('AlipayOpenId',AlipayOpenId)

    def get_BizType(self):
        return self.get_query_params().get('BizType')

    def set_BizType(self,BizType):
        self.add_query_param('BizType',BizType)

    def get_BrandNick(self):
        return self.get_query_params().get('BrandNick')

    def set_BrandNick(self,BrandNick):
        self.add_query_param('BrandNick',BrandNick)

    def get_V(self):
        return self.get_query_params().get('V')

    def set_V(self,V):
        self.add_query_param('V',V)

    def get_ChargeTag(self):
        return self.get_query_params().get('ChargeTag')

    def set_ChargeTag(self,ChargeTag):
        self.add_query_param('ChargeTag',ChargeTag)

    def get_Age(self):
        return self.get_query_params().get('Age')

    def set_Age(self,Age):
        self.add_query_param('Age',Age)

    def get_ChannelId(self):
        return self.get_query_params().get('ChannelId')

    def set_ChannelId(self,ChannelId):
        self.add_query_param('ChannelId',ChannelId)

    def get_Cid(self):
        return self.get_query_params().get('Cid')

    def set_Cid(self,Cid):
        self.add_query_param('Cid',Cid)
|
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test list builder."""
import abc
import ast
import collections.abc
import copy
import json
import logging
import os
from cros.factory.test import i18n
from cros.factory.test.i18n import translation
from cros.factory.test.rules import phase
from cros.factory.test import state
from cros.factory.test.state import TestState
from cros.factory.test.test_lists import test_object as test_object_module
from cros.factory.test.utils import selector_utils
from cros.factory.utils import config_utils
from cros.factory.utils import debug_utils
from cros.factory.utils import shelve_utils
from cros.factory.utils import type_utils
# String prefix marking a value that must be evaluated as a Python
# expression when test args are resolved (see ITestList.ResolveTestArgs).
EVALUATE_PREFIX = 'eval! '
# String prefix marking a value that must be translated (see MayTranslate).
TRANSLATE_PREFIX = 'i18n! '
# Sentinel placed in the resolve cache while a test object is being
# resolved; encountering it again means an inheritance loop.
_DUMMY_CACHE = object()
# Name used when logging exceptions caught via debug_utils.CatchException.
_LOGGED_NAME = 'TestListManager'
def MayTranslate(obj, force=False):
  """Translates a string if it starts with 'i18n! ' or force=True.

  Args:
    obj: a string (optionally carrying the 'i18n! ' prefix), or an
      already-translated dict which is passed through untouched.
    force: force translation even if the string does not start with
      'i18n! '.

  Returns:
    A translation dict or string.

  Raises:
    TypeError: if obj is neither a dict nor a string.
  """
  if isinstance(obj, dict):
    return obj
  if not isinstance(obj, str):
    raise TypeError('not a string')
  prefixed = obj.startswith(TRANSLATE_PREFIX)
  if prefixed:
    return i18n.Translated(obj[len(TRANSLATE_PREFIX):])
  if force:
    return i18n.Translated(obj)
  return obj
class Options:
  """Test list options.

  These may be set by assigning to the options variable in a test list,
  e.g.::

    test_list.options.auto_run_on_start = False
  """
  # Allowable types for an option (defaults to the type of the default
  # value).
  _types = {}

  auto_run_on_start = True
  """If set to True, then the test list is automatically started when
  the test harness starts. If False, then the operator will have to
  manually start a test."""

  retry_failed_on_start = False
  """If set to True, then the failed tests are automatically retried
  when the test harness starts. It is effective when auto_run_on_start
  is set to True."""

  clear_state_on_start = False
  """If set to True, the state of all tests is cleared each time the
  test harness starts."""

  auto_run_on_keypress = False
  """If set to True, the test harness will perform an auto-run whenever
  the operator switches to any test."""

  ui_locale = translation.DEFAULT_LOCALE
  """The default UI locale."""

  engineering_password_sha1 = None
  """SHA1 hash for an engineering password in the UI. Use None to
  always enable engineering mode.

  To enter engineering mode, an operator may press Ctrl-Alt-0 and
  enter this password. Certain special functions in the UI (such as
  being able to arbitrarily run any test) will be enabled. Pressing
  Ctrl-Alt-0 will exit engineering mode.

  In order to keep the password hidden from operator (even if they
  happen to see the test list file), the actual password is not stored
  in the test list; rather, a hash is. To generate the hash, run:

  .. parsed-literal::

    echo -n `password` | sha1sum

  For example, for a password of ``test0000``, run::

    echo -n test0000 | sha1sum

  This will display a hash of ``266abb9bec3aff5c37bd025463ee5c14ac18bfca``,
  so you should set::

    test.list.options.engineering_password_sha1 = \
        '266abb9bec3aff5c37bd025463ee5c14ac18bfca'
  """
  _types['engineering_password_sha1'] = (type(None), str)

  sync_event_log_period_secs = None
  """Send events to the factory server when it is reachable at this
  interval. Set to ``None`` to disable."""
  _types['sync_event_log_period_secs'] = (type(None), int)

  update_period_secs = None
  """Automatically check for updates at the given interval. Set to
  ``None`` to disable."""
  _types['update_period_secs'] = (type(None), int)

  stop_on_failure = False
  """Whether to stop on any failure."""

  hooks_class = 'cros.factory.goofy.hooks.Hooks'
  """Hooks class for the factory test harness. Defaults to a dummy class."""

  testlog_hooks = 'cros.factory.testlog.hooks.Hooks'
  """Hooks class for Testlog event. Defaults to a dummy class."""

  phase = None
  """Name of a phase to set. If None, the phase is unset and the
  strictest (PVT) checks are applied."""
  _types['phase'] = (type(None), str)

  dut_options = {}
  """Options for DUT target. Automatically inherits from parent node.
  Valid options include::

    {'link_class': 'LocalLink'},  # To run tests locally.
    {'link_class': 'ADBLink'},  # To run tests via ADB.
    {'link_class': 'SSHLink', 'host': TARGET_IP},  # To run tests over SSH.

  See :py:attr:`cros.factory.device.device_utils` for more information."""

  plugin_config_name = 'goofy_plugin_chromeos'
  """Name of the config to be loaded for running Goofy plugins."""
  _types['plugin_config_name'] = (type(None), str)

  read_device_data_from_vpd_on_init = True
  """Read device data from VPD in goofy._InitStates()."""

  skipped_tests = {}
  """A list of tests that should be skipped.

  The content of ``skipped_tests`` should be::

    {
      "<phase>": [ <pattern> ... ],
      "<run_if expr>": [ <pattern> ... ]
    }

  For example::

    {
      'PROTO': [
        'SMT.AudioJack',
        'SMT.SpeakerDMic',
        '*.Fingerprint'
      ],
      'EVT': [
        'SMT.AudioJack',
      ],
      'not device.component.has_touchscreen': [
        '*.Touchscreen'
      ],
      'device.factory.end_SMT': [
        'SMT'
      ]
    }

  If the pattern starts with ``*``, then it will match for all tests with same
  suffix. For example, ``*.Fingerprint`` matches ``SMT.Fingerprint``,
  ``FATP.FingerPrint``, ``FOO.BAR.Fingerprint``. But it does not match for
  ``SMT.Fingerprint_2`` (Generated ID when there are duplicate IDs).
  """

  waived_tests = {}
  """Tests that should be waived according to current phase.

  See ``skipped_tests`` for the format"""

  def CheckValid(self):
    """Throws a TestListError if there are any invalid options."""
    # Make sure no errant options, or options with weird types,
    # were set.  Only instance attributes (options actually assigned on
    # this object) are checked; class attributes are the defaults.
    default_options = Options()
    errors = []
    for key in sorted(self.__dict__):
      if not hasattr(default_options, key):
        errors.append('Unknown option %s' % key)
        continue
      value = getattr(self, key)
      # An option's allowed types come from _types if registered there,
      # otherwise from the type of its default value.
      allowable_types = Options._types.get(
          key, [type(getattr(default_options, key))])
      if not any(isinstance(value, x) for x in allowable_types):
        errors.append('Option %s has unexpected type %s (should be %s)' %
                      (key, type(value), allowable_types))
    if errors:
      raise type_utils.TestListError('\n'.join(errors))

  def ToDict(self):
    """Returns a dict containing all values of the Options.

    This includes default values for keys not set on the Options.
    """
    # Class attributes whose names start with a lowercase letter are the
    # option defaults (methods are CamelCase and _types is
    # underscore-prefixed, so both are excluded by k[0].islower()).
    result = {
        k: v
        for k, v in self.__class__.__dict__.items() if k[0].islower()
    }
    # Instance attributes (explicitly-set options) override the defaults.
    result.update(self.__dict__)
    return result
class FactoryTestList(test_object_module.FactoryTest):
  """The root node for factory tests.

  Properties:
    path_map: A map from test paths to FactoryTest objects.
    source_path: The path to the file in which the test list was defined,
      if known. For new-style test lists only.
  """

  def __init__(self, subtests, state_instance, options, test_list_id,
               label=None, finish_construction=True, constants=None):
    """Constructor.

    Args:
      subtests: A list of subtests (FactoryTest instances).
      state_instance: The state instance to associate with the list.
        This may be left empty and set later.
      options: A TestListOptions object. This may be left empty
        and set later (before calling FinishConstruction).
      test_list_id: An optional ID for the test list. Note that this is
        separate from the FactoryTest object's 'id' member, which is always
        None for test lists, to preserve the invariant that a test's
        path is always starts with the concatenation of all 'id's of its
        ancestors.
      label: An optional label for the test list.
      finish_construction: Whether to immediately finalize the test
        list. If False, the caller may add modify subtests and options and
        then call FinishConstruction().
      constants: A type_utils.AttrDict object, which will be used to resolve
        'eval! ' dargs. See test.test_lists.manager.ITestList.ResolveTestArgs
        for how it is used.
    """
    super(FactoryTestList, self).__init__(_root=True, subtests=subtests)
    self.state_instance = state_instance
    # Flatten nested lists and drop any None entries.
    self.subtests = list(filter(None, type_utils.FlattenList(subtests)))
    self.path_map = {}
    self.root = self
    self.test_list_id = test_list_id
    self.state_change_callback = None
    self.options = options
    self.label = label
    self.source_path = None
    self.constants = type_utils.AttrDict(constants or {})
    if finish_construction:
      self.FinishConstruction()

  def FinishConstruction(self):
    """Finishes construction of the test list.

    Performs final validity checks on the test list (e.g., resolve duplicate
    IDs, check if required tests exist) and sets up some internal data
    structures (like path_map). This must be invoked after all nodes and
    options have been added to the test list, and before the test list is used.

    If finish_construction=True in the constructor, this is invoked in
    the constructor and the caller need not invoke it manually.

    When this function is called, self.state_instance might not be set
    (normally, it is set by goofy **after** FinishConstruction is called).

    Raises:
      TestListError: If the test list is invalid for any reason.
    """
    # _init and _check are inherited from FactoryTest (defined outside this
    # file); _init presumably assigns test paths and fills path_map —
    # TODO confirm against test_object.py.
    self._init(self.test_list_id + ':', self.path_map)

    # Resolve require_run paths to the actual test objects.
    for test in self.Walk():
      for requirement in test.require_run:
        requirement.test = self.LookupPath(
            self.ResolveRequireRun(test.path, requirement.path))
        if not requirement.test:
          raise type_utils.TestListError(
              "Unknown test %s in %s's require_run argument (note "
              'that full paths are required)'
              % (requirement.path, test.path))

    self.options.CheckValid()
    self._check()

  @staticmethod
  def ResolveRequireRun(test_path, requirement_path):
    """Resolve the test path for a requirement in require_run.

    If the path for the requirement starts with ".", then it will be
    interpreted as relative path to parent of test similar to Python's relative
    import syntax.

    For example:

      test_path  | requirement_path | returned path
      -----------+------------------+---------------
      a.b.c.d    | e.f              | e.f
      a.b.c.d    | .e.f             | a.b.c.e.f
      a.b.c.d    | ..e.f            | a.b.e.f
      a.b.c.d    | ...e.f           | a.e.f
    """
    if requirement_path.startswith('.'):
      # Each leading '.' climbs one level up from the test's parent.
      while requirement_path.startswith('.'):
        test_path = shelve_utils.DictKey.GetParent(test_path)
        requirement_path = requirement_path[1:]
      requirement_path = shelve_utils.DictKey.Join(test_path, requirement_path)
    return requirement_path

  def GetAllTests(self):
    """Returns all FactoryTest objects."""
    return list(self.path_map.values())

  def GetStateMap(self):
    """Returns a map of all FactoryTest objects to their TestStates."""
    # The state instance may return a dict (for the XML/RPC proxy)
    # or the TestState object itself. Convert accordingly.
    return dict(
        (self.LookupPath(k), TestState.FromDictOrObject(v))
        for k, v in self.state_instance.GetTestStates().items())

  def LookupPath(self, path):
    """Looks up a test from its path, or None if no such test exists."""
    # Paths without an explicit test list id default to this test list.
    if ':' not in path:
      path = self.test_list_id + ':' + path
    return self.path_map.get(path, None)

  def _UpdateTestState(self, path, **kwargs):
    """Updates a test state, invoking the state_change_callback if any.

    Internal-only; clients should call update_state directly on the
    appropriate TestState object.
    """
    ret, changed = self.state_instance.UpdateTestState(path=path, **kwargs)
    if changed and self.state_change_callback:
      self.state_change_callback(  # pylint: disable=not-callable
          self.LookupPath(path), ret)
    return ret

  def ToTestListConfig(self, recursive=True):
    """Output a JSON object that is a valid test_lists.schema.json object."""
    config = {
        'inherit': [],
        'label': self.label,
        'options': self.options.ToDict(),
        'constants': dict(self.constants),
    }
    if recursive:
      config['tests'] = [subtest.ToStruct() for subtest in self.subtests]
    return config

  def __repr__(self, recursive=False):
    # recursive=True pretty-prints the full test tree; the default is a
    # compact single-line summary without subtests.
    if recursive:
      return json.dumps(self.ToTestListConfig(recursive=True), indent=2,
                        sort_keys=True, separators=(',', ': '))
    return json.dumps(self.ToTestListConfig(recursive=False), sort_keys=True)
class ITestList(metaclass=abc.ABCMeta):
  """An interface of test list object."""

  # Declare instance variables to make __setattr__ happy.
  _checker = None

  def __init__(self, checker):
    self._checker = checker

  @abc.abstractmethod
  def ToFactoryTestList(self):
    """Convert this object to a FactoryTestList object.

    Returns:
      :rtype: cros.factory.test.test_lists.test_list.FactoryTestList
    """
    raise NotImplementedError

  def CheckValid(self):
    """Check if this can be converted to a FactoryTestList object."""
    if not self.ToFactoryTestList():
      raise type_utils.TestListError('Cannot convert to FactoryTestList')

  def __getattr__(self, name):
    """Redirects attribute lookup to ToFactoryTestList()."""
    logging.debug('getting: %s', name)
    return getattr(self.ToFactoryTestList(), name)

  def __setattr__(self, name, value):
    # Can only set an attribute that already exists (declared on the class
    # or previously assigned); everything else is rejected to catch typos.
    if hasattr(self, name):
      object.__setattr__(self, name, value)
    else:
      raise AttributeError('cannot set attribute %r' % name)

  @abc.abstractproperty
  def modified(self):
    """Whether the underlying source changed since the last load."""
    raise NotImplementedError

  def ReloadIfModified(self):
    """Reloads the test list (when self.modified == True)."""
    # default behavior, does nothing
    return

  @abc.abstractproperty
  def constants(self):
    """Constants made available to 'eval! ' expressions."""
    raise NotImplementedError

  def ResolveTestArgs(
      self, test_args, dut, station, constants=None, options=None,
      locals_=None, state_proxy=None):
    """Recursively resolves 'eval! ' and 'i18n! ' values in test arguments.

    Strings prefixed with EVALUATE_PREFIX are evaluated with dut, station,
    constants, options, locals_ and state_proxy in scope; strings prefixed
    with TRANSLATE_PREFIX are translated via MayTranslate. Containers are
    walked recursively and the result is converted to basic Python types.
    """
    self._checker.AssertValidArgs(test_args)

    if constants is None:
      constants = self.constants
    if options is None:
      options = self.options
    if state_proxy is None:
      state_proxy = state.GetInstance()
    locals_ = type_utils.AttrDict(locals_ or {})

    def ConvertToBasicType(value):
      # Normalize mappings and sequences to plain dicts/lists (lists and
      # tuples keep their own type); strings pass through unchanged.
      if isinstance(value, collections.abc.Mapping):
        return {k: ConvertToBasicType(v) for k, v in value.items()}
      if isinstance(value, str):
        return value
      if isinstance(value, (list, tuple)):
        return type(value)(ConvertToBasicType(v) for v in value)
      if isinstance(value, collections.abc.Sequence):
        return [ConvertToBasicType(v) for v in value]
      return value

    def ResolveArg(key, value):
      # `key` is only used to build a readable path for debug logging.
      if isinstance(value, collections.abc.Mapping):
        return {k: ResolveArg('%s[%r]' % (key, k), v)
                for k, v in value.items()}

      if isinstance(value, collections.abc.Sequence):
        # str is also a Sequence; only recurse into non-string sequences.
        if not isinstance(value, str):
          return [
              ResolveArg('%s[%d]' % (key, i), v) for i, v in enumerate(value)
          ]

      if not isinstance(value, str):
        return value

      if value.startswith(EVALUATE_PREFIX):
        logging.debug('Resolving argument %s: %s', key, value)
        expression = value[len(EVALUATE_PREFIX):]  # remove prefix
        return self.EvaluateExpression(
            expression, dut, station, constants, options, locals_, state_proxy)

      return MayTranslate(value)
    return ConvertToBasicType(
        {k: ResolveArg(k, v) for k, v in test_args.items()})

  @debug_utils.CatchException(_LOGGED_NAME)
  def SetSkippedAndWaivedTests(self):
    """Set skipped and waived tests according to phase and options.

    Since SKIPPED status is saved in state_instance, self.state_instance must be
    available at this moment. This functions reads skipped_tests and
    waived_tests options from self.options, for the format of these options,
    please check `cros.factory.test.test_lists.test_list.Options`.
    """
    assert self.state_instance is not None

    current_phase = self.options.phase
    # List of (match_function, action) pairs collected from the options.
    patterns = []

    def _AddPattern(pattern, action):
      pattern = pattern.split(':')[-1]  # To remove test_list_id
      if pattern.startswith('*'):
        # '*' prefix matches any test path with the same suffix.
        patterns.append((lambda s: s.endswith(pattern[1:]), action))
      else:
        patterns.append((lambda s: s == pattern, action))

    def _CollectPatterns(option, action):
      """Collect enabled patterns from test list options.

      Args:
        option: this should be `self.options.skipped_tests` or
          `self.options.waived_tests`
        action: the action that will be passed to _AddPattern
      """
      for key in option:
        if key in phase.PHASE_NAMES:
          # Phase-keyed entries only apply in the matching phase.
          if key != current_phase:
            continue
        else:  # Assume key is a run_if expression
          if not self._EvaluateRunIf(
              run_if=key,
              source='test list options',
              test_list=self,
              default=False):
            continue
        for pattern in option[key]:
          _AddPattern(pattern, action)

    def _MarkSkipped(test):
      """Mark a test as skipped.

      The test (and its subtests) statuses will become SKIPPED if they were not
      PASSED. And test.run_if will become constant false. So Goofy will always
      skip it.
      """
      test.Skip(forever=True)

    def _MarkWaived(test):
      """Mark all test and its subtests as waived.

      subtests should also be waived, so that subtests will become
      FAILED_AND_WAIVED when failed. And the status will be propagated to
      parents (this test).
      """
      test.Waive()

    _CollectPatterns(self.options.skipped_tests, _MarkSkipped)
    _CollectPatterns(self.options.waived_tests, _MarkWaived)

    for test_path, test in self.path_map.items():
      test_path = test_path.split(':')[-1]  # To remove test_list_id
      for match, action in patterns:
        if match(test_path):
          action(test)

  @staticmethod
  def EvaluateExpression(expression, dut, station, constants, options, locals_,
                         state_proxy):
    """Evaluates an 'eval! ' expression in the test-list namespace.

    NOTE: this uses eval() by design — expressions come from the (trusted)
    test list config, not from external input.
    """
    namespace = {
        'dut': dut,
        'station': station,
        'constants': constants,
        'options': options,
        'locals': locals_,
        'state_proxy': state_proxy,
        'device': state_proxy.data_shelf.device, }
    syntax_tree = ast.parse(expression, mode='eval')
    # Rewrite `device.x.y` into `device.x.y.Get(None)` before compiling.
    syntax_tree = NodeTransformer_AddGet(['device']).visit(syntax_tree)
    code_object = compile(syntax_tree, '<string>', 'eval')
    return eval(code_object, namespace)  # pylint: disable=eval-used

  @staticmethod
  def EvaluateRunIf(test, test_list):
    """Evaluate the run_if value of this test.

    Evaluates run_if argument to decide skipping the test or not. If run_if
    argument is not set, the test will never be skipped.

    Args:
      test: a FactoryTest object whose run_if will be checked
      test_list: the test list which is currently running, will get
        state_instance and constants from it.

    Returns:
      True if this test should be run, otherwise False
    """
    return ITestList._EvaluateRunIf(
        test.run_if, test.path, test_list, default=True)

  @staticmethod
  def _EvaluateRunIf(run_if, source, test_list, default):
    """Real implementation of EvaluateRunIf.

    If anything went wrong, `default` will be returned.
    """
    if not isinstance(run_if, str):
      # run_if is not a function, not a string, just return default value
      return default

    state_instance = test_list.state_instance
    namespace = {
        'device': selector_utils.DataShelfSelector(
            state_instance, key='device'),
        'constants': selector_utils.DictSelector(value=test_list.constants),
    }
    try:
      syntax_tree = ast.parse(run_if, mode='eval')
      # Rewrite `device.*` / `constants.*` chains into `.Get(None)` calls.
      syntax_tree = NodeTransformer_AddGet(
          ['device', 'constants']).visit(syntax_tree)
      code_object = compile(syntax_tree, '<string>', 'eval')
      return eval(code_object, namespace)  # pylint: disable=eval-used
    except Exception:
      logging.exception('Unable to evaluate run_if %r for %s', run_if, source)
      return default

  # the following properties are required by goofy
  @abc.abstractproperty
  def state_instance(self):
    raise NotImplementedError

  @state_instance.setter
  def state_instance(self, state_instance):
    raise NotImplementedError

  @abc.abstractproperty
  def state_change_callback(self):
    raise NotImplementedError

  @state_change_callback.setter
  def state_change_callback(self, state_change_callback):
    raise NotImplementedError
class NodeTransformer_AddGet(ast.NodeTransformer):
  """Given a list of names, we will call `Get` function for you.

  For example, name_list=['device']::

    "device.foo.bar" ==> "device.foo.bar.Get(None)"

  where `None` is the default value for `Get` function.
  And `device.foo.bar.Get` will still be `device.foo.bar.Get`.
  """

  def __init__(self, name_list):
    super(NodeTransformer_AddGet, self).__init__()
    # Accept a single name as a convenience.
    if not isinstance(name_list, list):
      name_list = [name_list]
    self.name_list = name_list

  def visit_Attribute(self, node):
    """Convert the attribute.

    An attribute node will be: `var.foo.bar.baz`, and the node we got is the
    last attribute node (that is, we will visit `var.foo.bar.baz`, not
    `var.foo.bar` or its prefix). And NodeTransformer will not recursively
    process a node if it is processed, so we only need to worry about process a
    node twice.

    This will fail for code like::

      "eval! any(v.baz.Get() for v in [device.foo, device.bar])"

    But you can always rewrite it to::

      "eval! any(v for v in [device.foo.baz, device.bar.baz])"

    So it should be fine.
    """
    # Only rewrite value loads, and leave explicit `.Get` accesses alone.
    if isinstance(node.ctx, ast.Load) and node.attr != 'Get':
      # Walk down to the root of the attribute chain to find its base name.
      v = node
      while isinstance(v, ast.Attribute):
        v = v.value
      if isinstance(v, ast.Name) and v.id in self.name_list:
        new_node = ast.Call(
            func=ast.Attribute(
                attr='Get',
                value=node,
                ctx=node.ctx),
            # Use `None` as the default value for Get. This must be an
            # ast.Constant: compiling a Name node with id 'None' raises
            # ValueError on Python 3.8+. (The previous kwargs=None argument
            # is also dropped — `kwargs` is not a field of ast.Call in
            # Python 3, and recent CPython rejects unknown node fields.)
            args=[ast.Constant(value=None)],
            keywords=[])
        ast.copy_location(new_node, node)
        return ast.fix_missing_locations(new_node)
    return node
class TestList(ITestList):
  """A test list object represented by test list config.

  This object should act like a
  ``cros.factory.test.test_lists.test_list.FactoryTestList`` object.
  """

  # Declare instance variables to make __setattr__ happy.
  _loader = None
  _config = None
  _state_instance = None
  _state_change_callback = None

  # Variables starting with '_cached_' will be cleared by ReloadIfModified.
  _cached_test_list = None
  _cached_options = None
  _cached_constants = None

  def __init__(self, config, checker, loader):
    """Constructor.

    Args:
      config: the test list config object (dict-like, with 'tests',
        'label', 'definitions', ... keys).
      checker: checker used to validate 'eval! ' args and run_if strings.
      loader: presumably the loader that produced `config`, kept for
        reloads — usage is outside this view, TODO confirm.
    """
    super(TestList, self).__init__(checker)
    self._loader = loader
    self._config = config
    self._cached_test_list = None
    self._cached_options = None
    self._cached_constants = None
    self._state_instance = None
    self._state_change_callback = None
def ToFactoryTestList(self):
  """Returns the FactoryTestList, rebuilding it if the config was reloaded."""
  self.ReloadIfModified()
  if self._cached_test_list:
    return self._cached_test_list
  return self._ConstructFactoryTestList()
@debug_utils.NoRecursion
def _ConstructFactoryTestList(self):
  """Builds and caches a FactoryTestList from self._config."""
  subtests = []
  # Shared resolve cache for test-object inheritance (see ResolveTestObject).
  cache = {}
  for test_object in self._config['tests']:
    subtests.append(self.MakeTest(test_object, cache))

  # this might cause recursive call if self.options is not implemented
  # correctly. Put it in a single line for easier debugging.
  options = self.options

  self._cached_test_list = FactoryTestList(
      subtests, self._state_instance, options,
      test_list_id=self._config.test_list_id,
      label=MayTranslate(self._config['label'], force=True),
      finish_construction=True,
      constants=self.constants)

  # Handle override_args
  if 'override_args' in self._config:
    for key, override in self._config['override_args'].items():
      test = self._cached_test_list.LookupPath(key)
      if test:
        config_utils.OverrideConfig(test.dargs, override)

  self._cached_test_list.state_change_callback = self._state_change_callback
  self._cached_test_list.source_path = self._config.source_path

  if self._state_instance:
    # Make sure the state server knows about all the tests, defaulting to an
    # untested state.
    # TODO(stimim): this code is copied from goofy/goofy.py, we should check
    # if we really need both.
    for test in self._cached_test_list.GetAllTests():
      test.UpdateState(update_parent=False)
  return self._cached_test_list
def MakeTest(self,
             test_object,
             cache,
             default_action_on_failure=None,
             locals_=None):
  """Convert a test_object to a FactoryTest object.

  Args:
    test_object: a test-object dict (or a string naming a definition)
      from the test list config.
    cache: resolve cache shared across ResolveTestObject calls.
    default_action_on_failure: action_on_failure inherited from the
      parent's child_action_on_failure, applied when unset here.
    locals_: locals inherited from the parent, merged with this
      object's own 'locals' entry.
  """
  test_object = self.ResolveTestObject(
      test_object=test_object,
      test_object_name=None,
      cache=cache)

  if locals_ is None:
    locals_ = {}
  if 'locals' in test_object:
    # 'locals' entries may themselves contain 'eval! '/'i18n! ' values.
    locals_ = config_utils.OverrideConfig(
        locals_,
        self.ResolveTestArgs(
            test_object.pop('locals'),
            dut=None,
            station=None,
            locals_=locals_),
        copy_on_write=True)

  if not test_object.get('action_on_failure', None):
    test_object['action_on_failure'] = default_action_on_failure
  # child_action_on_failure propagates to subtests only.
  default_action_on_failure = test_object.pop('child_action_on_failure',
                                              default_action_on_failure)
  kwargs = copy.deepcopy(test_object)
  class_name = kwargs.pop('inherit', 'FactoryTest')

  subtests = []
  for subtest in test_object.get('subtests', []):
    subtests.append(self.MakeTest(
        subtest, cache, default_action_on_failure, locals_))

  # replace subtests
  kwargs['subtests'] = subtests
  kwargs['dargs'] = kwargs.pop('args', {})
  kwargs['locals_'] = locals_
  kwargs.pop('__comment', None)

  if kwargs.get('label'):
    kwargs['label'] = MayTranslate(kwargs['label'], force=True)

  # check if expressions are valid.
  self._checker.AssertValidArgs(kwargs['dargs'])
  if 'run_if' in kwargs and isinstance(kwargs['run_if'], str):
    self._checker.AssertValidRunIf(kwargs['run_if'])

  # Instantiate the named FactoryTest subclass from test_object_module.
  return getattr(test_object_module, class_name)(**kwargs)
  def ResolveTestObject(self, test_object, test_object_name, cache):
    """Returns a test object inherits all its parents field.

    Args:
      test_object: the dict (or string shorthand) to resolve.
      test_object_name: key of this object in the `definitions` section, or
        None for anonymous inline objects.
      cache: memo dict mapping definition names to resolved objects; also
        used for inheritance-loop detection via the _DUMMY_CACHE sentinel.

    Raises:
      TestListError: on an inheritance loop or an undefined parent.
    """
    if test_object_name in cache:
      if cache[test_object_name] == _DUMMY_CACHE:
        # We started resolving this name further up the call stack and came
        # back to it: the `inherit` chain forms a cycle.
        raise type_utils.TestListError(
            'Detected loop inheritance dependency of %s' % test_object_name)
      return cache[test_object_name]
    # syntactic sugar: if a test_object is just a string, it's equivalent to
    # {"inherit": string}, e.g.:
    #   "test_object" === {"inherit": "test_object"}
    if isinstance(test_object, str):
      resolved = self.ResolveTestObject({'inherit': test_object},
                                        test_object_name, cache)
      return resolved
    parent_name = test_object.get('inherit', 'FactoryTest')
    if parent_name not in self._config['definitions']:
      raise type_utils.TestListError(
          '%s inherits %s, which is not defined' % (test_object_name,
                                                    parent_name))
    if parent_name == test_object_name:
      # this test object inherits itself, it means that this object is a class
      # defined in cros.factory.test.test_lists.test_object
      # just save the object and return
      cache[test_object_name] = test_object
      return test_object
    if test_object_name:
      # Mark as "resolution in progress" for loop detection before recursing.
      cache[test_object_name] = _DUMMY_CACHE
      # syntax sugar, if id is not given, set id as test object name.
      #
      # According to test_object.py, considering I18n, the priority is:
      # 1. `label` must be specified, or it should come from pytest_name
      # 2. If not specified, `id` comes from label by stripping spaces and dots.
      # Resolved id may be changed in _init when there are duplicated id's found
      # in same path.
      #
      # However, in most of the case, test_object_name would be more like an ID,
      # for example,
      #   "ThermalSensors": {
      #     "pytest_name": "thermal_sensors"
      #   }
      # The label will be derived from pytest_name, "Thermal Sensors", while the
      # ID will be test_object_name, "ThermalSensors".
      if 'id' not in test_object:
        test_object['id'] = test_object_name
    parent_object = self._config['definitions'][parent_name]
    parent_object = self.ResolveTestObject(parent_object, parent_name, cache)
    # Child fields override the (deep-copied) parent's fields.
    test_object = config_utils.OverrideConfig(copy.deepcopy(parent_object),
                                              test_object)
    test_object['inherit'] = parent_object['inherit']
    if test_object_name:
      cache[test_object_name] = test_object
    return test_object
def ToTestListConfig(self, recursive=True):
if recursive:
return self._config.ToDict()
ret = self._config.ToDict()
ret.pop('tests', None)
return ret
def ReloadIfModified(self):
if not self.modified:
return
self._Reload()
  def ForceReload(self):
    """Bypass modification detection, force reload.

    Unlike ReloadIfModified, this reloads even when no dependency file has
    changed on disk.
    """
    logging.info('Force reloading test list')
    self._Reload()
  @debug_utils.NoRecursion
  def _Reload(self):
    """Reloads the test list config from disk.

    On success, swaps in the new config and drops every `_cached_*`
    attribute so it is lazily rebuilt. On failure, keeps the current config
    and disables further automatic reloads via _PreventReload. Either way a
    note is posted to the state server (best effort).
    """
    logging.debug('reloading test list %s', self._config.test_list_id)
    note = {
        'name': _LOGGED_NAME
    }
    try:
      new_config = self._loader.Load(self._config.test_list_id)
      # make sure the new test list is working, if it's not, will raise an
      # exception and self._config will not be changed.
      TestList(new_config, self._checker, self._loader).CheckValid()
      self._config = new_config
      # Invalidate every lazily-computed attribute derived from the config.
      for key in self.__dict__:
        if key.startswith('_cached_'):
          self.__dict__[key] = None
      self.SetSkippedAndWaivedTests()
      note['level'] = 'INFO'
      note['text'] = ('Test list %s is reloaded.' % self._config.test_list_id)
    except Exception:
      logging.exception('Failed to reload latest test list %s.',
                        self._config.test_list_id)
      self._PreventReload()
      note['level'] = 'WARNING'
      note['text'] = ('Failed to reload latest test list %s.' %
                      self._config.test_list_id)
    try:
      self._state_instance.AddNote(note)
    except Exception:
      # Best effort only -- the state server may be unavailable.
      pass
  def _PreventReload(self):
    """Update self._config to prevent reloading invalid test list."""
    # Refreshing the dependency timestamps makes `modified` return False, so
    # ReloadIfModified() will not retry a config that just failed to load.
    self._config.UpdateDependTimestamp()
  @property
  def modified(self):
    """Return True if the test list is considered modified, need to be reloaded.

    self._config.timestamp is when was the config last modified, if the config
    file or any of config files it inherits is changed after the timestamp, this
    function will return True.

    Returns:
      True if the test list config is modified, otherwise False.
    """
    # Note that this method can't catch all kind of potential modification.
    # For example, this property won't become `True` if the user add an
    # additional test list in /var/factory/config/ to override an existing one.
    for config_file, timestamp in self._config.GetDepend().items():
      if os.path.exists(config_file):
        # Any mtime difference (not only "newer") counts as modified.
        if timestamp != os.stat(config_file).st_mtime:
          return True
      elif timestamp is not None:
        # the file doesn't exist, and we think it should exist
        return True
    return False
  @property
  def constants(self):
    """Lazily-built AttrDict view of the config's `constants` section."""
    self.ReloadIfModified()
    if self._cached_constants:
      return self._cached_constants
    self._cached_constants = type_utils.AttrDict(self._config['constants'])
    return self._cached_constants
  # the following functions / properties are required by goofy
  @property
  def options(self):
    """Lazily-built, validated Options object from the `options` section."""
    self.ReloadIfModified()
    if self._cached_options:
      return self._cached_options
    self._cached_options = Options()

    class NotAccessable:
      # Placeholder passed as `options` while resolving the options section
      # itself, so option expressions cannot refer to other options.
      def __getattribute__(self, name):
        raise KeyError('options cannot depend on options')

    resolved_options = self.ResolveTestArgs(
        self._config['options'],
        constants=self.constants,
        options=NotAccessable(),
        dut=None,
        station=None)
    for key, value in resolved_options.items():
      setattr(self._cached_options, key, value)
    self._cached_options.CheckValid()
    return self._cached_options
  @property
  def state_instance(self):
    """Proxy to the factory state server used to record test states."""
    return self._state_instance

  @state_instance.setter
  def state_instance(self, state_instance):  # pylint: disable=arguments-differ
    self._state_instance = state_instance
    # Keep the already-built FactoryTestList (if any) pointed at the same
    # state server.
    self.ToFactoryTestList().state_instance = state_instance
  @property
  def state_change_callback(self):
    """Callback invoked on test state changes (delegated to the test list)."""
    return self.ToFactoryTestList().state_change_callback

  # pylint: disable=arguments-differ
  @state_change_callback.setter
  def state_change_callback(self, state_change_callback):
    self._state_change_callback = state_change_callback
    # Keep the already-built FactoryTestList in sync with the new callback.
    self.ToFactoryTestList().state_change_callback = state_change_callback
|
#!/usr/bin/env python3
from aws_cdk import core
import os
from vpc.vpc_stack import VpcStack

# CDK entry point: instantiate the VPC stack in the account/region the CDK
# CLI resolved (CDK_DEFAULT_ACCOUNT / CDK_DEFAULT_REGION are set by `cdk` at
# synth time), then emit the CloudFormation template.
app = core.App()
VpcStack(app, "vpc", env={'account': os.environ['CDK_DEFAULT_ACCOUNT'],
                          'region': os.environ['CDK_DEFAULT_REGION']})
app.synth()
|
from .env import collect_env
from .logger import get_root_logger
from .lr_scheduler import ClosedFormCosineLRScheduler
from .metric import accuarcy, mIoU
from .losses import BinaryFocalLoss, FocalLoss
from .window_utils import flat_gts, average_preds, flat_paths
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: drop the `name` column from Original and Cropped."""

    def forwards(self, orm):
        # Deleting field 'Original.name'
        db.delete_column('cropper_original', 'name')
        # Deleting field 'Cropped.name'
        db.delete_column('cropper_cropped', 'name')

    def backwards(self, orm):
        # Re-add the columns with an empty-string default so existing rows
        # remain valid; keep_default=False drops the default afterwards.
        # Adding field 'Original.name'
        db.add_column('cropper_original', 'name', self.gf('django.db.models.fields.CharField')(default='', max_length=255), keep_default=False)
        # Adding field 'Cropped.name'
        db.add_column('cropper_cropped', 'name', self.gf('django.db.models.fields.CharField')(default='', max_length=255), keep_default=False)

    # Frozen ORM snapshot used by South to materialize the `orm` argument.
    models = {
        'cropper.cropped': {
            'Meta': {'object_name': 'Cropped'},
            'h': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'h_display': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'original': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cropped'", 'to': "orm['cropper.Original']"}),
            'w': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'w_display': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'x': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'y': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'cropper.original': {
            'Meta': {'object_name': 'Original'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'image_height': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'image_width': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['cropper']
|
from __future__ import annotations
import e2cnn.group
from e2cnn.group import Representation
from typing import Callable, Any, List, Union, Dict
import numpy as np
__all__ = ["IrreducibleRepresentation"]
class IrreducibleRepresentation(Representation):

    def __init__(self,
                 group: e2cnn.group.Group,
                 name: str,
                 representation: Union[Dict[Any, np.ndarray], Callable[[Any], np.ndarray]],
                 size: int,
                 sum_of_squares_constituents: int,
                 supported_nonlinearities: List[str],
                 character: Union[Dict[Any, float], Callable[[Any], float]] = None,
                 **kwargs
                 ):
        """
        Describes an "*irreducible representation*" (*irrep*).
        It is a subclass of a :class:`~e2cnn.group.Representation`.

        Irreducible representations are the building blocks into which any other representation decomposes under a
        change of basis.
        Indeed, any :class:`~e2cnn.group.Representation` is internally decomposed into a direct sum of irreps.

        Args:
            group (Group): the group which is being represented
            name (str): an identification name for this representation
            representation (dict or callable): a callable implementing this representation or a dictionary
                    mapping each of the group's elements to its representation.
            size (int): the size of the vector space where this representation is defined (i.e. the size of the matrices)
            sum_of_squares_constituents (int): the sum of the squares of the multiplicities of pairwise distinct
                    irreducible constituents of the character of this representation over a non-splitting field
            supported_nonlinearities (list): list of nonlinearitiy types supported by this representation.
            character (callable or dict, optional): a callable returning the character of this representation for an
                    input element or a dictionary mapping each element to its character.
            **kwargs: custom attributes the user can set and, then, access from the dictionary
                    in :attr:`e2cnn.group.Representation.attributes`

        Attributes:
            sum_of_squares_constituents (int): the sum of the squares of the multiplicities of pairwise distinct
                    irreducible constituents of the character of this representation over a non-splitting field (see
                    `Character Orthogonality Theorem <https://groupprops.subwiki.org/wiki/Character_orthogonality_theorem#Statement_over_general_fields_in_terms_of_inner_product_of_class_functions>`_
                    over general fields)

        """
        # An irrep decomposes into exactly itself: pass [name] as the irreps
        # list and the identity as the change-of-basis matrix.
        super(IrreducibleRepresentation, self).__init__(group,
                                                        name,
                                                        [name],
                                                        np.eye(size),
                                                        supported_nonlinearities,
                                                        representation=representation,
                                                        character=character,
                                                        **kwargs)
        # Flag used elsewhere to distinguish irreps from composite representations.
        self.irreducible = True
        self.sum_of_squares_constituents = sum_of_squares_constituents
|
import os
from sleap.io.videowriter import VideoWriter, VideoWriterOpenCV
def test_video_writer(tmpdir, small_robot_mp4_vid):
    """VideoWriter.safe_builder picks a backend that can write a short clip."""
    out_path = os.path.join(tmpdir, "clip.avi")

    writer = VideoWriter.safe_builder(
        out_path,
        height=small_robot_mp4_vid.height,
        width=small_robot_mp4_vid.width,
        fps=small_robot_mp4_vid.fps,
    )
    # Write the first two frames of the fixture video.
    for frame_idx in (0, 1):
        writer.add_frame(small_robot_mp4_vid[frame_idx][0])
    writer.close()

    assert os.path.exists(out_path)
def test_cv_video_writer(tmpdir, small_robot_mp4_vid):
    """The OpenCV-backed writer produces an output file from two frames."""
    out_path = os.path.join(tmpdir, "clip.avi")

    writer = VideoWriterOpenCV(
        out_path,
        height=small_robot_mp4_vid.height,
        width=small_robot_mp4_vid.width,
        fps=small_robot_mp4_vid.fps,
    )
    # Write the first two frames of the fixture video.
    for frame_idx in (0, 1):
        writer.add_frame(small_robot_mp4_vid[frame_idx][0])
    writer.close()

    assert os.path.exists(out_path)
|
import glob
import pandas as pd
import os
from config import *
if __name__ == '__main__':
    # Pair each image tile with its sample row in the normalized count matrix
    # and write the aligned dataset/label tables.
    cm = pd.read_csv(os.path.join(DATASET_PATH, 'cm_norm.tsv'), header=0, sep='\t', index_col=0)
    if CONDITION_COLUMN:
        cm = cm.loc[cm[CONDITION_COLUMN] == CONDITION]

    # Build a lookup from normalized id -> matrix index label(s) once,
    # replacing the original O(n_images * n_samples) nested scan. A list is
    # kept per id so multiple labels normalizing to the same id still all
    # match, exactly as the original inner loop did.
    id_to_labels = {}
    for c in cm.index:
        id_to_labels.setdefault(c.replace("x", "_"), []).append(c)

    img_files = glob.glob(TILE_PATH + '/*/*.jpeg')
    sorted_img = []
    sorted_cm = []
    for img in img_files:
        # Tile filename (minus extension, dashes normalized) is the sample id.
        id_img = os.path.splitext(os.path.basename(img))[0].replace("-", "_")
        for c in id_to_labels.get(id_img, []):
            sorted_img.append(img)
            sorted_cm.append(c)

    # Reorder the matrix to the image order before exporting.
    cm = cm.reindex(sorted_cm)
    df = pd.DataFrame(data={'img': sorted_img,
                            'cm': sorted_cm,
                            'label': cm[LABEL_COLUMN]})
    df.to_csv(os.path.join(DATASET_PATH, 'dataset.tsv'), sep='\t')
    cm.to_csv(os.path.join(DATASET_PATH, "cm_final.tsv"), sep='\t')
|
from django.conf.urls import *
from . import views
# BUG FIX: the original patterns were unanchored; r'' matches *every* request
# path, so the staff and superuser views were unreachable. Anchor each regex
# with ^ (and $ where the whole path must match).
urlpatterns = [
    url(r'^$', views.index, name='reports.views.index'),
    url(r'^staff/$', views.staff, name='reports.views.staff'),
    url(r'^superuser$', views.superuser, name='reports.views.superuser'),
]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 16 18:58:10 2018
@author: jakec
"""
import os
import csv
row_list = []

# First budget file: fixed layout with a header row (skipped later via
# row_list[1:]).
csvpath = os.path.join('raw_data/budget_data_1.csv')
with open(csvpath, newline='') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')
    for row in csvreader:
        row_list.append(row)

# Second budget file: sniff for a header, and normalize its date format to
# match the first file by dropping two characters from the year.
filepath = os.path.join('raw_data/budget_data_2.csv')
with open(filepath, newline='') as data_file:
    has_header = csv.Sniffer().has_header(data_file.read(1024))
    data_file.seek(0)  # Rewind.
    csvreader = csv.reader(data_file)
    if has_header:
        next(csvreader)  # Skip header row.
    for row in csvreader:
        row[0] = row[0][:4] + row[0][6:]
        row_list.append(row)

months = 0
revenue = 0
# BUG FIX: max_rev/min_rev were initialized to 0, so when every value was
# positive, min_date was never assigned (NameError in the prints below), and
# when every value was negative, max_date was never assigned. Start from
# +/- infinity so the first data row always sets both extremes.
max_rev = float('-inf')
min_rev = float('inf')
max_date = None
min_date = None
for item in row_list[1:]:
    months = months + 1
    value = int(item[1])
    revenue = revenue + value
    if value > max_rev:
        max_date = item[0]
        max_rev = value
    if value < min_rev:
        min_date = item[0]
        min_rev = value

# NOTE(review): this is the average *revenue*; the label below says
# "Average Revenue Change" -- confirm which was intended.
avg_revenue = round(revenue / months)

print('')
print('Financial Analysis')
print('---------------')
print('Total Months: ' + str(months))
print('Total Revenue: ' + '$' + str(revenue))
print('Average Revenue Change: ' + '$' + str(avg_revenue))
print('Greatest Increase in Revenue: ' + max_date + ' ' + '($' + str(max_rev) + ')')
print('Greatest Decrease in Revenue: ' + min_date + ' ' + '($' + str(min_rev) + ')')

# Mirror the report to a text file.
with open('pybank_output.txt', 'w') as f:
    print('', file=f)
    print('Financial Analysis', file=f)
    print('---------------', file=f)
    print('Total Months: ' + str(months), file=f)
    print('Total Revenue: ' + '$' + str(revenue), file=f)
    print('Average Revenue Change: ' + '$' + str(avg_revenue), file=f)
    print('Greatest Increase in Revenue: ' + max_date + ' ' + '($' + str(max_rev) + ')', file=f)
    print('Greatest Decrease in Revenue: ' + min_date + ' ' + '($' + str(min_rev) + ')', file=f)
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for madi.utils.evaluation_utils."""
from madi.utils import evaluation_utils
class TestEvaluationUtils:
    """Checks compute_auc at its two extremes."""

    def test_compute_auc_max(self):
        # A perfect predictor (scores identical to labels) yields AUC 1.0.
        labels = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]
        assert evaluation_utils.compute_auc(labels, labels) == 1.0

    def test_compute_auc_min(self):
        # A perfectly inverted predictor yields AUC 0.0.
        labels = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]
        inverted = [float(not bool(y)) for y in labels]
        assert evaluation_utils.compute_auc(labels, inverted) == 0.0
|
# -*- coding: utf-8 -*-
'''
Author: TJUZQC
Date: 2020-09-08 14:05:31
LastEditors: TJUZQC
LastEditTime: 2020-09-27 17:26:24
Description: None
'''
import glob
import os
import threading
import cv2
import multiresolutionimageinterface as mir
import numpy as np
from libtiff import TIFF
from matplotlib import pyplot as plt
from PIL import Image
# Derive one Annotation's bounding box from the XML labels; the returned box
# is padded 200px beyond the true boundary on each side.
def getPositionAndSize(annotation):
    """Return the padded bounding box of an annotation.

    Args:
        annotation: object whose getCoordinates() yields points exposing
            getX()/getY().

    Returns:
        (x, y, width, height) ints: top-left corner shifted 200px up/left and
        the extents grown by 400px, i.e. a 200px margin on every side.
    """
    X_min = None
    Y_min = None
    X_max = None
    Y_max = None
    for coordinate in annotation.getCoordinates():
        x, y = coordinate.getX(), coordinate.getY()
        # BUG FIX: the None check must come first. The original wrote
        # `coordinate.getX() > X_max or X_max is None`, which evaluates
        # `value > None` before the None guard and raises TypeError on
        # Python 3 at the very first coordinate.
        if X_max is None or x > X_max:
            X_max = x
        if X_min is None or x < X_min:
            X_min = x
        if Y_min is None or y < Y_min:
            Y_min = y
        if Y_max is None or y > Y_max:
            Y_max = y
    return int(X_min)-200, int(Y_min)-200, int(X_max - X_min)+400, int(Y_max - Y_min) + 400
# Extract patches.
def __getPatch(pathlist, start, end):
    """Worker routine: cut annotation patches for slides pathlist[start:end].

    Each slide directory must contain exactly one '*.ndpi' whole-slide image,
    one '*.xml' annotation file and one '*_mask.tiff' mask. For every
    annotation, an image patch and the matching mask patch are saved under
    <path>/patch/imgs and <path>/patch/masks.

    Args:
        pathlist: list of slide directories.
        start: index of the first slide to process.
        end: one-past-last index, or -1 to process through the end of the list.
    """
    print('getting start from {} to {}'.format(start, end))
    pathlist = pathlist[start:end] if end != -1 else pathlist[start:]
    for path in pathlist:
        print(path)
        img_name = glob.glob(os.path.join(path, '*.ndpi'))
        xml_name = glob.glob(os.path.join(path, '*.xml'))
        mask_name = glob.glob(os.path.join(path, '*_mask.tiff'))
        assert len(
            img_name) == 1, 'failed to get image {} : no image or multi image'.format(img_name)
        assert len(
            xml_name) == 1, 'failed to get xml label {} : no xml label or multi xml label'.format(xml_name)
        assert len(mask_name) == 1, 'failed to get mask {} : no mask or multi mask'.format(
            mask_name)
        img_name = img_name[0]
        xml_name = xml_name[0]
        mask_name = mask_name[0]
        img_reader = mir.MultiResolutionImageReader()
        mask_reader = mir.MultiResolutionImageReader()
        img = img_reader.open(img_name)
        mask = mask_reader.open(mask_name)
        # Load the annotation list from the XML sidecar file.
        annotation_list = mir.AnnotationList()
        xml_repository = mir.XmlRepository(annotation_list)
        xml_repository.setSource(xml_name)
        xml_repository.load()
        # annotation_group = annotation_list.getGroup('Annotation Group 0')
        annotations = annotation_list.getAnnotations()
        del xml_repository
        # Create <path>/patch/{imgs,masks} output directories on demand.
        if not os.path.exists(os.path.join(path, 'patch')):
            os.mkdir(os.path.join(path, 'patch'))
        if not os.path.exists(os.path.join(path, 'patch', 'imgs')):
            os.mkdir(os.path.join(path, 'patch', 'imgs'))
        if not os.path.exists(os.path.join(path, 'patch', 'masks')):
            os.mkdir(os.path.join(path, 'patch', 'masks'))
        for idx, annotation in enumerate(annotations):
            x, y, width, height = getPositionAndSize(annotation)
            level_0_width, level_0_height = img.getLevelDimensions(0)
            level_1_width, level_1_height = img.getLevelDimensions(1)
            # Rescale the extents from level 0 to level 1 resolution.
            # NOTE(review): x/y are deliberately left untouched (the symmetric
            # lines are commented out) -- presumably getUInt16Patch expects a
            # level-0 origin with level-1 extents; confirm against the mir API.
            # x *= level_1_width/level_0_width
            # y *= level_1_height/level_0_height
            width *= level_1_width/level_0_width
            height *= level_1_height/level_0_height
            x, y, width, height = int(x), int(y), int(width), int(height)
            patch_img = img.getUInt16Patch(x, y, width, height, 1)
            patch_img = np.array(patch_img, dtype=np.int8)
            patch_img = Image.fromarray(patch_img, mode='RGB')
            patch_img.save(os.path.join(
                path, 'patch', 'imgs', os.path.splitext(os.path.basename(img_name))[0]+'-{}.png'.format(idx)))
            del patch_img
            # Only the first channel of the mask is kept, stored as 8-bit 'L'.
            patch_mask = mask.getUInt16Patch(x, y, width, height, 1)
            patch_mask = np.array(patch_mask, dtype=np.int8)
            patch_mask = Image.fromarray(patch_mask[:, :, 0], mode='L')
            patch_mask.save(os.path.join(
                path, 'patch', 'masks', os.path.splitext(os.path.basename(img_name))[0]+'-{}.png'.format(idx)))
            del patch_mask
def getPatch(pathlist, num_works):
    """Split `pathlist` across `num_works` threads and extract patches.

    Args:
        pathlist: list of slide directories handed to __getPatch.
        num_works: number of worker threads to spawn.
    """
    num = int(len(pathlist)/num_works) if int(len(pathlist)/num_works) > 1 else 1
    threads = []
    for work_idx in range(num_works):
        # BUG FIX: the last worker now processes through the end of the list
        # (-1 is __getPatch's "until the end" sentinel); the original dropped
        # any remainder paths beyond num_works * num.
        end = -1 if work_idx == num_works - 1 else (work_idx + 1) * num
        t = threading.Thread(target=__getPatch, args=(pathlist, work_idx*num, end))
        t.daemon = True  # setDaemon() is deprecated; set the attribute.
        threads.append(t)
    # BUG FIX: start all workers first, then join them all. The original
    # called join() inside the start loop, which ran the "threads" strictly
    # one after another (no parallelism at all).
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print('All threads is done!')
if __name__ == '__main__':
    # Raw string: the original relied on invalid escape sequences such as
    # '\T' and '\D' (a DeprecationWarning today, a SyntaxError in future
    # Python). The resulting path value is unchanged.
    path = r'G:\TJUZQC\DataSet\Beijing-small_cell_lung_cancer-pathology\2020-01-20 10.39.42'
    pathlist = glob.glob(path)
    getPatch(pathlist, 6)
|
import torch
import numpy as np
import math
from scripts.config import Config
import cv2
# Math utilities for model creating, training, and testing.
cfg = Config()
def calculate_psnr(img1, img2, mask):
maskSize = cv2.countNonZero(mask.numpy())
print(maskSize)
mse = np.sum( (img2 - img1) ** 2 ) / (maskSize*3) # * 3 is for Channel
if mse == 0:
return 100
PIXEL_MAX = cfg.max_pixel_value
return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
def extract_image_patches(images, ksizes, strides, rates, padding='same'):
    """
    Input:
        images: [batch, channels, in_rows, in_cols]. A 4-D Tensor with shape
        ksizes: [ksize_rows, ksize_cols]. The size of the sliding window for
          each dimension of images
        strides: [stride_rows, stride_cols]
        rates: [dilation_rows, dilation_cols]
        padding: 'same' pads so every pixel is covered; 'valid' adds no pad.
    Output:
        A Tensor of shape [N, C*k*k, L], L being the number of windows.
    Description:
        Extract patches from images and put them in the C output dimension.
    """
    assert images.dim() == 4
    assert padding in ['same', 'valid']

    # 'same' padding mirrors TensorFlow's behavior; 'valid' leaves the
    # images untouched.
    if padding == 'same':
        images = same_padding(images, ksizes, strides, rates)

    unfold = torch.nn.Unfold(
        kernel_size=ksizes, dilation=rates, padding=0, stride=strides)
    return unfold(images)
def make_color_wheel():
    """
    Input:
        None
    Output:
        colorwheel: np.ndarray [55, 3] of RGB values in [0, 255].
    Description:
        Build the classic optical-flow color wheel: six hue segments, each
        holding one channel at 255 while another ramps up from 0 or down
        from 255.
    """
    RY, YG, GC, CB, BM, MR = (15, 6, 4, 11, 13, 6)
    # (length, channel held at 255, ramping channel, ramp ascending?)
    segments = (
        (RY, 0, 1, True),   # red -> yellow
        (YG, 1, 0, False),  # yellow -> green
        (GC, 1, 2, True),   # green -> cyan
        (CB, 2, 1, False),  # cyan -> blue
        (BM, 2, 0, True),   # blue -> magenta
        (MR, 0, 2, False),  # magenta -> red
    )
    ncols = sum(seg[0] for seg in segments)
    colorwheel = np.zeros([ncols, 3])

    col = 0
    for length, const_ch, ramp_ch, ascending in segments:
        ramp = np.floor(255 * np.arange(0, length) / length)
        colorwheel[col:col + length, const_ch] = 255
        colorwheel[col:col + length, ramp_ch] = ramp if ascending else 255 - ramp
        col += length

    return colorwheel
def compute_color(u, v):
    """
    Input:
        u: horizontal flow component, shape (h, w); NaNs are zeroed in place.
        v: vertical flow component, shape (h, w); NaNs are zeroed in place.
    Output:
        Img array: (h, w, 3) array encoding flow direction as hue and
        magnitude as saturation; NaN pixels come out black.
    Description:
        Map a 2-D flow field to RGB via the optical-flow color wheel.
    """
    h, w = u.shape
    img = np.zeros([h, w, 3])
    # Remember NaN locations so those pixels can be blacked out at the end.
    nanIdx = np.isnan(u) | np.isnan(v)
    u[nanIdx] = 0
    v[nanIdx] = 0
    # colorwheel = COLORWHEEL
    colorwheel = make_color_wheel()
    ncols = np.size(colorwheel, 0)
    rad = np.sqrt(u ** 2 + v ** 2)
    # Flow angle mapped to a fractional index into the color wheel.
    a = np.arctan2(-v, -u) / np.pi
    fk = (a + 1) / 2 * (ncols - 1) + 1
    k0 = np.floor(fk).astype(int)
    k1 = k0 + 1
    # Wrap around: the index past the last wheel entry is the first one.
    k1[k1 == ncols + 1] = 1
    f = fk - k0
    for i in range(np.size(colorwheel, 1)):
        tmp = colorwheel[:, i]
        # Linear interpolation between the two neighboring wheel colors.
        col0 = tmp[k0 - 1] / 255
        col1 = tmp[k1 - 1] / 255
        col = (1 - f) * col0 + f * col1
        idx = rad <= 1
        # In-range radii desaturate toward white as magnitude decreases.
        col[idx] = 1 - rad[idx] * (1 - col[idx])
        notidx = np.logical_not(idx)
        # Out-of-range radii are simply dimmed.
        col[notidx] *= 0.75
        img[:, :, i] = np.uint8(np.floor(255 * col * (1 - nanIdx)))
    return img
def same_padding(images, ksizes, strides, rates):
    """
    Input:
        images: 4-D tensor [batch, channels, rows, cols].
        ksizes/strides/rates: per-axis kernel size, stride and dilation.
    Output:
        The zero-padded images tensor.
    Description:
        Mirror TensorFlow's 'SAME' padding: total padding is split as evenly
        as possible, with the extra pixel (if any) on the bottom/right.
    """
    assert len(images.size()) == 4
    _, _, rows, cols = images.size()

    def _pad_amount(size, k, stride, rate):
        # Effective kernel extent once dilation is applied.
        effective_k = (k - 1) * rate + 1
        out = (size + stride - 1) // stride
        return max(0, (out - 1) * stride + effective_k - size)

    pad_rows = _pad_amount(rows, ksizes[0], strides[0], rates[0])
    pad_cols = _pad_amount(cols, ksizes[1], strides[1], rates[1])
    top = pad_rows // 2
    left = pad_cols // 2
    # ZeroPad2d takes (left, right, top, bottom).
    paddings = (left, pad_cols - left, top, pad_rows - top)
    return torch.nn.ZeroPad2d(paddings)(images)
def flow_to_image(flow):
    """
    Input:
        flow: array [batch, h, w, 2] of (u, v) flow vectors.
    Output:
        Img array: float32 array of uint8-valued RGB flow visualizations.
    Description:
        Transfer flow map to image.
        Part of code forked from flownet.
    """
    out = []
    maxu = -999.
    maxv = -999.
    minu = 999.
    minv = 999.
    maxrad = -1
    for i in range(flow.shape[0]):
        u = flow[i, :, :, 0]
        v = flow[i, :, :, 1]
        # Zero out "unknown" flow values (sentinel magnitudes > 1e7).
        idxunknow = (abs(u) > 1e7) | (abs(v) > 1e7)
        u[idxunknow] = 0
        v[idxunknow] = 0
        maxu = max(maxu, np.max(u))
        minu = min(minu, np.min(u))
        maxv = max(maxv, np.max(v))
        minv = min(minv, np.min(v))
        rad = np.sqrt(u ** 2 + v ** 2)
        maxrad = max(maxrad, np.max(rad))
        # NOTE(review): maxrad is a *running* maximum over batch items, so
        # earlier images are normalized by a smaller maxrad than later ones.
        # This matches the flownet port this was forked from -- confirm it is
        # the intended behavior.
        u = u / (maxrad + np.finfo(float).eps)
        v = v / (maxrad + np.finfo(float).eps)
        img = compute_color(u, v)
        out.append(img)
    return np.float32(np.uint8(out))
def reduce_mean(x, axis=None, keepdim=False):
    """
    Input:
        x: input tensor.
        axis: None (reduce over all dims), a single int, or an iterable of ints.
        keepdim: keep reduced dimensions with size 1.
    Output:
        The mean-reduced tensor.
    Description:
        Mean-reduce x over the given axis/axes, highest axis first so the
        remaining dim indices stay valid.
    """
    # BUG FIX: the original used `if not axis:`, which treated axis=0 (and
    # axis=[]) like None and silently reduced over *all* dimensions.
    if axis is None:
        axis = range(len(x.shape))
    elif isinstance(axis, int):
        axis = [axis]
    for i in sorted(axis, reverse=True):
        x = torch.mean(x, dim=i, keepdim=keepdim)
    return x
def reduce_sum(x, axis=None, keepdim=False):
    """
    Input:
        x: input tensor.
        axis: None (reduce over all dims), a single int, or an iterable of ints.
        keepdim: keep reduced dimensions with size 1.
    Output:
        The sum-reduced tensor.
    Description:
        Sum-reduce x over the given axis/axes, highest axis first so the
        remaining dim indices stay valid.
    """
    # BUG FIX: the original used `if not axis:`, which treated axis=0 (and
    # axis=[]) like None and silently reduced over *all* dimensions.
    if axis is None:
        axis = range(len(x.shape))
    elif isinstance(axis, int):
        axis = [axis]
    for i in sorted(axis, reverse=True):
        x = torch.sum(x, dim=i, keepdim=keepdim)
    return x
def random_bbox():
    """
    Input:
        none (reads image/mask/margin/batch settings from the module cfg).
    Output:
        torch.int64 tensor [batch, 4] of (top, left, height, width) rows.
    Description:
        Generate a random tlhw with configuration.
    """
    img_height, img_width, _ = cfg.context_image_shape
    h, w = cfg.context_mask_shape
    margin_height, margin_width = cfg.context_margin
    # Keep the box at least `margin` away from the bottom/right edges too.
    maxt = img_height - margin_height - h
    maxl = img_width - margin_width - w
    bbox_list = []
    if cfg.mask_batch_same:
        # One random box shared by every element of the batch.
        t = np.random.randint(margin_height, maxt)
        l = np.random.randint(margin_width, maxl)
        bbox_list.append((t, l, h, w))
        bbox_list = bbox_list * cfg.context_batch_size
    else:
        # Independent random box per batch element.
        for i in range(cfg.context_batch_size):
            t = np.random.randint(margin_height, maxt)
            l = np.random.randint(margin_width, maxl)
            bbox_list.append((t, l, h, w))
    return torch.tensor(bbox_list, dtype=torch.int64)
def local_patch(x, bbox_list):
    """
    Input:
        x: 4-D tensor [batch, C, H, W].
        bbox_list: iterable of (top, left, height, width), one per batch item.
    Output:
        Tensor [batch, C, height, width] of the cropped regions.
    Description:
        Crop one bbox per batch element and stack the crops.
    """
    assert len(x.size()) == 4
    crops = [
        x[idx, :, top:top + height, left:left + width]
        for idx, (top, left, height, width) in enumerate(bbox_list)
    ]
    return torch.stack(crops, dim=0)
def spatial_discounting_mask():
    """
    Input:
        config: Config should have configuration including HEIGHT, WIDTH,
        DISCOUNTED_MASK.
    Output:
        torch.Tensor: spatial discounting mask of shape [1, 1, H, W]
        (moved to CUDA when cfg.use_cuda is set).
    Description:
        Generate spatial discounting mask constant.
        Spatial discounting mask is first introduced in publication:
            Generative Image Inpainting with Contextual Attention, Yu et al.
    """
    gamma = cfg.spatial_discounting_mask
    height, width = cfg.context_mask_shape
    shape = [1, 1, height, width]
    if cfg.discounted_mask:
        # Each pixel is weighted by gamma^(distance to the nearest border),
        # down-weighting the center of the mask relative to its boundary.
        mask_values = np.ones((height, width))
        for i in range(height):
            for j in range(width):
                mask_values[i, j] = max(
                    gamma ** min(i, height - i),
                    gamma ** min(j, width - j))
        mask_values = np.expand_dims(mask_values, 0)
        mask_values = np.expand_dims(mask_values, 0)
    else:
        # Discounting disabled: uniform weights.
        mask_values = np.ones(shape)
    spatial_discounting_mask_tensor = torch.tensor(mask_values, dtype=torch.float32)
    if cfg.use_cuda:
        spatial_discounting_mask_tensor = spatial_discounting_mask_tensor.cuda()
    return spatial_discounting_mask_tensor
|
# Tests for the hicstuff digest module
# 20190402
from tempfile import NamedTemporaryFile
import os
import pandas as pd
from os.path import join
from hicstuff import digest as hcd
from Bio import SeqIO
import filecmp
def test_write_frag_info():
    """Test generation of fragments_list.txt and info_contigs.txt"""
    seq = "GGAATAGATCAAATGATCCACAGATC"
    genome = NamedTemporaryFile(delete=False, mode="w")
    genome.write(">seq1\n" + seq)
    genome.close()

    out_dir, tigs, frags = "test_data", "test_tigs", "test_frags"
    hcd.write_frag_info(
        genome.name,
        "DpnII",
        output_contigs=tigs,
        output_frags=frags,
        output_dir=out_dir,
    )

    contigs = pd.read_csv(join(out_dir, tigs), delimiter="\t")
    fragments = pd.read_csv(join(out_dir, frags), delimiter="\t")
    # One contig whose length matches the input sequence, cut at each GATC.
    assert contigs.length.tolist()[0] == len(seq)
    assert fragments.start_pos.tolist() == [0, 6, 14, 22]

    os.unlink(genome.name)
    os.remove(join(out_dir, tigs))
    os.remove(join(out_dir, frags))
def test_attribute_fragments():
    """Test the attribution of reads to restriction fragments"""
    idx_pairs = NamedTemporaryFile(delete=False)
    idx_pairs.close()

    # Build the per-chromosome restriction table for the test genome.
    restriction_table = {
        record.id: hcd.get_restriction_table(record.seq, "DpnII")
        for record in SeqIO.parse("test_data/genome/seq.fa", "fasta")
    }

    hcd.attribute_fragments(
        "test_data/valid.pairs", idx_pairs.name, restriction_table
    )
    assert filecmp.cmp("test_data/valid_idx.pairs", idx_pairs.name)
|
# Prefer the unittest2 backport when it is installed (needed for older
# Pythons); otherwise fall back to the stdlib unittest, which provides the
# same TestCase/skipIf API on modern versions.
try:
    from unittest2 import TestCase
    from unittest2 import skipIf
except ImportError:
    from unittest import TestCase
    from unittest import skipIf
|
"""
Contains URL patterns for a basic API using `Tastypie`_.
.. _tastypie: https://github.com/toastdriven/django-tastypie
"""
from django.conf.urls.defaults import patterns, include, url
from apis.api import v1_api
# Mount every resource registered on the shared v1 Api instance under /api/
# (Tastypie generates the per-resource URL patterns itself).
urlpatterns = patterns('',
    url(r'^api/', include(v1_api.urls)),
)
|
import pytest
import packerlicious.builder as builder
class TestVMwareIsoBuilder(object):
    """Validation behavior of the VMwareIso builder."""

    def test_required_fields_missing(self):
        # Serializing without mandatory fields must raise.
        with pytest.raises(ValueError) as excinfo:
            builder.VMwareIso().to_dict()
        assert 'required' in str(excinfo.value)

    def test_iso_checksum_mutually_exclusive(self):
        # NOTE(review): the checksum-type constant comes from VirtualboxIso;
        # presumably those constants are shared between builders -- confirm.
        vmware_iso = builder.VMwareIso(
            iso_url="/url/to/iso",
            iso_checksum_type=builder.VirtualboxIso.MD5,
            iso_checksum="my_checksum",
            iso_checksum_url="my_checksum_url",
        )
        with pytest.raises(ValueError) as excinfo:
            vmware_iso.to_dict()
        assert 'VMwareIso: only one of the following can be specified: iso_checksum, iso_checksum_url' == str(
            excinfo.value)
class TestVMwareVmxBuilder(object):
    """Validation behavior of the VMwareVmx builder."""

    def test_required_fields_missing(self):
        # Serializing without mandatory fields must raise.
        with pytest.raises(ValueError) as excinfo:
            builder.VMwareVmx().to_dict()
        assert 'required' in str(excinfo.value)
|
import unittest
from scrapqd.client import execute_sync
from tests import MockServer
class TestClient(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Spin up the mock HTTP server that the GraphQL queries fetch from.
        cls.server = MockServer()
    @classmethod
    def tearDownClass(cls):
        # Drop the class-level reference so the mock server can shut down.
        del cls.server
def setUp(self):
self.query = r"""
query test_query($url: String!, $name: GenericScalar!) {
result: fetch(url: $url) {
name: constant(value: $name)
website: link(xpath: "//a[contains(@class, 'site-link')]")
summary: group {
total_emp_expenses: text(xpath: "//*[@id='emp-exp-total']", data_type: INT)
total_shown_expenses: text(xpath: "//*[@id='exp-total']/span[2]", data_type: INT)
total_approved_expenses: text(xpath: "//*[@id='emp-exp-approved']/span[2]", data_type: INT)
}
summary1: group {
total_shown_expenses: regex(xpath: "//*[@id='exp-total']", pattern: "(\\d+)")
}
exp_details: list(xpath: "//div[@class='card']") {
name: text(xpath: "//div[contains(@class,'expense-emp-name')]")
user_id: query_params(xpath: "//a/@href", name: "user")
amount: group {
money: text(xpath: "//h6[contains(@class,'expense-amount')]/span[1]", data_type: INT)
name: text(xpath: "//h6[contains(@class,'expense-amount')]/span[2]")
}
approval_id: attr(xpath: "//button[contains(@class, 'expense-approve')]", name: "id")
}
exp_details_method2: list(xpath: "//div[@class='card']") {
name: text(xpath: "//div[@class='card-title title expense-emp-name']")
}
exp_details_method3: list(xpath: "//div[@class='card']") {
name1: text(xpath: ".//div[@class='card-title title expense-emp-name']")
name2: text(xpath: ".//div[contains(@class,'expense-emp-name')]")
}
}
}
"""
self.expected_result = {
"result": {
"name": "local-testing",
"website": "http://localhost:5000/scrapqd",
"summary": {
"total_emp_expenses": 309,
"total_shown_expenses": 40,
"total_approved_expenses": 4
},
"summary1": {
"total_shown_expenses": [
"40"
]
},
"exp_details": [
{
"name": "Friedrich-Wilhelm, Langern",
"user_id": {
"user": "friwilan0123"
},
"amount": {
"money": 8800,
"name": "egp"
},
"approval_id": "APPROVE-5bbd5c2f-435d-4529-8a5b-f05f1f89db5a"
},
{
"name": "Sebastian, Bien",
"user_id": {
"user": "sb0891"
},
"amount": {
"money": 3365,
"name": "mkd"
},
"approval_id": "APPROVE-cce88426-53cf-4475-9204-32f50268911b"
},
{
"name": "Rosa, Becker",
"user_id": {
"user": "rosbec647"
},
"amount": {
"money": 6700,
"name": "xof"
},
"approval_id": "APPROVE-a3ec6508-2c2c-439d-b090-b10ffef8189e"
},
{
"name": "Ines, Gröttner",
"user_id": {
"user": "inesgro1682"
},
"amount": {
"money": 8427,
"name": "npr"
},
"approval_id": "APPROVE-f8053cc8-9178-4afd-be51-573e749323e7"
},
{
"name": "Clarissa, Bonbach",
"user_id": {
"user": "clarbon1528"
},
"amount": {
"money": 1609,
"name": "fjd"
},
"approval_id": "APPROVE-10b88f2c-82ad-4f5d-bd1b-a03925934f0c"
},
{
"name": "Zbigniew, Stolze",
"user_id": {
"user": "zbisto0543"
},
"amount": {
"money": 8789,
"name": "ern"
},
"approval_id": "APPROVE-c60cf612-50be-437d-9ba8-2492de96e9c4"
},
{
"name": "Ines, Mentzel",
"user_id": {
"user": "inesmen135"
},
"amount": {
"money": 1750,
"name": "srd"
},
"approval_id": "APPROVE-73717d32-2228-48f2-a09f-2e90eeb94056"
},
{
"name": "Rosa, Becker",
"user_id": {
"user": "rosbec098"
},
"amount": {
"money": 7293,
"name": "mga"
},
"approval_id": "APPROVE-2e7ceca7-a40e-4bf5-ab43-f40ec9938b6a"
},
{
"name": "Sigismund, Rosemann",
"user_id": {
"user": "sigros1029"
},
"amount": {
"money": 997,
"name": "lbp"
},
"approval_id": "APPROVE-bb6e32a2-8167-456b-8433-5bb897e65e5d"
},
{
"name": "Edelbert, van der Dussen",
"user_id": {
"user": "edvan1230"
},
"amount": {
"money": 4573,
"name": "azn"
},
"approval_id": "APPROVE-f4fb98be-348a-4171-8d0e-622dcccb67e1"
},
{
"name": "Clarissa, Bonbach",
"user_id": {
"user": "clabon10"
},
"amount": {
"money": 6533,
"name": "mxn"
},
"approval_id": "APPROVE-23bd3e8a-3991-4a60-b2af-9e408dc8567e"
},
{
"name": "Lilli, Heintze",
"user_id": {
"user": "lilhen0987"
},
"amount": {
"money": 3102,
"name": "kwd"
},
"approval_id": "APPROVE-b3a5db8c-b20a-4327-8001-f4a60a034b34"
},
{
"name": "Gabriele, Gerlach",
"user_id": {
"user": "gabger1620"
},
"amount": {
"money": 21,
"name": "wst"
},
"approval_id": "APPROVE-e928c7f0-30e0-4e57-a149-f41b09d8961a"
},
{
"name": "Olivia, Dussen van",
"user_id": {
"user": "olidvan072"
},
"amount": {
"money": 6945,
"name": "kpw"
},
"approval_id": "APPROVE-6e03afc0-d380-4c55-9319-23dda59e19a2"
},
{
"name": "Albina, Neureuther",
"user_id": {
"user": "alb1021"
},
"amount": {
"money": 139,
"name": "kyd"
},
"approval_id": "APPROVE-1ed63b1b-4158-43a5-9553-3b3969776ef2"
},
{
"name": "Friedrich-Wilhelm, Langern",
"user_id": {
"user": "frlan1267"
},
"amount": {
"money": 8250,
"name": "mro"
},
"approval_id": "APPROVE-3d166482-1932-40f3-a96a-db2b72162f24"
},
{
"name": "Lilli, Heintze",
"user_id": {
"user": "lilhei1090"
},
"amount": {
"money": 6258,
"name": "shp"
},
"approval_id": "APPROVE-745869fa-dccb-4660-a043-28b7b9ae3a0d"
},
{
"name": "Clarissa, Bonbach",
"user_id": {
"user": "clabon1331"
},
"amount": {
"money": 7274,
"name": "bhd"
},
"approval_id": "APPROVE-30dc7b09-4b5d-4a2f-a8a1-09b0f36c5ebe"
},
{
"name": "Rolf, Kühnert",
"user_id": {
"user": "rolfk1089"
},
"amount": {
"money": 6077,
"name": "htg"
},
"approval_id": "APPROVE-0e6ed73d-57f8-4178-ad32-1235808ca7dd"
},
{
"name": "Alexa, Reising",
"user_id": {
"user": "alexa0012"
},
"amount": {
"money": 2534,
"name": "huf"
},
"approval_id": "APPROVE-49eeff26-62fc-411c-a02f-b64ae58448f7"
},
{
"name": "Ines, Gröttner",
"user_id": {
"user": "igro1654"
},
"amount": {
"money": 5377,
"name": "ltl"
},
"approval_id": "APPROVE-bc4c6801-f135-4cfd-ab88-999215b6a69c"
},
{
"name": "Gabriele, Gerlach",
"user_id": {
"user": "gab06781"
},
"amount": {
"money": 6478,
"name": "kwd"
},
"approval_id": "APPROVE-2c9b1501-034a-46af-81ad-b4f5ecadb051"
},
{
"name": "Albina, Neureuther",
"user_id": {
"user": "albneu1190"
},
"amount": {
"money": 2495,
"name": "sll"
},
"approval_id": "APPROVE-f9b126b0-ffbe-4873-81f0-bb0fafeec55e"
},
{
"name": "Abdul, Bolnbach",
"user_id": {
"user": "abdul1895"
},
"amount": {
"money": 7068,
"name": "all"
},
"approval_id": "APPROVE-c4f53406-410f-4dce-87dd-119d465a487d"
},
{
"name": "Alexa, Reising",
"user_id": {
"user": "alres1258"
},
"amount": {
"money": 3916,
"name": "cny"
},
"approval_id": "APPROVE-dbb04ad5-c896-42be-9596-0bd07ef0c6bd"
},
{
"name": "Albina, Neureuther",
"user_id": {
"user": "albn1199"
},
"amount": {
"money": 7647,
"name": "bbd"
},
"approval_id": "APPROVE-f61e5f2f-3596-425c-9d3d-114dc8497963"
},
{
"name": "Alida, Niemeier",
"user_id": {
"user": "alida0018"
},
"amount": {
"money": 8568,
"name": "cny"
},
"approval_id": "APPROVE-bd82e97d-8d97-414b-affe-3911b17798b9"
},
{
"name": "Sibylle, Eimer",
"user_id": {
"user": "eimer7610"
},
"amount": {
"money": 2155,
"name": "bam"
},
"approval_id": "APPROVE-bbad5ce0-bb8d-459f-b2ce-7ab42deda8e6"
},
{
"name": "Alida, Niemeier",
"user_id": {
"user": "nie3910"
},
"amount": {
"money": 2261,
"name": "byr"
},
"approval_id": "APPROVE-602e798a-e9d8-40c2-9b9c-abc707910f51"
},
{
"name": "Albina, Neureuther",
"user_id": {
"user": "albe00191"
},
"amount": {
"money": 2345,
"name": "cop"
},
"approval_id": "APPROVE-dcee1983-0edd-4fe3-9e0b-6a91764055c7"
},
{
"name": "Alida, Niemeier",
"user_id": {
"user": "alni10168"
},
"amount": {
"money": 8240,
"name": "lrd"
},
"approval_id": "APPROVE-1ad71dbb-5753-4fa2-98d0-afee10682210"
},
{
"name": "Abdul, Bolnbach",
"user_id": {
"user": "10273458"
},
"amount": {
"money": 1114,
"name": "brl"
},
"approval_id": "APPROVE-2de51261-325b-4f8b-a90f-b1965bd2d968"
},
{
"name": "Clarissa, Bonbach",
"user_id": {
"user": "091110168"
},
"amount": {
"money": 3573,
"name": "ils"
},
"approval_id": "APPROVE-fc2c52a1-05c2-47e2-a04d-035830eebf97"
},
{
"name": "Edelbert, van der Dussen",
"user_id": {
"user": "0912168"
},
"amount": {
"money": 1571,
"name": "zwd"
},
"approval_id": "APPROVE-61ce0628-7562-43a5-9dd2-20e681ca2370"
},
{
"name": "Hans-Georg, Bärer",
"user_id": {
"user": "11210168"
},
"amount": {
"money": 468,
"name": "tjs"
},
"approval_id": "APPROVE-ca739d44-0108-47c2-966f-a24b5ed21eaa"
},
{
"name": "Sebastian, Bien",
"user_id": {
"user": "981010168"
},
"amount": {
"money": 2013,
"name": "mvr"
},
"approval_id": "APPROVE-2f45973e-9ae3-41f8-938a-72b0df5be061"
},
{
"name": "Ines, Gröttner",
"user_id": {
"user": "1010168"
},
"amount": {
"money": 893,
"name": "ggp"
},
"approval_id": "APPROVE-704a6ee6-b5d8-42e2-92f0-ad0094c5b187"
},
{
"name": "Abdul, Bolnbach",
"user_id": {
"user": "56610168"
},
"amount": {
"money": 2065,
"name": "sll"
},
"approval_id": "APPROVE-6bcf66eb-9761-4a7f-a9c1-b90ec6c014fd"
},
{
"name": "Albina, Neureuther",
"user_id": {
"user": "alni10128"
},
"amount": {
"money": 245,
"name": "spl"
},
"approval_id": "APPROVE-513eb588-fb56-450a-8b4a-456b8afb4441"
},
{
"name": "Zbigniew, Stolze",
"user_id": {
"user": "zb1213e"
},
"amount": {
"money": 9453,
"name": "dkk"
},
"approval_id": "APPROVE-13851cd2-9c9d-412d-8fd1-65f99df176fb"
}
],
"exp_details_method2": [
{
"name": "Friedrich-Wilhelm, Langern"
},
{
"name": "Sebastian, Bien"
},
{
"name": "Rosa, Becker"
},
{
"name": "Ines, Gröttner"
},
{
"name": "Clarissa, Bonbach"
},
{
"name": "Zbigniew, Stolze"
},
{
"name": "Ines, Mentzel"
},
{
"name": "Rosa, Becker"
},
{
"name": "Sigismund, Rosemann"
},
{
"name": "Edelbert, van der Dussen"
},
{
"name": "Clarissa, Bonbach"
},
{
"name": "Lilli, Heintze"
},
{
"name": "Gabriele, Gerlach"
},
{
"name": "Olivia, Dussen van"
},
{
"name": "Albina, Neureuther"
},
{
"name": "Friedrich-Wilhelm, Langern"
},
{
"name": "Lilli, Heintze"
},
{
"name": "Clarissa, Bonbach"
},
{
"name": "Rolf, Kühnert"
},
{
"name": "Alexa, Reising"
},
{
"name": "Ines, Gröttner"
},
{
"name": "Gabriele, Gerlach"
},
{
"name": "Albina, Neureuther"
},
{
"name": "Abdul, Bolnbach"
},
{
"name": "Alexa, Reising"
},
{
"name": "Albina, Neureuther"
},
{
"name": "Alida, Niemeier"
},
{
"name": "Sibylle, Eimer"
},
{
"name": "Alida, Niemeier"
},
{
"name": "Albina, Neureuther"
},
{
"name": "Alida, Niemeier"
},
{
"name": "Abdul, Bolnbach"
},
{
"name": "Clarissa, Bonbach"
},
{
"name": "Edelbert, van der Dussen"
},
{
"name": "Hans-Georg, Bärer"
},
{
"name": "Sebastian, Bien"
},
{
"name": "Ines, Gröttner"
},
{
"name": "Abdul, Bolnbach"
},
{
"name": "Albina, Neureuther"
},
{
"name": "Zbigniew, Stolze"
}
],
"exp_details_method3": [
{
"name1": "Friedrich-Wilhelm, Langern",
"name2": "Friedrich-Wilhelm, Langern"
},
{
"name1": "Sebastian, Bien",
"name2": "Sebastian, Bien"
},
{
"name1": "Rosa, Becker",
"name2": "Rosa, Becker"
},
{
"name1": "Ines, Gröttner",
"name2": "Ines, Gröttner"
},
{
"name1": "Clarissa, Bonbach",
"name2": "Clarissa, Bonbach"
},
{
"name1": "Zbigniew, Stolze",
"name2": "Zbigniew, Stolze"
},
{
"name1": "Ines, Mentzel",
"name2": "Ines, Mentzel"
},
{
"name1": "Rosa, Becker",
"name2": "Rosa, Becker"
},
{
"name1": "Sigismund, Rosemann",
"name2": "Sigismund, Rosemann"
},
{
"name1": "Edelbert, van der Dussen",
"name2": "Edelbert, van der Dussen"
},
{
"name1": "Clarissa, Bonbach",
"name2": "Clarissa, Bonbach"
},
{
"name1": "Lilli, Heintze",
"name2": "Lilli, Heintze"
},
{
"name1": "Gabriele, Gerlach",
"name2": "Gabriele, Gerlach"
},
{
"name1": "Olivia, Dussen van",
"name2": "Olivia, Dussen van"
},
{
"name1": "Albina, Neureuther",
"name2": "Albina, Neureuther"
},
{
"name1": "Friedrich-Wilhelm, Langern",
"name2": "Friedrich-Wilhelm, Langern"
},
{
"name1": "Lilli, Heintze",
"name2": "Lilli, Heintze"
},
{
"name1": "Clarissa, Bonbach",
"name2": "Clarissa, Bonbach"
},
{
"name1": "Rolf, Kühnert",
"name2": "Rolf, Kühnert"
},
{
"name1": "Alexa, Reising",
"name2": "Alexa, Reising"
},
{
"name1": "Ines, Gröttner",
"name2": "Ines, Gröttner"
},
{
"name1": "Gabriele, Gerlach",
"name2": "Gabriele, Gerlach"
},
{
"name1": "Albina, Neureuther",
"name2": "Albina, Neureuther"
},
{
"name1": "Abdul, Bolnbach",
"name2": "Abdul, Bolnbach"
},
{
"name1": "Alexa, Reising",
"name2": "Alexa, Reising"
},
{
"name1": "Albina, Neureuther",
"name2": "Albina, Neureuther"
},
{
"name1": "Alida, Niemeier",
"name2": "Alida, Niemeier"
},
{
"name1": "Sibylle, Eimer",
"name2": "Sibylle, Eimer"
},
{
"name1": "Alida, Niemeier",
"name2": "Alida, Niemeier"
},
{
"name1": "Albina, Neureuther",
"name2": "Albina, Neureuther"
},
{
"name1": "Alida, Niemeier",
"name2": "Alida, Niemeier"
},
{
"name1": "Abdul, Bolnbach",
"name2": "Abdul, Bolnbach"
},
{
"name1": "Clarissa, Bonbach",
"name2": "Clarissa, Bonbach"
},
{
"name1": "Edelbert, van der Dussen",
"name2": "Edelbert, van der Dussen"
},
{
"name1": "Hans-Georg, Bärer",
"name2": "Hans-Georg, Bärer"
},
{
"name1": "Sebastian, Bien",
"name2": "Sebastian, Bien"
},
{
"name1": "Ines, Gröttner",
"name2": "Ines, Gröttner"
},
{
"name1": "Abdul, Bolnbach",
"name2": "Abdul, Bolnbach"
},
{
"name1": "Albina, Neureuther",
"name2": "Albina, Neureuther"
},
{
"name1": "Zbigniew, Stolze",
"name2": "Zbigniew, Stolze"
}
]
}
}
self.maxDiff = None
def test_library_sample_query_with_variables(self):
query_variables = {
"url": "http://localhost:5000/scrapqd/sample_page/",
"name": "local-testing"
}
result = execute_sync(self.query, query_variables)
self.assertDictEqual(self.expected_result, result.data)
def test_library_sample_query_without_variables(self):
query = r"""
query test_query {
result: fetch(url: "http://localhost:5000/scrapqd/sample_page/") {
name: constant(value: "local-testing")
website: link(xpath: "//a[contains(@class, 'site-link')]")
summary: group {
total_emp_expenses: text(xpath: "//*[@id='emp-exp-total']", data_type: INT)
total_shown_expenses: text(xpath: "//*[@id='exp-total']/span[2]", data_type: INT)
total_approved_expenses: text(xpath: "//*[@id='emp-exp-approved']/span[2]", data_type: INT)
}
summary1: group {
total_shown_expenses: regex(xpath: "//*[@id='exp-total']", pattern: "(\\d+)")
}
exp_details: list(xpath: "//div[@class='card']") {
name: text(xpath: "//div[contains(@class,'expense-emp-name')]")
user_id: query_params(xpath: "//a/@href", name: "user")
amount: group {
money: text(xpath: "//h6[contains(@class,'expense-amount')]/span[1]", data_type: INT)
name: text(xpath: "//h6[contains(@class,'expense-amount')]/span[2]")
}
approval_id: attr(xpath: "//button[contains(@class, 'expense-approve')]", name: "id")
}
exp_details_method2: list(xpath: "//div[@class='card']") {
name: text(xpath: "//div[@class='card-title title expense-emp-name']")
}
exp_details_method3: list(xpath: "//div[@class='card']") {
name1: text(xpath: ".//div[@class='card-title title expense-emp-name']")
name2: text(xpath: ".//div[contains(@class,'expense-emp-name')]")
}
}
}
"""
result = execute_sync(query)
self.assertDictEqual(self.expected_result, result.data)
|
import graphene
from graphene_django.types import DjangoObjectType
from .models import Countdown
class CountdownType(DjangoObjectType):
    """GraphQL object type auto-derived from the Countdown model."""
    class Meta:
        model = Countdown
class CountdownCreateInput(graphene.InputObjectType):
    """Input payload for creating a countdown: label and target datetime."""
    target = graphene.String(required=True)
    target_date = graphene.DateTime(required=True)
class CountdownDeleteInput(graphene.InputObjectType):
    """Input payload for deleting a countdown by primary key."""
    # NOTE(review): id is optional here; omitting it makes the delete
    # mutation fail inside Countdown.objects.get -- confirm whether it
    # should be required=True.
    id = graphene.Int()
class Query(graphene.ObjectType):
    """Root query exposing every stored countdown."""
    all_countdowns = graphene.List(CountdownType)
    def resolve_all_countdowns(self, info):
        # Soonest target date first.
        return Countdown.objects.order_by("target_date").all()
class CreateCountdown(graphene.Mutation):
    """Mutation that persists a new Countdown and returns its database id."""
    id = graphene.Int()
    class Arguments:
        countdown_data = CountdownCreateInput(required=True)
    def mutate(root, info, countdown_data):
        countdown = Countdown(
            target=countdown_data.target, target_date=countdown_data.target_date
        )
        countdown.save()
        return CreateCountdown(id=countdown.id)
class DeleteCountdown(graphene.Mutation):
    """Mutation that removes a Countdown by id and reports success."""
    is_delete = graphene.Boolean()
    class Arguments:
        countdown_data = CountdownDeleteInput(required=True)
    def mutate(root, info, countdown_data):
        # NOTE(review): raises Countdown.DoesNotExist for an unknown id,
        # surfacing as a GraphQL error -- confirm that is intended.
        countdown = Countdown.objects.get(id=countdown_data.id)
        countdown.delete()
        return DeleteCountdown(is_delete=True)
class Mutation(graphene.ObjectType):
    """Root mutation combining the create and delete operations."""
    create_countdown = CreateCountdown.Field()
    delete_countdown = DeleteCountdown.Field()
# Schema entry point consumed by the GraphQL view.
schema = graphene.Schema(query=Query, mutation=Mutation)
|
import time
import random
import ostore
import ptrie
import pdscache
from oidfs import OidFS
def rand_perm(txt):
    ''' Yields every permutation of ``txt`` exactly once, in a randomised
    order: permutations of the tail are generated recursively and the head
    character is inserted at each position, positions visited randomly. '''
    txtlen = len(txt)
    if txtlen == 0:
        # The empty string has a single (empty) permutation.
        yield txt
        return
    for s in rand_perm(txt[1:]):
        # list() is required: random.shuffle needs a mutable sequence and
        # range() is an immutable object on Python 3 (it happened to work
        # on Python 2 only because range() returned a list there).
        randpos = list(range(txtlen))
        random.shuffle(randpos)
        for i in randpos:
            yield s[:i] + txt[0] + s[i:]
def sortedSeq(n):
    ''' Returns the sorted pancake sequence of length n as a string.
    Decimal digits cover 1-9; lowercase letters stand in for 10 ('a'),
    11 ('b'), and so on, so at most 9 + 26 = 35 pancakes are addressable. '''
    if n > 35:
        raise ValueError("Sorry, cannot do more than 35.")
    symbols = []
    for value in range(1, n + 1):
        if value < 10:
            symbols.append("%d" % value)
        else:
            # Map 10..35 onto 'a'..'z'.
            symbols.append(chr(ord('a') + (value - 10)))
    return "".join(symbols)
class Perm(object):
    """Stores permutation strings in a persistent trie and exercises the
    object store: insertion, breadth-first walks, persisting and GC."""
    def __init__(self):
        # Persistent store plus the object filesystem handle over it.
        self.pstor, self.ofs = ostore.init_ostore()
        self.ptrieObj = ptrie.Ptrie(self.pstor)
        self.root = ptrie.Nulltrie # Start with Nulltrie as root
        self.count = 0
    def insert(self, seq):
        """Insert one permutation string; the trie root is replaced since
        insertion returns a new persistent root."""
        # # Do GC periodically
        # if self.count > 0 and self.count % 5313 == 0:
        # before = time.clock()
        # print "Doing GC (%d perms so far)" % self.count
        # self.pstor.print_stats()
        # rootOid = pdscache.write_coid(self.root)
        # rootOid, = self.pstor.keepOids([rootOid,])
        # self.root = pdscache.read_oid(rootOid)
        # print "GC took %d seconds" % (time.clock() - before)
        self.root = self.ptrieObj.insert(self.root, seq, None, None)
        self.count += 1
    def bfwalk(self):
        """Time a full breadth-first traversal of the trie and dump store stats."""
        before = time.clock()
        for node in self.ptrieObj.bfiter(self.root):
            pass
        print "BF trie walk took %f seconds" % (time.clock() - before)
        self.pstor.print_stats()
    def inspect(self):
        """Walk, persist under a fixed name, garbage-collect, reload and
        walk twice more to compare cold/warm traversal times."""
        myname = "random_permutations"
        print "bfwalk 1:"
        self.bfwalk()
        before = time.clock()
        self.ofs.store(self.root, myname)
        print "Storing took %f seconds" % (time.clock() - before)
        self.pstor.print_stats()
        print "Doing GC"
        before = time.clock()
        self.ofs.gc()
        print "GC took %f seconds" % (time.clock() - before)
        self.root = self.ofs.load(myname)
        print "bfwalk 2:"
        self.bfwalk()
        print "bfwalk 3:"
        self.bfwalk()
    def close(self):
        """Persist the trie root, collect garbage and shut everything down."""
        self.ofs.store(self.root, "random_permutations")
        self.ofs.gc()
        self.ofs.close()
        self.pstor.close()
if __name__ == "__main__":
import sys
import math
if len(sys.argv) < 2:
print "%s: number" % (sys.argv[0])
exit(0)
n = int(sys.argv[1])
seq = sortedSeq(n)
pm = Perm()
before = time.clock()
for s in rand_perm(seq):
#print "Inserting '%s'" % s
pm.insert(s)
print "%d permutations total. %d seconds" % \
(math.factorial(n), time.clock() - before)
pm.inspect()
pm.close()
|
import matplotlib.pyplot as plt
def drawPlot(x, y, f):
    """Scatter the (x, y) samples and overlay the fitted line f[0]*x + f[1].

    NOTE(review): f[0]*x assumes ``x`` supports elementwise arithmetic
    (e.g. a numpy array); a plain Python list would not work here.
    """
    plt.style.use('dark_background')
    # Raw observations as magenta dots.
    plt.scatter(x, y, color="m", marker="o", s=30)
    # Regression line: slope f[0], intercept f[1].
    fitted = f[0] * x + f[1]
    plt.plot(x, fitted, color="g")
    plt.xlabel('Size')
    plt.ylabel('Price')
    plt.show()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
+-------------------------------------------------
@Author: cc
@Contact: yaochen@xjh.com
@Site: http://www.xjh.com
@Project: sobookscrawler
@File: base_web_driver_service.py
@Version:
@Time: 2019/5/23 10:15
@Description: TO-DO
+-------------------------------------------------
@Change Activity:
1. Created at 2019/5/23 10:15
2. TO-DO
+-------------------------------------------------
'''
__author__ = 'cc'
from abc import abstractmethod
from fake_useragent import UserAgent
from selenium import webdriver
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver.support.ui import WebDriverWait
import configs as cfg
DEFAULT_PROTOCOL = 'https'
# NOTE(review): selenium's set_page_load_timeout expects seconds; the
# `3 * 1000` spelling reads like milliseconds -- confirm the intended unit.
DEFAULT_TIMEOUT = 3 * 1000
DEFAULT_PORT = 80
class BaseWebDriverService(object):
    """Base class wrapping a headless-capable Firefox WebDriver.

    Subclasses override the ``prepare_*`` hooks to customise capabilities,
    profile and options; ``build_url`` assembles URLs from the service's
    protocol/domain/port defaults. Usable as a context manager, which quits
    the browser on exit.
    """

    # NOTE(review): evaluated once at class-definition time, so every
    # instance shares the same randomly chosen Firefox user agent.
    _user_agent = UserAgent(verify_ssl=False).Firefox
    _headless_mode = cfg.BROWSER_HEADLESS_MODE
    _protocol = DEFAULT_PROTOCOL
    _domain = None
    _port = None
    _driver = None
    _wait = None

    @property
    def user_agent(self):
        return self._user_agent

    @user_agent.setter
    def user_agent(self, value):
        self._user_agent = value

    @property
    def headless_mode(self):
        return self._headless_mode

    # @headless_mode.setter
    # def headless_mode(self, value):
    #     self._headless_mode = value

    @property
    def driver(self):
        return self._driver

    @driver.setter
    def driver(self, value):
        self._driver = value

    @property
    def wait(self):
        return self._wait

    @wait.setter
    def wait(self, value):
        self._wait = value

    def __init__(self):
        self._init_web_driver()
        # Wait timeout: use the configured value only when it is usable
        # (present and at least 1), otherwise keep the default.
        # Fixed: the original condition was inverted and selected the
        # config value precisely when it was None or < 1, while a valid
        # configured timeout was silently ignored.
        wait_timeout = DEFAULT_TIMEOUT
        if cfg.TASK_WAIT_TIMEOUT is not None and cfg.TASK_WAIT_TIMEOUT >= 1:
            wait_timeout = cfg.TASK_WAIT_TIMEOUT
        self._driver.set_page_load_timeout(wait_timeout)
        self._wait = WebDriverWait(self._driver, wait_timeout)

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        # Always release the browser process on context exit.
        if self._driver:
            self._driver.quit()

    @abstractmethod
    def prepare_desired_capabilities(self):
        """Return Firefox capabilities; 'eager' stops waiting at DOM-ready."""
        capabilities = DesiredCapabilities.FIREFOX.copy()
        capabilities['pageLoadStrategy'] = 'eager'
        return capabilities

    @abstractmethod
    def prepare_profile(self):
        """Return a Firefox profile with the spoofed user agent applied."""
        profile = webdriver.FirefoxProfile()
        profile.set_preference('general.useragent.override', self._user_agent)
        return profile

    @abstractmethod
    def prepare_options(self):
        """Return Firefox options, enabling headless mode when configured."""
        options = webdriver.FirefoxOptions()
        # options.add_argument('--user-agent={}'.format(self._user_agent))
        if self._headless_mode:
            options.add_argument("--headless")
            options.add_argument("--disable-gpu")
        return options

    def _init_web_driver(self):
        """Create the Firefox driver from the configured binaries and hooks."""
        self._driver = webdriver.Firefox(
            executable_path=cfg.GECKO_EXECUTABLE_PATH,
            firefox_binary=cfg.FIREFOX_BINARY_PATH,
            desired_capabilities=self.prepare_desired_capabilities(),
            firefox_profile=self.prepare_profile(),
            firefox_options=self.prepare_options(),
            # service_log_path=None,
        )

    def build_url(self, domain=None, protocol=None, path=None, port=None):
        """Assemble '<protocol>://<domain>[:<port>][/<path>]'.

        Arguments override the instance defaults when truthy; the port is
        emitted only when it differs from DEFAULT_PORT.
        """
        # Start from the instance defaults, then apply explicit overrides.
        _protocol = self._protocol
        _domain = self._domain
        _port = self._port
        if None is not protocol and protocol:
            _protocol = protocol
        if None is not domain and domain:
            _domain = domain
        if None is not port and (port > 0 and port != DEFAULT_PORT):
            _port = port
        # Protocol & domain
        result = '{}://{}'.format(_protocol, _domain)
        # Port (skipped for the default port)
        if _port and DEFAULT_PORT != _port:
            result = '{}:{}'.format(result, _port)
        # Path, normalised to a single leading slash
        if None is not path and path:
            if '/' != path[0]:
                path = '/' + path
            result = '{}{}'.format(result, path)
        return result
|
def validate_params(**kwargs):
    """Build a cleaned parameter dict for the API request.

    Drops ``self``/``kwargs`` entries and any value that is None; every
    remaining key that has a matching staticmethod on Validators is
    normalised by it, all other values are kept verbatim.
    """
    exclude = ['self', 'kwargs']
    params = {}
    for name, value in kwargs.items():
        # Skip unset values and the bookkeeping keys.
        if value is None or name in exclude:
            continue
        validator = getattr(Validators, name, None)
        params[name] = validator(value) if validator is not None else value
    return params
class Validators(object):
    """Static normalisers/validators for Brightcove media-API parameters.

    Each public staticmethod shares its name with a request parameter;
    validate_params() dispatches values to them by attribute lookup.
    """
    @staticmethod
    def _search_criteria(dct):
        '''Accepts a mapping, or a string.

        A mapping is validated against the legal search fields and
        serialised to "field:value,field:value"; any other value is
        returned unchanged.
        '''
        # http://support.brightcove.com/en/docs/searching-videos-media-api
        valid_search_fields = ['display_name', 'reference_id', 'tag',
                               'custom_fields', 'search_text']
        try:
            # Fixed: the old assert message referenced the generator
            # variable `key`, which is out of scope at message-build time,
            # so an invalid field raised NameError instead of the intended
            # AssertionError. Collect offenders first instead.
            invalid = [key for key in dct.keys()
                       if key not in valid_search_fields]
            assert not invalid, (
                '%s is not a valid search field.' % invalid[0])
            return ','.join('%s:%s' % (key, val) for key, val in dct.items())
        except AttributeError:
            # We were not given a mapping, so just return the value
            return dct
    @staticmethod
    def sort_by(dct):
        """Validate and serialise sort criteria (field:direction pairs)."""
        valid_sort_fields = ['DISPLAY_NAME', 'REFERENCE_ID', 'PUBLISH_DATE',
                             'CREATION_DATE', 'MODIFIED_DATE', 'START_DATE',
                             'PLAYS_TRAILING_WEEK', 'PLAYS_TOTAL']
        try:
            for key, val in dct.items():
                assert(key) in valid_sort_fields, 'Invalid sort field %s' % key
                assert(val) in SortOrderType._fields, (
                    'Invalid sort direction %s' % val)
            return ','.join('%s:%s' % (key, val) for key, val in dct.items())
        except AttributeError:
            # Not given a mapping
            return ','.join(dct)
        # Doesn't currently check if we pass a single string here, we will
        # end up joining every letter with a comma
    @staticmethod
    def all(dct):
        """Search criteria that every result must match."""
        return Validators._search_criteria(dct)
    @staticmethod
    def any(dct):
        """Search criteria of which results must match at least one."""
        return Validators._search_criteria(dct)
    @staticmethod
    def none(dct):
        """Search criteria that results must NOT match."""
        return Validators._search_criteria(dct)
    @staticmethod
    def fields(fields):
        """Validate requested Video fields and join them for the query."""
        for field in fields:
            assert field in Video._fields, (
                '%s is not a valid Video field.' % field)
        return ','.join(fields)
    @staticmethod
    def video_ids(ids):
        """Join video ids into a comma-separated string."""
        return ','.join(str(_id) for _id in ids)
    @staticmethod
    def playlist_ids(ids):
        """Join playlist ids into a comma-separated string."""
        return ','.join(str(_id) for _id in ids)
    @staticmethod
    def reference_ids(ids):
        """Join reference ids into a comma-separated string."""
        return ','.join(str(_id) for _id in ids)
def requires_or(*_args):
    '''This decorator annotates functions where at least one of a set of
    keyword arguments is required.

    The wrapped function raises AssertionError when called without any of
    the named keyword arguments.
    '''
    # Local import keeps this block self-contained within the module.
    import functools

    def requires_or_decorator(f):
        # Fixed: functools.wraps preserves the wrapped function's
        # __name__/__doc__, which the assertion message itself relies on
        # and which introspection/debugging tools expect.
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            assert any(name in kwargs for name in _args), (
                '%s requires at least one of the following arguments: %s' %
                (f.__name__, ', '.join(_args)))
            return f(*args, **kwargs)
        return wrapper
    return requires_or_decorator
|
from platform import system
import pypython
from matplotlib import pyplot as plt
# Flux rescaling between the default 100 pc distance and 100 Mpc
# (inverse-square dilution factor).
DEFAULT_DISTANCE = 100 * pypython.constants.PARSEC
SCALED_DISTANCE = 100 * 1e6 * pypython.constants.PARSEC
SCALE_FACTOR = DEFAULT_DISTANCE**2 / SCALED_DISTANCE**2
# Model/plot parameters: black-hole mass label, spectrum root name,
# smoothing width, line width and curve alpha.
m_bh = "3e6"
root = "tde_opt_spec"
sm = 10
lw = 1.7
al = 0.75
# NOTE(review): immediately shadowed by the loop variable `inclination`
# below and otherwise unused (as is `lw`).
inclination = "60"
if system() == "Darwin":
    home = "/Users/saultyevil/"
else:
    home = "/home/saultyevil/"
home += "PySims/tde_optical/p_response/12_grid_final/" + m_bh + "/"
# (LaTeX label, wavelength) pairs used to annotate spectral lines.
important_lines = [[r"He \textsc{i}", 3889], [r"H$~\delta$", 4100], [r"H$~\gamma$", 4340], [r"He \textsc{ii}", 4686],
                   [r"H$~\beta$", 4861], [r"He \textsc{i}", 5877], [r"H$~\alpha$", 6564], [r"He \textsc{i}", 7067]]
# ##############################################################################
#
# Actual fiducial model: optical spectrum
#
# ##############################################################################
xmin = 4000
xmax = 7250
s = pypython.Spectrum(root, home + "Mdot_acc/0_15", log_spec=False, smooth=sm, distance=100 * 1e6)
fig, ax = plt.subplots(1, 1, figsize=(12, 5))
# One curve per inclination angle available in the spectrum file.
for inclination in s.inclinations:
    x, y = pypython.get_xy_subset(s["Lambda"], s[inclination], xmin, xmax)
    ax.plot(x, y, alpha=al, label=str(inclination) + r"$^{\circ}$")
ax.set_xlim(xmin, xmax)
# ax.set_ylim(1e-5 * SCALE_FACTOR, 3e-3 * SCALE_FACTOR)
ax.legend(loc="lower left", ncol=5)
ax.set_ylabel("Flux density at 100 Mpc\n" + r"[erg s$^{-1}$ cm$^{-2}$ \AA$^{-1}$]")
ax.set_xlabel(r"Rest-frame wavelength [\AA]")
ax = pypython.plot.set_axes_scales(ax, "logy")
ax = pypython.spectrum.plot.add_line_ids(ax, important_lines, ynorm=0.92, linestyle="none", offset=0)
fig.tight_layout(rect=[0.02, 0.02, 0.98, 0.98])
fig.savefig("../p_figures/figure5_fiducial_optical_spectrum.pdf", dpi=300)
plt.show()
|
#!/usr/bin/env python
# Project FartCHECKER
# Dmitriy Vetutnev, 2021
import serial
from uart import Receiver
from time import localtime, asctime
def get_concentration(packet):
    """Decode the gas concentration (ppm) from one sensor packet.

    Bytes 1 and 2 carry the high and low byte of a big-endian 16-bit value.
    """
    high, low = packet[1], packet[2]
    return high * 256 + low
def callback(packet):
    """Print a timestamped concentration reading for one complete packet."""
    timestamp = asctime(localtime())
    ppm = get_concentration(packet)
    print("%s %s ppm" % (timestamp, ppm))
def main():
    """Read the gas sensor's serial port forever, feeding each byte to the
    UART receiver, which invokes ``callback`` per complete packet."""
    print("Project FartCHECKER")
    port = serial.Serial("/dev/ttyUSB0")
    rx = Receiver(callback)
    while True:
        # One byte at a time; Receiver reassembles full packets itself.
        b = port.read()
        rx.input(b)
if __name__ == "__main__":
    main()
|
from math import inf
from copy import deepcopy
import pickle
import os
import biorbd
import casadi
from casadi import MX, vertcat, sum1
from .enums import OdeSolver
from .mapping import BidirectionalMapping
from .path_conditions import Bounds, InitialConditions, InterpolationType
from .constraints import ConstraintFunction, Constraint
from .objective_functions import Objective, ObjectiveFunction
from .plot import OnlineCallback, CustomPlot
from .integrator import RK4
from .biorbd_interface import BiorbdInterface
from .variable_optimization import Data
from .__version__ import __version__
class OptimalControlProgram:
"""
Constructor calls __prepare_dynamics and __define_multiple_shooting_nodes methods.
To solve problem you have to call : OptimalControlProgram().solve()
"""
    def __init__(
        self,
        biorbd_model,
        problem_type,
        number_shooting_points,
        phase_time,
        X_init,
        U_init,
        X_bounds,
        U_bounds,
        objective_functions=(),
        constraints=(),
        external_forces=(),
        ode_solver=OdeSolver.RK,
        all_generalized_mapping=None,
        q_mapping=None,
        q_dot_mapping=None,
        tau_mapping=None,
        plot_mappings=None,
        is_cyclic_objective=False,
        is_cyclic_constraint=False,
        nb_threads=1,
    ):
        """
        Prepare CasADi to solve a problem, defines some parameters, dynamic problem and ode solver.
        Defines also all constraints including continuity constraints.
        Defines the sum of all objective functions weight.
        :param biorbd_model: Biorbd model loaded from the biorbd.Model() function
        :param problem_type: A selected method handler of the class problem_type.ProblemType.
        :param ode_solver: Name of chosen ode, available in OdeSolver enum class.
        :param number_shooting_points: Subdivision number.
        :param phase_time: Simulation time in seconds.
        :param X_init: Initial guess for the states (InitialConditions).
        :param U_init: Initial guess for the controls (InitialConditions).
        :param objective_functions: Tuple of tuple of objectives functions handler's and weights.
        :param X_bounds: Instance of the class Bounds.
        :param U_bounds: Instance of the class Bounds.
        :param constraints: Tuple of constraints, instant (which node(s)) and tuple of geometric structures used.
        :param external_forces: Tuple of external forces applied per phase.
        :param all_generalized_mapping: BidirectionalMapping applied to q, qdot and tau at once
            (mutually exclusive with the individual mappings below).
        :param plot_mappings: Mapping used when plotting each variable.
        :param is_cyclic_objective / is_cyclic_constraint: Enforce cyclic motion via objective or constraint.
        :param nb_threads: Number of threads used to evaluate the dynamics.
        """
        if isinstance(biorbd_model, str):
            biorbd_model = [biorbd.Model(biorbd_model)]
        elif isinstance(biorbd_model, biorbd.biorbd.Model):
            biorbd_model = [biorbd_model]
        elif isinstance(biorbd_model, (list, tuple)):
            biorbd_model = [biorbd.Model(m) if isinstance(m, str) else m for m in biorbd_model]
        else:
            raise RuntimeError("biorbd_model must either be a string or an instance of biorbd.Model()")
        self.version = {"casadi": casadi.__version__, "biorbd": biorbd.__version__, "biorbd_optim": __version__}
        self.nb_phases = len(biorbd_model)
        biorbd_model_path = [m.path().relativePath().to_string() for m in biorbd_model]
        # Kept so the whole problem can be reconstructed (e.g. when saving/loading).
        self.original_values = {
            "biorbd_model": biorbd_model_path,
            "problem_type": problem_type,
            "number_shooting_points": number_shooting_points,
            "phase_time": phase_time,
            "X_init": X_init,
            "U_init": U_init,
            "X_bounds": X_bounds,
            "U_bounds": U_bounds,
            "objective_functions": [],
            "constraints": [],
            "external_forces": external_forces,
            "ode_solver": ode_solver,
            "all_generalized_mapping": all_generalized_mapping,
            "q_mapping": q_mapping,
            "q_dot_mapping": q_dot_mapping,
            "tau_mapping": tau_mapping,
            "plot_mappings": plot_mappings,
            "is_cyclic_objective": is_cyclic_objective,
            "is_cyclic_constraint": is_cyclic_constraint,
            "nb_threads": nb_threads,
        }
        # One dict of settings per phase; __add_to_nlp dispatches values into them.
        self.nlp = [{} for _ in range(self.nb_phases)]
        self.__add_to_nlp("model", biorbd_model, False)
        self.__add_to_nlp("phase_idx", [i for i in range(self.nb_phases)], False)
        # Prepare some variables
        constraints = self.__init_penalty(constraints, "constraints")
        objective_functions = self.__init_penalty(objective_functions, "objective_functions")
        # Define some aliases
        self.__add_to_nlp("ns", number_shooting_points, False)
        for nlp in self.nlp:
            if nlp["ns"] < 1:
                raise RuntimeError("Number of shooting points must be at least 1")
        self.initial_phase_time = phase_time
        phase_time, initial_time_guess, time_min, time_max = self.__init_phase_time(
            phase_time, objective_functions, constraints
        )
        self.__add_to_nlp("tf", phase_time, False)
        # t0 of each phase is the tf of the previous one (0 for the first phase).
        self.__add_to_nlp("t0", [0] + [nlp["tf"] for i, nlp in enumerate(self.nlp) if i != len(self.nlp) - 1], False)
        self.__add_to_nlp(
            "dt", [self.nlp[i]["tf"] / max(self.nlp[i]["ns"], 1) for i in range(self.nb_phases)], False,
        )
        self.is_cyclic_constraint = is_cyclic_constraint
        self.is_cyclic_objective = is_cyclic_objective
        self.nb_threads = nb_threads
        # External forces
        if external_forces != ():
            external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)
            self.__add_to_nlp("external_forces", external_forces, False)
        # Compute problem size
        if all_generalized_mapping is not None:
            if q_mapping is not None or q_dot_mapping is not None or tau_mapping is not None:
                raise RuntimeError("all_generalized_mapping and a specified mapping cannot be used alongside")
            q_mapping = q_dot_mapping = tau_mapping = all_generalized_mapping
        self.__add_to_nlp("q_mapping", q_mapping, q_mapping is None, BidirectionalMapping)
        self.__add_to_nlp("q_dot_mapping", q_dot_mapping, q_dot_mapping is None, BidirectionalMapping)
        self.__add_to_nlp("tau_mapping", tau_mapping, tau_mapping is None, BidirectionalMapping)
        plot_mappings = plot_mappings if plot_mappings is not None else {}
        reshaped_plot_mappings = []
        for i in range(self.nb_phases):
            reshaped_plot_mappings.append({})
            for key in plot_mappings:
                reshaped_plot_mappings[i][key] = plot_mappings[key][i]
        self.__add_to_nlp("plot_mappings", reshaped_plot_mappings, False)
        self.__add_to_nlp("problem_type", problem_type, False)
        for i in range(self.nb_phases):
            self.__initialize_nlp(self.nlp[i])
            self.nlp[i]["problem_type"](self.nlp[i])
        # Prepare path constraints
        self.__add_to_nlp("X_bounds", X_bounds, False)
        self.__add_to_nlp("U_bounds", U_bounds, False)
        for i in range(self.nb_phases):
            self.nlp[i]["X_bounds"].check_and_adjust_dimensions(self.nlp[i]["nx"], self.nlp[i]["ns"])
            self.nlp[i]["U_bounds"].check_and_adjust_dimensions(self.nlp[i]["nu"], self.nlp[i]["ns"] - 1)
        # Prepare initial guesses
        self.__add_to_nlp("X_init", X_init, False)
        self.__add_to_nlp("U_init", U_init, False)
        for i in range(self.nb_phases):
            self.nlp[i]["X_init"].check_and_adjust_dimensions(self.nlp[i]["nx"], self.nlp[i]["ns"])
            self.nlp[i]["U_init"].check_and_adjust_dimensions(self.nlp[i]["nu"], self.nlp[i]["ns"] - 1)
        # Variables and constraint for the optimization program
        self.V = []
        self.V_bounds = Bounds(interpolation_type=InterpolationType.CONSTANT)
        self.V_init = InitialConditions(interpolation_type=InterpolationType.CONSTANT)
        for i in range(self.nb_phases):
            self.__define_multiple_shooting_nodes_per_phase(self.nlp[i], i)
        # Declare the parameters to optimize
        self.param_to_optimize = {}
        self.__define_variable_time(initial_time_guess, time_min, time_max)
        # Define dynamic problem
        self.__add_to_nlp("ode_solver", ode_solver, True)
        for i in range(self.nb_phases):
            if self.nlp[0]["nx"] != self.nlp[i]["nx"] or self.nlp[0]["nu"] != self.nlp[i]["nu"]:
                raise RuntimeError("Dynamics with different nx or nu is not supported yet")
            self.__prepare_dynamics(self.nlp[i])
        # Prepare constraints
        self.g = []
        self.g_bounds = []
        ConstraintFunction.continuity(self)
        if len(constraints) > 0:
            for i, constraint_phase in enumerate(constraints):
                for constraint in constraint_phase:
                    self.add_constraint(constraint, i)
        # Objective functions
        self.J = []
        ObjectiveFunction.continuity(self)
        if len(objective_functions) > 0:
            for i, objective_functions_phase in enumerate(objective_functions):
                for objective_function in objective_functions_phase:
                    self.add_objective_function(objective_function, i)
@staticmethod
def __initialize_nlp(nlp):
nlp["nbQ"] = 0
nlp["nbQdot"] = 0
nlp["nbTau"] = 0
nlp["nbMuscles"] = 0
nlp["plot"] = {}
nlp["x"] = MX()
nlp["u"] = MX()
nlp["J"] = []
nlp["g"] = []
nlp["g_bounds"] = []
def __add_to_nlp(self, param_name, param, duplicate_if_size_is_one, _type=None):
if isinstance(param, (list, tuple)):
if len(param) != self.nb_phases:
raise RuntimeError(
f"{param_name} size({len(param)}) does not correspond to the number of phases({self.nb_phases})."
)
else:
for i in range(self.nb_phases):
self.nlp[i][param_name] = param[i]
else:
if self.nb_phases == 1:
self.nlp[0][param_name] = param
else:
if duplicate_if_size_is_one:
for i in range(self.nb_phases):
self.nlp[i][param_name] = param
else:
raise RuntimeError(f"{param_name} must be a list or tuple when number of phase is not equal to 1")
if _type is not None:
for nlp in self.nlp:
if nlp[param_name] is not None and not isinstance(nlp[param_name], _type):
raise RuntimeError(f"Parameter {param_name} must be a {str(_type)}")
    def __prepare_dynamics(self, nlp):
        """
        Builds the CasADi integrator(s) that propagate this phase's dynamics
        over one shooting interval.

        Populates nlp["dynamics"] (one integrator per shooting interval) and,
        when multi-threading is enabled, nlp["par_dynamics"] (a mapped
        evaluation over all ns intervals).

        :param nlp: Phase dict; reads "dynamics_func", "dt", "x", "u",
            "ode_solver", "tf", "ns" and optionally "external_forces".
        :raises RuntimeError: when COLLOCATION/CVODES is combined with a free
            time parameter or with external forces.
        """
        dynamics = nlp["dynamics_func"]
        ode_opt = {"t0": 0, "tf": nlp["dt"]}
        if nlp["ode_solver"] == OdeSolver.RK or nlp["ode_solver"] == OdeSolver.COLLOCATION:
            ode_opt["number_of_finite_elements"] = 5
        ode = {"x": nlp["x"], "p": nlp["u"], "ode": dynamics(nlp["x"], nlp["u"])}
        nlp["dynamics"] = []
        nlp["par_dynamics"] = {}
        if nlp["ode_solver"] == OdeSolver.RK:
            ode_opt["idx"] = 0
            # The custom RK4 integrator takes the callable itself, not the
            # pre-evaluated MX expression built above.
            ode["ode"] = dynamics
            if "external_forces" in nlp:
                # One integrator per external-force set, selected through "idx".
                for idx in range(len(nlp["external_forces"])):
                    ode_opt["idx"] = idx
                    nlp["dynamics"].append(RK4(ode, ode_opt))
            else:
                nlp["dynamics"].append(RK4(ode, ode_opt))
        elif nlp["ode_solver"] == OdeSolver.COLLOCATION:
            # CasADi's built-in integrators need a numeric time horizon.
            if isinstance(nlp["tf"], casadi.MX):
                raise RuntimeError("OdeSolver.COLLOCATION cannot be used while optimizing the time parameter")
            if "external_forces" in nlp:
                raise RuntimeError("COLLOCATION cannot be used with external_forces")
            nlp["dynamics"].append(casadi.integrator("integrator", "collocation", ode, ode_opt))
        elif nlp["ode_solver"] == OdeSolver.CVODES:
            if isinstance(nlp["tf"], casadi.MX):
                raise RuntimeError("OdeSolver.CVODES cannot be used while optimizing the time parameter")
            if "external_forces" in nlp:
                raise RuntimeError("CVODES cannot be used with external_forces")
            nlp["dynamics"].append(casadi.integrator("integrator", "cvodes", ode, ode_opt))
        if len(nlp["dynamics"]) == 1:
            if self.nb_threads > 1:
                # Map the single-interval integrator over all ns intervals in parallel.
                nlp["par_dynamics"] = nlp["dynamics"][0].map(nlp["ns"], "thread", self.nb_threads)
            # Reuse the same integrator object for every interval.
            nlp["dynamics"] = nlp["dynamics"] * nlp["ns"]
    def __define_multiple_shooting_nodes_per_phase(self, nlp, idx_phase):
        """
        For each node, puts X_bounds and U_bounds in V_bounds.
        Links X and U with V.
        :param nlp: The nlp problem
        :param idx_phase: Phase index, used only to name the symbolic vector
        """
        X = []
        U = []
        # V layout for this phase: [x_0, u_0, x_1, u_1, ..., u_{ns-1}, x_ns].
        nV = nlp["nx"] * (nlp["ns"] + 1) + nlp["nu"] * nlp["ns"]
        V = MX.sym("V_" + str(idx_phase), nV)
        V_bounds = Bounds([0] * nV, [0] * nV, interpolation_type=InterpolationType.CONSTANT)
        V_init = InitialConditions([0] * nV, interpolation_type=InterpolationType.CONSTANT)
        offset = 0
        for k in range(nlp["ns"] + 1):
            # States exist at every node, including the terminal one.
            X.append(V.nz[offset : offset + nlp["nx"]])
            V_bounds.min[offset : offset + nlp["nx"], 0] = nlp["X_bounds"].min.evaluate_at(shooting_point=k)
            V_bounds.max[offset : offset + nlp["nx"], 0] = nlp["X_bounds"].max.evaluate_at(shooting_point=k)
            V_init.init[offset : offset + nlp["nx"], 0] = nlp["X_init"].init.evaluate_at(shooting_point=k)
            offset += nlp["nx"]
            # Controls exist per interval only, so the last node has none.
            if k != nlp["ns"]:
                U.append(V.nz[offset : offset + nlp["nu"]])
                V_bounds.min[offset : offset + nlp["nu"], 0] = nlp["U_bounds"].min.evaluate_at(shooting_point=k)
                V_bounds.max[offset : offset + nlp["nu"], 0] = nlp["U_bounds"].max.evaluate_at(shooting_point=k)
                V_init.init[offset : offset + nlp["nu"], 0] = nlp["U_init"].init.evaluate_at(shooting_point=k)
                offset += nlp["nu"]
        V_bounds.check_and_adjust_dimensions(nV, 1)
        V_init.check_and_adjust_dimensions(nV, 1)
        nlp["X"] = X
        nlp["U"] = U
        # Append this phase's variables to the program-wide containers.
        self.V = vertcat(self.V, V)
        self.V_bounds.concatenate(V_bounds)
        self.V_init.concatenate(V_init)
def __init_phase_time(self, phase_time, objective_functions, constraints):
if isinstance(phase_time, (int, float)):
phase_time = [phase_time]
phase_time = list(phase_time)
initial_time_guess, time_min, time_max = [], [], []
has_penalty = self.__define_parameters_phase_time(
objective_functions, initial_time_guess, phase_time, time_min, time_max
)
self.__define_parameters_phase_time(
constraints, initial_time_guess, phase_time, time_min, time_max, has_penalty=has_penalty
)
return phase_time, initial_time_guess, time_min, time_max
def __define_parameters_phase_time(
self, penalty_functions, initial_time_guess, phase_time, time_min, time_max, has_penalty=False
):
for i, penalty_functions_phase in enumerate(penalty_functions):
for pen_fun in penalty_functions_phase:
if (
pen_fun["type"] == Objective.Mayer.MINIMIZE_TIME
or pen_fun["type"] == Objective.Lagrange.MINIMIZE_TIME
or pen_fun["type"] == Constraint.TIME_CONSTRAINT
):
if has_penalty:
raise RuntimeError("Time constraint/objective cannot declare more than once")
has_penalty = True
initial_time_guess.append(phase_time[i])
phase_time[i] = casadi.MX.sym(f"time_phase_{i}", 1, 1)
time_min.append(pen_fun["minimum"] if "minimum" in pen_fun else 0)
time_max.append(pen_fun["maximum"] if "maximum" in pen_fun else inf)
return has_penalty
    def __define_variable_time(self, initial_guess, minimum, maximum):
        """
        Declares the free phase durations (the symbolic nlp["tf"] entries) as
        optimization variables: appends them to V with bounds and initial
        guesses, and registers them in param_to_optimize["time"].
        :param initial_guess: The initial values taken from the phase_time vector
        :param minimum: variable time minimums as set by user (default: 0)
        :param maximum: variable time maximums as set by user (default: inf)
        """
        P = []
        for nlp in self.nlp:
            # Only phases whose duration was replaced by an MX symbol are free.
            if isinstance(nlp["tf"], MX):
                self.V = vertcat(self.V, nlp["tf"])
                P.append(self.V[-1])
        self.param_to_optimize["time"] = P
        nV = len(initial_guess)
        V_bounds = Bounds(minimum, maximum, interpolation_type=InterpolationType.CONSTANT)
        V_bounds.check_and_adjust_dimensions(nV, 1)
        self.V_bounds.concatenate(V_bounds)
        V_init = InitialConditions(initial_guess, interpolation_type=InterpolationType.CONSTANT)
        V_init.check_and_adjust_dimensions(nV, 1)
        self.V_init.concatenate(V_init)
def __init_penalty(self, penalties, penalty_type):
if len(penalties) > 0:
if self.nb_phases == 1:
if isinstance(penalties, dict):
penalties = (penalties,)
if isinstance(penalties[0], dict):
penalties = (penalties,)
elif isinstance(penalties, (list, tuple)):
for constraint in penalties:
if isinstance(constraint, dict):
raise RuntimeError(f"Each phase must declares its {penalty_type} (even if it is empty)")
return penalties
    def add_objective_function(self, new_objective_function, phase_number=-1):
        """Append an objective function to a phase (index_in_phase=-1 means append)."""
        self.modify_objective_function(new_objective_function, index_in_phase=-1, phase_number=phase_number)
    def modify_objective_function(self, new_objective_function, index_in_phase, phase_number=-1):
        """Replace (or, with index_in_phase=-1, append) an objective of the given phase."""
        self._modify_penalty(new_objective_function, index_in_phase, phase_number, "objective_functions")
    def add_constraint(self, new_constraint, phase_number=-1):
        """Append a constraint to a phase (index_in_phase=-1 means append)."""
        self.modify_constraint(new_constraint, index_in_phase=-1, phase_number=phase_number)
    def modify_constraint(self, new_constraint, index_in_phase, phase_number=-1):
        """Replace (or, with index_in_phase=-1, append) a constraint of the given phase."""
        self._modify_penalty(new_constraint, index_in_phase, phase_number, "constraints")
    def _modify_penalty(self, new_penalty, index_in_phase, phase_number, penalty_name):
        """
        Add (index_in_phase < 0) or replace a penalty in the given phase,
        keeping self.original_values in sync so the OCP can be pickled and
        rebuilt by load().

        :param penalty_name: Either "objective_functions" or "constraints".
        :raises RuntimeError: on a missing phase_number for a multiphase OCP,
            an out-of-range index_in_phase, or an unknown penalty_name.
        """
        if len(self.nlp) == 1:
            phase_number = 0
        else:
            if phase_number < 0:
                raise RuntimeError("phase_number must be specified for multiphase OCP")
        # Grow the per-phase containers until phase_number exists.
        while phase_number >= len(self.original_values[penalty_name]):
            self.original_values[penalty_name].append([])
        # Deep copies keep original_values independent of later caller mutations.
        if index_in_phase < 0:
            self.original_values[penalty_name][phase_number].append(deepcopy(new_penalty))
        else:
            if index_in_phase >= len(self.original_values[penalty_name][phase_number]):
                raise RuntimeError("It is not possible to modify a penalty when the penalty is not defined")
            self.original_values[penalty_name][phase_number][index_in_phase] = deepcopy(new_penalty)
        if penalty_name == "objective_functions":
            ObjectiveFunction.add_or_replace(self, self.nlp[phase_number], new_penalty, index_in_phase)
        elif penalty_name == "constraints":
            ConstraintFunction.add_or_replace(self, self.nlp[phase_number], new_penalty, index_in_phase)
        else:
            raise RuntimeError("Unrecognized penalty")
    def add_plot(self, fig_name, update_function, phase_number=-1, **parameters):
        """
        Register a custom real-time plot for a phase.

        :param fig_name: Target figure; reusing an existing name combines plots.
        :param update_function: Callable producing the values to plot.
        :param phase_number: Mandatory when the OCP has several phases.
        :param parameters: Forwarded to CustomPlot ('combine_to' is reserved).
        :raises RuntimeError: if 'combine_to' is passed or phase_number is missing.
        """
        if "combine_to" in parameters:
            raise RuntimeError(
                "'combine_to' cannot be specified in add_plot, " "please use same 'fig_name' to combine plots"
            )
        # Resolve the target phase (single-phase OCPs default to phase 0).
        if len(self.nlp) == 1:
            phase_number = 0
        else:
            if phase_number < 0:
                raise RuntimeError("phase_number must be specified for multiphase OCP")
        nlp = self.nlp[phase_number]
        custom_plot = CustomPlot(update_function, **parameters)
        if fig_name in nlp["plot"]:
            # Make sure we add a unique name in the dict
            custom_plot.combine_to = fig_name
            # NOTE(review): if fig_name is falsy here, plot_name is left unbound
            # and the assignment below raises UnboundLocalError -- confirm
            # whether an empty fig_name is a supported input.
            if fig_name:
                cmp = 0
                while True:
                    plot_name = f"{fig_name}_{cmp}"
                    if plot_name not in nlp["plot"]:
                        break
                    cmp += 1
        else:
            plot_name = fig_name
        nlp["plot"][plot_name] = custom_plot
def solve(self, solver="ipopt", show_online_optim=False, options_ipopt={}):
"""
Gives to CasADi states, controls, constraints, sum of all objective functions and theirs bounds.
Gives others parameters to control how solver works.
"""
all_J = MX()
for j_nodes in self.J:
for j in j_nodes:
all_J = vertcat(all_J, j)
for nlp in self.nlp:
for obj_nodes in nlp["J"]:
for obj in obj_nodes:
all_J = vertcat(all_J, obj)
all_g = MX()
all_g_bounds = Bounds(interpolation_type=InterpolationType.CONSTANT)
for i in range(len(self.g)):
for j in range(len(self.g[i])):
all_g = vertcat(all_g, self.g[i][j])
all_g_bounds.concatenate(self.g_bounds[i][j])
for nlp in self.nlp:
for i in range(len(nlp["g"])):
for j in range(len(nlp["g"][i])):
all_g = vertcat(all_g, nlp["g"][i][j])
all_g_bounds.concatenate(nlp["g_bounds"][i][j])
nlp = {"x": self.V, "f": sum1(all_J), "g": all_g}
options_common = {}
if show_online_optim:
options_common["iteration_callback"] = OnlineCallback(self)
if solver == "ipopt":
options = {
"ipopt.tol": 1e-6,
"ipopt.max_iter": 1000,
"ipopt.hessian_approximation": "exact", # "exact", "limited-memory"
"ipopt.limited_memory_max_history": 50,
"ipopt.linear_solver": "mumps", # "ma57", "ma86", "mumps"
}
for key in options_ipopt:
ipopt_key = key
if key[:6] != "ipopt.":
ipopt_key = "ipopt." + key
options[ipopt_key] = options_ipopt[key]
opts = {**options, **options_common}
else:
raise RuntimeError("Available solvers are: 'ipopt'")
solver = casadi.nlpsol("nlpsol", solver, nlp, opts)
# Bounds and initial guess
arg = {
"lbx": self.V_bounds.min,
"ubx": self.V_bounds.max,
"lbg": all_g_bounds.min,
"ubg": all_g_bounds.max,
"x0": self.V_init.init,
}
# Solve the problem
return solver.call(arg)
    def save(self, sol, file_path):
        """
        Pickle the OCP construction arguments, the solver output and the tool
        versions to *file_path* (forced to the .bo extension).

        :param sol: The result dict returned by solve().
        :raises RuntimeError: if file_path carries a foreign extension.
        """
        _, ext = os.path.splitext(file_path)
        if ext == "":
            file_path = file_path + ".bo"
        elif ext != ".bo":
            raise RuntimeError(f"Incorrect extension({ext}), it should be (.bo) or (.bob) if you use save_get_data.")
        # NOTE: "ocp_initilializer" (sic) is the historical key; load() expects it,
        # so it must not be renamed without a file-format migration.
        OptimalControlProgram._save_with_pickle(
            {"ocp_initilializer": self.original_values, "sol": sol, "versions": self.version}, file_path
        )
    def save_get_data(self, sol, file_path, **parameters):
        """
        Pickle only the post-processed solution data (Data.get_data output) to
        *file_path* (forced to the .bob extension).

        :param sol: The result dict returned by solve(); only sol["x"] is used.
        :param parameters: Forwarded to Data.get_data.
        :raises RuntimeError: if file_path carries a foreign extension.
        """
        _, ext = os.path.splitext(file_path)
        if ext == "":
            file_path = file_path + ".bob"
        elif ext != ".bob":
            raise RuntimeError(f"Incorrect extension({ext}), it should be (.bob) or (.bo) if you use save.")
        OptimalControlProgram._save_with_pickle({"data": Data.get_data(self, sol["x"], **parameters)}, file_path)
@staticmethod
def _save_with_pickle(dict, file_path):
dir, _ = os.path.split(file_path)
if dir != "" and not os.path.isdir(dir):
os.makedirs(dir)
with open(file_path, "wb") as file:
pickle.dump(dict, file)
    @staticmethod
    def load(file_path):
        """
        Rebuild an OCP and its solution from a .bo file written by save().

        Security note: this unpickles the file -- only load trusted files.

        :return: (ocp, sol) tuple.
        :raises RuntimeError: when the stored tool versions differ from the
            installed ones.
        """
        with open(file_path, "rb") as file:
            data = pickle.load(file)
        ocp = OptimalControlProgram(**data["ocp_initilializer"])
        for key in data["versions"].keys():
            if data["versions"][key] != ocp.version[key]:
                raise RuntimeError(
                    f"Version of {key} from file ({data['versions'][key]}) is not the same as the "
                    f"installed version ({ocp.version[key]})"
                )
        sol = data["sol"]
        return (ocp, sol)
    @staticmethod
    def read_information(file_path):
        """
        Pretty-print the construction arguments stored in a .bo file, skipping
        the bulky bounds and initial-condition entries.

        Security note: this unpickles the file -- only read trusted files.
        """
        with open(file_path, "rb") as file:
            data = pickle.load(file)
        original_values = data["ocp_initilializer"]
        print("****************************** Informations ******************************")
        for key in original_values.keys():
            if key not in [
                "X_init",
                "U_init",
                "X_bounds",
                "U_bounds",
            ]:
                print(f"{key} : ")
                OptimalControlProgram._deep_print(original_values[key])
                print("")
@staticmethod
def _deep_print(elem, label=""):
if isinstance(elem, (list, tuple)):
for k in range(len(elem)):
OptimalControlProgram._deep_print(elem[k])
if k != len(elem) - 1:
print("")
elif isinstance(elem, dict):
for key in elem.keys():
OptimalControlProgram._deep_print(elem[key], label=key)
else:
if label == "":
print(f" {elem}")
else:
print(f" [{label}] = {elem}")
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import tagging.fields
class Migration(migrations.Migration):
    """Initial schema for the page-tagging app: one PageTagging row per CMS page."""
    # The django-cms Page table must exist before the OneToOneField below.
    dependencies = [
        ('cms', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='PageTagging',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('page_tags', tagging.fields.TagField(help_text='Please provide a comma-separated list of tags.', max_length=255, verbose_name='Tags', blank=True)),
                ('page', models.OneToOneField(verbose_name='Page', to='cms.Page')),
            ],
            # NOTE(review): verbose names say 'Tag'/'Tags' although the model is
            # PageTagging -- confirm this mirrors the app's models.py Meta.
            options={
                'verbose_name': 'Tag',
                'verbose_name_plural': 'Tags',
            },
            bases=(models.Model,),
        ),
    ]
|
import os
from os.path import dirname
import src.superannotate as sa
from src.superannotate.lib.core.plugin import VideoPlugin
from tests.integration.base import BaseTestCase
import pytest
class TestVideo(BaseTestCase):
    """Integration tests for video upload and frame extraction against the
    SuperAnnotate backend (requires credentials, network and local fixtures)."""
    PROJECT_NAME = "test video upload1"
    SECOND_PROJECT_NAME = "test video upload2"
    PROJECT_DESCRIPTION = "Desc"
    PROJECT_TYPE = "Vector"
    TEST_FOLDER_NAME = "new_folder"
    TEST_VIDEO_FOLDER_PATH = "data_set/sample_videos/single"
    TEST_VIDEO_FOLDER_PATH_BIG = "data_set/sample_videos/earth_video"
    TEST_VIDEO_NAME = "video.mp4"
    TEST_FOLDER_NAME_BIG_VIDEO = "big"
    @property
    def folder_path(self):
        # Fixture directory resolved relative to the tests package, not the CWD.
        return os.path.join(dirname(dirname(__file__)), self.TEST_VIDEO_FOLDER_PATH)
    @property
    def folder_path_big(self):
        return os.path.join(dirname(dirname(__file__)), self.TEST_VIDEO_FOLDER_PATH_BIG)
    def setUp(self, *args, **kwargs):
        # tearDown first: remove leftovers from a previously aborted run.
        self.tearDown()
        self._project = sa.create_project(
            self.PROJECT_NAME, self.PROJECT_DESCRIPTION, self.PROJECT_TYPE
        )
        self._second_project = sa.create_project(
            self.SECOND_PROJECT_NAME, self.PROJECT_DESCRIPTION, self.PROJECT_TYPE
        )
    def tearDown(self) -> None:
        # Delete every project whose name matches, not just the handles we hold.
        for project_name in (self.PROJECT_NAME, self.SECOND_PROJECT_NAME):
            projects = sa.search_projects(project_name, return_metadata=True)
            for project in projects:
                sa.delete_project(project)
    def test_video_upload_from_folder(self):
        """Same folder uploaded to root and to a subfolder yields the same
        number of extracted frames (5 at 1 fps for the sample video)."""
        sa.upload_videos_from_folder_to_project(
            self.PROJECT_NAME, self.folder_path, target_fps=1
        )
        sa.create_folder(self.PROJECT_NAME, self.TEST_FOLDER_NAME)
        sa.upload_videos_from_folder_to_project(
            f"{self.PROJECT_NAME}/{self.TEST_FOLDER_NAME}",
            self.folder_path,
            target_fps=1,
        )
        self.assertEqual(len(sa.search_images(self.PROJECT_NAME)), 5)
        self.assertEqual(
            len(sa.search_images(f"{self.PROJECT_NAME}/{self.TEST_FOLDER_NAME}")),
            len(sa.search_images(self.PROJECT_NAME)),
        )
    def test_single_video_upload(self):
        """A single video at 1 fps produces 5 frame images in the project."""
        sa.upload_video_to_project(
            self.PROJECT_NAME,
            f"{self.folder_path}/{self.TEST_VIDEO_NAME}",
            target_fps=1,
        )
        self.assertEqual(len(sa.search_images(self.PROJECT_NAME)), 5)
    @pytest.fixture(autouse=True)
    def inject_fixtures(self, caplog):
        # Bridge pytest's caplog fixture into this unittest-style class.
        self._caplog = caplog
    def test_video_big(self):
        """Re-uploading the same video must skip all already-present frames."""
        sa.create_folder(self.PROJECT_NAME, self.TEST_FOLDER_NAME_BIG_VIDEO)
        sa.upload_video_to_project(
            f"{self.PROJECT_NAME}/{self.TEST_FOLDER_NAME_BIG_VIDEO}",
            f"{self.folder_path_big}/earth.mov",
            target_fps=1,
        )
        self.assertEqual(len(sa.search_images(f"{self.PROJECT_NAME}/{self.TEST_FOLDER_NAME_BIG_VIDEO}")), 31)
        sa.upload_video_to_project(
            f"{self.PROJECT_NAME}/{self.TEST_FOLDER_NAME_BIG_VIDEO}",
            f"{self.folder_path_big}/earth.mov",
            target_fps=1,
        )
        self.assertIn("31 already existing images found that won't be uploaded.", self._caplog.text)
    def test_frame_extraction(self):
        """Frame counts for the fixture video with and without a start offset."""
        frames_gen = VideoPlugin.frames_generator(
            f"{self.folder_path_big}/earth.mov", target_fps=None, start_time=0.0, end_time=None
        )
        self.assertEqual(len([*frames_gen]), 901)
        frames_gen = VideoPlugin.frames_generator(
            f"{self.folder_path_big}/earth.mov", target_fps=None, start_time=10.0, end_time=None
        )
        self.assertGreaterEqual(len([*frames_gen]), 589)
|
from functools import wraps
from flask_admin import Admin, AdminIndexView # type: ignore
from flask_admin.contrib.peewee import ModelView # type: ignore
from flask_login import current_user # type: ignore
from lms.lmsdb.models import Comment, CommentText, Solution
from lms.lmsweb import webapp
from lms.models.errors import fail
def managers_only(func):
    """Endpoint decorator: reject callers whose role is not a manager."""
    # Must have @wraps to work with endpoints.
    @wraps(func)
    def wrapper(*args, **kwargs):
        if current_user.role.is_manager:
            return func(*args, **kwargs)
        return fail(403, 'This user has no permissions to view this page.')
    return wrapper
class AccessibleByAdminMixin:
    """Restrict a Flask-Admin view to authenticated administrators."""
    def is_accessible(self):
        # Anonymous users are rejected before the role attribute is touched.
        if not current_user.is_authenticated:
            return False
        return current_user.role.is_administrator
class MyAdminIndexView(AccessibleByAdminMixin, AdminIndexView):
    """Admin landing page, visible to administrators only."""
    pass
class AdminModelView(AccessibleByAdminMixin, ModelView):
    """Base Peewee model view, visible to administrators only."""
    pass
class AdminSolutionView(AdminModelView):
    """Solution list view, filterable by submission state."""
    column_filters = (
        Solution.state.name,
    )
    # Render the raw state value as its human-readable label.
    column_choices = {
        Solution.state.name: Solution.STATES.to_choices(),
    }
class AdminCommentView(AdminModelView):
    """Comment list view, filterable by timestamp and auto-generated flag."""
    column_filters = (
        Comment.timestamp.name,
        Comment.is_auto.name,
    )
class AdminCommentTextView(AdminModelView):
    """Comment-text list view, filterable by text and linked flake8 rule."""
    column_filters = (
        CommentText.text.name,
        CommentText.flake8_key.name,
    )
# Models that need a customized view class; anything else uses AdminModelView.
SPECIAL_MAPPING = {
    Solution: AdminSolutionView,
    Comment: AdminCommentView,
    CommentText: AdminCommentTextView,
}
# Single Flask-Admin instance mounted on the LMS web app.
admin = Admin(
    webapp,
    name='LMS',
    template_mode='bootstrap4',
    index_view=MyAdminIndexView(),  # NOQA
)
|
# (c) @AbirHasan2005
import asyncio
from configs import Config
from pyrogram import Client
from pyrogram.types import Message
from pyrogram.errors import FloodWait
from helpers.filters import FilterMessage
async def ForwardMessage(client: Client, msg: Message):
    """Forward (or copy) *msg* to the configured target chat.

    Returns 400 when FilterMessage rejects the message; on FloodWait the
    coroutine sleeps for the demanded delay and retries itself recursively.
    Any other error is reported to the bot owner's Saved Messages.
    """
    try:
        ## --- Check 1 --- ##
        # Drop messages that the configured filters reject.
        can_forward = await FilterMessage(message=msg)
        if can_forward == 400:
            return 400
        ## --- Check 2 --- ##
        # copy() strips the "forwarded from" header; forward() keeps it.
        if Config.FORWARD_AS_COPY is True:
            await msg.copy(int(Config.FORWARD_TO_CHAT_ID))
        else:
            await msg.forward(int(Config.FORWARD_TO_CHAT_ID))
    except FloodWait as e:
        # e.x is the wait time in seconds in older pyrogram releases
        # (newer ones use e.value) -- NOTE(review): confirm the pinned version.
        await asyncio.sleep(e.x)
        await client.send_message(chat_id="me", text=f"#FloodWait: Stopped Forwarder for `{e.x}s`!")
        await asyncio.sleep(Config.SLEEP_TIME)
        # Retry the same message once the flood window has passed.
        await ForwardMessage(client, msg)
    except Exception as err:
        # Best-effort: report the failure to Saved Messages and continue.
        await client.send_message(chat_id="me", text=f"#ERROR: `{err}`")
|
import os
from offenewahlen_api.settings_user import *
from offenewahlen_api.settings import *
# Development settings: debug on, local sqlite database in the project root.
DEBUG = True
# Keep the Django Debug Toolbar from hijacking redirects during development.
DEBUG_TB_INTERCEPT_REDIRECTS = False
USER_SETTINGS_EXIST = True
# Overrides any DATABASES provided by the starred settings imports above.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'app.sqlite3')
    }
}
|
from typing import List
from pickle import load
def clean_urls(urls: List[str], file_name='./cache.pkl') -> List[str]:
    """
    Remove from *urls* any duplicates and any url already in the pickled cache.

    Args:
        urls (List[str]): list of urls
        file_name (str): path to a pickle file holding previously-seen urls

    Returns:
        List[str]: urls absent from the cache, deduplicated (order is not
        preserved); the original list unchanged when the cache file is missing.
    """
    # NOTE: pickle can execute arbitrary code -- only use a trusted cache file.
    try:
        with open(file_name, 'rb') as file:
            cache = load(file)
            return list(set(urls) - set(cache))
    except FileNotFoundError:
        return urls
|
from setuptools import find_packages, setup
# Minimal packaging manifest; version and metadata are managed by hand.
setup(
    name="dtumlops",
    packages=find_packages(),
    version="0.1.0",
    description="A simple CNN project, testing cookiecutter",
    author="MichaelF",
    license="MIT",
)
|
from fastapi import APIRouter, Depends, Request, Response
from sqlalchemy.orm import Session
from typing import List
from uuid import UUID
from api.models.event_type import EventTypeCreate, EventTypeRead, EventTypeUpdate
from api.routes import helpers
from db import crud
from db.database import get_db
from db.schemas.event_type import EventType
# All event-type endpoints are mounted under /event/type.
router = APIRouter(
    prefix="/event/type",
    tags=["Event Type"],
)
#
# CREATE
#
def create_event_type(
    event_type: EventTypeCreate,
    request: Request,
    response: Response,
    db: Session = Depends(get_db),
):
    """Create an event type and point Content-Location at the new resource.

    The response body/status are supplied by helpers.api_route_create below.
    """
    uuid = crud.create(obj=event_type, db_table=EventType, db=db)
    response.headers["Content-Location"] = request.url_for("get_event_type", uuid=uuid)
helpers.api_route_create(router, create_event_type)
#
# READ
#
def get_all_event_types(db: Session = Depends(get_db)):
    """Return every event type row."""
    return crud.read_all(db_table=EventType, db=db)
def get_event_type(uuid: UUID, db: Session = Depends(get_db)):
    """Return a single event type by UUID."""
    return crud.read(uuid=uuid, db_table=EventType, db=db)
helpers.api_route_read_all(router, get_all_event_types, List[EventTypeRead])
helpers.api_route_read(router, get_event_type, EventTypeRead)
#
# UPDATE
#
def update_event_type(
    uuid: UUID,
    event_type: EventTypeUpdate,
    request: Request,
    response: Response,
    db: Session = Depends(get_db),
):
    """Update an event type in place and echo its location in Content-Location."""
    crud.update(uuid=uuid, obj=event_type, db_table=EventType, db=db)
    response.headers["Content-Location"] = request.url_for("get_event_type", uuid=uuid)
helpers.api_route_update(router, update_event_type)
#
# DELETE
#
def delete_event_type(uuid: UUID, db: Session = Depends(get_db)):
    """Delete an event type by UUID."""
    crud.delete(uuid=uuid, db_table=EventType, db=db)
helpers.api_route_delete(router, delete_event_type)
|
import subprocess
import datetime
import os
# Distribution name; underscores are not valid in PyPI-style names.
NAME = 'clothstream'.replace('_', '-')
# (major, minor, micro, release stage, stage serial) -- consumed by get_version().
VERSION = __version__ = (0, 1, 0, 'alpha', 0)
__author__ = 'Julien Aubert'
def get_version():  # pragma: no cover
    """Derives a PEP386-compliant version number from VERSION."""
    assert len(VERSION) == 5
    assert VERSION[3] in ('alpha', 'beta', 'rc', 'final')
    # Drop the micro component when it is 0 (e.g. "0.1" instead of "0.1.0").
    significant = 2 if VERSION[2] == 0 else 3
    main = '.'.join(str(part) for part in VERSION[:significant])
    stage, serial = VERSION[3], VERSION[4]
    sub = ''
    if stage == 'alpha' and serial == 0:
        # Bare alpha: tag a .devTIMESTAMP suffix from the latest git commit.
        git_changeset = get_git_changeset()
        if git_changeset:
            sub = '.dev%s' % git_changeset
    elif stage != 'final':
        sub = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}[stage] + str(serial)
    elif serial > 0:
        sub = '-%s' % serial
    return main + sub
def get_git_changeset():  # pragma: no cover
    """Returns a numeric identifier of the latest git changeset.
    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
    This value isn't guaranteed to be unique, but collisions are very unlikely,
    so it's sufficient for generating the development version numbers.
    """
    # Repository root: two levels above this file.
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    proc = subprocess.Popen(
        'git log --pretty=format:%ct --quiet -1 HEAD',
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        shell=True, cwd=repo_dir, universal_newlines=True,
    )
    raw_timestamp = proc.communicate()[0]
    try:
        changeset_time = datetime.datetime.utcfromtimestamp(int(raw_timestamp))
    except ValueError:
        # Not a git checkout (or git unavailable): no changeset id.
        return None
    return changeset_time.strftime('%Y%m%d%H%M%S')
|
from keras import backend as K
import os
def set_keras_backend(backend):
    """Force Keras to use *backend*, reloading keras.backend so it takes effect.

    :param backend: Backend name, e.g. "cntk", "tensorflow" or "theano".
    :raises AssertionError: if the backend could not be switched.
    """
    if K.backend() != backend:
        os.environ['KERAS_BACKEND'] = backend
        # BUGFIX: the original fallback chain caught NameError where an
        # ImportError is actually raised, so its pre-3.4 paths were unreachable.
        try:
            from importlib import reload  # Python 3.4+
        except ImportError:
            from imp import reload  # Python 3.0 - 3.3
        reload(K)
        assert K.backend() == backend
# Backend must be selected before the Keras-dependent imports below run.
set_keras_backend("cntk")
# 'tf' dimension ordering (channels-last convention in the Keras 1.x API) --
# NOTE(review): kept even though the backend is CNTK; confirm intentional.
K.set_image_dim_ordering('tf')
import pandas as pd
import numpy as np
from timeit import default_timer as timer
from keras.callbacks import ModelCheckpoint
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Input, Dense, Flatten, Embedding
from keras.layers.pooling import GlobalMaxPooling1D,MaxPooling1D
from keras.layers.convolutional import Convolution1D
from keras.layers.core import Lambda
from keras import optimizers
from keras.models import Model
from keras.regularizers import l1
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from sklearn import svm, metrics
from sklearn.model_selection import train_test_split
from IPython.display import SVG
import pydot
from keras.utils.vis_utils import model_to_dot
import re
import io
from nltk.tokenize import TweetTokenizer
from nltk.tokenize import RegexpTokenizer
import num2words
# Reproducibility: fix the NumPy RNG used by the shuffle/split below.
random_seed=1
np.random.seed(random_seed)
# NOTE(review): HOMEPATH is Windows-specific; this raises KeyError on POSIX
# systems where it is unset -- confirm the intended platform.
base_path = os.environ['HOMEPATH']
data_folder='data'
data_dir = os.path.join(base_path, data_folder)
embedding_folder = os.path.join(base_path, 'vectors')
model_identifier = 'SSWE_Basic_Keras_w_CNTK'
if not os.path.exists(embedding_folder):
    os.makedirs(embedding_folder)
max_sequence_length = 15 # each sentence of the input should be padded to have at least this many tokens
embedding_dim = 50 # Embedding layer size
no_filters = 15 # No of filters for the convolution layer
filter_size = 5 # Filter size for the convolution layer
trainable = True # flag specifying whether the embedding layer weights should be changed during the training or not
batch_size = 64 # batch size can be increased to have better gpu utilization
no_epochs = 5 # No of training epochs
# Data preprocessing
pos_emoticons=["(^.^)","(^-^)","(^_^)","(^_~)","(^3^)","(^o^)","(~_^)","*)",":)",":*",":-*",":]",":^)",":}",
":>",":3",":b",":-b",":c)",":D",":-D",":O",":-O",":o)",":p",":-p",":P",":-P",":Þ",":-Þ",":X",
":-X",";)",";-)",";]",";D","^)","^.~","_)m"," ~.^","<=8","<3","<333","=)","=///=","=]","=^_^=",
"=<_<=","=>.<="," =>.>="," =3","=D","=p","0-0","0w0","8D","8O","B)","C:","d'-'","d(>w<)b",":-)",
"d^_^b","qB-)","X3","xD","XD","XP","ʘ‿ʘ","❤","💜","💚","💕","💙","💛","💓","💝","💖","💞",
"💘","💗","😗","😘","😙","😚","😻","😀","😁","😃","☺","😄","😆","😇","😉","😊","😋","😍",
"😎","😏","😛","😜","😝","😮","😸","😹","😺","😻","😼","👍"]
neg_emoticons=["--!--","(,_,)","(-.-)","(._.)","(;.;)9","(>.<)","(>_<)","(>_>)","(¬_¬)","(X_X)",":&",":(",":'(",
":-(",":-/",":-@[1]",":[",":\\",":{",":<",":-9",":c",":S",";(",";*(",";_;","^>_>^","^o)","_|_",
"`_´","</3","<=3","=/","=\\",">:(",">:-(","💔","☹️","😌","😒","😓","😔","😕","😖","😞","😟",
"😠","😡","😢","😣","😤","😥","😦","😧","😨","😩","😪","😫","😬","😭","😯","😰","😱","😲",
"😳","😴","😷","😾","😿","🙀","💀","👎"]
# Emails
emailsRegex=re.compile(r'[\w\.-]+@[\w\.-]+')
# Mentions
userMentionsRegex=re.compile(r'(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)')
#Urls
# NOTE(review): the raw-string prefix is INSIDE the quotes ('r(f|ht)...'), so
# this pattern literally requires a leading "r" character -- almost certainly a
# bug, but left untouched because downstream artifacts were produced with it.
urlsRegex=re.compile('r(f|ht)(tp)(s?)(://)(.*)[.|/][^ ]+') # It may not be handling all the cases like t.co without http
#Numerics
numsRegex=re.compile(r"\b\d+\b")
# Strips isolated punctuation between word characters (emoticons were already
# replaced by special tokens at this point).
punctuationNotEmoticonsRegex=re.compile(r'(?<=\w)[^\s\w](?![^\s\w])')
emoticonsDict = {}
# Map each emoticon to a unique POS_/NEG_EMOTICON_<number-word> token.
for i,each in enumerate(pos_emoticons):
    emoticonsDict[each]=' POS_EMOTICON_'+num2words.num2words(i).upper()+' '
for i,each in enumerate(neg_emoticons):
    emoticonsDict[each]=' NEG_EMOTICON_'+num2words.num2words(i).upper()+' '
# use these three lines to do the replacement
rep = dict((re.escape(k), v) for k, v in emoticonsDict.items())
emoticonsPattern = re.compile("|".join(rep.keys()))
def read_data(filename):
    """Read the raw tweet data from a file, replacing emoticons, mentions,
    emails, urls, numbers and stray punctuation with special tokens.

    :param filename: Path to a text file with one tweet per line.
    :return: List of normalized, re-tokenized lines.
    """
    with open(filename, 'r') as f:
        all_lines = f.readlines()
    padded_lines = []
    # PERF: build the tokenizer once instead of once per line.
    tokenizer = TweetTokenizer()
    for line in all_lines:
        line = emoticonsPattern.sub(lambda m: rep[re.escape(m.group(0))], line.lower().strip())
        line = userMentionsRegex.sub(' USER ', line)
        line = emailsRegex.sub(' EMAIL ', line)
        line = urlsRegex.sub(' URL ', line)
        line = numsRegex.sub(' NUM ', line)
        line = punctuationNotEmoticonsRegex.sub(' PUN ', line)
        # Squeeze runs of 3+ repeated characters down to 2 ("soooo" -> "soo").
        line = re.sub(r'(.)\1{2,}', r'\1\1', line)
        padded_lines.append(' '.join(tokenizer.tokenize(line)))
    return padded_lines
def read_labels(filename):
    """Read the tweet labels from *filename* (one per line) as a float array.

    The raw corpus encodes the positive class as 4; it is re-encoded as 1 here.
    """
    raw = np.genfromtxt(filename, delimiter='\n')
    return np.where(raw == 4, 1, raw)
# Loading Training and Validation Data
texts = []
labels = []
nb_train_samples = 0
nb_valid_samples = 0
print ('Loading Training Labels')
# NOTE(review): backslash separator here vs forward slashes below -- these
# paths only behave consistently on Windows.
train_labels=read_labels(data_dir+'\\training_label.csv')
print ('Loading Training data')
train_texts=read_data(data_dir+'//training_text.csv')
print (len(train_labels), len(train_texts))
print ("Using Keras tokenizer to tokenize and build word index")
tokenizer = Tokenizer(lower=False, filters='\n\t?"!')
train_texts=[each for each in train_texts]
tokenizer.fit_on_texts(train_texts)
# Rebuild the word index ordered by descending frequency, reserving index 0
# for <PAD> and index 1 for <UNK>.
sorted_voc = [wc[0] for wc in sorted(tokenizer.word_counts.items(),reverse=True, key= lambda x:x[1]) ]
tokenizer.word_index = dict(list(zip(sorted_voc, list(range(2, len(sorted_voc) + 2)))))
tokenizer.word_index['<PAD>']=0
tokenizer.word_index['<UNK>']=1
word_index = tokenizer.word_index
reverse_dictionary={v:k for (k,v) in tokenizer.word_index.items()}
vocab_size=len(tokenizer.word_index.keys())
print ('Size of the vocab is', vocab_size)
# Shuffling /Padding the data
print ('Padding sentences and shuffling the data')
sequences = tokenizer.texts_to_sequences(train_texts)
#Pad the sentences to have consistent length
data = pad_sequences(sequences, maxlen=max_sequence_length, padding='post')
labels = to_categorical(np.asarray(train_labels))
# Shuffle data and labels with the same permutation before splitting.
indices = np.arange(len(labels))
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
train_x, valid_x, train_y, valid_y=train_test_split(data, labels, test_size=0.2, random_state=random_seed)
train_x=np.array(train_x).astype('float32')
valid_x=np.array(valid_x).astype('float32')
train_y=np.array(train_y)
valid_y=np.array(valid_y)
# NOTE(review): embedding_matrix is allocated but never filled or used below --
# apparently dead code.
embedding_matrix = np.zeros((len(word_index) , embedding_dim))
training_word_index=tokenizer.word_index.copy()
# Model Instantiation
print ('Initializing the model')
# Keep only the best checkpoint as measured by validation accuracy.
mcp = ModelCheckpoint('./model_chkpoint', monitor="val_acc", save_best_only=True, save_weights_only=False)
#Creating network
model = Sequential()
model.add(Embedding(len(word_index)+2,
                    embedding_dim,
                    input_length=max_sequence_length,
                    trainable=trainable, name='embedding'))
model.add(Convolution1D(no_filters, filter_size, activation='relu'))
model.add(MaxPooling1D(max_sequence_length - filter_size))
model.add(Flatten())
model.add(Dense(no_filters, activation='tanh'))
model.add(Dense(len(labels[0]), activation='softmax'))
# NOTE(review): lr=0.1 is unusually high for Adam -- confirm intentional.
optim=optimizers.Adam(lr=0.1, )
model.compile(loss='categorical_crossentropy',
              optimizer=optim,
              metrics=['acc'])
model.summary()
# Training
start=timer()
# nb_epoch is the legacy Keras 1.x spelling of epochs.
hist=model.fit(train_x, train_y,nb_epoch=no_epochs, batch_size=batch_size,validation_data=(valid_x, valid_y),callbacks=[mcp])
end=timer()
def export_embeddings(model_orig):
    """ export embeddings to file"""
    # Layer 0 is the Embedding layer; one weight row per vocabulary index.
    embedding_weights=pd.DataFrame(model_orig.layers[0].get_weights()[0]).reset_index()
    word_indices_df=pd.DataFrame.from_dict(training_word_index,orient='index').reset_index()
    word_indices_df.columns=['word','index']
    print (word_indices_df.shape,embedding_weights.shape)
    # Implicit join on the shared 'index' column maps each word to its vector.
    merged=pd.merge(word_indices_df,embedding_weights)
    print (merged.shape)
    # Drop the numeric index so the TSV is "word<TAB>v1<TAB>...<TAB>vN".
    merged=merged[[each for each in merged.columns if each!='index']]
    merged.to_csv(embedding_folder+'//embeddings_{}.tsv'.format(model_identifier), sep='\t',
                  index=False, header=False,float_format='%.6f',encoding='utf-8')
    return embedding_weights, word_indices_df, merged
embedding_weights, word_indices_df, merged_df=export_embeddings(model)
|
# Base-conversion exercise: read an integer and print it in the chosen base.
# BUGFIX: format specs replace str(bin(n))[2:]-style slicing, which mangled
# negative numbers (e.g. -5 became "b101" instead of "-101").
n = int(input('Digite um numero para ser convertido: '))
o = int(input('1 - para binário.\n'
              '2 - para octal.\n'
              '3 - para hexadecimal.\n'
              'Sua opção: '))
if o == 1:
    print(f'O numero {n} em binário é igual a : {n:b}!')
elif o == 2:
    print(f'O numero {n} em octal é igual a: {n:o}!')
elif o == 3:
    print(f'O numero {n} em hexadecimal é igual a: {n:x}!')
else:
    print('Opção INVALIDA!')
|
import os
import pickle as pkl
from tqdm import tqdm
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import pandas as pd
import ujson as json
import nltk
from nltk.tokenize import word_tokenize
from time import time
# Seed NumPy's RNG from the wall clock so example shuffling differs per run.
np.random.seed(int(time()))
# Separator used when joining token lists back into plain text.
SPACE = ' '
def stat_length(seq_length):
    """Print descriptive statistics of the given sequence lengths and show
    a per-example scatter plot plus a 10-bin histogram (blocks on plt.show())."""
    print('Seq len info :')
    seq_len = np.asarray(seq_length)
    idx = np.arange(0, len(seq_len), dtype=np.int32)
    print(stats.describe(seq_len))
    plt.figure(figsize=(16, 9))
    # Left panel: sequence length per example index.
    plt.subplot(121)
    plt.plot(idx[:], seq_len[:], 'ro')
    plt.grid(True)
    plt.xlabel('index')
    plt.ylabel('seq_len')
    plt.title('Scatter Plot')
    # Right panel: length distribution.
    plt.subplot(122)
    plt.hist(seq_len, bins=10, label=['seq_len'])
    plt.grid(True)
    plt.xlabel('seq_len')
    plt.ylabel('freq')
    plt.title('Histogram')
    plt.show()
def stat_altlex(eng_sentences, sim_sentences, labels):
    """Print statistics about altlex (alternative lexicalization) phrases.

    Collects the altlex segment (element 1 of each sentence triple) from every
    English and simple-English sentence, split into causal (label != 0) and
    non-causal (label == 0) classes, then reports how many distinct altlexes
    appear in each class and in both.
    """
    c_alt, nc_alt = [], []
    for eng, sim, label in zip(eng_sentences, sim_sentences, labels):
        if label == 0:
            nc_alt.append(' '.join(w for w in eng[1]))
            nc_alt.append(' '.join(w for w in sim[1]))
        else:
            c_alt.append(' '.join(w for w in eng[1]))
            c_alt.append(' '.join(w for w in sim[1]))
    c_alt_set = set(c_alt)
    nc_alt_set = set(nc_alt)
    co_alt_set = c_alt_set.intersection(nc_alt_set)
    # Count occurrences of the shared altlexes within each class separately.
    # (The previous zip(c_alt, nc_alt) loop silently truncated to the shorter
    # list, and its `nc in nc_alt_set` test was always true, so co_in_nc was
    # just len(zip) — both counts were wrong.)
    co_in_c = sum(1 for c in c_alt if c in co_alt_set)
    co_in_nc = sum(1 for nc in nc_alt if nc in co_alt_set)
    print('#Altlexes rep causal - {}'.format(len(c_alt_set)))
    print('#Altlexes rep non_causal - {}'.format(len(nc_alt_set)))
    print('#Altlexes in both set - {}'.format(len(co_alt_set)))
    print(co_alt_set)
    print('#CoAltlex in causal - {}'.format(co_in_c))
    print('#CoAltlex in non_causal - {}'.format(co_in_nc))
def seg_length(sentences):
    """Return (pre, alt, cur) token counts for every segmented sentence."""
    return [(len(sen[0]), len(sen[1]), len(sen[2])) for sen in sentences]
def check_null(sen):
    """Replace empty segments of *sen* with the ['<NULL>'] placeholder.

    *sen* is expected to be a (pre, alt, cur) triple of token lists.  When it
    has fewer than three segments, only the first one (if any) is kept and the
    rest are padded with ['<NULL>']; the returned flag is True in that case so
    callers can log the defective sample.

    Returns (pre, mid, cur, flag).
    """
    flag = False
    if len(sen) == 3:
        pre = sen[0] if len(sen[0]) > 0 else ['<NULL>']
        mid = sen[1] if len(sen[1]) > 0 else ['<NULL>']
        cur = sen[2] if len(sen[2]) > 0 else ['<NULL>']
    else:
        # Malformed sample: keep the first segment when present (guarding
        # against a completely empty sample, which used to raise IndexError).
        pre = sen[0] if len(sen) > 0 and len(sen[0]) > 0 else ['<NULL>']
        mid = ['<NULL>']
        cur = ['<NULL>']
        flag = True
    return pre, mid, cur, flag
def preprocess_train(file_path, file_name, data_type, is_build=False):
    """Read the tab-separated training file and build example dicts.

    Each data line is: label, then three English segments (pre, altlex, cur),
    then three simple-English segments.  Every line yields two examples (the
    English and the simple-English sentence) sharing the same causal label.

    Returns (examples, sentences, (seg_eng_filtered, seg_sim_filtered),
    labels); `sentences` (whole-sentence token joins for corpus building) is
    only populated when *is_build* is True.
    """
    print("Generating {} examples...".format(data_type))
    examples = []
    engs, sims = [], []
    seg_engs, seg_sims, labels = [], [], []
    data_path = os.path.join(file_path, file_name)
    # Use a context manager so the handle is closed — the previous
    # open(...).readlines() leaked the file object.
    with open(data_path, 'r', encoding='ISO-8859-1') as fh:
        lines = fh.readlines()
    for line in lines:
        line = line.strip().split('\t')
        if line[0] == 'label':
            continue  # skip the header row
        labels.append(int(line[0]))
        del line[0]
        if is_build:
            # Whole sentences are only needed for corpus building.
            engs.append(word_tokenize(SPACE.join(line[:3]).strip()))
            sims.append(word_tokenize(SPACE.join(line[3:]).strip()))
        seg_engs.append([word_tokenize(seg) for seg in line[:3]])
        seg_sims.append([word_tokenize(seg) for seg in line[3:]])
    english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%',
                            '"', '``', '-', '\'\'']
    if is_build:
        eng_filtered = [[word.lower() for word in document if word not in english_punctuations] for document in engs]
        sim_filtered = [[word.lower() for word in document if word not in english_punctuations] for document in sims]
    # Lower-case and strip punctuation from each segment.
    seg_eng_filtered = [[[word.lower() for word in seg if word not in english_punctuations] for seg in eng] for eng
                        in seg_engs]
    seg_sim_filtered = [[[word.lower() for word in seg if word not in english_punctuations] for seg in sim] for sim
                        in seg_sims]
    total = 0
    seq_len = []
    for label, eng, sim in zip(labels, seg_eng_filtered, seg_sim_filtered):
        # English example.
        total += 1
        pre, mid, cur, flag = check_null(eng)
        if flag:
            print(total)  # log indices of samples that needed <NULL> padding
        examples.append({'eid': total,
                         'tokens': pre + mid + cur,
                         'tokens_pre': pre,
                         'tokens_alt': mid,
                         'tokens_cur': cur,
                         'cau_label': label})
        seq_len.append(len(pre + mid + cur))
        # Simple-English counterpart, sharing the same label.
        total += 1
        pre, mid, cur, flag = check_null(sim)
        if flag:
            print(total)
        examples.append({'eid': total,
                         'tokens': pre + mid + cur,
                         'tokens_pre': pre,
                         'tokens_alt': mid,
                         'tokens_cur': cur,
                         'cau_label': label})
        seq_len.append(len(pre + mid + cur))
    if is_build:
        sentences = []
        for eng_tokens, sim_tokens in zip(eng_filtered, sim_filtered):
            sentences.append(SPACE.join(eng_tokens))
            sentences.append(SPACE.join(sim_tokens))
    else:
        sentences = []
    np.random.shuffle(examples)
    stat_length(seq_len)
    return examples, sentences, (seg_eng_filtered, seg_sim_filtered), labels
def preprocess_test(file_path, file_name, data_type, is_build=False):
    """Read a tab-separated evaluation file and build example dicts.

    Each line carries the sentence segments followed by a numeric label in the
    last column; any non-zero label is collapsed to 1 (causal).  Two-segment
    lines are treated as having an empty leading context.

    Returns (examples, sentences, seg_filtered, labels); `sentences` is only
    populated when *is_build* is True.
    """
    print("Generating {} examples...".format(data_type))
    examples = []
    sentences, segments, labels = [], [], []
    data_path = os.path.join(file_path, file_name)
    # Context manager closes the handle — open(...).readlines() leaked it.
    with open(data_path, 'r', encoding='ISO-8859-1') as fh:
        lines = fh.readlines()
    for line in lines:
        line = line.strip().split('\t')
        num = int(line[-1])
        del line[-1]
        labels.append(0 if num == 0 else 1)
        sentences.append(word_tokenize(SPACE.join(line).strip()))
        if len(line) == 3:
            segments.append([word_tokenize(seg) for seg in line])
        else:
            # Only (altlex, cur) present: pad the missing leading context.
            segments.append([['<NULL>'], word_tokenize(line[0]), word_tokenize(line[1])])
    english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%',
                            '"', '``', '-', '\'\'']
    if is_build:
        sen_filtered = [[word.lower() for word in sentence if word not in english_punctuations] for sentence in
                        sentences]
    seg_filtered = [[[word.lower() for word in seg if word not in english_punctuations] for seg in eng] for eng in
                    segments]
    total = 0
    seq_len = []
    for label, seg in zip(labels, seg_filtered):
        total += 1
        pre, mid, cur, flag = check_null(seg)
        if flag:
            print(total)  # log indices of samples that needed <NULL> padding
        examples.append({'eid': total,
                         'tokens': pre + mid + cur,
                         'tokens_pre': pre,
                         'tokens_alt': mid,
                         'tokens_cur': cur,
                         'cau_label': label})
        seq_len.append(len(pre + mid + cur))
    if is_build:
        sentences = [SPACE.join(tokens) for tokens in sen_filtered]
    else:
        sentences = []
    stat_length(seq_len)
    return examples, sentences, seg_filtered, labels
def preprocess_transfer(file_path, file_name, data_type, is_build=False):
    """Load transfer-learning samples from a JSON file and build example dicts.

    The file must contain parallel 'label' and 'sample' lists where each
    sample is a (pre, altlex, cur) segment triple; empty segments are padded
    with ['<NULL>'] via check_null.  *is_build* is unused and kept only for
    interface symmetry with the other preprocess_* functions.

    Returns the list of example dicts.
    """
    print("Generating {} examples...".format(data_type))
    examples = []
    data_path = os.path.join(file_path, file_name)
    total = 0
    # The `with` block closes the file; the old explicit f.close() inside it
    # was redundant and has been removed.
    with open(data_path, 'rb') as f:
        data_set = json.load(f)
    for label, sample in zip(data_set['label'], data_set['sample']):
        total += 1
        pre, mid, cur, flag = check_null(sample)
        if flag:
            print(total)  # log indices of samples that needed <NULL> padding
        examples.append({'eid': total,
                         'tokens': pre + mid + cur,
                         'tokens_pre': pre,
                         'tokens_alt': mid,
                         'tokens_cur': cur,
                         'cau_label': label})
    return examples
def build_dict(data_path):
    """Return the set of distinct space-separated tokens in *data_path*.

    Callers only ever used the vocabulary (the old code returned
    ``set(dictionary)``), so the per-token frequency counting previously done
    with nltk.FreqDist was wasted work and has been dropped.
    """
    vocabulary = set()
    with open(data_path, 'r', encoding='utf8') as fh:
        for line in fh:
            vocabulary.update(line.strip().split(' '))
    return vocabulary
def save(filename, obj, message=None):
    """Serialize *obj* to *filename*.

    *message* doubles as the format selector: 'corpus' writes one line per
    entry, 'embeddings' pickles the object, anything else (including None)
    dumps JSON.  A non-None message is also echoed as a progress line.
    """
    if message is not None:
        print('Saving {}...'.format(message))
    if message == 'corpus':
        with open(filename, 'w', encoding='utf8') as fh:
            fh.writelines(line + '\n' for line in obj)
    elif message == 'embeddings':
        with open(filename, 'wb') as fh:
            pkl.dump(obj, fh)
    else:
        # The context manager closes the file; the old trailing fh.close()
        # was redundant and has been removed.
        with open(filename, 'w', encoding='utf8') as fh:
            json.dump(obj, fh)
def get_embedding(data_type, corpus_dict, emb_file=None, vec_size=None):
    """Build the embedding matrix and token<->id mappings for *corpus_dict*.

    Row 0 is <NULL> (all zeros) and row 1 is <OOV>.  When *emb_file* is given
    it must be a pickled {token: vector} dict: corpus tokens found there keep
    their pretrained vectors, hyphenated OOV tokens are approximated by
    averaging the vectors of their in-vocabulary parts, and the <OOV> row is
    drawn uniformly at random.  Without *emb_file* all rows except <NULL> and
    <OOV> are random uniform in [-0.25, 0.25] (vec_size must still be given).

    Returns (embedding_mat, token2id, id2token).
    """
    print("Generating {} embedding...".format(data_type))
    token2id = {'<NULL>': 0, '<OOV>': 1}
    if emb_file is not None:
        assert vec_size is not None
        with open(emb_file, 'rb') as fin:
            trained_embeddings = pkl.load(fin)
        embedding_dict = set(trained_embeddings)
        print('Num of tokens in corpus {}'.format(len(corpus_dict)))
        filtered_tokens = corpus_dict.intersection(embedding_dict)  # in pretrained vocab
        oov_tokens = corpus_dict.difference(filtered_tokens)
        # Hyphenated OOV tokens may be reconstructable from their parts.
        combined_tokens = set(token for token in oov_tokens if len(token.split('-')) > 1)
        embedding_mat = np.zeros([len(filtered_tokens) + len(token2id), vec_size])
        for token in filtered_tokens:
            token2id[token] = len(token2id)
            embedding_mat[token2id[token]] = trained_embeddings[token]
        combined = 0
        for tokens in combined_tokens:
            sub_tokens = tokens.split('-')
            token_vec = np.zeros([vec_size])
            in_emb = 0
            for t in sub_tokens:
                if t in filtered_tokens:
                    token_vec += trained_embeddings[t]
                    in_emb += 1
            if in_emb > 0:
                combined += 1
                token2id[tokens] = len(token2id)
                # np.vstack replaces the deprecated np.row_stack alias
                # (removed in NumPy 2.0); identical behavior.
                embedding_mat = np.vstack((embedding_mat, token_vec / in_emb))
        # Xavier-style scale for the random <OOV> vector.
        scale = 3.0 / max(1.0, (len(corpus_dict) + vec_size) / 2.0)
        embedding_mat[1] = np.random.uniform(-scale, scale, vec_size)
        print('Filtered_tokens: {} Combined_tokens: {} OOV_tokens: {}'.format(len(filtered_tokens),
                                                                              combined,
                                                                              len(oov_tokens)))
    else:
        embedding_mat = np.random.uniform(-0.25, 0.25, (len(corpus_dict) + len(token2id), vec_size))
        embedding_mat[0] = np.zeros(vec_size)
        embedding_mat[1] = np.zeros(vec_size)
        for token in corpus_dict:
            token2id[token] = len(token2id)
    id2token = dict(zip(token2id.values(), token2id.keys()))
    return embedding_mat, token2id, id2token
def gen_embedding(data_type, corpus_dict, emb_file=None, vec_size=None):
    """Build the embedding matrix and token<->id mappings for *corpus_dict*.

    NOTE(review): this is a line-for-line duplicate of get_embedding() —
    consider deleting one of the two and updating callers.

    Row 0 is <NULL> (all zeros) and row 1 is <OOV>.  When *emb_file* is given
    it must be a pickled {token: vector} dict: corpus tokens found there keep
    their pretrained vectors, hyphenated OOV tokens are approximated by
    averaging the vectors of their in-vocabulary parts, and the <OOV> row is
    drawn uniformly at random.  Without *emb_file* all rows except <NULL> and
    <OOV> are random uniform in [-0.25, 0.25] (vec_size must still be given).

    Returns (embedding_mat, token2id, id2token).
    """
    print("Generating {} embedding...".format(data_type))
    token2id = {'<NULL>': 0, '<OOV>': 1}
    if emb_file is not None:
        assert vec_size is not None
        with open(emb_file, 'rb') as fin:
            trained_embeddings = pkl.load(fin)
        embedding_dict = set(trained_embeddings)
        print('Num of tokens in corpus {}'.format(len(corpus_dict)))
        filtered_tokens = corpus_dict.intersection(embedding_dict)  # in pretrained vocab
        oov_tokens = corpus_dict.difference(filtered_tokens)
        # Hyphenated OOV tokens may be reconstructable from their parts.
        combined_tokens = set(token for token in oov_tokens if len(token.split('-')) > 1)
        embedding_mat = np.zeros([len(filtered_tokens) + len(token2id), vec_size])
        for token in filtered_tokens:
            token2id[token] = len(token2id)
            embedding_mat[token2id[token]] = trained_embeddings[token]
        combined = 0
        for tokens in combined_tokens:
            sub_tokens = tokens.split('-')
            token_vec = np.zeros([vec_size])
            in_emb = 0
            for t in sub_tokens:
                if t in filtered_tokens:
                    token_vec += trained_embeddings[t]
                    in_emb += 1
            if in_emb > 0:
                combined += 1
                token2id[tokens] = len(token2id)
                # np.vstack replaces the deprecated np.row_stack alias
                # (removed in NumPy 2.0); identical behavior.
                embedding_mat = np.vstack((embedding_mat, token_vec / in_emb))
        # Xavier-style scale for the random <OOV> vector.
        scale = 3.0 / max(1.0, (len(corpus_dict) + vec_size) / 2.0)
        embedding_mat[1] = np.random.uniform(-scale, scale, vec_size)
        print('Filtered_tokens: {} Combined_tokens: {} OOV_tokens: {}'.format(len(filtered_tokens),
                                                                              combined,
                                                                              len(oov_tokens)))
    else:
        embedding_mat = np.random.uniform(-0.25, 0.25, (len(corpus_dict) + len(token2id), vec_size))
        embedding_mat[0] = np.zeros(vec_size)
        embedding_mat[1] = np.zeros(vec_size)
        for token in corpus_dict:
            token2id[token] = len(token2id)
    id2token = dict(zip(token2id.values(), token2id.keys()))
    return embedding_mat, token2id, id2token
def seg_length(sentences):
    """Return (pre, alt, cur) token counts per sentence.

    NOTE(review): duplicate definition — an identical seg_length already
    exists earlier in this module; this one shadows it at import time.
    """
    return [(len(sen[0]), len(sen[1]), len(sen[2])) for sen in sentences]
def gen_annotation(segs, max_length, filename, labels, data_type):
    """Write one space-separated tag line per sentence to *filename*.

    Context tokens are tagged '0'; every altlex token is tagged '1' for a
    causal sentence (label == 1) or '2' otherwise.  Sentences longer than
    max_length['full'] have their trailing segment count reduced so the tag
    line never exceeds the cap.  For data_type 'train', *segs* is a pair of
    (english, simple-english) segment lists and each pair emits two lines;
    otherwise *segs* is a single segment list.
    """
    max_length = max_length['full']

    def _lengths(sentences):
        # (pre, alt, cur) token counts per sentence (local copy of seg_length
        # so this function is self-contained).
        return [(len(s[0]), len(s[1]), len(s[2])) for s in sentences]

    def _annos(lengths, label):
        pre, alt, cur = lengths
        if pre + alt + cur > max_length:
            # Trim from the trailing segment to respect the length cap.
            cur -= pre + alt + cur - max_length
        tags = '0 ' * pre
        # Bug fix: the old `'1 ' if label == 1 else '2 ' * alt` bound the
        # multiplication only to the '2' branch, so causal sentences got a
        # single '1' tag regardless of the altlex length.
        tags += ('1 ' if label == 1 else '2 ') * alt
        tags += '0 ' * cur
        return tags.strip() + '\n'

    with open(filename, 'w', encoding='utf8') as f:
        if data_type == 'train':
            for el, sl, label in zip(_lengths(segs[0]), _lengths(segs[1]), labels):
                f.write(_annos(el, label))
                f.write(_annos(sl, label))
        else:
            for l, label in zip(_lengths(segs), labels):
                f.write(_annos(l, label))
def build_features(sentences, data_type, max_len, out_file, word2id, annotation_file=None):
    """Convert tokenized examples into fixed-size id arrays and pickle them.

    Each example dict contributes zero-padded int32 arrays for the full token
    sequence and the pre/alt/cur segments, truncated to the corresponding
    max_len entries.  Results are pickled to *out_file*.  *annotation_file*
    is currently unused (kept for interface compatibility; see the retained
    commented-out handling).  Returns {'total': <instance count>}.
    """
    print("Processing {} examples...".format(data_type))

    def _get_word(word):
        # Try several casings before falling back to the <OOV> id (1).
        # (Hoisted out of the loop — it used to be re-created per example.)
        for variant in (word, word.lower(), word.capitalize(), word.upper()):
            if variant in word2id:
                return word2id[variant]
        return 1

    total = 0
    meta = {}
    samples = []
    for sentence in tqdm(sentences):
        total += 1
        tokens = np.zeros([max_len['full']], dtype=np.int32)
        tokens_pre = np.zeros([max_len['pre']], dtype=np.int32)
        tokens_alt = np.zeros([max_len['alt']], dtype=np.int32)
        tokens_cur = np.zeros([max_len['cur']], dtype=np.int32)
        seq_len = min(len(sentence['tokens']), max_len['full'])
        pre_len = min(len(sentence['tokens_pre']), max_len['pre'])
        alt_len = min(len(sentence['tokens_alt']), max_len['alt'])
        cur_len = min(len(sentence['tokens_cur']), max_len['cur'])
        for i in range(seq_len):
            tokens[i] = _get_word(sentence['tokens'][i])
        for i in range(pre_len):
            tokens_pre[i] = _get_word(sentence['tokens_pre'][i])
        for i in range(alt_len):
            tokens_alt[i] = _get_word(sentence['tokens_alt'][i])
        for i in range(cur_len):
            tokens_cur[i] = _get_word(sentence['tokens_cur'][i])
        samples.append({'id': sentence['eid'],
                        'tokens': tokens,
                        'tokens_pre': tokens_pre,
                        'tokens_alt': tokens_alt,
                        'tokens_cur': tokens_cur,
                        'length': seq_len,
                        'cau_label': sentence['cau_label']})
    with open(out_file, 'wb') as fo:
        pkl.dump(samples, fo)
    print('Build {} instances of features in total'.format(total))
    meta['total'] = total
    return meta
def run_prepare(config):
    """End-to-end preprocessing driver.

    Reads the raw train/transfer/valid/test files; when config.build is set,
    rebuilds the corpus, vocabulary and pretrained embedding matrix from the
    training split, otherwise reloads the saved token2id mapping.  Every split
    is then converted into pickled feature records plus a small meta file.
    """
    train_examples, train_corpus, train_seg, train_labels = preprocess_train(config.raw_dir, config.train_file,
                                                                             'train', config.build)
    transfer_examples1 = preprocess_transfer(config.raw_dir, config.transfer_file1, 'transfer')
    transfer_examples2 = preprocess_transfer(config.raw_dir, config.transfer_file2, 'transfer')
    valid_examples, valid_corpus, valid_seg, valid_labels = preprocess_test(config.raw_dir, config.valid_file,
                                                                            'valid', config.build)
    test_examples, test_corpus, test_seg, test_labels = preprocess_test(config.raw_dir, config.test_file,
                                                                        'test', config.build)
    if config.build:
        # types = ['train', 'valid', 'test']
        # labels = [train_labels, valid_labels, test_labels]
        # segs = [train_seg, valid_seg, test_seg]
        # for t, s, l in zip(types, segs, labels):
        #     gen_annotation(s, config.max_len, os.path.join(config.processed_dir, t + '_annotations.txt'), l, t)
        save(config.corpus_file, train_corpus, 'corpus')
        corpus_dict = build_dict(config.corpus_file)
        token_emb_mat, token2id, id2token = get_embedding('word', corpus_dict, config.w2v_file, config.n_emb)
        save(config.token_emb_file, token_emb_mat, message='embeddings')
        save(config.token2id_file, token2id, message='token to index')
        save(config.id2token_file, id2token, message='index to token')
    else:
        # Reuse the vocabulary produced by a previous build run.
        with open(config.token2id_file, 'r') as fh:
            token2id = json.load(fh)
    transfer_meta1 = build_features(transfer_examples1, 'transfer', config.max_len, config.transfer_record_file1,
                                    token2id)
    save(config.transfer_meta1, transfer_meta1, message='transfer meta')
    # Free each split as soon as its features are written to keep memory low.
    del transfer_examples1
    transfer_meta2 = build_features(transfer_examples2, 'transfer', config.max_len, config.transfer_record_file2,
                                    token2id)
    save(config.transfer_meta2, transfer_meta2, message='transfer meta')
    del transfer_examples2
    train_meta = build_features(train_examples, 'train', config.max_len, config.train_record_file, token2id,
                                config.train_annotation)
    save(config.train_meta, train_meta, message='train meta')
    del train_examples, train_corpus
    valid_meta = build_features(valid_examples, 'valid', config.max_len, config.valid_record_file, token2id)
    save(config.valid_meta, valid_meta, message='valid meta')
    del valid_examples, valid_corpus
    test_meta = build_features(test_examples, 'test', config.max_len, config.test_record_file, token2id,
                               config.test_annotation)
    save(config.test_meta, test_meta, message='test meta')
    del test_examples, test_corpus
    save(config.shape_meta, {'max_len': config.max_len}, message='shape meta')
|
'''
Created on Mar 2, 2015
@author: Stefan-Code
'''
import unittest
from gglsbl3 import client
import os
from nose.tools import *
class ClientTest(unittest.TestCase):
    """Tests for client.SafeBrowsingList construction and configuration."""

    def setUp(self):
        # A dummy key suffices: constructing the client makes no network call.
        self.api_key = "abcdef"
        self.db_path = "./testdb.sqlite"
        self.client = client.SafeBrowsingList(self.api_key, self.db_path, discard_fair_use_policy=False)

    def tearDown(self):
        # Close the sqlite storage before deleting the on-disk database file.
        self.client._close_storage()
        os.remove(self.db_path)

    def testName(self):
        # The API key passed in must surface in the full-hash client's URL args.
        eq_(self.client.full_hash_protocol_client.config["url_args"]["key"], self.api_key)
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
|
import pathlib
from pw_manager.db import Database
from pw_manager.utils import utils, errors
import paramiko
class Options:
    """Sync directions accepted by sync()."""
    UPLOAD = 1    # push the local database file to the server
    DOWNLOAD = 2  # fetch the database file from the server
def check_credentials(server: str, username: str, password: str) -> bool:
    """Return True when an SSH login to *server* (port 22) succeeds.

    Only authentication failures are treated as "bad credentials"; other
    connection errors (DNS, timeout, ...) still propagate to the caller.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(hostname=server, username=username, password=password, port=22)
        return True
    except paramiko.ssh_exception.AuthenticationException:
        return False
    finally:
        # Always release the connection — the old code leaked it on success.
        ssh.close()
def sync(db: Database, action: Options, server: str, username: str, password: str, path: str):
    """Upload or download the password database over SFTP.

    UPLOAD pushes db.path to *path* on the server, creating the remote parent
    directory when the file does not exist yet.  DOWNLOAD renames the current
    local file to "<db.path>.old" (replacing any previous backup) before
    fetching the remote copy.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname=server, username=username, password=password, port=22)
    try:
        sftp = ssh.open_sftp()
        try:
            if action == Options.UPLOAD:
                try:
                    sftp.stat(path)
                except FileNotFoundError:
                    # Remote file missing: ensure its parent directory exists.
                    sftp.mkdir(path.rsplit("/", maxsplit=1)[0])
                sftp.put(db.path, path)
            elif action == Options.DOWNLOAD:
                # Keep the previous local database as a .old backup.
                pathlib.Path(db.path + ".old").unlink(missing_ok=True)
                pathlib.Path(db.path).rename(db.path + ".old")
                sftp.get(path, db.path)
        finally:
            sftp.close()
    finally:
        # try/finally so neither connection leaks when a transfer fails
        # (the old code skipped both close() calls on any exception).
        ssh.close()
|
import FWCore.ParameterSet.Config as cms
# DQMFileReader analyzer configuration.
# FileNames: DQM files to read; referenceFileName: optional reference file
# ("" presumably means no reference — confirm against the C++ plugin).
dqmFileReader = cms.EDAnalyzer("DQMFileReader",
    FileNames = cms.untracked.vstring(),
    referenceFileName = cms.untracked.string("")
)
|
import datetime
import logging
import os
import sqlite3
from functools import wraps
import tmdbsimple
# noinspection PyPackageRequirements
from routing import Plugin
from xbmc import executebuiltin
from xbmcgui import ListItem, Dialog
from xbmcplugin import addDirectoryItem, endOfDirectory, setContent, setResolvedUrl
from lib import tmdb
from lib.api.flix.kodi import ADDON_PATH, ADDON_NAME, set_logger, notification, translate, Progress, \
container_refresh, get_current_view_id, set_view_mode, container_update, run_plugin
from lib.api.flix.utils import PY3
from lib.library import Library
from lib.providers import play_search, play_movie, play_show, play_season, play_episode
from lib.settings import get_language, include_adult_content, is_search_history_enabled, propagate_view_type, \
show_unaired_episodes
from lib.storage import SearchHistory
from lib.subtitles import SubtitlesService
# Kodi content types passed to setContent().
MOVIES_TYPE = "movies"
SHOWS_TYPE = "tvshows"
EPISODES_TYPE = "episodes"
# History actions carried in the "search_action" query argument of do_query().
SEARCH_STORE = "store"    # add the query as a new history entry
SEARCH_UPDATE = "update"  # refresh an existing entry's timestamp
SEARCH_EDIT = "edit"      # rename an existing entry
# Query-string key used to propagate the selected view mode between folders.
VIEW_PROPERTY = "view"
set_logger()
plugin = Plugin()
def progress(obj, length=None):
    """Wrap *obj* in a Progress dialog titled with the add-on name."""
    message = translate(30110)
    return Progress(obj, length=length, heading=ADDON_NAME, message=message)
def li(tid, icon):
    """Build a list item whose label is the translation of string id *tid*."""
    label = translate(tid)
    return list_item(label, icon)
def list_item(label, icon):
    """Create a ListItem using the bundled image *icon* as icon and poster."""
    art_path = os.path.join(ADDON_PATH, "resources", "images", icon)
    result = ListItem(label)
    result.setArt({"icon": art_path, "poster": art_path})
    return result
def media(func, *args, **kwargs):
    """Return a PlayMedia(...) builtin string for the given plugin route."""
    url = plugin.url_for(func, *args, **kwargs)
    return "PlayMedia({})".format(url)
def action(func, *args, **kwargs):
    """Return a RunPlugin(...) builtin string for the given plugin route."""
    url = plugin.url_for(func, *args, **kwargs)
    return "RunPlugin({})".format(url)
def update(func, *args, **kwargs):
    """Return a Container.Update(...) builtin string for the given plugin route."""
    url = plugin.url_for(func, *args, **kwargs)
    return "Container.Update({})".format(url)
def query_arg(name, required=True):
    """Decorator factory: copy query-string argument *name* into kwargs.

    When the handler is invoked without *name* in kwargs, the value is taken
    from the plugin's query string; a missing *required* argument raises
    AttributeError.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if name in kwargs:
                return func(*args, **kwargs)
            values = plugin.args.get(name)
            if values:
                kwargs[name] = values[0]
            elif required:
                raise AttributeError("Missing {} required query argument".format(name))
            return func(*args, **kwargs)
        return wrapper
    return decorator
def handle_view(func):
    """Decorator: after the handler runs, apply the view mode requested in the
    query string (if any)."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        requested = plugin.args.get(VIEW_PROPERTY)
        result = func(*args, **kwargs)
        if requested:
            set_view_mode(requested[0])
        return result
    return wrapper
def handle_page(data, func, *args, **kwargs):
    """Append a "next page" folder entry when more pages remain.

    *data* is either a TMDB response dict (carrying "total_pages") or the
    total page count itself.
    """
    current_page = int(kwargs.get("page", 1))
    total_pages = data["total_pages"] if isinstance(data, dict) else data
    if current_page >= total_pages:
        return
    kwargs["page"] = current_page + 1
    url = plugin.url_for(func, *args, **kwargs)
    propagate = propagate_view_type()
    if propagate:
        # Route through set_view so the chosen view mode carries over.
        url = plugin.url_for(set_view, url=url)
    addDirectoryItem(plugin.handle, url, li(30105, "next.png"), isFolder=not propagate)
def plugin_update(func, *args, **kwargs):
    """Trigger a Container.Update to the given plugin route."""
    target = plugin.url_for(func, *args, **kwargs)
    container_update(target)
def add_person(person_li, person_id):
    """Add a folder entry for a person, pointing at handle_person."""
    url = plugin.url_for(handle_person, person_id)
    addDirectoryItem(plugin.handle, url, person_li, isFolder=True)
def add_movie(movie):
    """Add a playable movie entry with "similar" and "add to library" context items."""
    item = movie.to_list_item(playable=True)
    item.addContextMenuItems([
        (translate(30144), update(similar_movies, movie.movie_id)),
        (translate(30133), action(library_add, MOVIES_TYPE, movie.movie_id)),
    ])
    addDirectoryItem(plugin.handle, plugin.url_for(play_movie, movie.movie_id), item)
def add_show(show):
    """Add a show folder entry with play/similar/library context items."""
    item = show.to_list_item()
    item.addContextMenuItems([
        (translate(30139), media(play_show, show.show_id)),
        (translate(30145), update(similar_shows, show.show_id)),
        (translate(30133), action(library_add, SHOWS_TYPE, show.show_id)),
    ])
    addDirectoryItem(plugin.handle, plugin.url_for(handle_show, show.show_id), item, isFolder=True)
def add_season(season):
    """Add a season folder entry with a "play whole season" context item."""
    item = season.to_list_item()
    item.addContextMenuItems([
        (translate(30139), media(play_season, season.show_id, season.season_number)),
    ])
    addDirectoryItem(
        plugin.handle,
        plugin.url_for(
            handle_season,
            show_id=season.show_id,
            season_number=season.season_number,
            show_title=season.get_info("tvshowtitle")),
        item,
        isFolder=True,
    )
def add_episode(episode):
    """Add a directly playable entry for a single episode."""
    url = plugin.url_for(play_episode, episode.show_id, episode.season_number, episode.episode_number)
    item = episode.to_list_item(playable=True)
    addDirectoryItem(plugin.handle, url, item)
def play_youtube_video(video_id):
    """Play a YouTube trailer via Tubed (Python 3) or the YouTube add-on (Python 2)."""
    if PY3:
        base = "plugin://plugin.video.tubed/?mode=play&video_id="
    else:
        base = "plugin://plugin.video.youtube/play/?video_id="
    run_plugin(base + video_id)
@plugin.route("/")
def index():
    """Root menu; doubles as Kodi's entry point for subtitle requests."""
    if "action" in plugin.args:
        # Kodi invoked us as a subtitle service rather than for browsing.
        SubtitlesService(handle=plugin.handle, params=plugin.args).run()
        return
    addDirectoryItem(plugin.handle, plugin.url_for(discover), li(30100, "discover.png"), isFolder=True)
    addDirectoryItem(plugin.handle, plugin.url_for(movies), li(30101, "movies.png"), isFolder=True)
    addDirectoryItem(plugin.handle, plugin.url_for(shows), li(30102, "series.png"), isFolder=True)
    addDirectoryItem(plugin.handle, plugin.url_for(search), li(30103, "search.png"))
    endOfDirectory(plugin.handle)
@plugin.route("/discover")
def discover():
    """Discover menu: movie/show filter dialogs plus popular people."""
    addDirectoryItem(plugin.handle, plugin.url_for(discover_select, MOVIES_TYPE),
                     list_item("{} - {}".format(translate(30100), translate(30101)), "movies.png"))
    addDirectoryItem(plugin.handle, plugin.url_for(discover_select, SHOWS_TYPE),
                     list_item("{} - {}".format(translate(30100), translate(30102)), "series.png"))
    addDirectoryItem(plugin.handle, plugin.url_for(discover_people),
                     list_item("{} - {}".format(translate(30100), translate(30104)), "people.png"), isFolder=True)
    endOfDirectory(plugin.handle)
def dialog_genres(media_type, kwargs):
    """Let the user pick genres; store their ids as "with_genres" in *kwargs*.

    Returns True when the user confirmed a (possibly empty) selection.
    """
    if media_type == MOVIES_TYPE:
        genres_handle = tmdbsimple.Genres().movie_list
    else:
        genres_handle = tmdbsimple.Genres().tv_list
    # noinspection PyArgumentList
    genres_dict = tmdb.get_genres_by_name(genres_handle(language=get_language()))
    genres_names = sorted(genres_dict.keys())
    title = "{} - {}".format(translate(30100), translate(30106))
    selection = Dialog().multiselect(title, genres_names)
    if selection is None:
        return False
    kwargs["with_genres"] = ",".join(str(genres_dict[genres_names[i]]) for i in selection)
    return True
def dialog_year(media_type, kwargs):
    """Let the user pick a release year (current year back to 1900).

    Stores it under the TMDB filter key matching *media_type* and returns
    True when a year was chosen.
    """
    newest = datetime.datetime.now().year
    years = [str(y) for y in range(newest, 1899, -1)]
    title = "{} - {}".format(translate(30100), translate(30107))
    choice = Dialog().select(title, years)
    if choice < 0:
        return False
    key = "primary_release_year" if media_type == MOVIES_TYPE else "first_air_date_year"
    kwargs[key] = years[choice]
    return True
@plugin.route("/discover/select/<media_type>")
def discover_select(media_type):
    """Ask how to discover *media_type*: plain, by genre, by year, or combined."""
    if media_type == MOVIES_TYPE:
        label = 30101
        handler = discover_movies
    elif media_type == SHOWS_TYPE:
        label = 30102
        handler = discover_shows
    else:
        return
    result = Dialog().select("{} - {}".format(translate(30100), translate(label)), [
        translate(30100),  # discover
        translate(30106),  # by genre
        translate(30107),  # by year
        translate(30143),  # multiple filters
    ])
    if result < 0:
        return
    kwargs = {}
    if result == 1:
        if not dialog_genres(media_type, kwargs):
            return
    elif result == 2:
        if not dialog_year(media_type, kwargs):
            return
    elif result == 3:
        # The list comprehension is deliberate: a generator would let any()
        # short-circuit after the first confirmed dialog and skip the second.
        if not any([dialog(media_type, kwargs) for dialog in (dialog_year, dialog_genres)]):
            return
    plugin_update(handler, **kwargs)
@plugin.route("/discover/movies")
@query_arg("page", required=False)
@query_arg("primary_release_year", required=False)
@query_arg("with_genres", required=False)
@handle_view
def discover_movies(**kwargs):
    """List TMDB "discover" movie results for the given filters."""
    setContent(plugin.handle, MOVIES_TYPE)
    kwargs.setdefault("include_adult", include_adult_content())
    data = tmdbsimple.Discover().movie(**kwargs)
    for movie in progress(*tmdb.get_movies(data)):
        add_movie(movie)
    handle_page(data, discover_movies, **kwargs)
    endOfDirectory(plugin.handle)
@plugin.route("/discover/shows")
@query_arg("page", required=False)
@query_arg("first_air_date_year", required=False)
@query_arg("with_genres", required=False)
@handle_view
def discover_shows(**kwargs):
    """List TMDB "discover" TV-show results for the given filters."""
    setContent(plugin.handle, SHOWS_TYPE)
    kwargs.setdefault("include_adult", include_adult_content())
    data = tmdbsimple.Discover().tv(**kwargs)
    for show in progress(*tmdb.get_shows(data)):
        add_show(show)
    handle_page(data, discover_shows, **kwargs)
    endOfDirectory(plugin.handle)
@plugin.route("/discover/people")
@query_arg("page", required=False)
@handle_view
def discover_people(**kwargs):
    """List TMDB's popular people as folder entries."""
    data = tmdbsimple.People().popular(**kwargs)
    for person_li, person_id in tmdb.person_list_items(data):
        add_person(person_li, person_id)
    handle_page(data, discover_people, **kwargs)
    endOfDirectory(plugin.handle)
@plugin.route("/movies")
def movies():
    """Movies menu: trending plus the standard TMDB movie listings."""
    addDirectoryItem(plugin.handle, plugin.url_for(trending_movies), li(30114, "trending.png"), isFolder=True)
    addDirectoryItem(plugin.handle, plugin.url_for(get_movies, "popular"), li(30115, "popular.png"), isFolder=True)
    addDirectoryItem(plugin.handle, plugin.url_for(get_movies, "top_rated"), li(30116, "top_rated.png"), isFolder=True)
    addDirectoryItem(plugin.handle, plugin.url_for(get_movies, "now_playing"), li(30117, "playing.png"), isFolder=True)
    addDirectoryItem(plugin.handle, plugin.url_for(get_movies, "upcoming"), li(30118, "upcoming.png"), isFolder=True)
    endOfDirectory(plugin.handle)
@plugin.route("/movies/trending")
@query_arg("page", required=False)
@handle_view
def trending_movies(**kwargs):
    """List this week's trending movies."""
    setContent(plugin.handle, MOVIES_TYPE)
    data = tmdb.Trending("movie", "week").get_trending(**kwargs)
    for movie in progress(*tmdb.get_movies(data)):
        add_movie(movie)
    handle_page(data, trending_movies, **kwargs)
    endOfDirectory(plugin.handle)
@plugin.route("/movies/similar/<tmdb_id>")
@query_arg("page", required=False)
@handle_view
def similar_movies(tmdb_id, **kwargs):
    """List movies similar to the given TMDB movie id."""
    setContent(plugin.handle, MOVIES_TYPE)
    data = tmdbsimple.Movies(tmdb_id).similar_movies(**kwargs)
    for movie in progress(*tmdb.get_movies(data)):
        add_movie(movie)
    handle_page(data, similar_movies, tmdb_id=tmdb_id, **kwargs)
    endOfDirectory(plugin.handle)
@plugin.route("/movies/get/<call>")
@query_arg("page", required=False)
@handle_view
def get_movies(call, **kwargs):
    """List movies from a named tmdbsimple.Movies listing (popular, top_rated, ...).

    *call* comes from our own menu URLs, so the getattr dispatch is safe.
    """
    setContent(plugin.handle, MOVIES_TYPE)
    logging.debug("Going to call tmdb.Movies().%s()", call)
    data = getattr(tmdbsimple.Movies(), call)(**kwargs)
    for movie in progress(*tmdb.get_movies(data)):
        add_movie(movie)
    handle_page(data, get_movies, call=call, **kwargs)
    endOfDirectory(plugin.handle)
@plugin.route("/shows")
def shows():
    """Shows menu: trending plus the standard TMDB TV listings."""
    addDirectoryItem(plugin.handle, plugin.url_for(trending_shows), li(30119, "trending.png"), isFolder=True)
    addDirectoryItem(plugin.handle, plugin.url_for(get_shows, "popular"), li(30120, "popular.png"), isFolder=True)
    addDirectoryItem(plugin.handle, plugin.url_for(get_shows, "top_rated"), li(30121, "top_rated.png"), isFolder=True)
    addDirectoryItem(plugin.handle, plugin.url_for(get_shows, "airing_today"), li(30122, "playing.png"), isFolder=True)
    addDirectoryItem(plugin.handle, plugin.url_for(get_shows, "on_the_air"), li(30123, "upcoming.png"), isFolder=True)
    endOfDirectory(plugin.handle)
@plugin.route("/shows/trending")
@query_arg("page", required=False)
@handle_view
def trending_shows(**kwargs):
    """List this week's trending TV shows."""
    setContent(plugin.handle, SHOWS_TYPE)
    data = tmdb.Trending("tv", "week").get_trending(**kwargs)
    for show in progress(*tmdb.get_shows(data)):
        add_show(show)
    handle_page(data, trending_shows, **kwargs)
    endOfDirectory(plugin.handle)
@plugin.route("/shows/similar/<tmdb_id>")
@query_arg("page", required=False)
@handle_view
def similar_shows(tmdb_id, **kwargs):
    """List shows similar to the given TMDB show id."""
    setContent(plugin.handle, SHOWS_TYPE)
    data = tmdbsimple.TV(tmdb_id).similar(**kwargs)
    for show in progress(*tmdb.get_shows(data)):
        add_show(show)
    handle_page(data, similar_shows, tmdb_id=tmdb_id, **kwargs)
    endOfDirectory(plugin.handle)
@plugin.route("/shows/get/<call>")
@query_arg("page", required=False)
@handle_view
def get_shows(call, **kwargs):
    """List shows from a named tmdbsimple.TV listing (popular, top_rated, ...).

    *call* comes from our own menu URLs, so the getattr dispatch is safe.
    """
    setContent(plugin.handle, SHOWS_TYPE)
    logging.debug("Going to call tmdb.TV().%s()", call)
    data = getattr(tmdbsimple.TV(), call)(**kwargs)
    for show in progress(*tmdb.get_shows(data)):
        add_show(show)
    handle_page(data, get_shows, call=call, **kwargs)
    endOfDirectory(plugin.handle)
@plugin.route("/library/add/<media_type>/<tmdb_id>")
def library_add(media_type, tmdb_id):
    """Add a movie or show to the local library and notify the result."""
    with Library() as library:
        if media_type == MOVIES_TYPE:
            added = library.add_movie(tmdb.Movie(tmdb_id))
        elif media_type == SHOWS_TYPE:
            added = library.add_show(tmdb.Show(tmdb_id))
        else:
            logging.error("Unknown media type '%s'", media_type)
            return
    # 30134 = added, 30137 = already present.
    notification(translate(30134 if added else 30137), time=2000, sound=False)
@plugin.route("/library/rebuild")
def library_rebuild():
    """Rebuild the local library from scratch and notify when done."""
    with Library() as library:
        library.rebuild()
    notification(translate(30138))
@plugin.route("/search")
def search():
    """Ask for the search scope, then open history or prompt directly."""
    # 0 - movie, 1 - show, 2 - person, 3 - all
    search_type = Dialog().select(translate(30124), [translate(30125 + i) for i in range(4)])
    if search_type < 0:
        return
    if is_search_history_enabled():
        plugin_update(search_history, search_type)
    else:
        do_query(search_type)
@plugin.route("/search_history/<search_type>")
@query_arg("page", required=False)
@handle_view
def search_history(search_type, page=1):
    """List stored queries for *search_type*, topped by a "new search" entry."""
    search_type = int(search_type)
    page = int(page)
    with SearchHistory() as s:
        addDirectoryItem(
            plugin.handle,
            plugin.url_for(do_query, search_type=search_type, search_action=SEARCH_STORE),
            li(30130, "new_search.png"),
        )
        for search_id, query in s.get_page(search_type, page):
            item = list_item(query, "search.png")
            # Context items: delete the entry or edit (rename) it.
            item.addContextMenuItems([
                (translate(30131), action(delete_search_entry, search_id)),
                (translate(30136), action(do_query, search_type=search_type, search_action=SEARCH_EDIT, query=query)),
            ])
            # Selecting an entry re-runs the search and bumps its timestamp.
            addDirectoryItem(
                plugin.handle,
                plugin.url_for(do_query, search_type=search_type, search_action=SEARCH_UPDATE, query=query),
                item,
            )
        handle_page(s.pages_count(search_type), search_history, search_type=search_type, page=page)
    endOfDirectory(plugin.handle)
@plugin.route("/search_entry/delete/<search_id>")
def delete_search_entry(search_id):
    """Delete one stored search by its id and refresh the listing."""
    with SearchHistory() as history:
        history.delete_entry_by_id(int(search_id))
    container_refresh()
@plugin.route("/clear_search_history")
def clear_search_history():
    """Wipe all stored search history entries and notify the user."""
    with SearchHistory() as history:
        history.clear_entries()
    notification(translate(30132))
@plugin.route("/query/<search_type>")
@query_arg("search_action", required=False)
@query_arg("query", required=False)
def do_query(search_type, query=None, search_action=None):
    """Prompt for (or reuse) a search query, maintain history, and dispatch it.

    search_type: 0 - movie, 1 - show, 2 - person, 3 - all (see search()).
    search_action: one of SEARCH_STORE / SEARCH_UPDATE / SEARCH_EDIT, or None
        for a plain (non-history) search.
    query: the stored query for UPDATE/EDIT actions; None prompts the user.
    """
    search_type = int(search_type)
    # Remember the original query so EDIT can rename the existing DB entry.
    old_query = query
    if query is None:
        query = Dialog().input(translate(30124) + ": " + translate(30125 + search_type))
    elif search_action == SEARCH_EDIT:
        # NOTE: 'defaultt' (double t) is the actual keyword of Kodi's
        # xbmcgui.Dialog().input() - not a typo.
        query = Dialog().input(translate(30124) + ": " + translate(30125 + search_type), defaultt=old_query)
    if query:
        if search_action == SEARCH_STORE:
            # A new search: persist it in the history DB.
            with SearchHistory() as s:
                try:
                    s.add_entry(search_type, query)
                except sqlite3.IntegrityError:
                    # In case the query already exists, just update the timestamp
                    s.update_entry(search_type, query, query)
        elif search_action == SEARCH_UPDATE:
            # Re-running a stored search: same text, refreshed timestamp.
            with SearchHistory() as s:
                s.update_entry(search_type, query, query)
        elif search_action == SEARCH_EDIT:
            if old_query is None:
                # Nothing to edit - EDIT requires the original query.
                return
            with SearchHistory() as s:
                try:
                    s.update_entry(search_type, old_query, query)
                except sqlite3.IntegrityError:
                    # In case the new query already exists, ignore
                    pass
        if search_type == 3:
            # "All" searches are handed off to the providers for playback.
            executebuiltin(media(play_query, query=query))
            if search_action in (SEARCH_STORE, SEARCH_UPDATE, SEARCH_EDIT):
                # History was touched above; redraw the history listing.
                container_refresh()
        else:
            # Movie/show/person searches open the TMDB result listing.
            plugin_update(handle_search, search_type=search_type, query=query)
@plugin.route("/search/<search_type>")
@query_arg("page", required=False)
@query_arg("query")
@handle_view
def handle_search(search_type, **kwargs):
    """Run a TMDB search of the requested type and list the results.

    search_type: 0 - movie, 1 - show, 2 - person; anything else raises.
    kwargs: forwarded to the tmdbsimple search call (query, page, ...).
    """
    search_type = int(search_type)
    kwargs.setdefault("include_adult", include_adult_content())
    if search_type == 0:
        setContent(plugin.handle, MOVIES_TYPE)
        data = tmdbsimple.Search().movie(**kwargs)
        for found in progress(*tmdb.get_movies(data)):
            add_movie(found)
    elif search_type == 1:
        setContent(plugin.handle, SHOWS_TYPE)
        data = tmdbsimple.Search().tv(**kwargs)
        for found in progress(*tmdb.get_shows(data)):
            add_show(found)
    elif search_type == 2:
        # People listings have no dedicated Kodi content type set here.
        data = tmdbsimple.Search().person(**kwargs)
        for person_li, person_id in tmdb.person_list_items(data):
            add_person(person_li, person_id)
    else:
        logging.error("Invalid search type '%s' used", search_type)
        raise ValueError("Unknown search type")
    handle_page(data, handle_search, search_type=search_type, **kwargs)
    succeeded = tmdb.has_results(data)
    if not succeeded:
        # Tell the user the search came back empty.
        notification(translate(30112))
    endOfDirectory(plugin.handle, succeeded)
@plugin.route("/handle_person/<person_id>")
def handle_person(person_id):
    """List every movie and show credited to the given person."""
    for entry, is_movie in progress(*tmdb.get_person_media(person_id)):
        if is_movie:
            add_movie(entry)
        else:
            add_show(entry)
    endOfDirectory(plugin.handle)
@plugin.route("/handle_show/<show_id>")
def handle_show(show_id):
    """List the seasons of a show, honoring the unaired-episodes setting."""
    setContent(plugin.handle, SHOWS_TYPE)
    include_unaired = show_unaired_episodes()
    for season in tmdb.Show(show_id).seasons(get_unaired=include_unaired):
        add_season(season)
    endOfDirectory(plugin.handle)
@plugin.route("/handle_season/<show_id>/<season_number>")
@query_arg("show_title", required=False)
def handle_season(show_id, season_number, show_title=None):
    """List the episodes of one season, honoring the unaired-episodes setting."""
    setContent(plugin.handle, EPISODES_TYPE)
    include_unaired = show_unaired_episodes()
    season = tmdb.Season(show_id, season_number, show_title)
    for episode in season.episodes(get_unaired=include_unaired):
        add_episode(episode)
    endOfDirectory(plugin.handle)
@plugin.route("/play_trailer/<media_type>/<tmdb_id>")
@plugin.route("/play_trailer/<media_type>/<tmdb_id>/<season_number>")
@plugin.route("/play_trailer/<media_type>/<tmdb_id>/<season_number>/<episode_number>")
def play_trailer(media_type, tmdb_id, season_number=None, episode_number=None, language=None, fallback_language="en"):
    """Play the first YouTube trailer found on TMDB for the given media.

    Tries the user's language first; if no trailer is found, recurses once
    with fallback_language ("en" by default) before giving up.
    language/fallback_language are only set by the recursive call below,
    never by the routes.
    """
    if media_type == "movie":
        tmdb_obj = tmdbsimple.Movies(tmdb_id)
    elif media_type == "show":
        tmdb_obj = tmdbsimple.TV(tmdb_id)
    elif media_type == "season":
        if season_number is None:
            logging.error("season_number attribute is required for seasons")
            return
        tmdb_obj = tmdbsimple.TV_Seasons(tmdb_id, season_number)
    elif media_type == "episode":
        if season_number is None or episode_number is None:
            logging.error("both season_number and episode_number attributes are required for episodes")
            return
        tmdb_obj = tmdbsimple.TV_Episodes(tmdb_id, season_number, episode_number)
    else:
        logging.error("Invalid media type '%s' used", media_type)
        return
    if language is None:
        # First pass: use the configured TMDB language.
        language = get_language()
    for result in tmdb_obj.videos(language=language)["results"]:
        # Only YouTube-hosted videos explicitly tagged as trailers qualify.
        if result["type"] == "Trailer" and result["site"] == "YouTube":
            play_youtube_video(result["key"])
            return
    if language == fallback_language:
        # Already on the fallback language and still nothing - give up.
        notification(translate(30108))
        setResolvedUrl(plugin.handle, False, ListItem())
    else:
        # Retry once with the fallback language (recursion terminates because
        # language == fallback_language on the next pass).
        play_trailer(
            media_type, tmdb_id,
            season_number=season_number,
            episode_number=episode_number,
            language=fallback_language,
            fallback_language=fallback_language)
@plugin.route("/providers/play_query")
@query_arg("query")
def play_query(query):
    """Hand a free-text query straight to the providers for playback."""
    play_search(query)
@plugin.route("/set_view")
@query_arg("url")
def set_view(url):
    """Reload the container at *url* with the current view id appended."""
    separator = "&" if "?" in url else "?"
    container_update(url + separator + VIEW_PROPERTY + "=" + str(get_current_view_id()))
# Register playback endpoints whose handlers are defined in the providers
# module; they cannot carry @plugin.route decorators themselves.
plugin.add_route(play_movie, "/providers/play_movie/<movie_id>")
plugin.add_route(play_show, "/providers/play_show/<show_id>")
plugin.add_route(play_season, "/providers/play_season/<show_id>/<season_number>")
plugin.add_route(play_episode, "/providers/play_episode/<show_id>/<season_number>/<episode_number>")
def run():
    """Plugin entry point: dispatch the current route, surfacing any crash."""
    try:
        plugin.run()
    except Exception as exc:  # top-level boundary: log traceback, notify user
        logging.error("Caught exception:", exc_info=True)
        notification(str(exc))
|
__version__ = "0.0.2"

# Start-up banner; the version number is substituted into the template.
__banner__ = """
# aiosecretsdump {}
# Author: Tamas Jos @skelsec (info@skelsecprojects.com)
""".format(__version__)
|
#!/usr/bin/env python
""" Some tests for high-throughput calculations. """
import unittest
import os
import shutil
import glob
import psutil
import numpy as np
from .utils import MatadorUnitTest, REAL_PATH, detect_program
from matador.compute import ComputeTask
from matador.scrapers import cell2dict, param2dict, phonon2dict, magres2dict
HOSTNAME = os.uname()[1]
# Directories the test harness removes between runs; the hostname entry is
# presumably a node-named scratch dir created during a run - TODO confirm.
PATHS_TO_DEL = ["completed", "bad_castep", "input", "logs", HOSTNAME]
VERBOSITY = 2
EXECUTABLE = "castep"

# Real workflows only run when both CASTEP and mpirun are on the PATH.
CASTEP_PRESENT = detect_program(EXECUTABLE)
MPI_PRESENT = detect_program("mpirun")

# Leave two physical cores free for the host when actually running CASTEP.
NCORES = (
    max(psutil.cpu_count(logical=False) - 2, 1)
    if CASTEP_PRESENT and MPI_PRESENT
    else 1
)
@unittest.skipIf(not CASTEP_PRESENT, "CASTEP not found.")
class ElasticWorkflowTest(MatadorUnitTest):
    """ Run an elastic workflow calculation. """

    def test_bulk_mod(self):
        """Run the Si2 bulk-modulus workflow and verify its output files and
        that the three computed bulk moduli all land near 90 GPa."""
        for _f in glob.glob(REAL_PATH + "data/elastic_workflow/*"):
            shutil.copy(_f, ".")
        cell_dict, _ = cell2dict("bulk_mod.cell", db=False)
        param_dict, _ = param2dict("bulk_mod.param", db=False)
        _ = ComputeTask(
            res="Si2.res",
            ncores=NCORES,
            nnodes=None,
            node=None,
            cell_dict=cell_dict,
            param_dict=param_dict,
            verbosity=VERBOSITY,
            compute_dir="/tmp/scratch_test",
            workflow_kwargs={"plot": False, "num_volumes": 5},
        )
        self.assertFalse(os.path.isfile("completed/Si2.bib"))
        self.assertTrue(os.path.isfile("completed/Si2.check"))
        self.assertTrue(os.path.isfile("completed/Si2.bulk_mod.results"))
        self.assertTrue(os.path.isfile("completed/Si2.bulk_mod.res"))
        self.assertTrue(os.path.isfile("completed/Si2.bulk_mod.castep"))
        with open("completed/Si2.bulk_mod.results", "r") as f:
            flines = f.readlines()
        B = []
        for line in flines:
            if "bulk modulus" in line:
                B.append(float(line.split()[3]))
        # check all computed bulk mods are between 88-92
        self.assertEqual(len(B), 3)
        # BUG FIX: previously written as assertTrue(all(abs(b - 90) < 2) for b in B),
        # which hands a (always-truthy) generator object to assertTrue, so the
        # range check never actually ran. The all() must wrap the whole
        # generator expression.
        self.assertTrue(all(abs(b - 90) < 2 for b in B))
        self.assertFalse(os.path.isfile("completed/Si2.bulk_mod.pdf"))
        self.assertTrue(os.path.isfile("completed/Si2.res"))
        self.assertTrue(os.path.isfile("completed/Si2.geom"))
        self.assertTrue(os.path.isfile("completed/Si2.castep"))
        self.assertTrue(os.path.isfile("completed/Si2.bulk_mod.cell"))
        self.assertFalse(os.path.exists("/tmp/scratch_test"))
        self.assertFalse(os.path.exists("scratch_test_link"))
@unittest.skipIf(not CASTEP_PRESENT, "CASTEP not found.")
class PhononWorkflowTest(MatadorUnitTest):
    """Run a phonon workflow calculation."""

    def test_phonon(self):
        """Run the Si2 phonon workflow and validate its output artifacts."""
        for fixture in glob.glob(REAL_PATH + "data/phonon_workflow/*"):
            shutil.copy(fixture, ".")
        cell_dict, _ = cell2dict("Si.cell", db=False)
        param_dict, _ = param2dict("Si.param", db=False)
        ComputeTask(
            res="Si2.res",
            ncores=NCORES,
            nnodes=None,
            node=None,
            cell_dict=cell_dict,
            param_dict=param_dict,
            verbosity=VERBOSITY,
            compute_dir="tmpier_tst",
        )
        self.assertFalse(os.path.isfile("completed/Si2.bib"))
        for fname in ("Si2.check", "Si2.bands", "Si2.castep",
                      "Si2.phonon", "Si2.phonon_dos"):
            self.assertTrue(os.path.isfile("completed/" + fname))

        phonon_doc, succeeded = phonon2dict("completed/Si2.phonon")
        # FCC primitive cell: off-diagonal lattice vectors of length a/2.
        half = 2.7355124
        np.testing.assert_array_almost_equal(
            phonon_doc["lattice_cart"],
            np.array([[0, half, half], [half, 0, half], [half, half, 0]]),
            decimal=3,
        )
        abc = 3.869
        np.testing.assert_array_almost_equal(
            phonon_doc["lattice_abc"], np.array([[abc, abc, abc], [60, 60, 60]]), decimal=3
        )
        self.assertTrue(succeeded, msg="Failed to read phonon file")
        # Allow a tiny negative acoustic-mode tolerance near the Gamma point.
        self.assertGreater(np.min(phonon_doc["eigenvalues_q"]), -0.05)
        for fname in ("Si2.cell", "Si2.res"):
            self.assertTrue(os.path.isfile("completed/" + fname))
@unittest.skipIf(not CASTEP_PRESENT, "CASTEP not found.")
class MagresWorkflowTest(MatadorUnitTest):
    """Run a magres workflow calculation."""

    def test_magres(self):
        """Run the Si2 magres workflow and validate all expected artifacts."""
        for fixture in glob.glob(REAL_PATH + "data/magres_workflow/*"):
            shutil.copy(fixture, ".")
        cell_dict, _ = cell2dict("Si.cell", db=False)
        param_dict, _ = param2dict("Si.param", db=False)
        ComputeTask(
            res="Si2.res",
            ncores=NCORES,
            nnodes=None,
            node=None,
            cell_dict=cell_dict,
            param_dict=param_dict,
            verbosity=VERBOSITY,
            compute_dir="tmpier_tst",
            workflow_kwargs={"final_elec_energy_tol": 1e-9},
        )
        for fname in ("Si2.check", "Si2.bands", "Si2.castep", "Si2.magres",
                      "Si2.cell_magres", "Si2.param_magres",
                      "Si2.cell_scf", "Si2.param_scf"):
            self.assertTrue(os.path.isfile("completed/" + fname))

        param, succeeded = param2dict("completed/Si2.param_scf")
        self.assertTrue(succeeded, msg="Failed to read param file")
        self.assertEqual(param["elec_energy_tol"], 1e-12)
        param, succeeded = param2dict("completed/Si2.param_magres")
        self.assertEqual(param["elec_energy_tol"], 1e-12)
        self.assertTrue(succeeded, msg="Failed to read param file")

        magres, succeeded = magres2dict("completed/Si2.magres")
        self.assertTrue(succeeded, msg="Failed to read magres file")
        lat = 3.866895
        np.testing.assert_array_almost_equal(
            magres["lattice_abc"], np.array([[lat, lat, lat], [60, 60, 60]]), decimal=3
        )
        np.testing.assert_array_almost_equal(
            magres["chemical_shielding_isos"], np.array([129.577, 129.577]), decimal=2
        )
        for fname in ("Si2.cell", "Si2.res"):
            self.assertTrue(os.path.isfile("completed/" + fname))
@unittest.skipIf(not CASTEP_PRESENT, "CASTEP not found.")
class SpectralWorkflowTest(MatadorUnitTest):
    """Run a spectral workflow calculation."""

    # Artifacts produced by every spectral run (SCF + DOS steps).
    DOS_OUTPUTS = (
        "Si2-out.cell_dos", "Si2-out.cell_scf", "Si2.adaptive.agr",
        "Si2.adaptive.dat", "Si2.bands", "Si2.bands_dos", "Si2.castep",
        "Si2.castep_bin", "Si2.castep_dos", "Si2.castep_scf", "Si2.cell",
        "Si2.cell_dos", "Si2.cell_scf", "Si2.check", "Si2.cst_esp",
        "Si2.dome_bin", "Si2.dome_bin_broadening", "Si2.dome_bin_dos",
        "Si2.dome_bin_pdos", "Si2.odi", "Si2.odi_broadening", "Si2.odi_pdos",
        "Si2.odo", "Si2.odo_broadening", "Si2.odo_pdos", "Si2.param",
        "Si2.param_dos", "Si2.param_scf", "Si2.pdos.dat", "Si2.pdos_bin",
        "Si2.pdos_bin_broadening", "Si2.pdos_bin_dos", "Si2.pdos_bin_pdos",
        "Si2.res",
    )

    # Additional artifacts produced only when the dispersion step runs.
    DISPERSION_OUTPUTS = (
        "Si2-out.cell_dispersion", "Si2.bands_dispersion",
        "Si2.castep_dispersion", "Si2.cell_dispersion",
        "Si2.dome_bin_dispersion", "Si2.odi_pdis", "Si2.odo_pdis",
        "Si2.param_dispersion", "Si2.pdis.dat", "Si2.pdos_bin_dispersion",
        "Si2.pdos_bin_pdis",
    )

    def _stage_inputs(self):
        """Copy the spectral workflow fixture files into the working dir."""
        for fixture in glob.glob(REAL_PATH + "data/spectral_workflow/*"):
            shutil.copy(fixture, ".")

    def _run_task(self, cell_dict, param_dict, compute_dir):
        """Run a blocking ComputeTask with the shared Si2 settings."""
        ComputeTask(
            res="Si2.res",
            ncores=NCORES,
            nnodes=None,
            node=None,
            cell_dict=cell_dict,
            param_dict=param_dict,
            verbosity=VERBOSITY,
            compute_dir=compute_dir,
        )

    def _check_outputs(self, present, absent):
        """Assert each artifact is (or is not) present under completed/."""
        # The .bib citation file must never be swept into completed/.
        self.assertFalse(os.path.isfile("completed/Si2.bib"))
        for fname in present:
            self.assertTrue(os.path.isfile("completed/" + fname))
        for fname in absent:
            self.assertFalse(os.path.isfile("completed/" + fname))

    def test_full_spectral_in_compute_dir(self):
        """Full dispersion + DOS run staged inside a scratch compute dir."""
        self._stage_inputs()
        cell_dict, _ = cell2dict("Si.cell", db=False)
        param_dict, _ = param2dict("Si.param", db=False)
        self._run_task(cell_dict, param_dict, "tmpier_tst")
        self._check_outputs(self.DOS_OUTPUTS + self.DISPERSION_OUTPUTS, ())

    def test_dos_only_spectral(self):
        """Without a k-point path in the cell, the dispersion step is skipped."""
        self._stage_inputs()
        cell_dict, _ = cell2dict("Si.cell", db=False)
        del cell_dict["spectral_kpoints_path_spacing"]
        param_dict, _ = param2dict("Si.param", db=False)
        self._run_task(cell_dict, param_dict, "tmpier_tst")
        self._check_outputs(self.DOS_OUTPUTS, self.DISPERSION_OUTPUTS)

    def test_full_spectral(self):
        """Full dispersion + DOS run with no compute dir.

        NOTE(review): this variant scrapes the cell without db=False, unlike
        the other two tests - confirm whether that is intentional.
        """
        self._stage_inputs()
        cell_dict, _ = cell2dict("Si.cell")
        param_dict, _ = param2dict("Si.param", db=False)
        self._run_task(cell_dict, param_dict, None)
        self._check_outputs(self.DOS_OUTPUTS + self.DISPERSION_OUTPUTS, ())
if __name__ == "__main__":
    # Allow the test module to be executed directly.
    unittest.main()
|
# -*- coding: utf-8 -*-
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for composite upload tracker file functionality."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from collections import namedtuple
import errno
import json
import random
import six
import gslib
from gslib.exception import CommandException
from gslib.tracker_file import RaiseUnwritableTrackerFileException
from gslib.utils.constants import UTF8
# One previously-uploaded temporary component as recorded in the tracker file:
# the component object's name and its generation (None when unknown, e.g. for
# legacy-format tracker data).
ObjectFromTracker = namedtuple('ObjectFromTracker', 'object_name generation')
class _CompositeUploadTrackerEntry(object):
  """JSON key names forming the composite upload tracker file schema."""

  # Top-level keys of the tracker JSON document.
  COMPONENTS_LIST = 'components'
  ENC_SHA256 = 'encryption_key_sha256'
  PREFIX = 'prefix'

  # Keys of each entry inside COMPONENTS_LIST.
  COMPONENT_NAME = 'component_name'
  COMPONENT_GENERATION = 'component_generation'
def ReadParallelUploadTrackerFile(tracker_file_name, logger):
  """Read the tracker file from the last parallel composite upload attempt.

  If it exists, the tracker file is of the format described in
  WriteParallelUploadTrackerFile or a legacy format. If the file doesn't exist
  or is formatted incorrectly, then the upload will start from the beginning.

  This function is not thread-safe and must be protected by a lock if
  called within Command.Apply.

  Args:
    tracker_file_name: The name of the tracker file to read parse.
    logger: logging.Logger for outputting log messages.

  Returns:
    enc_key_sha256: Encryption key SHA256 used to encrypt the existing
        components, or None if an encryption key was not used.
    component_prefix: String prefix used in naming the existing components, or
        None if no prefix was found.
    existing_components: A list of ObjectFromTracker objects representing
        the set of files that have already been uploaded.
  """
  enc_key_sha256 = None
  prefix = None
  existing_components = []
  tracker_file = None
  # If we already have a matching tracker file, get the serialization data
  # so that we can resume the upload.
  try:
    tracker_file = open(tracker_file_name, 'r')
    tracker_data = tracker_file.read()
    tracker_json = json.loads(tracker_data)
    enc_key_sha256 = tracker_json[_CompositeUploadTrackerEntry.ENC_SHA256]
    prefix = tracker_json[_CompositeUploadTrackerEntry.PREFIX]
    for component in tracker_json[_CompositeUploadTrackerEntry.COMPONENTS_LIST]:
      existing_components.append(
          ObjectFromTracker(
              component[_CompositeUploadTrackerEntry.COMPONENT_NAME],
              component[_CompositeUploadTrackerEntry.COMPONENT_GENERATION]))
  except IOError as e:
    # Ignore non-existent file (happens first time a upload is attempted on an
    # object, or when re-starting an upload after a
    # ResumableUploadStartOverException), but warn user for other errors.
    if e.errno != errno.ENOENT:
      logger.warn(
          'Couldn\'t read upload tracker file (%s): %s. Restarting '
          'parallel composite upload from scratch.', tracker_file_name,
          e.strerror)
  except (KeyError, ValueError) as e:
    # The file was readable but is not valid current-format JSON (ValueError)
    # or lacks expected keys (KeyError); fall back to the legacy line-based
    # format. NOTE: this branch relies on `tracker_data` having been assigned
    # above, which holds whenever open() and read() succeeded.
    # Legacy format did not support user-supplied encryption.
    enc_key_sha256 = None
    (prefix, existing_components) = _ParseLegacyTrackerData(tracker_data)
  finally:
    if tracker_file:
      tracker_file.close()
  return (enc_key_sha256, prefix, existing_components)
def _ParseLegacyTrackerData(tracker_data):
  """Parses a legacy parallel composite upload tracker file.

  Args:
    tracker_data: Legacy tracker file contents.

  Returns:
    component_prefix: The prefix used in naming the existing components, or
        None if no prefix was found.
    existing_components: A list of ObjectFromTracker objects representing
        the set of files that have already been uploaded.
  """
  # Legacy (pre-JSON) layout: line 1 is the prefix, followed by
  # (object_name, generation) line pairs; the trailing blank line is dropped.
  lines = tracker_data.split('\n')[:-1]
  if not lines:
    return (None, [])
  prefix = lines[0]
  existing_components = []
  pair_start = 1
  while pair_start < len(lines) - 1:
    name = lines[pair_start]
    # An empty generation string means "no generation recorded".
    generation = lines[pair_start + 1] or None
    existing_components.append(ObjectFromTracker(name, generation))
    pair_start += 2
  return (prefix, existing_components)
def ValidateParallelCompositeTrackerData(tracker_file_name, existing_enc_sha256,
                                         existing_prefix, existing_components,
                                         current_enc_key_sha256, bucket_url,
                                         command_obj, logger, delete_func,
                                         delete_exc_handler):
  """Validates that tracker data matches the current encryption key.

  If the data does not match, makes a best-effort attempt to delete existing
  temporary component objects encrypted with the old key.

  Args:
    tracker_file_name: String file name of tracker file.
    existing_enc_sha256: Encryption key SHA256 used to encrypt the existing
        components, or None if an encryption key was not used.
    existing_prefix: String prefix used in naming the existing components, or
        None if no prefix was found.
    existing_components: A list of ObjectFromTracker objects representing
        the set of files that have already been uploaded.
    current_enc_key_sha256: Current Encryption key SHA256 that should be used
        to encrypt objects.
    bucket_url: Bucket URL in which the components exist.
    command_obj: Command class for calls to Apply.
    logger: logging.Logger for outputting log messages.
    delete_func: command.Apply-callable function for deleting objects.
    delete_exc_handler: Exception handler for delete_func.

  Returns:
    prefix: existing_prefix, or None if the encryption key did not match.
    existing_components: existing_components, or empty list if the encryption
        key did not match.
  """
  if six.PY3:
    # Normalize both digests to bytes so the comparison below cannot report a
    # spurious mismatch between str and bytes forms of the same value.
    if isinstance(existing_enc_sha256, str):
      existing_enc_sha256 = existing_enc_sha256.encode(UTF8)
    if isinstance(current_enc_key_sha256, str):
      current_enc_key_sha256 = current_enc_key_sha256.encode(UTF8)
  if existing_prefix and existing_enc_sha256 != current_enc_key_sha256:
    try:
      logger.warn(
          'Upload tracker file (%s) does not match current encryption '
          'key. Deleting old components and restarting upload from '
          'scratch with a new tracker file that uses the current '
          'encryption key.', tracker_file_name)
      components_to_delete = []
      for component in existing_components:
        url = bucket_url.Clone()
        url.object_name = component.object_name
        url.generation = component.generation
        # BUG FIX: the cloned URL was previously never collected, so Apply
        # received an empty list and the stale components were never deleted.
        components_to_delete.append(url)
      command_obj.Apply(
          delete_func,
          components_to_delete,
          delete_exc_handler,
          arg_checker=gslib.command.DummyArgChecker,
          parallel_operations_override=command_obj.ParallelOverrideReason.SPEED)
    except:  # pylint: disable=bare-except
      # Regardless of why we can't clean up old components, need to proceed
      # with the user's original intent to upload the file, so merely warn.
      component_names = [
          component.object_name for component in existing_components
      ]
      logger.warn(
          'Failed to delete some of the following temporary objects:\n%s\n'
          '(Continuing on to re-upload components from scratch.)',
          '\n'.join(component_names))
    # Encryption keys have changed, so the old components and prefix
    # cannot be used.
    return (None, [])
  return (existing_prefix, existing_components)
def GenerateComponentObjectPrefix(encryption_key_sha256=None):
  """Generates a random prefix for component objects.

  Args:
    encryption_key_sha256: Encryption key SHA256 that will be used to encrypt
        the components. This is hashed into the prefix to avoid collision
        during resumption with a different encryption key.

  Returns:
    String prefix for use in the composite upload.
  """
  modulus = 10**10
  # Mix a random draw with the key hash, then wrap into [0, 10^10).
  seed = random.randint(1, modulus - 1) + hash(encryption_key_sha256)
  return str(seed % modulus)
def WriteComponentToParallelUploadTrackerFile(tracker_file_name,
                                              tracker_file_lock,
                                              component,
                                              logger,
                                              encryption_key_sha256=None):
  """Rewrites an existing tracker file with info about the uploaded component.

  Follows the format described in _CreateParallelUploadTrackerFile.

  Args:
    tracker_file_name: Tracker file to append to.
    tracker_file_lock: Thread and process-safe Lock protecting the tracker file.
    component: ObjectFromTracker describing the object that was uploaded.
    logger: logging.Logger for outputting log messages.
    encryption_key_sha256: Encryption key SHA256 for use in this upload, if any.
  """
  with tracker_file_lock:
    existing_enc_key_sha256, prefix, existing_components = (
        ReadParallelUploadTrackerFile(tracker_file_name, logger))
    # Refuse to mix components encrypted under different keys.
    if existing_enc_key_sha256 != encryption_key_sha256:
      raise CommandException(
          'gsutil client error: encryption key SHA256 (%s) in tracker file '
          'does not match encryption key SHA256 (%s) of component %s' %
          (existing_enc_key_sha256, encryption_key_sha256,
           component.object_name))
    # Persist the previous components plus the newly completed one.
    WriteParallelUploadTrackerFile(
        tracker_file_name,
        prefix,
        existing_components + [component],
        encryption_key_sha256=encryption_key_sha256)
def WriteParallelUploadTrackerFile(tracker_file_name,
                                   prefix,
                                   components,
                                   encryption_key_sha256=None):
  """Writes information about components that were successfully uploaded.

  The tracker file is serialized JSON of the form:
  {
    "encryption_key_sha256": sha256 hash of encryption key (or null),
    "prefix": Prefix used for the component objects,
    "components": [
      {
        "component_name": Component object name,
        "component_generation": Component object generation (or null),
      }, ...
    ]
  }
  where N is the number of components that have been successfully uploaded.

  This function is not thread-safe and must be protected by a lock if
  called within Command.Apply.

  Args:
    tracker_file_name: The name of the parallel upload tracker file.
    prefix: The generated prefix that used for uploading any existing
        components.
    components: A list of ObjectFromTracker objects that were uploaded.
    encryption_key_sha256: Encryption key SHA256 for use in this upload, if any.
  """
  if six.PY3:
    # json cannot serialize bytes; store the digest as an ASCII string.
    if isinstance(encryption_key_sha256, bytes):
      encryption_key_sha256 = encryption_key_sha256.decode('ascii')
  tracker_components = []
  for component in components:
    tracker_components.append({
        _CompositeUploadTrackerEntry.COMPONENT_NAME: component.object_name,
        _CompositeUploadTrackerEntry.COMPONENT_GENERATION: component.generation
    })
  tracker_file_data = {
      _CompositeUploadTrackerEntry.COMPONENTS_LIST: tracker_components,
      _CompositeUploadTrackerEntry.ENC_SHA256: encryption_key_sha256,
      _CompositeUploadTrackerEntry.PREFIX: prefix
  }
  try:
    # Opening with mode 'w' truncates any previous contents, so the extra
    # open(...).close() "clear" step that used to precede this was redundant
    # and has been removed.
    with open(tracker_file_name, 'w') as fp:
      fp.write(json.dumps(tracker_file_data))
  except IOError as e:
    RaiseUnwritableTrackerFileException(tracker_file_name, e.strerror)
|
# user/serializers.py
from rest_framework import serializers
from dj_rest_auth.serializers import UserDetailsSerializer
from user.models import UserProfile
class ProjectField(serializers.RelatedField):
    """Custom RelatedField serializer rendering a related project as a dict."""

    def to_representation(self, value):
        """Return the project's name, primary key and image URL."""
        return {
            'project_name': value.project_name,
            'id': value.id,
            'image_url': value.image_url,
        }
class ReviewField(serializers.RelatedField):
    """Custom RelatedField Serializer for representation of a review"""

    def to_representation(self, value):
        # Embed a minimal summary of the parent project alongside the review.
        project_summary = {
            'project_name': value.project.project_name,
            'id': value.project.id,
        }
        return {
            'review_name': value.review_name,
            'id': value.id,
            'project': project_summary,
        }
class UserProfileSerializer(serializers.ModelSerializer):
    """Serializer for the per-user profile record (UserProfile model)."""

    class Meta:
        model = UserProfile
        fields = ('company_name', 'image_url',
                  'is_admin', 'created_at', 'updated_at')
        # Only company_name and image_url are client-writable; the admin flag
        # and timestamps are managed server-side.
        extra_kwargs = {
            'is_admin': {'read_only': True},
            'created_at': {'read_only': True},
            'updated_at': {'read_only': True},
        }
class UserSerializer(UserDetailsSerializer):
    """dj-rest-auth user serializer extended with the nested profile and
    the user's related projects/reviews (all read-only except the profile)."""

    # `source` points at the reverse one-to-one accessor on the user model.
    profile = UserProfileSerializer(source='userprofile')
    projects = ProjectField(many=True, read_only=True)
    reviews_created = ReviewField(many=True, read_only=True)
    reviews = ReviewField(many=True, read_only=True)
    # Rename `pk` to `id` in the output.
    id = serializers.IntegerField(source='pk', read_only=True)

    class Meta(UserDetailsSerializer.Meta):
        # Remove pk from the field list inherited from dj-rest-auth, then
        # re-add it first under the name 'id' followed by our extra fields.
        fields_to_list = list(UserDetailsSerializer.Meta.fields)
        fields_to_list.remove('pk')
        list_to_fields = tuple(fields_to_list)
        fields = ('id',) + list_to_fields + \
            ('profile', 'projects', 'reviews_created', 'reviews')
        extra_kwargs = {'password': {'write_only': True}}

    def update(self, instance, validated_data):
        """Update the user and its nested profile in one call."""
        userprofile_serializer = self.fields['profile']
        userprofile_instance = instance.userprofile
        # Nested data arrives under the source name, not the field name.
        userprofile_data = validated_data.pop('userprofile', {})
        # to access the 'company_name' field in here
        # company_name = userprofile_data.get('company_name')
        # update the userprofile fields
        userprofile_serializer.update(
            userprofile_instance, userprofile_data)
        instance = super().update(instance, validated_data)
        return instance
class UserViewSerializer(UserDetailsSerializer):
    """This serializer used for retreving list of users and user detail only"""

    # Nested read of the related profile; no write path in this serializer.
    profile = UserProfileSerializer(source='userprofile')
    id = serializers.IntegerField(source='pk', read_only=True)

    class Meta(UserDetailsSerializer.Meta):
        # Remove pk from the field and expose it as 'id' instead.
        fields_to_list = list(UserDetailsSerializer.Meta.fields)
        fields_to_list.remove('pk')
        list_to_fields = tuple(fields_to_list)
        fields = ('id',) + list_to_fields + ('profile',)
|
"""
Module containing the `~halotools.empirical_models.LogNormalScatterModel` class
used to model stochasticity in the mapping between stellar mass and halo properties.
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
from astropy.utils.misc import NumpyRNGContext
from .. import model_defaults
from .. import model_helpers as model_helpers
from ...utils.array_utils import custom_len
__all__ = ('LogNormalScatterModel', )
__author__ = ('Andrew Hearin', )
class LogNormalScatterModel(object):
    """ Simple model used to generate log-normal scatter
    in a stellar-to-halo-mass type relation.

    The scatter level is a spline through (abscissa, ordinate) control points
    in log10(halo mass); the ordinates live in ``self.param_dict`` so they can
    be varied by an MCMC-style parameter update.
    """

    def __init__(self,
                 prim_haloprop_key=model_defaults.default_smhm_haloprop,
                 **kwargs):
        """
        Parameters
        ----------
        prim_haloprop_key : string, optional
            String giving the column name of the primary halo property governing
            the level of scatter.
            Default is set in the `~halotools.empirical_models.model_defaults` module.

        scatter_abscissa : array_like, optional
            Array of values giving the abscissa at which
            the level of scatter will be specified by the input ordinates.
            Default behavior will result in constant scatter at a level set in the
            `~halotools.empirical_models.model_defaults` module.

        scatter_ordinates : array_like, optional
            Array of values defining the level of scatter at the input abscissa.
            Default behavior will result in constant scatter at a level set in the
            `~halotools.empirical_models.model_defaults` module.

        Examples
        ---------
        >>> scatter_model = LogNormalScatterModel()
        >>> scatter_model = LogNormalScatterModel(prim_haloprop_key='halo_mvir')

        To implement variable scatter, we need to define the level
        of log-normal scatter at a set of control values
        of the primary halo property. Here we give an example of a model
        in which the scatter is 0.3 dex for Milky Way table and 0.1 dex in cluster table:

        >>> scatter_abscissa = [12, 15]
        >>> scatter_ordinates = [0.3, 0.1]
        >>> scatter_model = LogNormalScatterModel(scatter_abscissa=scatter_abscissa, scatter_ordinates=scatter_ordinates)
        """
        default_scatter = model_defaults.default_smhm_scatter
        self.prim_haloprop_key = prim_haloprop_key
        # Both control arrays must be supplied together; if either is missing
        # we silently fall back to constant scatter at the package default.
        if ('scatter_abscissa' in list(kwargs.keys())) and ('scatter_ordinates' in list(kwargs.keys())):
            self.abscissa = np.atleast_1d(kwargs['scatter_abscissa'])
            self.ordinates = np.atleast_1d(kwargs['scatter_ordinates'])
        else:
            self.abscissa = [12]
            self.ordinates = [default_scatter]
        self._initialize_param_dict()
        self._update_interpol()

    def mean_scatter(self, **kwargs):
        """ Return the amount of log-normal scatter that should be added
        to the galaxy property as a function of the input table.

        Parameters
        ----------
        prim_haloprop : array, optional
            Array of mass-like variable upon which occupation statistics are based.
            If ``prim_haloprop`` is not passed, then ``table`` keyword argument must be passed.

        table : object, optional
            Data table storing halo catalog.
            If ``table`` is not passed, then ``prim_haloprop`` keyword argument must be passed.

        Returns
        -------
        scatter : array_like
            Array containing the amount of log-normal scatter evaluated
            at the input table.
        """
        # Retrieve the array storing the mass-like variable
        if 'table' in list(kwargs.keys()):
            mass = kwargs['table'][self.prim_haloprop_key]
        elif 'prim_haloprop' in list(kwargs.keys()):
            mass = kwargs['prim_haloprop']
        else:
            raise KeyError("Must pass one of the following keyword arguments to mean_scatter:\n"
                           "``table`` or ``prim_haloprop``")
        # Rebuild the spline in case self.param_dict was changed externally
        # since the last evaluation.
        self._update_interpol()
        return self.spline_function(np.log10(mass))

    def scatter_realization(self, seed=None, **kwargs):
        """ Return the amount of log-normal scatter that should be added
        to the galaxy property as a function of the input table.

        Parameters
        ----------
        prim_haloprop : array, optional
            Array of mass-like variable upon which occupation statistics are based.
            If ``prim_haloprop`` is not passed, then ``table`` keyword argument must be passed.

        table : object, optional
            Data table storing halo catalog.
            If ``table`` is not passed, then ``prim_haloprop`` keyword argument must be passed.

        seed : int, optional
            Random number seed. Default is None.

        Returns
        -------
        scatter : array_like
            Array containing a random variable realization that should be summed
            with the galaxy property to add scatter.
        """
        scatter_scale = self.mean_scatter(**kwargs)
        # initialize result with zero scatter result
        result = np.zeros(len(scatter_scale))
        # only draw from a normal distribution for non-zero values of scatter
        # (np.random.normal raises for scale == 0 entries in some versions,
        # and zero scatter should stay exactly zero anyway)
        mask = (scatter_scale > 0.0)
        with NumpyRNGContext(seed):
            result[mask] = np.random.normal(loc=0, scale=scatter_scale[mask])
        return result

    def _update_interpol(self):
        """ Private method that updates the interpolating functon used to
        define the level of scatter as a function of the input table.
        If this method is not called after updating ``self.param_dict``,
        changes in ``self.param_dict`` will not alter the model behavior.
        """
        # Spline degree is capped both by scipy's maximum and by the number
        # of control points (k must be < number of points).
        scipy_maxdegree = 5
        degree_list = [scipy_maxdegree, custom_len(self.abscissa)-1]
        self.spline_degree = np.min(degree_list)
        # Ordinates are re-read from param_dict so external parameter updates
        # take effect here.
        self.ordinates = [self.param_dict[self._get_param_key(i)] for i in range(len(self.abscissa))]
        self.spline_function = model_helpers.custom_spline(
            self.abscissa, self.ordinates, k=self.spline_degree)

    def _initialize_param_dict(self):
        """ Private method used to initialize ``self.param_dict``.
        """
        self.param_dict = {}
        # One tunable parameter per control point: scatter_model_param1, ...
        for ipar, val in enumerate(self.ordinates):
            key = self._get_param_key(ipar)
            self.param_dict[key] = val

    def _get_param_key(self, ipar):
        """ Private method used to retrieve the key of self.param_dict
        that corresponds to the appropriately selected i^th ordinate
        defining the behavior of the scatter model.
        """
        # Keys are 1-based by convention: scatter_model_param1, param2, ...
        return 'scatter_model_param'+str(ipar+1)
|
import queue

# Demonstrate LIFO ordering: items come back out newest-first.
L = queue.LifoQueue(maxsize=5)
for item in (5, 6, 1):
    L.put(item)
for _ in range(3):
    print(L.get())
# All three items were removed, so only the "empty" branch fires.
if L.full():
    print("full")
if L.empty():
    print("empty")
|
def solution(n, arr1, arr2):
    """Decode two n-row bitmask maps into an ASCII picture.

    Each row of the result is the bitwise OR of the corresponding rows,
    rendered as '#' for set bits and ' ' for clear bits, width n.
    """
    decoded_rows = []
    for i in range(n):
        merged = arr1[i] | arr2[i]
        # Binary string of the OR-ed row, left-padded with zeros to width n,
        # then mapped bit-by-bit onto the display characters.
        bits = format(merged, 'b').rjust(n, '0')
        decoded_rows.append(bits.replace('1', '#').replace('0', ' '))
    return decoded_rows


if __name__ == '__main__':
    print(solution(
        6,
        [46, 33, 33, 22, 31, 50],
        [27, 56, 19, 14, 14, 10],
    ))
|
from django.apps import AppConfig
class RankestimateConfig(AppConfig):
    """Django application configuration for the ``rankestimate`` app."""

    # Dotted path Django uses to register this application.
    name = 'rankestimate'
|
from aiogram import types
from aiogram.dispatcher.filters.builtin import CommandStart
from utils.misc.msg_dict import texts
from keyboards.inline.startKeyboard import menuStart
from loader import dp, db, bot
import asyncpg
from aiogram.dispatcher import FSMContext
@dp.message_handler(CommandStart())
async def bot_start(message: types.Message):
    """Greet a registered user in their saved language, otherwise show the
    language-selection keyboard."""
    user = await db.select_user(telegram_id=message.from_user.id)
    # select_user yields no usable row for unknown users, so the subscripting
    # raises; catch only those errors instead of a bare `except:`, which in an
    # async handler would also swallow asyncio.CancelledError.
    try:
        registered = user[3] == message.from_user.id
    except (TypeError, IndexError, KeyError):
        registered = False
    if registered:
        if user[4] == "uzbek":
            await message.answer(texts["uz_start"])
        else:
            await message.answer(texts["en_start"])
    else:
        await message.answer(texts["choose_lang"], reply_markup=menuStart)
@dp.callback_query_handler(text="uzbek")
async def Salom(call: types.CallbackQuery):
    """Register the user with Uzbek UI language (translation target 'uz')."""
    try:
        user = await db.add_user(telegram_id=call.from_user.id,
                                 full_name=call.from_user.full_name,
                                 username=call.from_user.username,
                                 lang="uzbek",
                                 sourc="auto",
                                 target="uz")
    except asyncpg.exceptions.UniqueViolationError:
        # Already registered: load the existing row instead of inserting.
        user = await db.select_user(telegram_id=call.from_user.id)
    # Remove the keyboard message and send the Uzbek welcome text.
    await call.message.delete()
    await call.message.answer(texts["uz_start"])
@dp.callback_query_handler(text="english")
async def Hello(call: types.CallbackQuery):
    """Register the user with English UI language (translation target 'en')."""
    try:
        user = await db.add_user(telegram_id=call.from_user.id,
                                 full_name=call.from_user.full_name,
                                 username=call.from_user.username,
                                 lang="english",
                                 sourc="auto",
                                 target="en")
    except asyncpg.exceptions.UniqueViolationError:
        # Already registered: load the existing row instead of inserting.
        user = await db.select_user(telegram_id=call.from_user.id)
    # Remove the keyboard message and send the English welcome text.
    await call.message.delete()
    await call.message.answer(texts["en_start"])
@dp.message_handler(text="/lang")
async def change_lang(msg: types.Message, state: FSMContext):
    """Show the language keyboard and enter the 'lang' FSM state."""
    await msg.answer(texts["choose_lang"], reply_markup=menuStart)
    await state.set_state("lang")
@dp.callback_query_handler(state='lang')
async def choose_lang(call: types.CallbackQuery, state: FSMContext):
    """Persist the language picked from the /lang keyboard and confirm it."""
    # NOTE(review): call.data is stored verbatim; this assumes the keyboard
    # only emits "uzbek"/"english" -- any other callback data would be saved
    # to the database without a confirmation message. Verify against menuStart.
    lang = call.data
    await db.update_user_lang(lang=lang, telegram_id=call.from_user.id)
    await call.message.delete()
    if lang == "uzbek":
        await call.message.answer(texts["set_uz"])
    elif lang == "english":
        await call.message.answer(texts["set_en"])
    await state.finish()
@dp.message_handler(state="lang")
async def post_unknown(message: types.Message):
    """Ask the user (in their current language) to use the keyboard buttons
    while waiting in the 'lang' state."""
    user = await db.select_user(telegram_id=message.from_user.id)
    # Fix: "</>" is not a valid closing tag in Telegram's HTML parse mode and
    # makes sendMessage fail with a parse error; close the <b> tag properly.
    if user[4] == "english":
        await message.answer("<b>Please choose your language.</b>")
    else:
        await message.answer("<b>Iltimos tilni tanlang.</b>")
|
import torch
import torch.nn as nn
from ilm import ilm_IN
class ConvNet(nn.Module):
    """3D CNN classifier: eight conv layers with ilm_IN instance norm and six
    2x max-pool downsamplings, followed by a four-layer MLP head (LayerNorm
    between layers) producing 3 output logits.

    NOTE(review): the flatten below assumes the final feature map is
    320 channels x 2 x 2 x 1 voxels (= 1280 features), i.e. a fixed input
    volume size that pools down to exactly that shape -- confirm against the
    data loader.
    """

    def __init__(self):
        super(ConvNet, self).__init__()
        # One ReLU instance per use-site (ReLU is stateless; kept distinct,
        # presumably for hook/visualization purposes -- confirm).
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.relu3 = nn.ReLU()
        self.relu4 = nn.ReLU()
        self.relu5 = nn.ReLU()
        self.relu6 = nn.ReLU()
        self.relu7 = nn.ReLU()
        self.relu8 = nn.ReLU()
        self.relu9 = nn.ReLU()
        # Shared 2x2x2 stride-2 pooling reused after several conv stages.
        self.pool_stride_2 = nn.MaxPool3d(kernel_size=(2,2,2), stride=2)  # (size, stride)
        self.droput_3d_05 = nn.Dropout3d(p=0.5)
        self.droput_1d_02 = nn.Dropout(p=0.25)
        self.conv1 = nn.Conv3d(1, 5, 5, padding=(2, 2, 2))  # (input channels, ouput channels (no. of filters), kernel size)
        self.ilm_in1 = ilm_IN(channels=5)
        self.conv2 = nn.Conv3d(5, 10, 5, padding=(2, 2, 2))
        # ReLU
        # pooling
        self.ilm_in2 = ilm_IN(channels=10)
        self.conv3 = nn.Conv3d(10, 15, 3, padding=(1, 1, 1))
        self.ilm_in3 = ilm_IN(channels=15)
        self.conv4 = nn.Conv3d(15, 20, 3, padding=(1, 1, 1))
        # ReLU
        # pooling
        self.ilm_in4 = ilm_IN(channels=20)
        self.conv5 = nn.Conv3d(20, 40, 3, padding=(1, 1, 1))
        # ReLU
        # pooling
        self.ilm_in5 = ilm_IN(channels=40)
        self.conv6 = nn.Conv3d(40, 80, 3, padding=(1, 1, 1))
        # ReLU
        # pooling
        self.ilm_in6 = ilm_IN(channels=80)
        self.conv7 = nn.Conv3d(80, 160, 3, padding=(1, 1, 1))
        # ReLU
        # pooling
        self.ilm_in7 = ilm_IN(channels=160)
        self.conv8 = nn.Conv3d(160, 320, 3, padding=(1, 1, 1))
        # ReLU
        # pooling
        self.ilm_in8 = ilm_IN(channels=320)
        # flatten
        # Dropout3d
        self.fc1 = nn.Linear(320*2*2*1, 65)
        self.ln9 = nn.LayerNorm(65)  ## Layer Norm has the same functionality as Instance Norm when it comes to 1d data. InstanceNorm1d function is not suitable for flattened 1d data
        ## LN is thus used in the fully connected layers.
        self.fc2 = nn.Linear(65, 40)
        self.ln10 = nn.LayerNorm(40)
        self.fc3 = nn.Linear(40, 20)
        self.ln11 = nn.LayerNorm(20)
        # Dropout
        self.fc4 = nn.Linear(20, 3)

    def forward(self, x):
        """Run the conv stack, flatten to 1280 features, then the MLP head."""
        x = self.conv1(x)
        x = self.ilm_in1(x)
        x = self.pool_stride_2(self.relu1(self.conv2(x)))
        x = self.ilm_in2(x)
        x = self.conv3(x)
        x = self.ilm_in3(x)
        x = self.pool_stride_2(self.relu2(self.conv4(x)))
        x = self.ilm_in4(x)
        x = self.pool_stride_2(self.relu3(self.conv5(x)))
        x = self.ilm_in5(x)
        x = self.pool_stride_2(self.relu4(self.conv6(x)))
        x = self.ilm_in6(x)
        x = self.pool_stride_2(self.relu5(self.conv7(x)))
        x = self.ilm_in7(x)
        x = self.pool_stride_2(self.relu6(self.conv8(x)))
        x = self.ilm_in8(x)
        x = self.droput_3d_05(x)
        # Flatten to (batch, 1280); the -1 batch dim assumes each sample
        # yields exactly 1280 features (see class note).
        x = x.view(-1, 1280)
        x = self.relu7(self.fc1(x))
        x = self.ln9(x)
        x = self.relu8(self.fc2(x))
        x = self.ln10(x)
        x = self.relu9(self.fc3(x))
        x = self.ln11(x)
        x = self.droput_1d_02(x)
        # Raw logits; loss function is expected to apply softmax/CE outside.
        x = self.fc4(x)
        return x
|
#!/usr/bin/python
import socket
import cv2
import numpy
def recvall(sock, count):
buf = b''
while count:
newbuf = sock.recv(count)
if not newbuf: return None
buf += newbuf
count -= len(newbuf)
return buf
#TCP_IP = 'localhost'
TCP_IP = '192.168.0.106'
TCP_PORT = 5001

try:
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((TCP_IP, TCP_PORT))
    # listen() takes an int backlog; the old listen(True) only worked
    # because True == 1.
    s.listen(1)
    print('A la escucha ...')
    conn, addr = s.accept()
    print('Conectado desde ' + str(addr))
    while 1:
        # 16-byte ASCII length header, then the JPEG payload.
        length = recvall(conn, 16)
        if length is None:
            # Peer closed the connection; the old code crashed on int(None).
            break
        stringData = recvall(conn, int(length))
        # numpy.fromstring on binary data is deprecated (removed in NumPy 2);
        # frombuffer is the drop-in, zero-copy replacement.
        data = numpy.frombuffer(stringData, dtype='uint8')
        decimg = cv2.imdecode(data, 1)
        cv2.imshow('SERVER', decimg)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:  # ESC quits
            break
finally:
    # The previous `except: raise` was a no-op; cleanup in finally suffices.
    cv2.destroyAllWindows()
    s.close()
|
"""Safe in Cloud dabase decryption"""
import struct
import io
import zlib
from Crypto.Cipher import AES
from passlib.utils import pbkdf2
def read_sic_db(filename, password):
    """Read the database and return the XML content.

    File layout (after a 3-byte magic/version header): length-prefixed salt,
    IV and a second salt (unused here), followed by an AES-CBC-encrypted
    block that holds the *real* IV and key, followed by the zlib-compressed
    XML encrypted with that inner key.
    """
    with open(filename, 'rb') as db_file:
        db_file.seek(3)  # skip magic + version
        # Get salt and iv
        salt = __get_array(db_file)
        init_vector = __get_array(db_file)
        __skip_array(db_file)  # skip salt 2
        # decrypt iv and password block
        # (PBKDF2 with 10000 rounds, 32-byte key; presumably HMAC-SHA1, the
        # passlib default -- confirm against the app's format spec)
        skey = pbkdf2.pbkdf2(password, salt, 10000, 32)
        cipher = AES.new(skey, AES.MODE_CBC, init_vector)
        iv_pw_block = cipher.decrypt(__get_array(db_file))
        # extract iv and password from block
        byte_buffer = io.BytesIO(iv_pw_block)
        iv2 = __get_array(byte_buffer)
        pass2 = __get_array(byte_buffer)
        __skip_array(byte_buffer)  # skip check
        # decrypt data with the inner key/IV
        cipher = AES.new(pass2, AES.MODE_CBC, iv2)
        data = cipher.decrypt(db_file.read())
        # decompress data; decompressobj tolerates the CBC padding bytes left
        # after the zlib stream ends
        decompressor = zlib.decompressobj()
        return decompressor.decompress(data) + decompressor.flush()
def __get_byte(file):
    """Read a single byte from *file* and return its integer value."""
    byte = file.read(1)
    return ord(byte)
def __get_short(file):
    """Read a big-endian unsigned 16-bit integer from *file*."""
    raw = file.read(2)
    return int.from_bytes(raw, byteorder='big')
def __get_array(file):
    """Read a length-prefixed byte string (1-byte length, then payload)."""
    length = ord(file.read(1))
    (payload,) = struct.unpack('%ds' % length, file.read(length))
    return payload
def __skip_array(file):
    """Advance past a length-prefixed byte string without returning it."""
    length = ord(file.read(1))
    file.seek(length, 1)
|
'''
This is a converter for depth profiles created using RUMP the genplot based Rutherford Backscattering Spectrometry Analysis tool.
The discrete layers are treated as steps with a width defined by dx.
This program doubles the number of lines of code for a depth profile created using the WRITEPRO command in the SIM Enviroment
and allows convienient plotting with the Graphics Layout Engine GLE or any other plotting tool that will look at a comma separated table.
'''
from os import listdir
print "This is the depth profile converter for XRUMP.\n It was created by Robert Balsano in February 2015. \n It is designed to be used with xrump and the Graphics Layout Engine GLE."
filepath1= raw_input("Please enter the full location of the RUMP profile you would like to convert")
filepath3= raw_input("Please enter the full location of the CSV file you would like to create")
file1 = open(filepath1,"r")
file3 = open(filepath3,"w")
linesave = file1.read()
theory = []
line = ""
for char in linesave:
if char !="\n":
line += char
if char == "\n":
theory.append(line)
line = ""
file1.close
#file1.close()
file3.write("!"+ theory[0]+ "\n")
for i in range(1, len(theory)-1):
#file3.write(theory[i]+ "\n")
spacecounter = 0
collum = []
element = ""
for char in theory[i]:
if spacecounter == 0 and (char == " " or char == "\t"):
collum.append(element)
element = ""
#file3.write(", ")
if char != " " and char!= "\t":
spacecounter = 0
#capture the cell
element += char
else:
spacecounter += 1
collum.append(element)
element = ""
for elt in collum:
file3.write(elt + ", ")
file3.write("\n")
for i in range(0, len(collum)):
if i < 1 or i>1:
file3.write(collum[i] + ", ")
if i == 1:
file3.write(str(float(collum[1])+float(collum[3])) + ", ")
file3.write("\n")
#print len(theory)
file3.close()
input("conversion has completed press enter to exit")
|
from pika_queue import PikaQueue
from settings import RABBIT_ACCOUNT, RABBIT_PASSWORD, RABBIT_HOST, RABBIT_PORT, ETL_QUEUE, PREDICT_ERROR_QUEUE
def publish_message(result_tid):
    """Publish *result_tid* to the ETL queue, then close the connection."""
    channel = PikaQueue(RABBIT_HOST, RABBIT_PORT, RABBIT_ACCOUNT, RABBIT_PASSWORD)
    channel.AddToQueue(ETL_QUEUE, result_tid)
    channel.Close()
def publish_error(error_url):
    """Publish a failed URL to the prediction-error queue, then disconnect."""
    channel = PikaQueue(RABBIT_HOST, RABBIT_PORT, RABBIT_ACCOUNT, RABBIT_PASSWORD)
    channel.AddToQueue(PREDICT_ERROR_QUEUE, error_url)
    channel.Close()
|
import concurrent.futures
import platform
import tkinter as tk
import traceback
from io import BytesIO
from tkinter import ttk, messagebox
from typing import Tuple, List, Optional, Callable, Union
from urllib.request import urlopen
from thonny import tktextext, get_workbench
from thonny.ui_utils import scrollbar_style, lookup_style_option
from .htmltext import FormData, HtmlText, HtmlRenderer
EDITOR_CONTENT_NAME = "$EDITOR_CONTENT"
_images_by_urls = {}
class ExercisesView(ttk.Frame):
    """Scrollable HTML view for exercise pages served by an exercise provider.

    Page and image fetches run on a thread pool; the Tk thread polls the
    resulting futures every 200 ms instead of blocking the UI.
    """

    def __init__(self, master, exercise_provider_class):
        # Set these before any widget construction so early callbacks can
        # already see them.
        self._destroyed = False
        self._poll_scheduler = None
        super().__init__(master, borderwidth=0, relief="flat")
        self._provider = exercise_provider_class(self)
        self._executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=self._provider.get_max_threads()
        )
        self._page_future = None  # type: Optional[concurrent.futures.Future]
        self._image_futures = {}
        self.columnconfigure(0, weight=1)
        self.rowconfigure(1, weight=1)
        self.vert_scrollbar = ttk.Scrollbar(
            self, orient=tk.VERTICAL, style=scrollbar_style("Vertical")
        )
        self.vert_scrollbar.grid(row=0, column=1, sticky=tk.NSEW, rowspan=2)
        self.hor_scrollbar = ttk.Scrollbar(
            self, orient=tk.HORIZONTAL, style=scrollbar_style("Horizontal")
        )
        self.hor_scrollbar.grid(row=2, column=0, sticky=tk.NSEW)
        tktextext.fixwordbreaks(tk._default_root)
        self.init_header(row=0, column=0)
        spacer = ttk.Frame(self, height=1)
        spacer.grid(row=1, sticky="nsew")
        self._html_widget = HtmlText(
            master=self,
            renderer_class=ExerciseHtmlRenderer,
            link_and_form_handler=self._on_request_new_page,
            image_requester=self._on_request_image,
            read_only=True,
            wrap="word",
            font="TkDefaultFont",
            padx=0,
            pady=0,
            insertwidth=0,
            borderwidth=0,
            highlightthickness=0,
            yscrollcommand=self.vert_scrollbar.set,
            xscrollcommand=self.hor_scrollbar.set,
        )
        self._html_widget.grid(row=1, column=0, sticky="nsew")
        self.vert_scrollbar["command"] = self._html_widget.yview
        self.hor_scrollbar["command"] = self._html_widget.xview
        # (The original reset self._poll_scheduler = None a second time here;
        # it is already None, so the duplicate assignment was dropped.)
        # TODO: go to last page from previous session?
        self.go_to("/")
        self._poll_provider_responses()

    def _poll_provider_responses(self):
        """Periodic Tk-thread callback publishing any finished futures."""
        if self._destroyed:
            return
        if self._page_future is not None and self._page_future.done():
            # Cancelled futures won't make it here
            assert not self._page_future.cancelled()
            exc = self._page_future.exception()
            if exc is not None:
                # Show the provider's traceback instead of a page.
                self._set_page_html(
                    "<pre>%s</pre>"
                    % "".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
                )
            else:
                html, breadcrumbs = self._page_future.result()
                self._set_page_html(html)
                self.breadcrumbs_bar.set_links(breadcrumbs)
            self._page_future = None
        remaining_img_futures = {}
        for url, fut in self._image_futures.items():
            if fut.done():
                try:
                    data = fut.result()
                except Exception:
                    # Narrowed from a bare except so KeyboardInterrupt /
                    # SystemExit are not swallowed.
                    traceback.print_exc()
                else:
                    # Reuse the already-fetched result rather than calling
                    # fut.result() a second time.
                    self._update_image(url, data)
            else:
                remaining_img_futures[url] = fut
        self._image_futures = remaining_img_futures
        self._poll_scheduler = self.after(200, self._poll_provider_responses)

    def init_header(self, row, column):
        """Build the breadcrumbs bar and the overlayed menu button."""
        header_frame = ttk.Frame(self, style="ViewToolbar.TFrame")
        header_frame.grid(row=row, column=column, sticky="nsew")
        header_frame.columnconfigure(0, weight=1)
        self.breadcrumbs_bar = BreadcrumbsBar(header_frame, self._on_request_new_page)
        self.breadcrumbs_bar.grid(row=0, column=0, sticky="nsew")
        # self.menu_button = ttk.Button(header_frame, text="≡ ", style="ViewToolbar.Toolbutton")
        self.menu_button = ttk.Button(
            header_frame, text=" ≡ ", style="ViewToolbar.Toolbutton", command=self.post_button_menu
        )
        self._button_menu = tk.Menu(header_frame, tearoff=False)
        # self.menu_button.grid(row=0, column=1, sticky="ne")
        self.menu_button.place(anchor="ne", rely=0, relx=1)

    def _on_request_new_page(self, target, form_data=None):
        # Provider-relative urls start with "/"; everything else is external.
        if target.startswith("/"):
            self.go_to(target, form_data=form_data)
        else:
            get_workbench().open_url(target)

    def _on_request_image(self, url):
        assert url is not None
        # Deduplicate: one in-flight fetch per url.
        if url not in self._image_futures:
            self._image_futures[url] = self._executor.submit(self._provider.get_image, url)

    def post_button_menu(self):
        """Rebuild and show the provider-defined menu under the menu button."""
        self._button_menu.delete(0, "end")
        items = self._provider.get_menu_items()
        if not items:
            return
        for label, handler in items:
            if label == "-":
                self._button_menu.add_separator()
            else:
                if isinstance(handler, str):
                    # String handlers are provider urls; bind the current value
                    # as a default arg to avoid late-binding closure bugs.
                    def command(url=handler):
                        self.go_to(url)
                else:
                    command = handler
                self._button_menu.add_command(label=label, command=command)
        self._button_menu.tk_popup(
            self.menu_button.winfo_rootx(),
            self.menu_button.winfo_rooty() + self.menu_button.winfo_height(),
        )

    def go_to(self, url, form_data=None):
        """Request a provider page asynchronously, showing a wait notice."""
        if form_data is None:
            form_data = FormData()
        assert url.startswith("/")
        # Supersede any page request still in flight.
        if self._page_future is not None:
            self._page_future.cancel()
        self._page_future = self._executor.submit(
            self._provider.get_html_and_breadcrumbs, url, form_data)
        self._set_page_html("<p>Palun oota...</p>")

    def _set_page_html(self, html):
        self._html_widget.set_html_content(html)

    def _make_tk_image(self, data):
        """Decode image bytes into a Tk image, downscaling via Pillow if available."""
        try:
            from PIL import Image
            from PIL.ImageTk import PhotoImage

            with BytesIO(data) as fp:
                fp.seek(0)
                pil_img = Image.open(fp)
                # Resize while keeping the aspect ratio
                basewidth = 250
                wpercent = basewidth / float(pil_img.size[0])
                hsize = int(float(pil_img.size[1]) * wpercent)
                # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the
                # same filter under its long-standing official name.
                return PhotoImage(pil_img.resize((basewidth, hsize), Image.LANCZOS))
        except ImportError:
            # Pillow not installed: fall back to Tk's native decoder.
            return tk.PhotoImage(data=data)

    def _update_image(self, url, data):
        try:
            tk_img = self._make_tk_image(data)
        except Exception:
            traceback.print_exc()
            return
        # Cache for synchronous reuse by the renderer.
        _images_by_urls[url] = tk_img
        self._html_widget.update_image(url, tk_img)

    def destroy(self):
        """Cancel the poll loop before tearing down the widget tree."""
        if self._poll_scheduler is not None:
            try:
                self.after_cancel(self._poll_scheduler)
                self._poll_scheduler = None
            except Exception:
                pass
        super(ExercisesView, self).destroy()
        self._destroyed = True
class BreadcrumbsBar(tktextext.TweakableText):
    """Read-only text widget showing clickable 'a / b / c' navigation links."""

    def __init__(self, master, click_handler):
        super(BreadcrumbsBar, self).__init__(
            master,
            borderwidth=0,
            relief="flat",
            height=1,
            font="TkDefaultFont",
            wrap="word",
            padx=6,
            pady=5,
            insertwidth=0,
            highlightthickness=0,
            background=lookup_style_option("ViewToolbar.TFrame", "background"),
            read_only=True,
        )
        # Suppresses update_height() re-entrancy while set_links edits text.
        self._changing = False
        self.bind("<Configure>", self.update_height, True)
        self.tag_configure("_link", foreground=lookup_style_option("Url.TLabel", "foreground"))
        self.tag_configure("_underline", underline=True)
        self.tag_bind("_link", "<1>", self._link_click)
        self.tag_bind("_link", "<Enter>", self._link_enter)
        self.tag_bind("_link", "<Leave>", self._link_leave)
        self.tag_bind("_link", "<Motion>", self._link_motion)
        self._click_handler = click_handler

    def set_links(self, links):
        """Replace the bar content with (key, label) pairs separated by '/'."""
        # NBSP and other space-like weird chars don't work properly in Mac
        spacer = " " if platform.system() == "Darwin" else "\u00a0"
        try:
            self._changing = True
            self.direct_delete("1.0", "end")
            if not links:
                return
            # remove trailing newline
            links = links[:]
            links[-1] = (links[-1][0], links[-1][1].rstrip("\r\n"))
            for key, label in links:
                self.direct_insert("end", "/" + spacer)
                if not label.endswith("\n"):
                    label += " "
                # The link's key rides along as an extra text tag so
                # _link_click can recover it later.
                self.direct_insert("end", label, ("_link", key))
        finally:
            self._changing = False
            self.update_height()

    def update_height(self, event=None):
        """Resize the widget to exactly fit the wrapped display-line count."""
        if self._changing:
            return
        height = self.tk.call((self, "count", "-update", "-displaylines", "1.0", "end"))
        self.configure(height=height)

    def _link_click(self, event):
        # The only non-internal tag under the mouse is the link key.
        mouse_index = self.index("@%d,%d" % (event.x, event.y))
        user_tags = [
            tag for tag in self.tag_names(mouse_index) if tag not in ["_link", "_underline"]
        ]
        if len(user_tags) == 1:
            self._click_handler(user_tags[0])

    def _get_link_range(self, event):
        # Range of the link at (or just before) the mouse position.
        mouse_index = self.index("@%d,%d" % (event.x, event.y))
        return self.tag_prevrange("_link", mouse_index + "+1c")

    def _link_motion(self, event):
        # Underline only the link currently hovered.
        self.tag_remove("_underline", "1.0", "end")
        dir_range = self._get_link_range(event)
        if dir_range:
            range_start, range_end = dir_range
            self.tag_add("_underline", range_start, range_end)

    def _link_enter(self, event):
        self.config(cursor="hand2")

    def _link_leave(self, event):
        self.config(cursor="")
        self.tag_remove("_underline", "1.0", "end")
class ExerciseHtmlRenderer(HtmlRenderer):
    """HtmlRenderer that injects editor content into forms and loads images
    asynchronously via the owning view."""

    def _expand_field_value(self, value_holder, attrs):
        # The magic hidden field $EDITOR_CONTENT is replaced at submit time
        # with the text of the currently active editor.
        if attrs["type"] == "hidden" and attrs["name"] == EDITOR_CONTENT_NAME:
            value = get_workbench().get_editor_notebook().get_current_editor_content()
            if value is None:
                # Estonian: "Cannot submit" / "No active editor. Nothing to submit."
                messagebox.showerror("Ei saa esitada", "Puudub aktiivne redaktor. Ei ole midagi esitada.", master=self)
                # False aborts the form submission.
                return False
            else:
                return value
        else:
            return super(ExerciseHtmlRenderer, self)._expand_field_value(value_holder, attrs)

    def _get_image(self, name):
        # Previously seen images can be given synchronously
        if name in _images_by_urls:
            return _images_by_urls[name]
        if self._image_requester is not None:
            # others should be requested asynchronously
            self._image_requester(name)
        # None tells the renderer to show a placeholder until the image lands.
        return None
class ExerciseProvider:
    """Base interface for pluggable exercise back-ends used by ExercisesView."""

    def get_html_and_breadcrumbs(self, url: str, form_data: FormData) -> Tuple[str, List[Tuple[str, str]]]:
        """Return (page_html, breadcrumbs) for *url*; runs on a worker thread."""
        raise NotImplementedError()

    def get_image(self, url) -> bytes:
        """Return raw image bytes; default implementation downloads over HTTP."""
        return urlopen(url).read()

    def get_max_threads(self) -> int:
        # Upper bound for the view's ThreadPoolExecutor worker count.
        return 10

    def get_menu_items(self) -> List[Tuple[str, Union[str, Callable, None]]]:
        """
        This will be called each time the user clicks on the menu button.
        First item in each pair is Text of the menu item ("-" if you want to create a separator)
        Second item:
            str is interpreted as a provider url, fetched in a thread (just like clicking on a link)
            None means the item is not available at this moment
            a callable is executed in UI thread (without arguments)
        """
        return []
|
import numpy as np
import scipy.sparse as spsp
import seaborn as sns
import scedar.eda as eda
import matplotlib as mpl
mpl.use("agg", warn=False) # noqa
import matplotlib.pyplot as plt
import pytest
class TestSparseSampleFeatureMatrix(object):
"""docstring for TestSparseSampleFeatureMatrix"""
sfm5x10_arr = spsp.csr_matrix(np.random.ranf(50).reshape(5, 10))
sfm3x3_arr = spsp.csr_matrix(np.random.ranf(9).reshape(3, 3))
sfm5x10_lst = spsp.csr_matrix(
list(map(list, np.random.ranf(50).reshape(5, 10))))
plt_arr = spsp.csr_matrix(np.arange(60).reshape(6, 10))
plt_sdm = eda.SampleFeatureMatrix(plt_arr,
sids=list("abcdef"),
fids=list(map(lambda i: 'f{}'.format(i),
range(10))))
# array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
# [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
# [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
# [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
# [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
# [50, 51, 52, 53, 54, 55, 56, 57, 58, 59]])
ref_plt_f_sum = np.arange(0, 501, 100) + np.arange(10).sum()
ref_plt_s_sum = np.arange(0, 55, 6) + np.arange(0, 51, 10).sum()
ref_plt_f_mean = ref_plt_f_sum / 10
ref_plt_s_mean = ref_plt_s_sum / 6
ref_plt_f_cv = np.arange(10).std(ddof=1) / ref_plt_f_mean
ref_plt_s_cv = np.arange(0, 51, 10).std(ddof=1) / ref_plt_s_mean
ref_plt_f_gc = np.apply_along_axis(eda.stats.gc1d, 1, plt_arr.toarray())
ref_plt_s_gc = np.apply_along_axis(eda.stats.gc1d, 0, plt_arr.toarray())
ref_plt_f_a15 = np.array([0, 5, 10, 10, 10, 10])
ref_plt_s_a35 = np.array([2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
def test_init_x_none(self):
with pytest.raises(Exception) as excinfo:
eda.SampleFeatureMatrix(None)
def test_init_x_bad_type(self):
with pytest.raises(Exception) as excinfo:
eda.SampleFeatureMatrix([[0, 1], ['a', 2]])
def test_init_x_1d(self):
with pytest.raises(Exception) as excinfo:
eda.SampleFeatureMatrix([1, 2, 3])
def test_init_dup_sfids(self):
with pytest.raises(Exception) as excinfo:
eda.SampleFeatureMatrix(self.sfm5x10_lst, [0, 0, 1, 2, 3])
with pytest.raises(Exception) as excinfo:
eda.SampleFeatureMatrix(
self.sfm5x10_lst, ['0', '0', '1', '2', '3'])
with pytest.raises(Exception) as excinfo:
eda.SampleFeatureMatrix(self.sfm5x10_lst, None, [0, 0, 1, 2, 3])
with pytest.raises(Exception) as excinfo:
eda.SampleFeatureMatrix(self.sfm5x10_lst, None,
['0', '0', '1', '2', '3'])
def test_init_empty_x_sfids(self):
sfm1 = eda.SampleFeatureMatrix(np.array([[], []]), None, [])
assert sfm1._x.shape == (2, 0)
assert sfm1._sids.shape == (2,)
assert sfm1._fids.shape == (0,)
np.testing.assert_equal(sfm1.s_sum(), [])
np.testing.assert_equal(sfm1.f_sum(), [0, 0])
np.testing.assert_equal(sfm1.s_cv(), [])
np.testing.assert_equal(np.isnan(sfm1.f_cv()), [True, True])
sfm2 = eda.SampleFeatureMatrix(np.empty((0, 0)))
assert sfm2._x.shape == (0, 0)
assert sfm2._sids.shape == (0,)
assert sfm2._fids.shape == (0,)
np.testing.assert_equal(sfm2.s_sum(), [])
np.testing.assert_equal(sfm2.f_sum(), [])
np.testing.assert_equal(sfm2.s_cv(), [])
np.testing.assert_equal(sfm2.f_cv(), [])
    # --- Constructor validation tests: each bad-input case must raise.
    def test_init_wrong_sid_len(self):
        # sid count must equal the number of rows (samples) of x.
        # wrong sid size
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm5x10_lst, list(range(10)), list(range(5)))
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(self.sfm5x10_lst, list(range(10)))
    def test_init_wrong_fid_len(self):
        # fid count must equal the number of columns (features) of x.
        # wrong fid size
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm5x10_lst, list(range(5)), list(range(2)))
    def test_init_wrong_sfid_len(self):
        # wrong sid and fid sizes
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm5x10_lst, list(range(10)), list(range(10)))
    def test_init_non1d_sfids(self):
        # sids/fids must be 1D; 2D arrays are rejected on either side.
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(self.sfm3x3_arr, np.array([[0], [1], [2]]),
                                    np.array([[0], [1], [1]]))
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(self.sfm3x3_arr, np.array([[0], [1], [2]]),
                                    np.array([0, 1, 2]))
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(self.sfm3x3_arr, np.array([0, 1, 2]),
                                    np.array([[0], [1], [2]]))
    def test_init_bad_sid_type(self):
        # sid elements must be of a supported scalar type: bools, nested
        # lists/tuples, and raw np.ndarray containers are all rejected.
        # (Note: (0) is just the int 0, so the tuple cases mix types.)
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm3x3_arr, [False, True, 2], [0, 1, 1])
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm3x3_arr, [[0], [0, 1], 2], [0, 1, 1])
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm3x3_arr, np.array([0, 1, 2]), [0, 1, 1])
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm3x3_arr, [(0), (0, 1), 2], [0, 1, 1])
    def test_init_bad_fid_type(self):
        # Same type rules apply to fids.
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm3x3_arr, [0, 1, 2], [False, True, 2])
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm3x3_arr, [0, 1, 2], [[0], [0, 1], 2])
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm3x3_arr, [0, 1, 2], [(0), (0, 1), 2])
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm3x3_arr, [0, 1, 2], np.array([0, 1, 2]))
    def test_valid_init(self):
        # None ids are allowed, as are single-row / single-column matrices.
        eda.SampleFeatureMatrix(
            self.sfm5x10_arr, list(range(5)), list(range(10)))
        eda.SampleFeatureMatrix(self.sfm5x10_arr, None, list(range(10)))
        eda.SampleFeatureMatrix(self.sfm5x10_arr, list(range(5)), None)
        eda.SampleFeatureMatrix(np.arange(10).reshape(-1, 1))
        eda.SampleFeatureMatrix(np.arange(10).reshape(1, -1))
    def test_ind_x(self):
        # ind_x subsets by positional indices along samples (arg 1) and
        # features (arg 2); omitted or None selects everything.
        sids = list("abcdef")
        fids = list(range(10, 20))
        sfm = eda.SampleFeatureMatrix(
            np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
        # select sf
        ss_sfm = sfm.ind_x([0, 5], list(range(9)))
        assert ss_sfm._x.shape == (2, 9)
        assert ss_sfm.sids == ['a', 'f']
        assert ss_sfm.fids == list(range(10, 19))
        # select with Default
        ss_sfm = sfm.ind_x()
        assert ss_sfm._x.shape == (6, 10)
        assert ss_sfm.sids == list("abcdef")
        assert ss_sfm.fids == list(range(10, 20))
        # select with None
        ss_sfm = sfm.ind_x(None, None)
        assert ss_sfm._x.shape == (6, 10)
        assert ss_sfm.sids == list("abcdef")
        assert ss_sfm.fids == list(range(10, 20))
        # select non-existent inds
        # positional lookup raises IndexError (contrast with id_x/ValueError)
        with pytest.raises(IndexError) as excinfo:
            sfm.ind_x([6])
        with pytest.raises(IndexError) as excinfo:
            sfm.ind_x(None, ['a'])
    def test_ind_x_empty(self):
        # Empty index lists yield empty (but well-formed) slices per axis.
        sids = list("abcdef")
        fids = list(range(10, 20))
        sfm = eda.SampleFeatureMatrix(
            np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
        empty_s = sfm.ind_x([])
        assert empty_s._x.shape == (0, 10)
        assert empty_s._sids.shape == (0,)
        assert empty_s._fids.shape == (10,)
        empty_f = sfm.ind_x(None, [])
        assert empty_f._x.shape == (6, 0)
        assert empty_f._sids.shape == (6,)
        assert empty_f._fids.shape == (0,)
        empty_sf = sfm.ind_x([], [])
        assert empty_sf._x.shape == (0, 0)
        assert empty_sf._sids.shape == (0,)
        assert empty_sf._fids.shape == (0,)
    def test_id_x(self):
        # id_x subsets by sample/feature *ids* rather than positions;
        # otherwise mirrors ind_x (None / omitted selects everything).
        sids = list("abcdef")
        fids = list(range(10, 20))
        sfm = eda.SampleFeatureMatrix(
            np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
        # select sf
        ss_sfm = sfm.id_x(['a', 'f'], list(range(10, 15)))
        assert ss_sfm._x.shape == (2, 5)
        assert ss_sfm.sids == ['a', 'f']
        assert ss_sfm.fids == list(range(10, 15))
        # select with Default
        ss_sfm = sfm.id_x()
        assert ss_sfm._x.shape == (6, 10)
        assert ss_sfm.sids == list("abcdef")
        assert ss_sfm.fids == list(range(10, 20))
        # select with None
        ss_sfm = sfm.id_x(None, None)
        assert ss_sfm._x.shape == (6, 10)
        assert ss_sfm.sids == list("abcdef")
        assert ss_sfm.fids == list(range(10, 20))
        # select non-existent inds
        # id lookup raises ValueError
        with pytest.raises(ValueError) as excinfo:
            sfm.id_x([6])
        with pytest.raises(ValueError) as excinfo:
            sfm.id_x(None, ['a'])
    def test_id_x_empty(self):
        # Empty id lists yield empty (but well-formed) slices per axis.
        sids = list("abcdef")
        fids = list(range(10, 20))
        sfm = eda.SampleFeatureMatrix(
            np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
        empty_s = sfm.id_x([])
        assert empty_s._x.shape == (0, 10)
        assert empty_s._sids.shape == (0,)
        assert empty_s._fids.shape == (10,)
        empty_f = sfm.id_x(None, [])
        assert empty_f._x.shape == (6, 0)
        assert empty_f._sids.shape == (6,)
        assert empty_f._fids.shape == (0,)
        empty_sf = sfm.id_x([], [])
        assert empty_sf._x.shape == (0, 0)
        assert empty_sf._sids.shape == (0,)
        assert empty_sf._fids.shape == (0,)
    # --- Regression-scatter plot tests (pytest-mpl): each test returns a
    # matplotlib Figure that pytest-mpl compares against a stored baseline.
    @pytest.mark.mpl_image_compare
    def test_s_ind_regression_scatter_ax(self):
        # Drawing onto a caller-supplied Axes must still return the Figure.
        fig, axs = plt.subplots(ncols=2)
        fig = self.plt_sdm.s_ind_regression_scatter(
            0, 1, figsize=(5, 5), ax=axs[0], ci=None)
        plt.close()
        return fig
    @pytest.mark.mpl_image_compare
    def test_s_ind_regression_scatter(self):
        return self.plt_sdm.s_ind_regression_scatter(
            0, 1, figsize=(5, 5), ci=None)
    @pytest.mark.mpl_image_compare
    def test_s_id_regression_scatter(self):
        return self.plt_sdm.s_id_regression_scatter(
            "a", "b", feature_filter=[1, 2, 3], figsize=(5, 5), ci=None)
    @pytest.mark.mpl_image_compare
    def test_s_ind_regression_scatter_custom_labs(self):
        return self.plt_sdm.s_ind_regression_scatter(
            0, 1, xlab='X', ylab='Y', figsize=(5, 5), ci=None)
    @pytest.mark.mpl_image_compare
    def test_s_ind_regression_scatter_custom_bool_ff(self):
        return self.plt_sdm.s_ind_regression_scatter(
            0, 1, feature_filter=[True]*2 + [False]*8, figsize=(5, 5), ci=None)
    @pytest.mark.mpl_image_compare
    def test_s_ind_regression_scatter_custom_int_ff(self):
        return self.plt_sdm.s_ind_regression_scatter(
            0, 1, feature_filter=[0, 1], figsize=(5, 5), ci=None)
    @pytest.mark.mpl_image_compare
    def test_s_ind_regression_scatter_custom_func_ff(self):
        # feature_filter may also be a predicate over (x, y) value pairs.
        return self.plt_sdm.s_ind_regression_scatter(
            0, 1,
            feature_filter=lambda x, y: (x in (0, 1, 2)) and (10 < y < 12),
            figsize=(5, 5), ci=None)
    @pytest.mark.mpl_image_compare
    def test_f_ind_regression_scatter_custom_func_sf(self):
        # plt_sdm = eda.SampleFeatureMatrix(
        #     plt_arr,
        #     sids=list("abcdef"),
        #     fids=list(map(lambda i: 'f{}'.format(i),
        #                   range(10))))
        # array([[ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9],
        #        [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
        #        [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
        #        [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
        #        [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
        #        [50, 51, 52, 53, 54, 55, 56, 57, 58, 59]])
        return self.plt_sdm.f_ind_regression_scatter(
            0, 1,
            sample_filter=lambda x, y: (x in (0, 10, 20)) and (10 < y < 30),
            figsize=(5, 5), ci=None)
    def test_f_id_ind_x_vec(self):
        # f_ind_x_vec / f_id_x_vec must agree and return the column of the
        # reference array shown above ([0, 10, ...] is column 0).
        x = self.plt_sdm.f_ind_x_vec(0)
        x2 = self.plt_sdm.f_id_x_vec('f0')
        np.testing.assert_equal(x, x2)
        np.testing.assert_equal(x, [0, 10, 20, 30, 40, 50])
        x3 = self.plt_sdm.f_ind_x_vec(6)
        x4 = self.plt_sdm.f_id_x_vec('f6')
        np.testing.assert_equal(x3, x4)
        np.testing.assert_equal(x3, [6, 16, 26, 36, 46, 56])
    @pytest.mark.mpl_image_compare
    def test_f_ind_regression_scatter_no_ff(self):
        return self.plt_sdm.f_ind_regression_scatter(
            0, 1, figsize=(5, 5), ci=None)
    @pytest.mark.mpl_image_compare
    def test_f_ind_regression_scatter_ind_ff(self):
        return self.plt_sdm.f_ind_regression_scatter(
            0, 1, sample_filter=[0, 2, 5], figsize=(5, 5), ci=None)
    @pytest.mark.mpl_image_compare
    def test_f_ind_regression_scatter_labs(self):
        return self.plt_sdm.f_ind_regression_scatter(
            0, 1, sample_filter=[0, 2, 5], figsize=(5, 5), title='testregscat',
            xlab='x', ylab='y', ci=None)
    @pytest.mark.mpl_image_compare
    def test_f_id_regression_scatter(self):
        return self.plt_sdm.f_id_regression_scatter(
            'f5', 'f6', sample_filter=[0, 2, 5], figsize=(5, 5), ci=None)
    # --- Distribution plot tests (pytest-mpl). The filterwarnings marker
    # silences matplotlib's deprecation warning about the 'normed' kwarg.
    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_s_ind_dist_ax(self):
        # Drawing onto a caller-supplied Axes must still return the Figure.
        fig, axs = plt.subplots(ncols=2)
        fig = self.plt_sdm.s_ind_dist(0, figsize=(5, 5), ax=axs[0])
        plt.close()
        return fig
    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_s_ind_dist(self):
        return self.plt_sdm.s_ind_dist(0, figsize=(5, 5))
    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_s_id_dist(self):
        return self.plt_sdm.s_id_dist("a", feature_filter=[1, 2, 3],
                                      figsize=(5, 5))
    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_s_ind_dist_custom_labs(self):
        return self.plt_sdm.s_ind_dist(0, xlab='X', ylab='Y', figsize=(5, 5))
    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_s_ind_dist_custom_bool_ff(self):
        return self.plt_sdm.s_ind_dist(
            0, feature_filter=[True]*2 + [False]*8, title='testdist',
            figsize=(5, 5))
    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_s_ind_dist_custom_int_ff(self):
        return self.plt_sdm.s_ind_dist(
            0, feature_filter=[0, 1], figsize=(5, 5))
    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_s_ind_dist_custom_func_ff(self):
        # Filters for dist plots are single-value predicates (one axis only).
        return self.plt_sdm.s_ind_dist(
            0, feature_filter=lambda x: x in (0, 1, 2),
            figsize=(5, 5))
    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_f_ind_dist_custom_func_sf(self):
        return self.plt_sdm.f_ind_dist(
            0, sample_filter=lambda x: x in (0, 10, 20),
            figsize=(5, 5))
    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_f_ind_dist_no_ff(self):
        return self.plt_sdm.f_ind_dist(0, figsize=(5, 5))
    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_f_ind_dist_ind_ff(self):
        return self.plt_sdm.f_ind_dist(0, sample_filter=[0, 2, 5],
                                       figsize=(5, 5))
    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_f_ind_dist_labs(self):
        return self.plt_sdm.f_ind_dist(0, sample_filter=[0, 2, 5],
                                       figsize=(5, 5),
                                       xlab='x', ylab='y')
    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_f_id_dist(self):
        return self.plt_sdm.f_id_dist('f5', sample_filter=[0, 2, 5],
                                      figsize=(5, 5))
def test_getters(self):
tsfm = eda.SampleFeatureMatrix(np.arange(10).reshape(5, 2),
['a', 'b', 'c', '1', '2'],
['a', 'z'])
np.testing.assert_equal(tsfm.x, np.array(
np.arange(10).reshape(5, 2), dtype='float64'))
np.testing.assert_equal(tsfm.sids, np.array(['a', 'b', 'c', '1', '2']))
np.testing.assert_equal(tsfm.fids, np.array(['a', 'z']))
assert tsfm.x is not tsfm._x
assert tsfm.sids is not tsfm._sids
assert tsfm.fids is not tsfm._fids
# array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
# [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
# [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
# [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
# [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
# [50, 51, 52, 53, 54, 55, 56, 57, 58, 59]])
    # --- Summary-statistic vector tests. Expected values come from the
    # precomputed self.ref_plt_* fixtures; each [0, 1, 2] call only verifies
    # the filter argument is forwarded (result equals the first 3 entries).
    def test_f_sum(self):
        x = self.plt_sdm.f_sum()
        assert x.ndim == 1
        assert x.shape[0] == 6
        np.testing.assert_allclose(x, self.ref_plt_f_sum)
        # only need to test that filter has been passed correctly
        np.testing.assert_allclose(
            self.plt_sdm.f_sum([0, 1, 2]), self.ref_plt_f_sum[:3])
    def test_s_sum(self):
        x = self.plt_sdm.s_sum()
        assert x.ndim == 1
        assert x.shape[0] == 10
        np.testing.assert_allclose(x, self.ref_plt_s_sum)
        np.testing.assert_allclose(
            self.plt_sdm.s_sum([0, 1, 2]), self.ref_plt_s_sum[:3])
    def test_f_cv(self):
        # cv: coefficient of variation
        x = self.plt_sdm.f_cv()
        assert x.ndim == 1
        assert x.shape[0] == 6
        np.testing.assert_allclose(self.plt_sdm.f_cv(), self.ref_plt_f_cv)
        np.testing.assert_allclose(
            self.plt_sdm.f_cv([0, 1, 2]), self.ref_plt_f_cv[:3])
    def test_s_cv(self):
        x = self.plt_sdm.s_cv()
        assert x.ndim == 1
        assert x.shape[0] == 10
        np.testing.assert_allclose(x, self.ref_plt_s_cv)
        np.testing.assert_allclose(self.plt_sdm.s_cv([0, 1, 2]),
                                   self.ref_plt_s_cv[:3])
    def test_f_gc(self):
        x = self.plt_sdm.f_gc()
        assert x.ndim == 1
        assert x.shape[0] == 6
        np.testing.assert_allclose(x, self.ref_plt_f_gc)
        np.testing.assert_allclose(self.plt_sdm.f_gc([0, 1, 2]),
                                   self.ref_plt_f_gc[:3])
    def test_s_gc(self):
        x = self.plt_sdm.s_gc()
        assert x.ndim == 1
        assert x.shape[0] == 10
        np.testing.assert_allclose(x, self.ref_plt_s_gc)
        np.testing.assert_allclose(self.plt_sdm.s_gc([0, 1, 2]),
                                   self.ref_plt_s_gc[:3])
    def test_f_ath(self):
        # counts of entries above a threshold, per feature axis
        x = self.plt_sdm.f_n_above_threshold(15)
        assert x.ndim == 1
        assert x.shape[0] == 6
        np.testing.assert_allclose(x, self.ref_plt_f_a15)
    def test_s_ath(self):
        x = self.plt_sdm.s_n_above_threshold(35)
        assert x.ndim == 1
        assert x.shape[0] == 10
        np.testing.assert_allclose(x, self.ref_plt_s_a35)
    # Because summary dist plot calls hist_dens_plot immediately after
    # obtaining the summary statistics vector, the correctness of summary
    # statistics vector and hist_dens_plot implies the correctness of the
    # plots.
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_summary_stat_dist(self):
        # Smoke test only: each plotting call must run without raising.
        self.plt_sdm.f_sum_dist([0, 1, 2])
        self.plt_sdm.s_sum_dist([0, 1, 2])
        self.plt_sdm.f_cv_dist([0, 1, 2])
        self.plt_sdm.s_cv_dist([0, 1, 2])
        self.plt_sdm.f_gc_dist([0, 1, 2])
        self.plt_sdm.s_gc_dist([0, 1, 2])
        self.plt_sdm.f_n_above_threshold_dist(15)
        self.plt_sdm.s_n_above_threshold_dist(15)
|
from boiga.compose import (
compose2, compose3, compose4, compose5, compose6, compose7
)
from tests.typecheck_helper import TypecheckResult, typecheck
# Ideally, these are @dataclass, but not in Python 3.6
class _Flow0:
a: int
def __init__(self, a: int) -> None: self.a = a # noqa: E301
def __eq__(self, other: object) -> bool: # noqa: E301
return isinstance(other, _Flow0) and (self.a == other.a)
# _Flow1 ... _Flow7 mirror _Flow0: single-int value types that give every
# composition stage its own distinct static type for mypy to track.
class _Flow1: # noqa: E302
    b: int
    def __init__(self, b: int) -> None: self.b = b # noqa: E301
    def __eq__(self, other: object) -> bool: # noqa: E301
        return isinstance(other, _Flow1) and (self.b == other.b)
class _Flow2: # noqa: E302
    c: int
    def __init__(self, c: int) -> None: self.c = c # noqa: E301
    def __eq__(self, other: object) -> bool: # noqa: E301
        return isinstance(other, _Flow2) and (self.c == other.c)
class _Flow3: # noqa: E302
    d: int
    def __init__(self, d: int) -> None: self.d = d # noqa: E301
    def __eq__(self, other: object) -> bool: # noqa: E301
        return isinstance(other, _Flow3) and (self.d == other.d)
class _Flow4: # noqa: E302
    e: int
    def __init__(self, e: int) -> None: self.e = e # noqa: E301
    def __eq__(self, other: object) -> bool: # noqa: E301
        return isinstance(other, _Flow4) and (self.e == other.e)
class _Flow5: # noqa: E302
    f: int
    def __init__(self, f: int) -> None: self.f = f # noqa: E301
    def __eq__(self, other: object) -> bool: # noqa: E301
        return isinstance(other, _Flow5) and (self.f == other.f)
class _Flow6: # noqa: E302
    g: int
    def __init__(self, g: int) -> None: self.g = g # noqa: E301
    def __eq__(self, other: object) -> bool: # noqa: E301
        return isinstance(other, _Flow6) and (self.g == other.g)
class _Flow7: # noqa: E302
    h: int
    def __init__(self, h: int) -> None: self.h = h # noqa: E301
    def __eq__(self, other: object) -> bool: # noqa: E301
        return isinstance(other, _Flow7) and (self.h == other.h)
# Stage functions: _flowN consumes a _FlowN and produces a _Flow{N+1},
# carrying the same int forward, so compose* chains type-check end to end.
def _flow0(x: _Flow0) -> _Flow1: return _Flow1(x.a)
def _flow1(x: _Flow1) -> _Flow2: return _Flow2(x.b) # noqa: E302
def _flow2(x: _Flow2) -> _Flow3: return _Flow3(x.c) # noqa: E302
def _flow3(x: _Flow3) -> _Flow4: return _Flow4(x.d) # noqa: E302
def _flow4(x: _Flow4) -> _Flow5: return _Flow5(x.e) # noqa: E302
def _flow5(x: _Flow5) -> _Flow6: return _Flow6(x.f) # noqa: E302
def _flow6(x: _Flow6) -> _Flow7: return _Flow7(x.g) # noqa: E302
# Import prelude prepended to every snippet handed to mypy; the line numbers
# in the expected error strings ('<string>:4: ...') are offset by these 3 lines.
imports = [
    'from boiga.compose import compose2, compose3, compose4, compose5, compose6, compose7',
    'from tests.test_compose import _flow0, _flow1, _flow2, _flow3, _flow4, _flow5, _flow6',
    'from tests.test_compose import _Flow0, _Flow1, _Flow2, _Flow3, _Flow4, _Flow5, _Flow6, _Flow7',
]
def typecheck_flow(code: str) -> TypecheckResult:
    """Type-check *code* with the shared import prelude prepended."""
    return typecheck([*imports, code])
class TestCompose2:
    """compose2: runtime result plus mypy accept/reject cases."""
    def test_valid_flow_with_call(self) -> None:
        assert compose2(_flow0, _flow1)(_Flow0(42)) == _Flow2(42)
    def test_valid_flow_typechecks(self) -> None:
        result = typecheck_flow('compose2(_flow0, _flow1)')
        assert result.ok
    def test_invalid_flow_fns(self) -> None:
        # Mismatched stage types must fail inference on the offending arg.
        result = typecheck_flow('compose2(_flow0, _flow0)')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: Cannot infer type argument 2 of "compose2"'
        ]
    def test_invalid_flow_call(self) -> None:
        result = typecheck_flow('compose2(_flow0, _flow1)(_Flow2(42))')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: Argument 1 has incompatible type "_Flow2"; expected "_Flow0"'
        ]
    def test_invalid_flow_result(self) -> None:
        result = typecheck_flow('compose2(_flow0, _flow1)(_Flow0(42)).foo')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: "_Flow2" has no attribute "foo"'
        ]
class TestCompose3:
    """compose3: same cases as TestCompose2, one stage longer."""
    def test_valid_flow_with_call(self) -> None:
        flow = compose3(_flow0, _flow1, _flow2)
        assert flow(_Flow0(42)) == _Flow3(42)
    def test_valid_flow_typechecks(self) -> None:
        result = typecheck_flow('compose3(_flow0, _flow1, _flow2)')
        assert result.ok
    def test_invalid_flow_fns(self) -> None:
        result = typecheck_flow('compose3(_flow0, _flow1, _flow0)')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: Cannot infer type argument 3 of "compose3"'
        ]
    def test_invalid_flow_call(self) -> None:
        result = typecheck_flow('compose3(_flow0, _flow1, _flow2)(_Flow2(42))')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: Argument 1 has incompatible type "_Flow2"; expected "_Flow0"'
        ]
    def test_invalid_flow_result(self) -> None:
        result = typecheck_flow('compose3(_flow0, _flow1, _flow2)(_Flow0(42)).foo')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: "_Flow3" has no attribute "foo"'
        ]
class TestCompose4:
    """compose4: same cases, four stages."""
    def test_valid_flow_with_call(self) -> None:
        flow = compose4(_flow0, _flow1, _flow2, _flow3)
        assert flow(_Flow0(42)) == _Flow4(42)
    def test_valid_flow_typechecks(self) -> None:
        result = typecheck_flow('compose4(_flow0, _flow1, _flow2, _flow3)')
        assert result.ok
    def test_invalid_flow_fns(self) -> None:
        result = typecheck_flow('compose4(_flow0, _flow1, _flow2, _flow0)')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: Cannot infer type argument 4 of "compose4"'
        ]
    def test_invalid_flow_call(self) -> None:
        result = typecheck_flow('compose4(_flow0, _flow1, _flow2, _flow3)(_Flow2(42))')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: Argument 1 has incompatible type "_Flow2"; expected "_Flow0"'
        ]
    def test_invalid_flow_result(self) -> None:
        result = typecheck_flow('compose4(_flow0, _flow1, _flow2, _flow3)(_Flow0(42)).foo')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: "_Flow4" has no attribute "foo"'
        ]
class TestCompose5:
    """compose5: same cases, five stages."""
    def test_valid_flow_with_call(self) -> None:
        flow = compose5(_flow0, _flow1, _flow2, _flow3, _flow4)
        assert flow(_Flow0(42)) == _Flow5(42)
    def test_valid_flow_typechecks(self) -> None:
        result = typecheck_flow('compose5(_flow0, _flow1, _flow2, _flow3, _flow4)')
        assert result.ok
    def test_invalid_flow_fns(self) -> None:
        result = typecheck_flow('compose5(_flow0, _flow1, _flow2, _flow3, _flow0)')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: Cannot infer type argument 5 of "compose5"'
        ]
    def test_invalid_flow_call(self) -> None:
        result = typecheck_flow(
            'compose5(_flow0, _flow1, _flow2, _flow3, _flow4)(_Flow2(42))')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: Argument 1 has incompatible type "_Flow2"; expected "_Flow0"'
        ]
    def test_invalid_flow_result(self) -> None:
        result = typecheck_flow(
            'compose5(_flow0, _flow1, _flow2, _flow3, _flow4)(_Flow0(42)).foo')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: "_Flow5" has no attribute "foo"'
        ]
class TestCompose6:
    """compose6: same cases, six stages."""
    def test_valid_flow_with_call(self) -> None:
        flow = compose6(_flow0, _flow1, _flow2, _flow3, _flow4, _flow5)
        assert flow(_Flow0(42)) == _Flow6(42)
    def test_valid_flow_typechecks(self) -> None:
        result = typecheck_flow('compose6(_flow0, _flow1, _flow2, _flow3, _flow4, _flow5)')
        assert result.ok
    def test_invalid_flow_fns(self) -> None:
        result = typecheck_flow('compose6(_flow0, _flow1, _flow2, _flow3, _flow4, _flow0)')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: Cannot infer type argument 6 of "compose6"'
        ]
    def test_invalid_flow_call(self) -> None:
        result = typecheck_flow(
            'compose6(_flow0, _flow1, _flow2, _flow3, _flow4, _flow5)(_Flow2(42))')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: Argument 1 has incompatible type "_Flow2"; expected "_Flow0"'
        ]
    def test_invalid_flow_result(self) -> None:
        result = typecheck_flow(
            'compose6(_flow0, _flow1, _flow2, _flow3, _flow4, _flow5)(_Flow0(42)).foo')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: "_Flow6" has no attribute "foo"'
        ]
class TestCompose7:
    """compose7: same cases, seven stages."""
    def test_valid_flow_with_call(self) -> None:
        flow = compose7(_flow0, _flow1, _flow2, _flow3, _flow4, _flow5, _flow6)
        assert flow(_Flow0(42)) == _Flow7(42)
    def test_valid_flow_typechecks(self) -> None:
        result = typecheck_flow('compose7(_flow0, _flow1, _flow2, _flow3, _flow4, _flow5, _flow6)')
        assert result.ok
    def test_invalid_flow_fns(self) -> None:
        result = typecheck_flow('compose7(_flow0, _flow1, _flow2, _flow3, _flow4, _flow5, _flow0)')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: Cannot infer type argument 7 of "compose7"'
        ]
    def test_invalid_flow_call(self) -> None:
        result = typecheck_flow(
            'compose7(_flow0, _flow1, _flow2, _flow3, _flow4, _flow5, _flow6)(_Flow2(42))')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: Argument 1 has incompatible type "_Flow2"; expected "_Flow0"'
        ]
    def test_invalid_flow_result(self) -> None:
        result = typecheck_flow(
            'compose7(_flow0, _flow1, _flow2, _flow3, _flow4, _flow5, _flow6)(_Flow0(42)).foo')
        assert not result.ok
        assert result.errors == [
            '<string>:4: error: "_Flow7" has no attribute "foo"'
        ]
|
from .allnumbers import Numbers as __num__

# Languages for which pairwise converter functions are generated.
__languages = ['english', 'arabic', 'hindi', 'persian', 'bengali',
               'chinese_simple', 'chinese_complex', 'malayalam', 'thai', 'urdu']

# Dynamically export one `<src>_to_<dst>` bound method per ordered language
# pair (e.g. `english_to_arabic`).  getattr replaces the previous eval of a
# constructed source string: it is equivalent here, avoids the eval idiom,
# and fails with a clear AttributeError if Numbers lacks a converter.
for __language_1 in __languages:
    for __language_2 in __languages:
        if __language_1 != __language_2:
            locals()['{}_to_{}'.format(__language_1, __language_2)] = getattr(
                __num__(), '{}_to_{}'.format(__language_1, __language_2))
|
import numpy as np
import elfi
def uniform_prior(minv, maxv, name, **kwargs):
    """Uniform prior on [minv, maxv] (scipy parameterizes as loc, scale)."""
    return elfi.Prior("uniform", minv, maxv - minv, name=name)


def truncnorm_prior(minv, maxv, mean, std, name, **kwargs):
    """Normal(mean, std) prior truncated to [minv, maxv].

    scipy's truncnorm expects the bounds in standard-deviation units
    relative to the mean, hence the (v - mean) / std transform.
    """
    return elfi.Prior("truncnorm", (minv - mean)/std, (maxv - mean)/std, mean, std, name=name)


def beta_prior(a, b, name, **kwargs):
    """Beta(a, b) prior."""
    return elfi.Prior("beta", a, b, name=name)


def constant_prior(val, name, **kwargs):
    """Fixed (non-inferred) parameter value."""
    return elfi.Constant(val, name=name)


class ModelParams():
    """Holds the model's parameter specs (list of dicts) and derives the
    per-parameter inputs BOLFI needs (priors, bounds, noises, grids).

    Constant parameters are excluded from all inference-related accessors.
    """

    # Maps the "distr" field of a parameter spec to its prior factory.
    priors = {
        "uniform": uniform_prior,
        "truncnorm": truncnorm_prior,
        "beta": beta_prior,
        "constant": constant_prior,
    }

    def __init__(self, parameters):
        """Store *parameters* and prefix each name with its index
        ("p00_", "p01_", ...) so ordering stays deterministic."""
        self.parameters = parameters
        for i, p in enumerate(self.parameters):  # BOLFI works in mysterious ways
            p["name"] = "p{:02d}_{}".format(i, p["name"])

    def get_elfi_params(self):
        """Build one elfi node per parameter spec.

        Raises:
            ValueError: if a spec's "distr" is not a supported distribution.
        """
        ret = list()
        for p in self.parameters:
            if p["distr"] not in self.priors:
                raise ValueError("Unsupported distribution: {}".format(p["distr"]))
            ret.append(self.priors[p["distr"]](**p))
        return ret

    # NOTE: the accessors below previously compared `p["distr"] is not
    # "constant"` — an identity comparison against a str literal that only
    # works for interned strings (and warns on Python 3.8+).  They now use
    # value equality (!=) so specs loaded from e.g. JSON behave correctly.

    def get_bounds(self):
        """Dict name -> (minv, maxv) for each non-constant parameter."""
        return {p["name"]: (p["minv"], p["maxv"]) for p in self.parameters if p["distr"] != "constant"}

    def get_acq_noises(self):
        """Acquisition noise per non-constant parameter."""
        return [p["acq_noise"] for p in self.parameters if p["distr"] != "constant"]

    def get_lengthscales(self):
        """GP kernel lengthscale per non-constant parameter."""
        return [p["kernel_scale"] for p in self.parameters if p["distr"] != "constant"]

    def get_grid_tics(self, seed):
        """Per non-constant parameter: ntics evenly spaced tics within
        [minv, maxv], jittered by a seed-determined random offset."""
        ret = list()
        rs = np.random.RandomState(seed)
        for p in self.parameters:
            if p["distr"] != "constant":
                delta = (p["maxv"] - p["minv"])/float(p["ntics"])
                d = rs.uniform(0.0, delta)
                minv = p["minv"] + d
                maxv = p["maxv"] - (delta - d)
                tics = np.linspace(minv, maxv, p["ntics"]).tolist()
                ret.append(tics)
        return ret

    def get_L(self):
        """Lipschitz constant L per non-constant parameter."""
        return [p["L"] for p in self.parameters if p["distr"] != "constant"]
|
from flask import Flask
from flask_restful import Api
from resources.user import User, UserList, UserBy
# from resources.store import Store, StoreList

# Flask-RESTful app exposing User CRUD endpoints over a local SQLite DB.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
api = Api(app)
@app.before_first_request
def create_tables():
    # NOTE(review): `db` is only imported inside the __main__ guard below, so
    # this callback raises NameError when the app is served by a WSGI server
    # (gunicorn etc.) rather than `python <this file>` — confirm whether that
    # deployment path is intended to be supported.
    db.create_all()
api.add_resource(User, '/user/<string:username>')
api.add_resource(UserBy, '/user/<string:column>/<string:column_value>')
api.add_resource(UserList, '/users')
if __name__ == '__main__':
    # Imported here rather than at the top — presumably to avoid a circular
    # import with the resource modules; verify before moving it.
    from db import db
    db.init_app(app)
    app.run(debug=True)
|
class Nil:
    """Empty placeholder type; carries no state or behavior of its own."""
|
from django.conf import settings

# Register django-modeltranslation options for the SEO models, but only when
# the uPY SEO app is enabled and the site actually has multiple languages
# (registration is an import-time side effect, so it is guarded).
if settings.USE_UPY_SEO and len(settings.LANGUAGES) > 1:
    from modeltranslation.translator import translator, TranslationOptions
    from upy.contrib.seo.models import MetaSite, MetaNode, MetaPage
    class SiteTranslationOptions(TranslationOptions):
        # Site-wide meta fields that get one DB column per language.
        fields = ('title', 'description', 'keywords', 'author', 'content_type', 'robots', 'generator', 'html_head')
    class NodeTranslationOptions(TranslationOptions):
        fields = ('alias', 'title')
    class PageTranslationOptions(TranslationOptions):
        # Same as the site options minus 'generator' and 'html_head' extras.
        fields = ('title', 'description', 'keywords', 'author', 'content_type', 'robots', 'html_head')
    translator.register(MetaSite, SiteTranslationOptions)
    translator.register(MetaNode, NodeTranslationOptions)
    translator.register(MetaPage, PageTranslationOptions)
|
from botnet.helpers import load_json, save_json, is_channel_name
def test_load_save_json(tmp_file):
    # save_json then load_json must round-trip a dict (tmp_file is a fixture).
    data = {'key': 'value'}
    save_json(tmp_file, data)
    loaded_data = load_json(tmp_file)
    assert loaded_data == data
def test_is_channel_name():
    # IRC channel names start with '#'; plain nicknames/empty strings do not.
    assert is_channel_name('#channel')
    assert not is_channel_name('')
    assert not is_channel_name('nickname_')
|
from flask_restx import Namespace

# Namespace for version 1.0 of the Scholexplorer (Scholix) API.
api_v1 = Namespace('Scholix Version 1.0', title='Scholexplorer API 1.0', description="scholexplorer API version 1.0")
# Imported for its side effects — presumably apis.v1.methods attaches the
# route handlers to api_v1, so it must stay below the Namespace definition.
import apis.v1.methods
|
"""Zero-shot Classification related modeling class"""
from typing import Dict, List, Optional
from pororo.tasks.utils.base import PororoBiencoderBase, PororoFactoryBase
class PororoZeroShotFactory(PororoFactoryBase):
    """
    Zero-shot topic classification
    See also:
        https://joeddav.github.io/blog/2020/05/29/ZSL.html
    Korean (`brainbert.base.ko.kornli`)
        - dataset: KorNLI (Ham et al. 2020)
        - metric: N/A
    English (`roberta.base.en.nli`)
        - dataset: MNLI (Adina Williams et al. 2017)
        - metric: N/A
    Japanese (`jaberta.base.ja.nli`)
        - dataset: XNLI (Alexis Conneau et al. 2018)
        - metric: N/A
    Chinese (`zhberta.base.zh.nli`)
        - dataset: XNLI (Alexis Conneau et al. 2018)
        - metric: N/A
    Examples:
        >>> zsl = Pororo(task="zero-topic")
        >>> zsl("Who are you voting for in 2020?", ["business", "art & culture", "politics"])
        {'business': 33.23, 'art & culture': 8.33, 'politics': 96.12}
        >>> zsl = Pororo(task="zero-topic", lang="ko")
        >>> zsl('''라리가 사무국, 메시 아닌 바르사 지지..."바이 아웃 유효" [공식발표]''', ["스포츠", "사회", "정치", "경제", "생활/문화", "IT/과학"])
        {'스포츠': 94.15, '사회': 37.11, '정치': 74.26, '경제': 39.18, '생활/문화': 71.15, 'IT/과학': 34.71}
        >>> zsl('''장제원, 김종인 당무감사 추진에 “참 잔인들 하다”···정강정책 개정안은 “졸작”''', ["스포츠", "사회", "정치", "경제", "생활/문화", "IT/과학"])
        {'스포츠': 2.18, '사회': 56.1, '정치': 88.24, '경제': 16.17, '생활/문화': 66.13, 'IT/과학': 11.2}
        >>> zsl = Pororo(task="zero-topic", lang="ja")
        >>> zsl("香川 真司は、兵庫県神戸市垂水区出身のプロサッカー選手。元日本代表。ポジションはMF、FW。ボルシア・ドルトムント時代の2010-11シーズンでリーグ前半期17試合で8得点を記録し9シーズンぶりのリーグ優勝に貢献。キッカー誌が選定したブンデスリーガの年間ベスト イレブンに名を連ねた。", ["スポーツ", "政治", "技術"])
        {'スポーツ': 0.2, '政治': 99.71, '技術': 68.9}
        >>> zsl = Pororo(task="zero-topic", lang="zh")
        >>> zsl("商务部14日发布数据显示,今年前10个月,我国累计对外投资904.6亿美元,同比增长5.9%。", ["政治", "经济", "国际化"])
        {'政治': 33.72, '经济': 3.9, '国际化': 13.67}
    """
    def __init__(self, task: str, lang: str, model: Optional[str]):
        super().__init__(task, lang, model)
    @staticmethod
    def get_available_langs():
        return ["en", "ko", "ja", "zh"]
    @staticmethod
    def get_available_models():
        # One NLI-finetuned backbone per supported language.
        return {
            "ko": ["brainbert.base.ko.kornli"],
            "ja": ["jaberta.base.ja.nli"],
            "zh": ["zhberta.base.zh.nli"],
            "en": ["roberta.base.en.nli"],
        }
    def load(self, device: str):
        """
        Load user-selected task-specific model
        Args:
            device (str): device information
        Returns:
            object: User-selected task-specific model
        """
        # Each branch lazily imports only the backbone it needs, loads the
        # checkpoint in eval mode on `device`, and wraps it in the shared
        # PororoBertZeroShot frontend.
        if "brainbert" in self.config.n_model:
            from pororo.models.brainbert import BrainRobertaModel
            model = BrainRobertaModel.load_model(
                f"bert/{self.config.n_model}",
                self.config.lang,
            ).eval().to(device)
            return PororoBertZeroShot(model, self.config)
        if "jaberta" in self.config.n_model:
            from pororo.models.brainbert import JabertaModel
            model = JabertaModel.load_model(
                f"bert/{self.config.n_model}",
                self.config.lang,
            ).eval().to(device)
            return PororoBertZeroShot(model, self.config)
        if "zhberta" in self.config.n_model:
            from pororo.models.brainbert import ZhbertaModel
            model = ZhbertaModel.load_model(
                f"bert/{self.config.n_model}",
                self.config.lang,
            ).eval().to(device)
            return PororoBertZeroShot(model, self.config)
        if "roberta" in self.config.n_model:
            from pororo.models.brainbert import CustomRobertaModel
            model = CustomRobertaModel.load_model(
                f"bert/{self.config.n_model}",
                self.config.lang,
            ).eval().to(device)
            return PororoBertZeroShot(model, self.config)
        # NOTE(review): falls through to an implicit None when n_model matches
        # no known backbone — confirm whether upstream validation prevents this.
class PororoBertZeroShot(PororoBiencoderBase):
    """NLI-based zero-shot classifier: each candidate label is turned into a
    hypothesis via a language-specific template and scored against the input
    sentence with the model's sentence-pair classification head."""
    def __init__(self, model, config):
        super().__init__(config)
        self._model = model
        # Hypothesis templates, one per supported language
        # ("This sentence is about {label}.").
        self._template = {
            "ko": "이 문장은 {label}에 관한 것이다.",
            "ja": "この文は、{label}に関するものである。",
            "zh": "这句话是关于{label}的。",
            "en": "This sentence is about {label}.",
        }
    def predict(
        self,
        sent: str,
        labels: List[str],
        **kwargs,
    ) -> Dict[str, float]:
        """
        Conduct zero-shot classification
        Args:
            sent (str): sentence to be classified
            labels (List[str]): candidate labels
        Returns:
            Dict[str, float]: mapping from each input label to its confidence
                score in percent, rounded to 2 decimals
        """
        cands = [
            self._template[self.config.lang].format(label=label)
            for label in labels
        ]
        result = dict()
        for label, cand in zip(labels, cands):
            # The Korean encoder takes extra special-token arguments.
            if self.config.lang == "ko":
                tokens = self._model.encode(
                    sent,
                    cand,
                    add_special_tokens=True,
                    no_separator=False,
                )
            else:
                tokens = self._model.encode(
                    sent,
                    cand,
                    no_separator=False,
                )
            # throw away "neutral" (dim 1) and take the probability of "entail" (2) as the probability of the label being true
            pred = self._model.predict(
                "sentence_classification_head",
                tokens,
                return_logits=True,
            )[:, [0, 2]]
            prob = pred.softmax(dim=1)[:, 1].item() * 100
            result[label] = round(prob, 2)
        return result
|
import os
import random
import re
import requests
from PIL import Image
from validators.url import url
from userbot import CMD_HELP, bot
from userbot.tweet import (
bhautweet,
johnnytweet,
jtweet,
apjtweet,
moditweet,
sundartweet,
)
from userbot.utils import admin_cmd
# Matches emoji / pictographic codepoints so they can be stripped from text.
EMOJI_PATTERN = re.compile(
    "["
    "\U0001F1E0-\U0001F1FF"  # flags (iOS)
    "\U0001F300-\U0001F5FF"  # symbols & pictographs
    "\U0001F600-\U0001F64F"  # emoticons
    "\U0001F680-\U0001F6FF"  # transport & map symbols
    "\U0001F700-\U0001F77F"  # alchemical symbols
    "\U0001F780-\U0001F7FF"  # Geometric Shapes Extended
    "\U0001F800-\U0001F8FF"  # Supplemental Arrows-C
    "\U0001F900-\U0001F9FF"  # Supplemental Symbols and Pictographs
    "\U0001FA00-\U0001FA6F"  # Chess Symbols
    "\U0001FA70-\U0001FAFF"  # Symbols and Pictographs Extended-A
    "\U00002702-\U000027B0"  # Dingbats
    "]+"
)


def deEmojify(inputString: str) -> str:
    """Return *inputString* with every run of EMOJI_PATTERN characters removed."""
    return EMOJI_PATTERN.sub("", inputString)
# for nekobot
async def trumptweet(text):
r = requests.get(
f"https://nekobot.xyz/api/imagegen?type=trumptweet&text={text}"
).json()
geng = r.get("message")
kapak = url(geng)
if not kapak:
return "check syntax once more"
with open("gpx.png", "wb") as f:
f.write(requests.get(geng).content)
img = Image.open("gpx.png").convert("RGB")
img.save("gpx.webp", "webp")
return "gpx.webp"
async def changemymind(text):
r = requests.get(
f"https://nekobot.xyz/api/imagegen?type=changemymind&text={text}"
).json()
geng = r.get("message")
kapak = url(geng)
if not kapak:
return "check syntax once more"
with open("gpx.png", "wb") as f:
f.write(requests.get(geng).content)
img = Image.open("gpx.png").convert("RGB")
img.save("gpx.webp", "webp")
return "gpx.webp"
async def tweets(text1, text2):
r = requests.get(
f"https://nekobot.xyz/api/imagegen?type=tweet&text={text1}&username={text2}"
).json()
geng = r.get("message")
kapak = url(geng)
if not kapak:
return "check syntax once more"
with open("gpx.png", "wb") as f:
f.write(requests.get(geng).content)
img = Image.open("gpx.png").convert("RGB")
img.save("gpx.webp", "webp")
return "gpx.webp"
async def purge():
    """Delete the temporary sticker files left behind by the generators.

    Mirrors the original behavior exactly: the first OSError (e.g. gpx.png
    already gone) silently aborts the whole cleanup.
    """
    try:
        for leftover in ("gpx.png", "gpx.webp"):
            os.remove(leftover)
    except OSError:
        pass
# @register(outgoing=True, pattern=r"^\.trump(?: |$)(.*)")
@borg.on(admin_cmd(outgoing=True, pattern="trump ?(.*)"))
async def trump(event):
text = event.pattern_match.group(1)
text = re.sub("&", "", text)
reply_to_id = event.message
if event.reply_to_msg_id:
reply_to_id = await event.get_reply_message()
if not text:
if event.is_reply and not reply_to_id.media:
text = reply_to_id.message
else:
await event.edit("`Send you text to trump so he can tweet.`")
return
await event.edit("`Requesting trump to tweet...`")
text = deEmojify(text)
img = await trumptweet(text)
await event.client.send_file(event.chat_id, img, reply_to=reply_to_id)
await event.delete()
await purge()
@borg.on(admin_cmd(outgoing=True, pattern="johnny ?(.*)"))
async def johnny(event):
    """`.johnny <text>` — render the given (or replied-to) text as a Johnny tweet."""
    raw = event.pattern_match.group(1)
    raw = re.sub("&", "", raw)
    target = event.message
    if event.reply_to_msg_id:
        target = await event.get_reply_message()
    if not raw:
        if not (event.is_reply and not target.media):
            await event.edit("`Send you text to Johnny so he can tweet.`")
            return
        raw = target.message
    await event.edit("`Requesting Johnny to tweet...`")
    img = await johnnytweet(deEmojify(raw))
    await event.client.send_file(event.chat_id, img, reply_to=target)
    await event.delete()
    await purge()
@borg.on(admin_cmd(outgoing=True, pattern="bhau ?(.*)"))
async def bhau(event):
    """`.bhau <text>` — render the given (or replied-to) text as a Bhau tweet."""
    raw = event.pattern_match.group(1)
    raw = re.sub("&", "", raw)
    target = event.message
    if event.reply_to_msg_id:
        target = await event.get_reply_message()
    if not raw:
        if not (event.is_reply and not target.media):
            await event.edit("`Send you text to Hindustani Bhau so he can tweet.`")
            return
        raw = target.message
    await event.edit("`Requesting Hindustani bhau to tweet...`")
    img = await bhautweet(deEmojify(raw))
    await event.client.send_file(event.chat_id, img, reply_to=target)
    await event.delete()
    await purge()
@borg.on(admin_cmd(outgoing=True, pattern="sundar ?(.*)"))
async def sunny(event):
    """`.sundar <text>` — render the given (or replied-to) text as a Sundar Pichai tweet."""
    raw = event.pattern_match.group(1)
    raw = re.sub("&", "", raw)
    target = event.message
    if event.reply_to_msg_id:
        target = await event.get_reply_message()
    if not raw:
        if not (event.is_reply and not target.media):
            await event.edit("`Send you text to Sundar Pichai so he can tweet.`")
            return
        raw = target.message
    await event.edit("`Requesting Sundar Pichai to tweet...`")
    img = await sundartweet(deEmojify(raw))
    await event.client.send_file(event.chat_id, img, reply_to=target)
    await event.delete()
    await purge()
@borg.on(admin_cmd(outgoing=True, pattern="joker ?(.*)"))
async def j(event):
    """`.joker <text>` — render the given (or replied-to) text as a Joker tweet."""
    raw = event.pattern_match.group(1)
    raw = re.sub("&", "", raw)
    target = event.message
    if event.reply_to_msg_id:
        target = await event.get_reply_message()
    if not raw:
        if not (event.is_reply and not target.media):
            await event.edit("`Send you text to 🃏 Joker so he can tweet.`")
            return
        raw = target.message
    await event.edit("`Requesting 🃏 Joker to tweet...`")
    img = await jtweet(deEmojify(raw))
    await event.client.send_file(event.chat_id, img, reply_to=target)
    await event.delete()
    await purge()
@borg.on(admin_cmd(outgoing=True, pattern="modi ?(.*)"))
async def modi(event):
    """`.modi <text>` — render the given (or replied-to) text as a Modi tweet."""
    raw = event.pattern_match.group(1)
    raw = re.sub("&", "", raw)
    target = event.message
    if event.reply_to_msg_id:
        target = await event.get_reply_message()
    if not raw:
        if not (event.is_reply and not target.media):
            await event.edit("`Send you text to Modi so he can tweet.`")
            return
        raw = target.message
    await event.edit("`Requesting Modi to tweet...`")
    img = await moditweet(deEmojify(raw))
    await event.client.send_file(event.chat_id, img, reply_to=target)
    await event.delete()
    await purge()
@borg.on(admin_cmd(outgoing=True, pattern="apj ?(.*)"))
async def mia(event):
    """`.apj <text>` — render the given (or replied-to) text as an A.P.J. Abdul Kalam tweet."""
    raw = event.pattern_match.group(1)
    raw = re.sub("&", "", raw)
    target = event.message
    if event.reply_to_msg_id:
        target = await event.get_reply_message()
    if not raw:
        if not (event.is_reply and not target.media):
            await event.edit("`Send you text to Dr.A.P.J.Abdul Kalam so he can tweet.`")
            return
        raw = target.message
    await event.edit("`Requesting Dr.A.P.J.Abdul Kalam to tweet...`")
    img = await apjtweet(deEmojify(raw))
    await event.client.send_file(event.chat_id, img, reply_to=target)
    await event.delete()
    await purge()
@borg.on(admin_cmd(outgoing=True, pattern="cmm ?(.*)"))
async def cmm(event):
    """`.cmm <text>` — render the given (or replied-to) text on a 'change my mind' banner."""
    raw = event.pattern_match.group(1)
    raw = re.sub("&", "", raw)
    target = event.message
    if event.reply_to_msg_id:
        target = await event.get_reply_message()
    if not raw:
        if not (event.is_reply and not target.media):
            await event.edit("`Give text for to write on banner!`")
            return
        raw = target.message
    await event.edit("`Your banner is under creation wait a sec...`")
    img = await changemymind(deEmojify(raw))
    await event.client.send_file(event.chat_id, img, reply_to=target)
    await event.delete()
    await purge()
# NOTE: handler name shadows the builtin `type`; kept for registration compatibility.
@borg.on(admin_cmd(outgoing=True, pattern="type ?(.*)"))
async def type(animu):
    """`.type <text>` — render text as a random anime sticker via @stickerizerbot."""
    text = animu.pattern_match.group(1)
    if not text:
        if not animu.is_reply:
            await animu.answer("`No text given.`")
            return
        text = (await animu.get_reply_message()).message
    # template ids accepted by the stickerizer inline bot
    animus = [
        1, 2, 3, 4, 5, 6, 8, 7, 10, 11,
        13, 22, 34, 35, 36, 37, 43, 44, 45, 52, 53,
    ]
    results = await bot.inline_query(
        "stickerizerbot", f"#{random.choice(animus)}{(deEmojify(text))}"
    )
    await results[0].click(
        animu.chat_id,
        reply_to=animu.reply_to_msg_id,
        silent=bool(animu.is_reply),
        hide_via=True,
    )
    await animu.delete()
@borg.on(admin_cmd(outgoing=True, pattern="waifu ?(.*)"))
async def waifu(danish):
    """`.waifu <text>` — render text on a random anime-girl sticker template."""
    text = danish.pattern_match.group(1)
    if not text:
        if not danish.is_reply:
            await danish.answer("`No text given.`")
            return
        text = (await danish.get_reply_message()).message
    # waifu-themed template ids for the stickerizer inline bot
    king = [32, 33, 37, 40, 41, 42, 58, 20]
    results = await bot.inline_query(
        "stickerizerbot", f"#{random.choice(king)}{(deEmojify(text))}"
    )
    await results[0].click(
        danish.chat_id,
        reply_to=danish.reply_to_msg_id,
        silent=bool(danish.is_reply),
        hide_via=True,
    )
    await danish.delete()
@borg.on(admin_cmd(outgoing=True, pattern="tweet ?(.*)"))
async def tweet(event):
    """`.tweet <username>.<text>` — create a tweet image with a custom username.

    Bug fixes versus the original:
    - the no-dot error branch did not `return`, so execution fell through to
      `f"...{username}..."` and raised NameError;
    - the non-reply error message was missing its opening backtick;
    - `text.split(".")` raised ValueError when the tweet body contained a
      dot; split only on the first dot instead.
    """
    text = event.pattern_match.group(1)
    text = re.sub("&", "", text)
    reply_to_id = event.message
    if event.reply_to_msg_id:
        reply_to_id = await event.get_reply_message()
    if not text:
        if event.is_reply and not reply_to_id.media:
            text = reply_to_id.message
        else:
            await event.edit("`What should i tweet? Give your username and tweet!`")
            return
    if "." in text:
        # only the first dot separates username from tweet body
        username, text = text.split(".", 1)
    else:
        await event.edit("`What should i tweet? Give your username and tweet!`")
        return
    await event.edit(f"`Requesting {username} to tweet...`")
    text = deEmojify(text)
    img = await tweets(text, username)
    await event.client.send_file(event.chat_id, img, reply_to=reply_to_id)
    await event.delete()
    await purge()
@borg.on(admin_cmd(pattern="tweetme(?: |$)(.*)"))
async def tweetme(okie):
    """`.tweetme <text>` — render text as a dark-theme tweet via @TwitterStatusBot."""
    what = okie.pattern_match.group(1)
    if not what:
        if not okie.is_reply:
            await okie.edit("`Tweets must contain some text, pero!`")
            return
        what = (await okie.get_reply_message()).message
    results = await bot.inline_query("TwitterStatusBot", f"{(deEmojify(what))}")
    await results[0].click(
        okie.chat_id,
        reply_to=okie.reply_to_msg_id,
        silent=bool(okie.is_reply),
        hide_via=True,
    )
    await okie.delete()
# Register the tweet/sticker command family with the global help index.
CMD_HELP.update(
    {
        "tweet":
        "╼•∘ 🅲🅼🅽🅳 ∘•╾ :.tweet <username>.<tweet>"
        "\n╼•∘ 🆄🆂🅰🅶🅴 ∘•╾ : Create tweet with custom username.\n\n"
        "╼•∘ 🅲🅼🅽🅳 ∘•╾ :.trump <tweet>"
        "\n╼•∘ 🆄🆂🅰🅶🅴 ∘•╾ : Create tweet for Donald Trump.\n\n"
        "╼•∘ 🅲🅼🅽🅳 ∘•╾ :.sundar <tweet>"
        "\n╼•∘ 🆄🆂🅰🅶🅴 ∘•╾ : Create tweet for Sundar Pichai.\n\n"
        "╼•∘ 🅲🅼🅽🅳 ∘•╾ :.johnny <tweet>"
        "\n╼•∘ 🆄🆂🅰🅶🅴 ∘•╾ : Create tweet for Johnny Sins.\n\n"
        "╼•∘ 🅲🅼🅽🅳 ∘•╾ :.bhau <tweet>"
        "\n╼•∘ 🆄🆂🅰🅶🅴 ∘•╾ : Create tweet for Hindustani bhau.\n\n"
        "╼•∘ 🅲🅼🅽🅳 ∘•╾ :.modi <tweet>"
        "\n╼•∘ 🆄🆂🅰🅶🅴 ∘•╾ : Create tweet for Modi .\n\n"
        "╼•∘ 🅲🅼🅽🅳 ∘•╾ :.tweetme <tweet>"
        "\n╼•∘ 🆄🆂🅰🅶🅴 ∘•╾ : Create tweet from u in dark theme.\n\n"
        "╼•∘ 🅲🅼🅽🅳 ∘•╾ :.cmm <text>"
        "\n╼•∘ 🆄🆂🅰🅶🅴 ∘•╾ : Create banner for Change My Mind.\n\n"
        "╼•∘ 🅲🅼🅽🅳 ∘•╾ :.waifu <text>"
        "\n╼•∘ 🆄🆂🅰🅶🅴 ∘•╾ : Random anime girl stickers.\n\n"
        "╼•∘ 🅲🅼🅽🅳 ∘•╾ :.type <text>"
        "\n╼•∘ 🆄🆂🅰🅶🅴 ∘•╾ : random sticker is writing your text."
    }
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Minimal packaging configuration for the blueprint_readthedocs example project.
setup(
    name='blueprint_readthedocs',
    version='1.0',
    # ship every package except the test suite
    packages=find_packages(exclude=["tests", "tests.*"]),
    license='MIT',
    # no runtime dependencies; sphinx is only needed to build the docs
    install_requires=[
    ],
    extras_require={
        'dev': [
            'sphinx'
        ]
    }
)
|
import os
import setuptools
# Long description for PyPI comes straight from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# Runtime requirements: yt itself plus xmltodict for ASPECT's XML metadata.
reqd_pkgs = [
    "yt>=4.0",
    "xmltodict",
]

setuptools.setup(
    name="yt_aspect",
    version="0.0.2",
    author="Chris Havlin",
    author_email="chris.havlin@gmail.com",
    description="A yt plugin for loading ASPECT output",
    install_requires=reqd_pkgs,
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/chrishavlin/yt_aspect",
    project_urls={"Bug Tracker": "https://github.com/chrishavlin/yt_aspect/issues",},
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # NOTE(review): find_packages(where="yt_aspect") lists packages *inside*
    # that directory, which would not include the top-level "yt_aspect"
    # package mapped by package_dir — verify against the repo layout that the
    # intended package actually gets installed.
    package_dir={"yt_aspect": "yt_aspect"},
    packages=setuptools.find_packages(where="yt_aspect"),
    python_requires=">=3.6",
)
|
# continue: demonstrates skipping the rest of a loop body and restarting the loop
i = 1
while i < 30:
    i+=1 # careful with continue we need an increment to avoid forever loop
    if i % 2 == 0:
        print("Even Number!", i)
        print("Jumping to the start of the loop")
        continue # so continue jumps back to the start of the loop
    # lines below only run for odd i — continue acts like an implicit else here
    print("Hmm must be odd!", i)
    i+=10
i = 1
# Alternative example (kept for reference): continue plus a break guard.
# while i < 10:
#     if i % 2 == 0:
#         print("Even number!", i)
#         i += 1
#         continue # means we go to start of the loop immediately
#     print("My generic num", i)
#     i += 1
#     if i >= 7:
#         break
# print("All done")
|
"""
Given a dictionary (tree), that can contains multiple nested structures.
Write a function, that takes element and finds the number of occurrences
of this element in the tree.
Tree can only contains basic structures like:
str, list, tuple, dict, set, int, bool
"""
from typing import Any
# Example tree: contains the value "RED" six times across nested containers.
example_tree = {
    "first": ["RED", "BLUE"],
    "second": {
        "simple_key": ["simple", "list", "of", "RED", "valued"],
    },
    "third": {
        "abc": "BLUE",
        "jhl": "RED",
        "complex_key": {
            "key1": "value1",
            "key2": "RED",
            "key3": ["a", "lot", "of", "values", {"nested_key": "RED"}],
        }
    },
    "fourth": "RED",
}
def find_occurrences(tree: dict, element: Any) -> int:
    """Count how many times *element* occurs anywhere in the nested *tree*.

    Recurses through dicts (both keys and values), lists, tuples and sets;
    every other node is compared to *element* by equality. The original
    left the body as `...` — this implements the documented behavior.
    """
    def _count(node: Any) -> int:
        # a container node can itself equal the searched element
        total = 1 if node == element else 0
        if isinstance(node, dict):
            total += sum(_count(k) + _count(v) for k, v in node.items())
        elif isinstance(node, (list, tuple, set)):
            total += sum(_count(item) for item in node)
        return total

    return _count(tree)


if __name__ == '__main__':
    print(find_occurrences(example_tree, "RED"))  # 6
|
import numpy as np
from PIL import Image
import vk4extract #<----module has to be in the same folder as this file to run
import os
# Batch-convert Keyence .vk4 captures in this folder to PNG images.
os.chdir(r'C:\Users\jespe\surfdrive\Thesis\microscopy\2021-09-21\TenCate_retake_20210920')
root = ('.\\')
vkimages = os.listdir(root)  # every file in the working folder
for fname in vkimages:
    if not fname.endswith('.vk4'):
        continue
    with open(fname, 'rb') as in_file:
        offsets = vk4extract.extract_offsets(in_file)
        # 'peak' colour channel; use extract_img_data for light/height data
        color = vk4extract.extract_color_data(offsets, 'peak', in_file)
        pixels = np.reshape(color['data'], (color['height'], color['width'], 3))
        # save the raw RGB image as PNG next to its source file
        Image.fromarray(pixels, 'RGB').save(fname.replace('.vk4', '.png'), 'PNG')
|
# -*- coding: utf-8 -*-

# NICOS setup file: core system configuration for the NECTAR instrument.
description = 'system setup'
group = 'lowlevel'

# Core service wiring: cache server, instrument/experiment objects,
# data sinks and notification channels.
sysconfig = dict(
    cache = 'nectarhw.nectar.frm2',
    instrument = 'Instrument',
    experiment = 'Exp',
    datasinks = ['conssink', 'filesink', 'daemonsink'],
    notifiers = ['email', 'smser'],
)

# Command modules loaded into the NICOS session.
modules = ['nicos.commands.basic', 'nicos.commands.standard',
           'nicos_mlz.antares.commands', 'nicos_mlz.nectar.commands']

includes = ['notifiers']

devices = dict(
    Sample = device('nicos.devices.sample.Sample',
        description = 'sample object',
    ),
    # Experiment object reuses the ANTARES implementation.
    Exp = device('nicos_mlz.antares.devices.experiment.Experiment',
        description = 'experiment object',
        dataroot = '/data/FRM-II',
        serviceexp = 'service',
        sample = 'Sample',
        mailsender = 'nectar@frm2.tum.de',
        sendmail = False,
        zipdata = False,
        managerights = {},
    ),
    Instrument = device('nicos.devices.instrument.Instrument',
        description = 'NECTAR instrument',
        instrument = 'NECTAR',
        doi = 'http://dx.doi.org/10.17815/jlsrf-1-45',
        responsible = 'Adrian Losko <adrian.losko@frm2.tum.de>',
        operators = ['Technische Universität München (TUM)'],
        website = 'http://www.mlz-garching.de/nectar',
    ),
    # Scan data sinks: ASCII file, console and daemon.
    filesink = device('nicos.devices.datasinks.AsciiScanfileSink',
    ),
    conssink = device('nicos.devices.datasinks.ConsoleScanSink',
    ),
    daemonsink = device('nicos.devices.datasinks.DaemonSink',
    ),
    # Free-space monitors (minfree values are in GiB).
    Space = device('nicos.devices.generic.FreeSpace',
        description = 'Free Space in the RootDir of nectarhw',
        path = '/',
        minfree = 5,
    ),
    HomeSpace = device('nicos.devices.generic.FreeSpace',
        description = 'Free Space in the home directory of user nectar',
        path = '/localhome/nectar',
        minfree = 1,
    ),
    DataSpace = device('nicos.devices.generic.FreeSpace',
        description = 'Free Space on the DataStorage',
        path = '/data',
        minfree = 50,
    ),
    LogSpace = device('nicos.devices.generic.FreeSpace',
        description = 'Free space on the log drive',
        path = '/nectarcontrol/log',
        lowlevel = True,
        warnlimits = (0.5, None),
    ),
)
|
#!/usr/bin/env python3
import os
import signal
import threading
from . import generic
import core.potloader as potloader
import core.utils as utils
from .dblogger import DBThread
class GenericPot(potloader.PotLoader):
    """Implementation of generic honeypot that listens on an arbitrary UDP port
    and responds with a random response of a given size or with a predefined pattern.
    """

    def name(self):
        """Honeypot identifier used for logging and database bookkeeping."""
        return 'generic'

    def _create_server(self):
        """Build the UDP server from the loaded config and shared queues."""
        return generic.create_server(
            self.conf,
            self.name(),
            self.log_queue,
            self.output_queue,
            self.hpfeeds_client,
            self.alerter
        )

    def _create_dbthread(self, dbfile, new_attack_interval):
        """Build the DB logger thread that drains the log/output queues."""
        return DBThread(
            dbfile,
            self.name(),
            self.log_queue,
            self.output_queue,
            self.stop_event,
            new_attack_interval
        )

    def _start_server(self):
        """Serve UDP requests until shutdown."""
        self.server.serve_forever()

    def _get_config_path(self):
        """The config file lives next to this module."""
        return os.path.join(os.path.dirname(__file__), 'genericpot.conf')

    def _detailed_status(self, status):
        """Format per-honeypot statistic rows for status display.

        Fix: removed the unused ``port`` local that was computed but never read.
        """
        avg_amp = float('{0:.2f}'.format(status['avg_amp']))
        pkt_in_bytes = utils.format_unit(status['packets_in_bytes'])
        stats = [
            ['Average amplification', utils.sep_thousand(avg_amp)],
            ['Traffic IN/OUT', pkt_in_bytes],
        ]
        return stats

    # override of default function for obtaining payload inside status structure
    # setup function is generic enough for display, but since generic honeypot is
    # port-specific, return structure for the currently bound port
    def _extract_status_payload(self, stats):
        port = self.server.server_address[1]
        payload = stats['payload']
        if port in payload:
            port_stats = payload[port]
            specific = payload['specific']
            port_stats['specific'] = {
                'avg_amp': specific['avg_amp'][port],
                'packets_in_bytes': specific['packets_in_bytes'][port]
            }
            return port_stats
        else:
            utils.print_warn('Port %d not found in the database, statistics not available' % (port))
            # set total_attacks parameter to zero in order to signal empty statistics table
            return {'total_attacks': 0}
if __name__ == "__main__":
    # Run the honeypot in a worker thread and wait for SIGINT in the main
    # thread so the shutdown handler can stop it cleanly.
    genericpot = GenericPot()
    genericpot.setup()
    t = threading.Thread(target=genericpot.run)
    t.start()
    genericpot.potthread = t
    signal.signal(signal.SIGINT, genericpot.shutdown_signal_wrapper)
    signal.pause()
|
"""Implementation of sample attack."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
#from cleverhans.attacks import FastGradientMethod
#from cleverhans.attacks import BasicIterativeMethod
from attacks import BasicIterativeMethod
import numpy as np
from PIL import Image
import tensorflow as tf
import inception_resnet_v2
from tensorflow.contrib.slim.nets import inception
slim = tf.contrib.slim

# Command-line flags: I/O paths, attack budget (max_epsilon in [0, 255]
# pixel units) and batch geometry for the 299x299 inception input.
tf.flags.DEFINE_string(
    'master', '', 'The address of the TensorFlow master to use.')
tf.flags.DEFINE_string(
    'checkpoint_path', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
    'input_dir', '', 'Input directory with images.')
tf.flags.DEFINE_string(
    'output_dir', '', 'Output directory with images.')
tf.flags.DEFINE_float(
    'max_epsilon', 16.0, 'Maximum size of adversarial perturbation.')
tf.flags.DEFINE_integer(
    'image_width', 299, 'Width of each input images.')
tf.flags.DEFINE_integer(
    'image_height', 299, 'Height of each input images.')
tf.flags.DEFINE_integer(
    'batch_size', 16, 'How many images process at one time.')

FLAGS = tf.flags.FLAGS
def load_images(input_dir, batch_shape):
    """Read png images from input directory in batches.

    Args:
      input_dir: input directory
      batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]

    Yields:
      filenames: list file names without path of each image
        Length of this list could be less than batch_size, in this case only
        first few images of the result are elements of the minibatch.
      images: array with all images from this batch
    """
    images = np.zeros(batch_shape)
    filenames = []
    idx = 0
    batch_size = batch_shape[0]
    for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
        # bug fix: open in binary mode — the default text mode corrupts the
        # PNG byte stream under Python 3.
        with tf.gfile.Open(filepath, 'rb') as f:
            image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0
        # Images for inception classifier are normalized to be in [-1, 1] interval.
        images[idx, :, :, :] = image * 2.0 - 1.0
        filenames.append(os.path.basename(filepath))
        idx += 1
        if idx == batch_size:
            yield filenames, images
            filenames = []
            images = np.zeros(batch_shape)
            idx = 0
    # final partial batch (unused slots stay zero-filled)
    if idx > 0:
        yield filenames, images
def save_images(images, filenames, output_dir):
    """Saves images to the output directory.

    Args:
      images: array with minibatch of images
      filenames: list of filenames without path
        If number of file names in this list less than number of images in
        the minibatch then only first len(filenames) images will be saved.
      output_dir: directory where to save images
    """
    for i, filename in enumerate(filenames):
        # Images for inception classifier are normalized to be in [-1, 1]
        # interval, so rescale them back to [0, 255].
        img = (((images[i, :, :, :] + 1.0) * 0.5) * 255.0).astype(np.uint8)
        # bug fix: write PNG bytes in binary mode ('wb'); text mode breaks
        # the binary stream under Python 3.
        with tf.gfile.Open(os.path.join(output_dir, filename), 'wb') as f:
            Image.fromarray(img).save(f, format='PNG')
# this function is from RalphMao commented on 17 Mar
# at https://github.com/tensorflow/tensorflow/issues/312
def optimistic_restore(session, save_file, include_global_step=True):
    """Restore only the graph variables that exist in the checkpoint with
    matching shapes, so several partially-overlapping checkpoints can be
    loaded into one graph without errors on missing variables."""
    reader = tf.train.NewCheckpointReader(save_file)
    saved_shapes = reader.get_variable_to_shape_map()
    # (full name with ':0' suffix, bare name) pairs for vars present in ckpt
    var_names = sorted([(var.name, var.name.split(':')[0])
                        for var in tf.global_variables()
                        if var.name.split(':')[0] in saved_shapes])
    restore_vars = []
    name2var = dict(zip(map(lambda x:x.name.split(':')[0],
                        tf.global_variables()), tf.global_variables()))
    with tf.variable_scope('', reuse=True):
        for var_name, saved_var_name in var_names:
            curr_var = name2var[saved_var_name]
            var_shape = curr_var.get_shape().as_list()
            # restore only shape-compatible vars; global_step is optional
            if var_shape == saved_shapes[saved_var_name]:
                if 'global_step' in saved_var_name:
                    if include_global_step:
                        restore_vars.append(curr_var)
                else:
                    restore_vars.append(curr_var)
    saver = tf.train.Saver(restore_vars)
    saver.restore(session, save_file)
class InceptionPureModel(object):
    """CleverHans-style callable wrapping plain inception_v3 under scope 'Pure'."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.built = False

    def __call__(self, x_input):
        """Build (or reuse) the graph and return class probabilities."""
        reuse = self.built or None
        with tf.variable_scope('Pure') as scope:
            with slim.arg_scope(inception.inception_v3_arg_scope()):
                _, end_points = inception.inception_v3(
                    x_input,
                    num_classes=self.num_classes,
                    is_training=False,
                    reuse=reuse,
                )
        self.built = True
        # strip the trailing reshape op so gradients flow to the raw probs
        probs = end_points['Predictions'].op.inputs[0]
        return probs
class InceptionModel(object):
    """CleverHans-style callable wrapping (adv-trained) inception_v3."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.built = False

    def __call__(self, x_input):
        """Build (or reuse) the graph and return class probabilities."""
        reuse = self.built or None
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            _, end_points = inception.inception_v3(
                x_input,
                num_classes=self.num_classes,
                is_training=False,
                reuse=reuse,
            )
        self.built = True
        # strip the trailing reshape op so gradients flow to the raw probs
        probs = end_points['Predictions'].op.inputs[0]
        return probs
class InceptionResModel(object):
    """CleverHans-style callable wrapping inception_resnet_v2."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.built = False

    def __call__(self, x_input):
        """Build (or reuse) the graph and return class probabilities."""
        reuse = self.built or None
        with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
            _, end_points = inception_resnet_v2.inception_resnet_v2(
                x_input,
                num_classes=self.num_classes,
                is_training=False,
                reuse=reuse,
            )
        self.built = True
        # strip the trailing reshape op so gradients flow to the raw probs
        probs = end_points['Predictions'].op.inputs[0]
        return probs
def main(_):
    """Generate ensemble adversarial examples with iterative FGSM over three models."""
    # Images for inception classifier are normalized to be in [-1, 1] interval,
    # eps is a difference between pixels so it should be in [0, 2] interval.
    # Renormalizing epsilon from [0, 255] to [0, 2].
    debug_flag = False
    eps = 2.0 * FLAGS.max_epsilon / 255.0
    eps_iter = 1.0 / 255.0
    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
    num_classes = 1001
    tf.logging.set_verbosity(tf.logging.INFO)
    # two ensemble-adversarially-trained checkpoints plus the plain inception_v3
    checkpoints = ['./ens4_adv_inception_v3/ens4_adv_inception_v3.ckpt',
                   './ens_adv_inception_resnet_v2/ens_adv_inception_resnet_v2.ckpt',
                   './inception_v3/inception_v3.ckpt' ]
    model_names = [InceptionModel, InceptionResModel, InceptionPureModel]
    with tf.Graph().as_default():
        # Prepare graph
        x_input = tf.placeholder(tf.float32, shape=batch_shape)
        models = []
        for model_name in model_names:
            models.append(model_name(num_classes))
        iter_fgsms = []
        for model in models:
            iter_fgsms.append(BasicIterativeMethod(model))
        x_advs = []
        model_preds = []
        for iter_fgsm in iter_fgsms:
            # iteration count follows the max(eps+4, 1.25*eps) schedule, doubled
            x_adv, model_pred, y_target = iter_fgsm.generate(x_input,
                eps=eps, clip_min=-1., clip_max=1.,
                eps_iter=eps_iter,
                nb_iter=int(max(FLAGS.max_epsilon+4,
                                1.25*FLAGS.max_epsilon)*2) )
            x_advs.append(x_adv)
            model_preds.append(model_pred)
        # Run computation
        saver = tf.train.Saver()
        run_config = tf.ConfigProto()
        with tf.Session(config=run_config) as sess:
            # load all checkpoints into the single graph; optimistic_restore
            # skips the variables each checkpoint does not contain
            for checkpoint in checkpoints:
                optimistic_restore(sess, checkpoint)
            i = 0
            for filenames, images in load_images(FLAGS.input_dir, batch_shape):
                adv_images, preds = sess.run([x_advs, model_preds],
                                             feed_dict={x_input: images})
                # average the per-model perturbations, then push every pixel
                # whose mean shift exceeds eps/8 to the full +-eps budget
                diff_images = []
                for adv_images_per_model in adv_images:
                    diff_images.append(adv_images_per_model - images)
                ens_diffs = np.mean(diff_images, axis=0)
                ens_diffs[np.where(ens_diffs > eps/8)] = eps
                ens_diffs[np.where(ens_diffs < -eps/8)] = -eps
                ens_adv_images = np.clip(ens_diffs + images, -1., 1.)
                save_images(ens_adv_images, filenames, FLAGS.output_dir)
                if debug_flag:
                    # re-classify each per-model adversarial batch plus the
                    # ensemble batch, save them under suffixed names and print
                    # the argmax/confidence of every model on each batch
                    adv_preds = sess.run(model_preds, feed_dict={ x_input: adv_images[0]})
                    adv_preds2 = sess.run(model_preds, feed_dict={ x_input: adv_images[1]})
                    adv_preds3 = sess.run(model_preds, feed_dict={ x_input: adv_images[2]})
                    ens_adv_preds = sess.run(model_preds, feed_dict={ x_input: ens_adv_images})
                    filenames1 = []
                    filenames2 = []
                    filenames3 = []
                    for j, filename in enumerate(filenames):
                        filename = filename.split('.')[0]
                        filenames1.append(filename + '_v3.png')
                        filenames2.append(filename + '_res2.png')
                        filenames3.append(filename + '_pure_v3.png')
                    save_images(adv_images[0], filenames1, FLAGS.output_dir)
                    save_images(adv_images[1], filenames2, FLAGS.output_dir)
                    save_images(adv_images[2], filenames3, FLAGS.output_dir)
                    for j, (pred, adv_pred, adv_pred2, adv_pred3, ens_adv_pred) \
                            in enumerate(zip(preds, adv_preds, adv_preds2, adv_preds3, ens_adv_preds)):
                        print ('Test for model ', j)
                        print ('clean prediction: \n',
                               np.argmax(pred, axis=1), ' ', np.max(pred, axis=1))
                        print ('adv images from model 0 prediction: \n',
                               np.argmax(adv_pred, axis=1), ' ', np.max(adv_pred, axis=1))
                        print ('adv images from model 1 prediction: \n',
                               np.argmax(adv_pred2, axis=1), ' ', np.max(adv_pred2, axis=1))
                        print ('adv images from model 2 prediction: \n',
                               np.argmax(adv_pred3, axis=1), ' ', np.max(adv_pred3, axis=1))
                        print ('ens_adv images prediction: \n',
                               np.argmax(ens_adv_pred, axis=1), ' ',
                               np.max(ens_adv_pred, axis=1))
                print ('%d images are being processed' % ((i+1)*FLAGS.batch_size))
                i+=1


if __name__ == '__main__':
    tf.app.run()
|
from synapseConstantsMinimal import *

# Synaptic parameter constants for the olfactory-bulb mitral/granule model.
# All values are SI units unless noted; Gbar values in Siemens, times in s.

############# ORN to mitral!
############# Set these such that 100 ORNs at approx 50Hz make the mitral cell fire in the middle of its linear range.
#SYN_EXC_G = 1 * 8.6516e-9 # Siemens
#SYN_INH_G = 1 * 2.2126e-9 # Siemens

GRANULE_INH_GRADED = False#True
RECEPTOR_SATURATION = 1.0#0.5 # Needed for single synapse KinSynChan and also in the baseline SynChan to correct for usage of synaptic weights across synchan and kinsynchan.
#RECEPTOR_SATN_CORRECTN_NMDA = 0.8785 # above 6mV EPSP #0.22 #for CM=0.04 #0.25 #for CM=0.01 #0.275 # See my onenote notes dt03/07/2010 for derivation of this value.
#RECEPTOR_SATN_CORRECTN_AMPA = 1.0818 # above 6mV EPSP #0.35 #0.19 # See my onenote notes dt03/07/2010 for derivation of this value.
RECEPTOR_SATN_CORRECTN_NMDA = 0.892 #4mV EPSP #0.22 #for CM=0.04 #0.25 #for CM=0.01 #0.275 # See my onenote notes dt03/07/2010 for derivation of this value.
RECEPTOR_SATN_CORRECTN_AMPA = 1.069 #4mV EPSP #0.35 #0.19 # See my onenote notes dt03/07/2010 for derivation of this value.
#RECEPTOR_SATN_CORRECTN_NMDA = 0.9397 #7mV EPSP with CM=0.02 #0.22 #for CM=0.04 #0.25 #for CM=0.01 #0.275 # See my onenote notes dt03/07/2010 for derivation of this value.
#RECEPTOR_SATN_CORRECTN_AMPA = 1.0679 #7mV EPSP with CM=0.02 #0.35 #0.19 # See my onenote notes dt03/07/2010 for derivation of this value.

## Arevian et al's activity dep inhibition work is under physiological conditions
## i.e. 1mM Mg. as also Urban's previous work.
MG_CONC = 1.0 #0.001 #1.3 #mM SI units mol/m^3 = mmol/liter = mMolar (mM) # value of 1.3 mM From Isaacson 2001 PNAS; [Mg++] should be non-zero, hence 0.001 for 0.0
## Giridhar et al use no Mg2+, hence this setting for testing asymmetrical lateral inhibition
#MG_CONC = 0.1 #0.001 #1.3 #mM SI units mol/m^3 = mmol/liter = mMolar (mM) # value of 1.3 mM From Isaacson 2001 PNAS; [Mg++] should be non-zero, hence 0.001 for 0.0

mitral_granule_AMPA_Ek = 0.0 # Volts
## below is imported from synapseConstantsMinimal.py
#mitral_granule_AMPA_Gbar = 0.35e-9#0.35e-9 # Siemens ## This has been set so as to get roughly 8mV near the 12mV EPSP of Trombley & Shepherd 1992 JNeurosci
mitral_granule_saturatingAMPA_Gbar = 0.35e-9/RECEPTOR_SATN_CORRECTN_AMPA# Siemens ## This has been set so as to get roughly 8mV near the 12mV EPSP of Trombley & Shepherd 1992 JNeurosci
# From Cang and Isaacson 2003, in-vivo whole cell sEPSP data: 1ms rise and 4ms decay.
mitral_granule_AMPA_tau1 = 1.0e-3#2.0e-3 # seconds # Davison etal 2003 assume instantaneous rise but write that 4 ms is experimental, but is this for AMPA or GABA!
mitral_granule_AMPA_tau2 = 4.0e-3#5.5e-3 # seconds # decay time - from Migliore and Shepherd 2008.
mitral_granule_saturatingAMPA_pulseWidth = mitral_granule_AMPA_tau1 # this is the time to peak for KinSynChan
mitral_granule_saturatingAMPA_tau1 = mitral_granule_AMPA_tau2 # saturating syn decay time - roughly from fig 1B of Migliore and Shepherd 2008.
mitral_granule_saturatingAMPA_rInf = RECEPTOR_SATURATION # make some receptors saturate - set it so that it is best for lateral inhibition.

mitral_granule_NMDA_Ek = 0.0 # Volts
mitral_granule_NMDA_Gbar = 0.26*mitral_granule_AMPA_Gbar #0.1e-9 # Siemens ## This has been set so as to get roughly 8mV near the 12mV EPSP of Trombley & Shepherd 1992 JNeurosci
mitral_granule_saturatingNMDA_Gbar = 0.26*mitral_granule_AMPA_Gbar/RECEPTOR_SATN_CORRECTN_NMDA # Siemens ## This has been set so as to get roughly 8mV near the 12mV EPSP of Trombley & Shepherd 1992 JNeurosci
##### For SynChan
mitral_granule_NMDA_tau1 = 25e-3#20e-3 # rise time
mitral_granule_NMDA_tau2 = 200e-3#50e-3 # decay time - roughly from Migliore and Shepherd 2008.
##### For KinSynChan - rinf - fraction open due to a transmitter release and tau1 - decay time
mitral_granule_saturatingNMDA_pulseWidth = mitral_granule_NMDA_tau1 # this is the time to peak for KinSynChan
mitral_granule_saturatingNMDA_tau1 = mitral_granule_NMDA_tau2 # decay time - roughly from Migliore and Shepherd 2008.
mitral_granule_saturatingNMDA_rInf = RECEPTOR_SATURATION # make some receptors saturate - first pass set it so that it is best for lateral inhibition.
# Mg-block parameters for the NMDA channel voltage dependence.
mitral_granule_NMDA_KMg_A = 1.0/0.1 #1.0/0.33 # mM
mitral_granule_NMDA_KMg_B = 1.0/73.0 #1.0/60.0 # V
mitral_granule_NMDA_MgConc = MG_CONC

## Choose between using a short-term plastic synapse or a non-plastic synapse.
GABA_plastic = False
GABA_depression_factor = 0.8 # should be 0.5 from Murthy's 2005 paper but aggregated synapses (?)
GABA_recovery_time = 6.0 # seconds # From Venki Murthy's 2005 paper.
granule_mitral_GABA_Ek = -0.078 # Volts
#### averaged inhibitory synapse:
#granule_mitral_GABA_Gbar = 0.6e-9 # Siemens
#granule_mitral_GABA_tau1 = 50e-3 # averaged IPSP from Urban and Sakmann 2002 #1e-3 # seconds # Davison etal 2003 assume instantaneous rise but write that 4 ms is experimental, but is this for AMPA or GABA!
#granule_mitral_GABA_tau2 = 75e-3 # averaged IPSP from Urban and Sakmann 2002 #10e-3 # seconds # roughly from fig 2A of Isaacson and Strowbridge 1998.
#### unitary inhibitory synapse:
## below are imported from synapseConstantsMinimal.py
#granule_mitral_GABA_Gbar = 10e-9#15e-9#2e-9 # Siemens
#self_mitral_GABA_Gbar = 50e-12 # Siemens
granule_mitral_GABA_tau1 = 1e-3#3e-3 # roughly from fig 1C of Schoppa et al 1998
granule_mitral_GABA_tau2 = 20e-3#1e-3#20e-3 # from text and fig 1C of Schoppa et al 1998
|
#!/usr/bin/env python
# encoding: utf-8
# author: ryan_wu
# email: imitator_wu@outlook.com
# date: 2020-11-29 17:00:46
import os
import sys
import pickle
import argparse
import functools
import numpy as np
from sklearn import svm
from sklearn.model_selection import cross_val_score
# Resolve paths relative to this script's location.
PWD = os.path.dirname(os.path.realpath(__file__))
# Bio datasets carry per-node tags; social datasets use node degrees instead.
bio_dataset = ['MUTAG', 'PROTEINS', 'NCI1', 'PTC']
social_dataset = ['REDDITBINARY', 'IMDBBINARY', 'IMDBMULTI', 'COLLAB', 'REDDITMULTI5K']
def tuple_int_sort(a, b):
    """cmp-style comparator: tuples sort before ints; same types sort by value.

    Bug fix: the original returned ``a > b`` for same-typed values, which is
    0 or 1 and never -1, and returned 0 for (tuple, int) but 1 for
    (int, tuple). Neither satisfies the negative/zero/positive contract
    required by ``functools.cmp_to_key``, so sorting silently produced the
    original order. Now returns a proper three-way result.
    """
    if type(a) == type(b):
        return (a > b) - (a < b)
    if isinstance(a, tuple) and isinstance(b, int):
        return -1
    return 1
def tree_tags(graph, depth, tagV):
    """Assign WL-style tags to all nodes of `graph['tree']` at the given depth.

    Leaf tags (depth 0) are chosen per tagV; internal nodes get a compressed
    label built from the sorted tags of their children, interned in the
    module-level `global_tags` table shared across graphs.
    """
    global global_tags
    for i, n in graph['tree'].items():
        graphID = n.get('graphID', n['ID'])
        if n['depth'] != depth:
            continue
        if n['depth'] == 0:
            if tagV == 0: # tag0: leaf nodes are simply labelled 0
                n['tag0'] = 0
            elif tagV == 1: # tag1: use the tag stored in the data file itself
                n['tag1'] = int(graph['G'].nodes[graphID].get('tag', 0))
            elif tagV == 2: # tag2: use the node's degree
                n['tag2'] = graph['G'].degree[graphID]
            elif tagV == 3: # tag3: (file tag, degree) pair
                n['tag3'] = (int(graph['G'].nodes[graphID].get('tag', 0)), graph['G'].degree[graphID])
            continue
        # internal node: compress the sorted multiset of child tags to an id
        child_tags = [graph['tree'][c]['tag%s' % tagV] for c in n['children']]
        child_tags.sort()
        child_tags = ','.join(map(str, child_tags))
        if child_tags not in global_tags:
            global_tags[child_tags] = len(global_tags)
        n['tag%s' % tagV] = global_tags[child_tags]

# shared label-compression table; reset on every load_data call
global_tags = {0:0}
def load_data(dataset, tree_depth):
    """Load pickled coding trees for *dataset* and build WL feature vectors.

    Tags every tree level bottom-up, collects the global tag vocabulary,
    and returns (xs, ys): per-graph tag-count histograms and labels.
    """
    with open('trees/%s_%s.pickle' % (dataset, tree_depth), 'rb') as fp:
        g_list = pickle.load(fp)
    global global_tags
    global_tags = {0:0}
    # Social graphs have no meaningful node tags -> use degree (tagV 2);
    # bio graphs use the (tag, degree) pair (tagV 3).
    tagV = 2 if dataset in social_dataset else 3
    for level in range(tree_depth + 1):
        for g in g_list:
            tree_tags(g, level, tagV)
    # Vocabulary of all tags seen anywhere, in a deterministic order.
    all_tags = {n['tag%s' % tagV] for g in g_list for _, n in g['tree'].items()}
    all_tags = list(all_tags)
    if tagV == 3:
        all_tags.sort(key=functools.cmp_to_key(tuple_int_sort))
    else:
        all_tags.sort()
    xs, ys = [], []
    for g in g_list:
        ys.append(g['label'])
        node_tags = [n['tag%s' % tagV] for _, n in g['tree'].items()]
        xs.append([node_tags.count(t) for t in all_tags])
    return xs, ys
def pool_crossV(input_):
    """Run 10-fold cross-validation for one (C, gamma) setting.

    *input_* is a (xs, ys, c, gamma) tuple (pool-friendly single argument);
    returns (c, per-fold accuracy scores).
    """
    xs, ys, c, gamma = input_
    classifier = svm.SVC(C=c, gamma=gamma)
    fold_scores = cross_val_score(classifier, np.array(xs), np.array(ys), cv=10, scoring='accuracy')
    return (c, fold_scores)
def gridSearch(dataset, tree_depth):
    """Coarse-to-fine search over the SVM C parameter for one dataset/depth.

    Starts from powers of two, then repeatedly (up to 10 rounds) zooms into
    a linear grid around the best C, stopping when mean CV accuracy no
    longer improves.  Progress is printed per round.
    """
    print(dataset, tree_depth)
    xs, ys = load_data(dataset, tree_depth)
    candidates = [2 ** e for e in range(-5, 15)]
    gamma = 'auto' if dataset in ['IMDBBINARY', 'IMDBMULTI', 'REDDITBINARY', 'REDDITMULTI5K'] else 'scale'
    best_acc = 0
    for round_no in range(10):
        results = [pool_crossV((xs, ys, c, gamma)) for c in candidates]
        # Best candidate first (by mean fold accuracy).
        results.sort(key=lambda item: np.array(item[1]).mean(), reverse=True)
        best_c, fold_accs = results[0]
        mean_acc = np.array(fold_accs).mean()
        if mean_acc <= best_acc:
            break
        best_acc = mean_acc
        print(round_no, '%.6f' % best_c, '%.6f' % mean_acc, '[%s]' % ', '.join(['%.4f' % a for a in fold_accs]))
        sys.stdout.flush()
        # Refine: 20 evenly spaced C values between the neighbours of best_c.
        pos = candidates.index(best_c)
        lo = candidates[max(pos - 1, 0)]
        hi = candidates[min(pos + 1, len(candidates) - 1)]
        candidates = [lo + (hi - lo) / 20 * i for i in range(20)]
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Tree kernel with SVM for whole-tree classification')
    parser.add_argument('-d', '--dataset', type=str, default=None,
                        help='name of dataset (default: None, search all datasets)')
    parser.add_argument('-k', '--tree_depth', type=int, default=None, choices=[2, 3, 4, 5],
                        help='the depth of coding tree (default: None, search all depthes)')
    args = parser.parse_args()
    print(args)
    # An unset option means "search everything" along that axis.
    datasets = [args.dataset] if args.dataset is not None else social_dataset + bio_dataset
    depths = [args.tree_depth] if args.tree_depth is not None else [2, 3, 4, 5]
    for d in datasets:
        for k in depths:
            gridSearch(d, k)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from delivery import deli, tasks
# URL routes for the delivery app: list/add/edit/delete views, deploy and
# task-stop actions, and log retrieval endpoints (project-id scoped).
urlpatterns = [
    url(r'^$', deli.delivery_list, name='delivery'),
    url(r'^add/$', deli.delivery_add, name='delivery_add'),
    url(r'^list/$', deli.delivery_list, name='delivery_list'),
    url(r'^status/(?P<project_id>\d+)/$', deli.status, name='delivery_status'),
    url(r'^edit/(?P<project_id>\d+)/$', deli.delivery_edit, name='delivery_edit'),
    url(r'^log/(?P<project_id>\d+)/$', deli.log, name='delivery_log'),
    url(r'^log2/(?P<project_id>\d+)/$', deli.log2, name='delivery_log2'),
    url(r'^log/delete/$', deli.log_del, name='log_del'),
    url(r'^log/delall/$', deli.log_delall, name='log_delall'),
    url(r'^logs/history/(?P<project_id>\d+)/$', deli.logs_history, name='logs_history'),
    url(r'^get/logs/(?P<project_id>\d+)/(?P<logname>.+)/$', deli.get_log, name='get_log'),
    url(r'^deploy/(?P<project_id>\d+)/$', deli.delivery_deploy, name='delivery_deploy'),
    url(r'^taskstop/(?P<project_id>\d+)/$', deli.task_stop, name='delivery_taskstop'),
    url(r'^delete/$', deli.delivery_del, name='delivery_del'),
]
|
# Application condition
# Fires for the most recently used node when it has not yet been processed.
# NOTE(review): this looks like a code-generation rule consumed by a tool
# that may key off the '# Application condition' / '# Reaction' marker
# lines -- only comments are added here; confirm the tool tolerates them.
waitFor.id == max_used_id and not cur_node_is_processed
# Reaction
# Build the NXT sensor port name and map the (Russian) colour label from
# the block's property to the ecrobot colour enum identifier.
port = "NXT_PORT_S" + waitFor.Port
color_nxt_type = ""
color_str = waitFor.Color
if color_str == "Красный":
    color_nxt_type = "NXT_COLOR_RED"
elif color_str == "Зелёный":
    color_nxt_type = "NXT_COLOR_GREEN"
elif color_str == "Синий":
    color_nxt_type = "NXT_COLOR_BLUE"
elif color_str == "Чёрный":
    color_nxt_type = "NXT_COLOR_BLACK"
elif color_str == "Жёлтый":
    color_nxt_type = "NXT_COLOR_YELLOW"
elif color_str == "Белый":
    color_nxt_type = "NXT_COLOR_WHITE"
# Emitted C code: busy-wait until the sensor on this port reports the colour.
wait_for_color_block_code = "while (ecrobot_get_nxtcolorsensor_id(" + port + ") != " + color_nxt_type + ") {}\n"
# The sensor is (re)initialised identically at task init and termination.
wait_init_code = "ecrobot_init_nxtcolorsensor(" + port + ", " + color_nxt_type + ");\n"
wait_terminate_code = "ecrobot_init_nxtcolorsensor(" + port + ", " + color_nxt_type + ");\n"
if wait_init_code not in init_code:
    init_code.append(wait_init_code)
    terminate_code.append(wait_terminate_code)
code.append([wait_for_color_block_code])
id_to_pos_in_code[waitFor.id] = len(code) - 1
cur_node_is_processed = True
|
# Generated by Django 1.9.2 on 2016-03-01 09:56
from django.db import migrations, models
import tinymce.models
class Migration(migrations.Migration):
    """Initial migration: create ``TestModel`` with an auto PK and a TinyMCE HTML field."""
    initial = True
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="TestModel",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("content", tinymce.models.HTMLField(verbose_name="HTML Content")),
            ],
        )
    ]
|
# a = [17, 37, 907, 19, 23, 29, 653, 41, 13]
# start = 1000186
# while all(start % n != 0 for n in a):
# start += 1
# print(start, [start % n == 0 for n in a])
# print((1000194-1000186)*13)
# Advent of Code day 13: parse the bus schedule (second input line) into a
# {bus_id: offset_in_schedule} dict, skipping "x" placeholders.
with open("data/day13.txt") as f:
    lines = f.readlines()
print(lines)
# Strip the trailing newline: without it, a final "x\n" token would not
# equal "x" and would crash the int() conversion below.
lines = lines[1].strip()
d = {int(num): i for i, num in enumerate(lines.split(',')) if num != "x"}
print(d)
|
'''
EBA & EIPOA rendering extensions
(c) Copyright 2015 Acsone S. A., All rights reserved.
'''
from arelle.ModelValue import qname
import arelle.EbaUtil as EbaUtil
# QName of the find:fIndicators element from the filing-indicators extension namespace.
qnFindFilingIndicators = qname("{http://www.eurofiling.info/xbrl/ext/filing-indicators}find:fIndicators")
# Note: Load and Update filing indicators are spread in two different plugins so that the load can be used in non-GUI mode
def checkUpdateFilingIndicator(roledefinition, modelXbrl):
    '''
    Mark the filing indicator of the table(s) matching *roledefinition* as
    filed (True) when it is still undecided, and refresh the index tree view
    rows sharing that indicator.

    :type roledefinition: string
    :type modelXbrl: ModelXbrl
    :rtype (boolean)
    '''
    # Check whether the current table has a None filing indicator and if so, set it to True.
    # Bug fix: filingIndicatorDisplay used to be assigned only inside the
    # inner if, so the second loop raised NameError whenever the indicator
    # was already decided; it is now initialised and the view is only
    # refreshed when something actually changed.
    filingIndicatorDisplay = None
    for tableLabel, filingCode in modelXbrl.filingCodeByTableLabel.items():
        if roledefinition.startswith(filingCode):
            if filingCode not in modelXbrl.filingIndicatorByFilingCode or modelXbrl.filingIndicatorByFilingCode[filingCode] is None:
                filingIndicator = True
                modelXbrl.filingIndicatorByFilingCode[filingCode] = filingIndicator
                filingIndicatorDisplay = str(filingIndicator)
                EbaUtil.updateFilingIndicator(modelXbrl, filingCode, filingIndicator)
    if filingIndicatorDisplay is not None:
        # Continue looping since we may have more than one table per filing indicator.
        for tableLabel, filingCode in modelXbrl.filingCodeByTableLabel.items():
            if roledefinition.startswith(filingCode):
                treeRowId = modelXbrl.treeRowByTableLabel[tableLabel]
                modelXbrl.indexTableTreeView.set(treeRowId, 0, filingIndicatorDisplay)
    return True
def setFiling(viewtree, modelXbrl, filingIndicator):
    '''
    Store the tri-state filing indicator chosen from the tables-index menu
    and refresh every index row that shares the same filing code.

    :type viewtree: ViewTree
    :type modelXbrl: ModelXbrl
    :type filingIndicator: boolean
    :rtype boolean
    '''
    # Resolve the table label under the menu row; bail out silently if it
    # has no known filing code.
    item = viewtree.treeView.item(viewtree.menuRow)
    label = item.get('text')
    if label not in modelXbrl.filingCodeByTableLabel:
        return
    code = modelXbrl.filingCodeByTableLabel[label]
    if code not in modelXbrl.filingIndicatorByFilingCode:
        return
    display = getFilingIndicatorDisplay(filingIndicator)
    # Maintain the indicator value in the instance model.
    modelXbrl.filingIndicatorByFilingCode[code] = filingIndicator
    # Several tables can share one filing code: update each matching row.
    for tableLabel, fcode in modelXbrl.filingCodeByTableLabel.items():
        if fcode != code:
            continue
        rowId = modelXbrl.treeRowByTableLabel[tableLabel]
        viewtree.treeView.set(rowId, 0, display)
    EbaUtil.updateFilingIndicator(modelXbrl, code, filingIndicator)
    return True
def renderConcept(isModelTable, concept, conceptText, viewRelationshipSet, modelXbrl, conceptNode):
    '''
    Render-hook for one concept row of the EBA tables index: prefixes the
    label with the table (rc) code and displays/initialises the table's
    filing indicator in column 0.

    :type isModelTable: ViewTree
    :type concept: Concept
    :type conceptText: string
    :type viewRelationshipSet: ViewRelationshipSet
    :type modelXbrl: ModelXbrl
    :type conceptNode: ViewRelationshipSet
    :rtype boolean
    '''
    if not isModelTable:
        return True
    # in case we are rendering a table in a EBA document instance,
    # also prepare the filing indicator
    # Note: several table views can have the same filing indicator
    filingIndicator = None
    defaultENLanguage = "en"
    filingIndicatorCodeRole = "http://www.eurofiling.info/xbrl/role/filing-indicator-code";
    filingIndicatorCode = concept.genLabel(role=filingIndicatorCodeRole, lang=defaultENLanguage)
    if viewRelationshipSet.isEbaTableIndex:
        isModelTable = True  # NOTE(review): rebinds a local parameter only; appears to have no effect
        '''
        It is often desirable to see the table code, particularly for development.
        So, until we add a dedicated option, we always prefix the normal label by the
        table code.
        Note: The verbose label could possibly be used since it generally contains
        the table code as a prefix ("http://www.xbrl.org/2008/role/verboseLabel")
        However, it is often too verbose to be used in the table index.
        Note: For some taxonomies (e.g. Corep), the normal label already contains the
        table code, so avoid dup's
        '''
        prefix = concept.genLabel(lang=viewRelationshipSet.lang, strip=True, linkroleHint="http://www.eurofiling.info/xbrl/role/rc-code")
        if prefix:
            if not(conceptText.startswith(prefix)):
                conceptText = prefix + " " + conceptText
                viewRelationshipSet.treeView.item(conceptNode, text=conceptText)
        # show filing indicator
        if not filingIndicatorCode in modelXbrl.filingIndicatorByFilingCode:
            # First table seen for this code: register it as undecided (None).
            filingIndicator = None
            modelXbrl.filingIndicatorByFilingCode[filingIndicatorCode] = filingIndicator
        else:
            filingIndicator = modelXbrl.filingIndicatorByFilingCode[filingIndicatorCode]
        # Record the label<->code<->tree-row mappings used by the other hooks.
        modelXbrl.filingCodeByTableLabel[conceptText] = filingIndicatorCode
        viewRelationshipSet.treeView.set(conceptNode, 0, getFilingIndicatorDisplay(filingIndicator))
        modelXbrl.treeRowByTableLabel[conceptText] = conceptNode
        modelXbrl.indexTableTreeView = viewRelationshipSet.treeView
def getFilingIndicatorDisplay(filingIndicator):
    """Map a tri-state filing indicator to its display text.

    None (undecided) -> "", True -> "Yes", False -> "No".
    """
    if filingIndicator is None:
        return ""
    return "Yes" if filingIndicator else "No"
def saveNewFileFromGUI(cntlrWinMain):
    '''
    Force an immediate save of a freshly created EBA instance.

    :type cntlrWinMain: CntlrWinMain
    :rtype boolean
    '''
    # A model created via the "new EBA file" menu strangely appears not to be
    # based on an INSTANCE document model type, so save right away so the
    # user cannot forget (as indicated in the AREBA WIKI tricks and tips).
    return (True, cntlrWinMain.fileSave())
# Arelle plugin registration: maps rendering mount-point names to the
# callbacks defined above.
__pluginInfo__ = {
    'name': 'EBA Rendering extensions',
    'version': '1.1',
    'description': '''This plugin contains GUI extensions for EBA and EIOPA (update of filing indicators
or forced saving of XBRL instances at creation time)''',
    'license': 'Apache-2',
    'author': 'Acsone S. A.',
    'copyright': '(c) Copyright 2015 Acsone S. A.',
    # classes of mount points (required)
    'CntlrWinMain.Rendering.CheckUpdateFilingIndicator': checkUpdateFilingIndicator,
    'CntlrWinMain.Rendering.SetFilingIndicator': setFiling,
    'CntlrWinMain.Rendering.RenderConcept': renderConcept,
    'CntlrWinMain.Rendering.SaveNewFileFromGUI': saveNewFileFromGUI
}
|
import pandas as pd
import os
import shutil
import time
from . import pdf_export
import csv
def tf_history_convert(history):
    """Convert a tf.keras ``History`` object into a DataFrame.

    One column per recorded metric (keys of ``history.history``), one row
    per epoch.  Previously the DataFrame was built and then discarded; it
    is now returned.
    """
    lossData = pd.DataFrame(history.history)
    return lossData
def torch_history_convert(history):
    """Placeholder for converting a PyTorch training history; not implemented yet."""
    pass
def df_history_to_report(lossData,model_path,model_name,history,start,model):
    """Write a fresh Quality Control folder for a trained model.

    Recreates ``<model_path>/<model_name>/Quality Control``, dumps per-epoch
    loss / val_loss / learning-rate rows to ``training_evaluation.csv`` and
    prints the elapsed training time since ``start`` (a time.time() stamp).

    NOTE(review): the ``lossData`` and ``model`` parameters are unused here
    (the CSV is written from ``history`` directly) -- confirm intent.
    """
    # Start from a clean Quality Control folder.
    if os.path.exists(model_path+"/"+model_name+"/Quality Control"):
        shutil.rmtree(model_path+"/"+model_name+"/Quality Control")
    os.makedirs(model_path+"/"+model_name+"/Quality Control")
    # The training evaluation.csv is saved (overwrites the Files if needed).
    lossDataCSVpath = model_path+'/'+model_name+'/Quality Control/training_evaluation.csv'
    with open(lossDataCSVpath, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['loss','val_loss', 'learning rate'])
        # assumes history.history holds equally long 'loss', 'val_loss' and
        # 'lr' lists (keras with an LR-logging callback) -- TODO confirm
        for i in range(len(history.history['loss'])):
            writer.writerow([history.history['loss'][i], history.history['val_loss'][i], history.history['lr'][i]])
    # Displaying the time elapsed for training
    dt = time.time() - start
    mins, sec = divmod(dt, 60)
    hour, mins = divmod(mins, 60)
    print("Time elapsed:",hour, "hour(s)",mins,"min(s)",round(sec),"sec(s)")
def tf_model_export(model,model_name,model_description,patch_size,X_val,Use_pretrained_model,authors=["You"]):
    """Export a trained model in CSBDeep TF format and emit the QC PDF report.

    Uses the first validation image (channel 0) as the example image.
    NOTE(review): ``pdf_export`` is imported as a *module* at the top of this
    file (``from . import pdf_export``) yet is called like a function here --
    this would raise TypeError; probably intended as
    ``pdf_export.pdf_export(...)`` -- verify.
    """
    model.export_TF(name=model_name,
                    description=model_description,
                    authors=authors,
                    test_img=X_val[0,...,0], axes='YX',
                    patch_shape=(patch_size, patch_size))
    print("Your model has been sucessfully exported and can now also be used in the CSBdeep Fiji plugin")
    pdf_export(trained = True, pretrained_model = Use_pretrained_model)
def torch_model_export():
    """Placeholder for PyTorch model export; not implemented yet."""
    pass
|
import discord, sqlite3
from discord.ext import commands
import modules.member_helper as helper
import modules.sql_init as sqlinit
# Shared SQLite access object; its cursor (sql.cur) is used by the quote command.
sql = sqlinit.SQLInit()
class Quote():
    """Cog serving random quotes from the shared SQLite ``quotes`` table."""
    def __init__(self, bot):
        self.bot = bot
    @commands.command()
    async def quote(self, *, author : str = "Mitt Romney"):
        """Reply with a random quote by *author* as an embed, or an error embed."""
        try:
            sql.cur.execute("select author, quote from quotes where author = ? order by random() limit 1", [author])
            results = sql.cur.fetchone()
            if results is not None:
                try:
                    author = results[0]
                    quote = results[1]
                    embed = discord.Embed(description=quote, color=5025616)
                    embed.set_author(name=author)
                    await self.bot.say(embed=embed)
                # Narrowed from bare ``except:`` so KeyboardInterrupt/SystemExit
                # still propagate; the user-facing fallback is unchanged.
                except Exception:
                    embed = discord.Embed(title="Error", description="An unexpected error has occured. 😔", color=12000284)
                    await self.bot.say(embed=embed)
            else:
                embed = discord.Embed(title="Error", description="There is no quote for {0}. 🔍".format(author), color=12000284)
                await self.bot.say(embed=embed)
        except Exception:
            embed = discord.Embed(title="Error", description="An unexpected error has occured. 😔", color=12000284)
            await self.bot.say(embed=embed)
def setup(bot):
    """discord.py extension entry point: register the Quote cog on the bot."""
    bot.add_cog(Quote(bot))
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
import pytest
import hgvs.dataproviders.uta
import hgvs.location
import hgvs.parser
from hgvs.exceptions import HGVSError, HGVSDataNotAvailableError, HGVSInvalidIntervalError
from hgvs.alignmentmapper import AlignmentMapper
from hgvs.enums import Datum
from support import CACHE
@pytest.mark.quick
class Test_AlignmentMapper(unittest.TestCase):
    """Round-trip g<->n<->c coordinate-mapping tests for AlignmentMapper.

    Cases cover one- and two-exon transcripts on both strands, mapped
    against GRCh37 alignments served by the UTA data provider.
    """
    # Reference assembly name (used by the commented-out legacy cases below).
    ref = "GRCh37.p10"
    @classmethod
    def setUp(cls):
        # Connect to UTA (with local cache) and build a variant parser.
        # NOTE(review): decorated @classmethod but named setUp, so unittest
        # runs it (and reconnects) before *every* test; setUpClass would
        # connect once -- confirm before renaming.
        cls.hdp = hgvs.dataproviders.uta.connect(
            mode=os.environ.get("HGVS_CACHE_MODE", "run"), cache=CACHE)
        cls.parser = hgvs.parser.Parser()
    def test_alignmentmapper_failures(self):
        """Bogus accessions/methods raise HGVSDataNotAvailableError;
        out-of-range positions raise HGVSInvalidIntervalError."""
        with self.assertRaises(HGVSDataNotAvailableError):
            AlignmentMapper(self.hdp, tx_ac="bogus", alt_ac="NM_033089.6", alt_aln_method="splign")
        with self.assertRaises(HGVSDataNotAvailableError):
            AlignmentMapper(
                self.hdp, tx_ac="bogus", alt_ac="NM_033089.6", alt_aln_method="transcript")
        with self.assertRaises(HGVSDataNotAvailableError):
            AlignmentMapper(self.hdp, tx_ac="NM_033089.6", alt_ac="bogus", alt_aln_method="splign")
        with self.assertRaises(HGVSDataNotAvailableError):
            AlignmentMapper(
                self.hdp, tx_ac="NM_000051.3", alt_ac="NC_000011.9", alt_aln_method="bogus")
        with self.assertRaises(HGVSInvalidIntervalError):
            AlignmentMapper(self.hdp, 'NM_000348.3', 'NC_000002.11', 'splign').n_to_g(
                self.parser.parse_n_interval("-1"))
        with self.assertRaises(HGVSInvalidIntervalError):
            AlignmentMapper(self.hdp, 'NM_000348.3', 'NC_000002.11', 'splign').n_to_c(
                self.parser.parse_n_interval("-1"))
        with self.assertRaises(HGVSInvalidIntervalError):
            AlignmentMapper(self.hdp, 'NM_000348.3', 'NC_000002.11', 'splign').c_to_n(
                self.parser.parse_c_interval("99999"))
    def test_alignmentmapper_AlignmentMapper_LCE3C_uncertain(self):
        """Use NM_178434.2 tests to test mapping with uncertain positions"""
        tx_ac = "NM_178434.2"
        alt_ac = "NC_000001.10"
        tm = AlignmentMapper(self.hdp, tx_ac, alt_ac, alt_aln_method="splign")
        parser = hgvs.parser.Parser()
        test_cases = [
            # ? is not yet supported
            # {"g": parser.parse_g_interval("(?_152573139)"), "n": parser.parse_n_interval("(?_2)"), "c": parser.parse_c_interval("(?_-69)")},
            # {"g": parser.parse_g_interval("(152573138_?)"), "n": parser.parse_n_interval("(1_?)"), "c": parser.parse_c_interval("(-70_?)")},
        ]
        self.run_cases(tm, test_cases)
    def test_alignmentmapper_AlignmentMapper_LCE3C(self):
        """NM_178434.2: LCE3C single exon, strand = +1, all coordinate input/output are in HGVS"""
        tx_ac = "NM_178434.2"
        alt_ac = "NC_000001.10"
        tm = AlignmentMapper(self.hdp, tx_ac, alt_ac, alt_aln_method="splign")
        parser = hgvs.parser.Parser()
        test_cases = [
            # 5'
            {
                "g": parser.parse_g_interval("152573138"),
                "n": parser.parse_n_interval("1"),
                "c": parser.parse_c_interval("-70")
            },
            {
                "g": parser.parse_g_interval("152573140"),
                "n": parser.parse_n_interval("3"),
                "c": parser.parse_c_interval("-68")
            },
            # cds
            {
                "g": parser.parse_g_interval("152573207"),
                "n": parser.parse_n_interval("70"),
                "c": parser.parse_c_interval("-1")
            },
            {
                "g": parser.parse_g_interval("152573208"),
                "n": parser.parse_n_interval("71"),
                "c": parser.parse_c_interval("1")
            },
            # 3'
            {
                "g": parser.parse_g_interval("152573492"),
                "n": parser.parse_n_interval("355"),
                "c": parser.parse_c_interval("285")
            },
            {
                "g": parser.parse_g_interval("152573493"),
                "n": parser.parse_n_interval("356"),
                "c": parser.parse_c_interval("*1")
            },
            {
                "g": parser.parse_g_interval("152573560"),
                "n": parser.parse_n_interval("423"),
                "c": parser.parse_c_interval("*68")
            },
            {
                "g": parser.parse_g_interval("152573562"),
                "n": parser.parse_n_interval("425"),
                "c": parser.parse_c_interval("*70")
            },
        ]
        self.run_cases(tm, test_cases)
    def test_alignmentmapper_AlignmentMapper_HIST3H2A(self):
        """NM_033445.2: LCE3C single exon, strand = -1, all coordinate input/output are in HGVS"""
        tx_ac = "NM_033445.2"
        alt_ac = "NC_000001.10"
        tm = AlignmentMapper(self.hdp, tx_ac, alt_ac, alt_aln_method="splign")
        parser = hgvs.parser.Parser()
        test_cases = [
            # 3'
            {
                "g": parser.parse_g_interval("228645560"),
                "n": parser.parse_n_interval("1"),
                "c": parser.parse_c_interval("-42")
            },
            {
                "g": parser.parse_g_interval("228645558"),
                "n": parser.parse_n_interval("3"),
                "c": parser.parse_c_interval("-40")
            },
            # cds
            {
                "g": parser.parse_g_interval("228645519"),
                "n": parser.parse_n_interval("42"),
                "c": parser.parse_c_interval("-1")
            },
            {
                "g": parser.parse_g_interval("228645518"),
                "n": parser.parse_n_interval("43"),
                "c": parser.parse_c_interval("1")
            },
            # 5'
            {
                "g": parser.parse_g_interval("228645126"),
                "n": parser.parse_n_interval("435"),
                "c": parser.parse_c_interval("393")
            },
            {
                "g": parser.parse_g_interval("228645125"),
                "n": parser.parse_n_interval("436"),
                "c": parser.parse_c_interval("*1")
            },
            {
                "g": parser.parse_g_interval("228645124"),
                "n": parser.parse_n_interval("437"),
                "c": parser.parse_c_interval("*2")
            },
            {
                "g": parser.parse_g_interval("228645065"),
                "n": parser.parse_n_interval("496"),
                "c": parser.parse_c_interval("*61")
            },
        ]
        self.run_cases(tm, test_cases)
    def test_alignmentmapper_AlignmentMapper_LCE2B(self):
        """NM_014357.4: LCE2B, two exons, strand = +1, all coordinate input/output are in HGVS"""
        tx_ac = "NM_014357.4"
        alt_ac = "NC_000001.10"
        tm = AlignmentMapper(self.hdp, tx_ac, alt_ac, alt_aln_method="splign")
        parser = hgvs.parser.Parser()
        test_cases = [
            # 5'
            {
                "g": parser.parse_g_interval("152658599"),
                "n": parser.parse_n_interval("1"),
                "c": parser.parse_c_interval("-54")
            },
            {
                "g": parser.parse_g_interval("152658601"),
                "n": parser.parse_n_interval("3"),
                "c": parser.parse_c_interval("-52")
            },
            # cds
            {
                "g": parser.parse_g_interval("152659319"),
                "n": parser.parse_n_interval("54"),
                "c": parser.parse_c_interval("-1")
            },
            {
                "g": parser.parse_g_interval("152659320"),
                "n": parser.parse_n_interval("55"),
                "c": parser.parse_c_interval("1")
            },
            # around end of exon 1
            {
                "g": parser.parse_g_interval("152658632"),
                "n": parser.parse_n_interval("34"),
                "c": parser.parse_c_interval("-21")
            },
            {
                "g": parser.parse_g_interval("152658633"),
                "n": parser.parse_n_interval("34+1"),
                "c": parser.parse_c_interval("-21+1")
            },
            # span
            {
                "g": parser.parse_g_interval("152658633_152659299"),
                "n": parser.parse_n_interval("34+1_35-1"),
                "c": parser.parse_c_interval("-21+1_-20-1")
            },
            # around beginning of exon 2
            {
                "g": parser.parse_g_interval("152659300"),
                "n": parser.parse_n_interval("35"),
                "c": parser.parse_c_interval("-20")
            },
            {
                "g": parser.parse_g_interval("152659299"),
                "n": parser.parse_n_interval("35-1"),
                "c": parser.parse_c_interval("-20-1")
            },
            # around end of exon 2
            {
                "g": parser.parse_g_interval("152659652"),
                "n": parser.parse_n_interval("387"),
                "c": parser.parse_c_interval("333")
            },
            {
                "g": parser.parse_g_interval("152659653"),
                "n": parser.parse_n_interval("388"),
                "c": parser.parse_c_interval("*1")
            },
            # span
            {
                "g": parser.parse_g_interval("152659651_152659654"),
                "n": parser.parse_n_interval("386_389"),
                "c": parser.parse_c_interval("332_*2")
            },
            # 3'
            {
                "g": parser.parse_g_interval("152659877"),
                "n": parser.parse_n_interval("612"),
                "c": parser.parse_c_interval("*225")
            },
        ]
        self.run_cases(tm, test_cases)
    def test_alignmentmapper_AlignmentMapper_PTH2(self):
        """NM_178449.3: PTH2, two exons, strand = -1, all coordinate input/output are in HGVS"""
        tx_ac = "NM_178449.3"
        alt_ac = "NC_000019.9"
        tm = AlignmentMapper(self.hdp, tx_ac, alt_ac, alt_aln_method="splign")
        parser = hgvs.parser.Parser()
        test_cases = [
            # 3'
            {
                "g": parser.parse_g_interval("49926698"),
                "n": parser.parse_n_interval("1"),
                "c": parser.parse_c_interval("-102")
            },
            # cds
            {
                "g": parser.parse_g_interval("49926597"),
                "n": parser.parse_n_interval("102"),
                "c": parser.parse_c_interval("-1")
            },
            {
                "g": parser.parse_g_interval("49926596"),
                "n": parser.parse_n_interval("103"),
                "c": parser.parse_c_interval("1")
            },
            # around end of exon 1
            {
                "g": parser.parse_g_interval("49926469"),
                "n": parser.parse_n_interval("230"),
                "c": parser.parse_c_interval("128")
            },
            {
                "g": parser.parse_g_interval("49926468"),
                "n": parser.parse_n_interval("230+1"),
                "c": parser.parse_c_interval("128+1")
            },
            # span
            {
                "g": parser.parse_g_interval("49925901_49926467"),
                "n": parser.parse_n_interval("230+2_231-2"),
                "c": parser.parse_c_interval("128+2_129-2")
            },
            # around beginning of exon 2
            {
                "g": parser.parse_g_interval("49925900"),
                "n": parser.parse_n_interval("231-1"),
                "c": parser.parse_c_interval("129-1")
            },
            {
                "g": parser.parse_g_interval("49925899"),
                "n": parser.parse_n_interval("231"),
                "c": parser.parse_c_interval("129")
            },
            # around end of exon 2
            {
                "g": parser.parse_g_interval("49925725"),
                "n": parser.parse_n_interval("405"),
                "c": parser.parse_c_interval("303")
            },
            {
                "g": parser.parse_g_interval("49925724"),
                "n": parser.parse_n_interval("406"),
                "c": parser.parse_c_interval("*1")
            },
            {
                "g": parser.parse_g_interval("49925671"),
                "n": parser.parse_n_interval("459"),
                "c": parser.parse_c_interval("*54")
            },
        ]
        self.run_cases(tm, test_cases)
    def run_cases(self, tm, test_cases):
        """Assert all six pairwise g/n/c conversions for every test case."""
        for test_case in test_cases:
            self.assertEqual(tm.g_to_n(test_case["g"]), test_case["n"])
            self.assertEqual(tm.n_to_g(test_case["n"]), test_case["g"])
            self.assertEqual(tm.n_to_c(test_case["n"]), test_case["c"])
            self.assertEqual(tm.c_to_n(test_case["c"]), test_case["n"])
            self.assertEqual(tm.g_to_c(test_case["g"]), test_case["c"])
            self.assertEqual(tm.c_to_g(test_case["c"]), test_case["g"])
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
# TODO: Reintegrate older tests, especially those with indels
# harder tests ###
#def test_alignmentmapper_AlignmentMapper_1_ZCCHC3(self):
# """
# reece=> select * from uta.tx_info where ac="NM_033089.6";
# gene | strand | ac | cds_start_i | cds_end_i | descr | summary
# --------+--------+-------------+-------------+-----------+---------------------------------------+---------
# ZCCHC3 | 1 | NM_033089.6 | 24 | 1236 | zinc finger, CCHC domain containing 3 |
#
# reece=> select * from uta.tx_exons where ac="NM_033089.6";
# ac | ord | name | t_start_i | t_end_i | ref | g_start_i | g_end_i | cigar |
# -------------+-----+------+-----------+---------+------------+-----------+---------+-------------+------------------------
# NM_033089.6 | 1 | 1 | 0 | 2759 | GRCh37.p10 | 278203 | 280965 | 484M3D2275M | GGAGGATGCTGGGAAGGAGGTAA
# """
# # http://tinyurl.com/mattx8u
# #
# # Around the deletion
# # http://tinyurl.com/jwt3txg
# # 687 690
# # C | C G G | C
# # \___ ___/
# # C | C
# # 484
#
# ### Add one to g., r., and c. because we are returning hgvs coordinates ###
# ac = "NM_033089.6"
# tm = AlignmentMapper(self.hdp, ac, self.ref)
# cds = 24 + 1 # hgvs
# # gs, ge = genomic start/end; rs,re = rna start/end; cs, ce = cdna start/end; so, eo = start offset/end offset
# test_cases = [
# {"gs": 278204, "ge": 278204, "rs": 1, "re": 1, "so": 0, "eo": 0, "d": Datum.SEQ_START, "cs": 1-cds, "ce": 1-cds},
# {"gs": 278214, "ge": 278214, "rs": 11, "re": 11, "so": 0, "eo": 0, "d": Datum.SEQ_START, "cs": 11-cds, "ce": 11-cds},
# {"gs": 278204, "ge": 278214, "rs": 1, "re": 11, "so": 0, "eo": 0, "d": Datum.SEQ_START, "cs": 1-cds, "ce": 11-cds},
#
# # around cds (cds can"t be zero)
# {"gs": 278227, "ge": 278227, "rs": 24, "re": 24, "so": 0, "eo": 0, "d": Datum.SEQ_START, "cs": 24-cds, "ce": 24-cds},
#
# # beyond cds add 1 due to hgvs
# {"gs": 278228, "ge": 278228, "rs": 25, "re": 25, "so": 0, "eo": 0, "d": Datum.SEQ_START, "cs": 25-cds+1, "ce": 25-cds+1},
# {"gs": 278229, "ge": 278229, "rs": 26, "re": 26, "so": 0, "eo": 0, "d": Datum.SEQ_START, "cs": 26-cds+1, "ce": 26-cds+1},
# {"gs": 280966, "ge": 280966, "rs": 2760, "re": 2760, "so": 0, "eo": 0, "d": Datum.SEQ_START, "cs": 2760-cds+1, "ce": 2760-cds+1},
# {"gs": 278687, "ge": 278687, "rs": 484, "re": 484, "so": 0, "eo": 0, "d": Datum.SEQ_START, "cs": 484-cds+1, "ce": 484-cds+1},
# {"gs": 278687, "ge": 278688, "rs": 484, "re": 485, "so": 0, "eo": 0, "d": Datum.SEQ_START, "cs": 484-cds+1, "ce": 485-cds+1},
# {"gs": 278688, "ge":278691, "rs": 485, "re": 485, "so": 0, "eo": 0, "d": Datum.SEQ_START, "cs": 485-cds+1, "ce": 485-cds+1},
#
# # around cds_start (24) and cds_end (1236), mindful of *coding* del (3D)
# {"gs": 278204+24, "ge": 278204+1236, "rs": 25, "re": 1237-3, "so": 0, "eo": 0, "d": Datum.SEQ_START, "cs": 25-cds+1, "ce": 1237-cds-3+1},
#     {"gs": 280956, "ge": 280966, "rs": 2750, "re": 2760, "so": 0, "eo": 0, "d": Datum.SEQ_START, "cs": 2750-cds+1, "ce": 2760-cds+1},
# ]
# self.run_cases(tm, test_cases)
#
#def test_alignmentmapper_AlignmentMapper_2_MCL1(self):
# """
# reece=> select * from uta.tx_info where ac="NM_182763.2";
# gene | strand | ac | cds_start_i | cds_end_i | descr |
# ------+--------+-------------+-------------+-----------+-------------------------------------------------+----------------
# MCL1 | -1 | NM_182763.2 | 208 | 1024 | myeloid cell leukemia sequence 1 (BCL2-related) | This gene encod
#
# reece=> select * from uta.tx_exons where ac="NM_182763.2";
# ac | ord | name | t_start_i | t_end_i | ref | g_start_i | g_end_i | cigar |
# -------------+-----+------+-----------+---------+------------+-----------+-----------+--------------+---------------------
# NM_182763.2 | 1 | 1b | 0 | 896 | GRCh37.p10 | 150551318 | 150552214 | 896M |
# NM_182763.2 | 2 | 3 | 896 | 3841 | GRCh37.p10 | 150547026 | 150549967 | 1077M4I1864M | GATGGGTTTGTGGAGTTCTT
# """
#
# ### Add one to g., r., and c. because we are returning hgvs coordinates ###
#
# ac = "NM_182763.2"
# tm = AlignmentMapper(self.hdp, ac, self.ref)
# cds = 208 + 1 # hgvs
# test_cases = [
# {"gs": 150552215, "ge": 150552215, "rs": 1, "re": 1, "so": 0, "eo": 0, "d": Datum.SEQ_START , "cs": 1-cds, "ce": 1-cds},
# {"gs": 150552214, "ge": 150552214, "rs": 2, "re": 2, "so": 0, "eo": 0, "d": Datum.SEQ_START , "cs": 2-cds, "ce": 2-cds},
#
# # beyond cds add 1 due to hgvs
# {"gs": 150552007, "ge": 150552007, "rs": 209, "re": 209, "so": 0, "eo": 0, "d": Datum.SEQ_START , "cs": 209-cds+1, "ce": 209-cds+1},
# {"gs": 150547027, "ge": 150547027, "rs": 3842, "re": 3842, "so": 0, "eo": 0, "d": Datum.SEQ_START , "cs": 3842-cds+1, "ce": 3842-cds+1},
#
# #{"gs": 150549968, "ge": 150549968, "rs": 897, "re": 897, "so": 0, "eo": 0, "d": Datum.SEQ_START , "cs": 897-cds+1, "ce": 897-cds+1},
# {"gs": 150551318, "ge": 150551318, "rs": 897, "re": 897, "so": 1, "eo": 1, "d": Datum.SEQ_START , "cs": 897-cds+1, "ce": 897-cds+1},
# {"gs": 150551318, "ge": 150551319, "rs": 897, "re": 897, "so": 1, "eo": 0, "d": Datum.SEQ_START , "cs": 897-cds+1, "ce": 897-cds+1},
# {"gs": 150551317, "ge": 150551318, "rs": 897, "re": 897, "so": 2, "eo": 1, "d": Datum.SEQ_START , "cs": 897-cds+1, "ce": 897-cds+1},
# {"gs": 150549968, "ge": 150549969, "rs": 897, "re": 897, "so": 0, "eo": -1, "d": Datum.SEQ_START , "cs": 897-cds+1, "ce": 897-cds+1},
# {"gs": 150549969, "ge": 150549970, "rs": 897, "re": 897, "so": -1, "eo": -2, "d": Datum.SEQ_START , "cs": 897-cds+1, "ce": 897-cds+1},
#
# # exon 2, 4nt insertion ~ r.2760
# # See http://tinyurl.com/mwegybw
# # The coords of this indel via NW alignment differ from those at NCBI, but are the same canonicalized
# # variant. Nothing to do about that short of running Splign ourselves. Test a few examples.
# {"gs": 150548892, "ge": 150548892, "rs": 1973, "re": 1973, "so": 0, "eo":0, "d": Datum.SEQ_START , "cs": 1973-cds+1, "ce": 1973-cds+1},
# #? {"gs": 150548891, "ge": 150548892, "rs": 1972, "re": 1973, "so": 0, "eo":0, "d": Datum.SEQ_START , "cs": 1972-cds+1, "ce": 1973-cds+1},
# {"gs": 150548890, "ge": 150548892, "rs": 1973, "re": 1979, "so": 0, "eo":0, "d": Datum.SEQ_START , "cs": 1973-cds+1, "ce": 1979-cds+1},
# ]
# self.run_cases(tm, test_cases)
#
# ## exon 2, 4nt insertion ~ r.2760
# ## See http://tinyurl.com/mwegybw
# ## The coords of this indel via NW alignment differ from those at
# ## NCBI, but are the same canonicalized variant. Nothing to do
# ## about that short of running Splign ourselves.
# #self.assertEqual(tm.n_to_g(1972, 1972), (150548891, 150548891))
# #self.assertEqual(tm.n_to_g(1972, 1973), (150548890, 150548891))
# #self.assertEqual(tm.n_to_g(1972, 1974), (150548890, 150548891))
# #self.assertEqual(tm.n_to_g(1972, 1975), (150548890, 150548891))
# #self.assertEqual(tm.n_to_g(1972, 1976), (150548890, 150548891))
# #self.assertEqual(tm.n_to_g(1972, 1977), (150548890, 150548891))
# #self.assertEqual(tm.n_to_g(1972, 1978), (150548889, 150548891))
# #
# #self.assertEqual(tm.g_to_n(150548891, 150548891), (1972, 1972, 0, 0))
# #self.assertEqual(tm.g_to_n(150548890, 150548891), (1972, 1973, 0, 0))
# #self.assertEqual(tm.g_to_n(150548889, 150548891), (1972, 1978, 0, 0))
# #
# ## around cds_start (208) and cds_end (1024), mindful of *non-coding* ins (4I)
# ## i.e., we *don't* need to account for the 4nt insertion here
# #self.assertEqual(tm.n_to_c(208, 1024), (0, 1024 - 208, 0, 0))
# #self.assertEqual(tm.c_to_n(0, 1024 - 208), (208, 1024, 0, 0))
# #self.assertEqual(tm.g_to_c(150552214 - 208, 150552214 - 208), (0, 0, 0, 0))
# #self.assertEqual(tm.c_to_g(0, 0), (150552214 - 208, 150552214 - 208))
# ## cds_end is in 2nd exon
# #self.assertEqual(tm.g_to_c(150549967 - (1024 - 896), 150549967 - (1024 - 896)), (1024 - 208, 1024 - 208, 0, 0))
# #self.assertEqual(tm.c_to_g(1024 - 208, 1024 - 208), (150549967 - (1024 - 896), 150549967 - (1024 - 896)))
#
#
#def test_alignmentmapper_AlignmentMapper_3_IFI27L1(self):
# """
# #reece=> select * from uta.tx_info where ac="NM_145249.2";
# # gene | chr | strand | ac | cds_start_i | cds_end_i | descr | summary
# #---------+-----+--------+-------------+-------------+-----------+-----------------------------------------------+---------
# # IFI27L1 | 14 | 1 | NM_145249.2 | 254 | 569 | interferon, alpha-inducible protein 27-like 1 |
# #(1 row)
# # reece=>select * from uta.tx_exons where ac = "NM_145249.2";
# #
# # ac | ord | name | t_start_i | t_end_i | ref | g_start_i | g_end_i | g_cigar | g_seq_a | t_seq_a
# # -------------+-----+------+-----------+---------+------------+-----------+----------+---------+---------+---------
# # NM_145249.2 | 1 | 1 | 0 | 157 | GRCh37.p10 | 94547638 | 94547795 | 157M | |
# # NM_145249.2 | 2 | 2a | 157 | 282 | GRCh37.p10 | 94563186 | 94563311 | 125M | |
# # NM_145249.2 | 3 | 3 | 282 | 315 | GRCh37.p10 | 94567084 | 94567117 | 33M | |
# # NM_145249.2 | 4 | 4 | 315 | 477 | GRCh37.p10 | 94568159 | 94568321 | 162M | |
# # NM_145249.2 | 5 | 5 | 477 | 715 | GRCh37.p10 | 94568822 | 94569060 | 238M | |
# """
#
# ### Add one to g., r., and c. because we are returning hgvs coordinates ###
#
# ac = "NM_145249.2"
# tm = AlignmentMapper(self.hdp, ac, self.ref)
# cds = 254 + 1 # hgvs
# test_cases = [
# #{"gs": 94547639, "ge": 94547639, "rs": 1, "re": 1, "so": 0, "eo": 0, "d": Datum.SEQ_START, "cs": 1-cds, "ce": 1-cds},
# #{"gs": 94547796, "ge": 94547796, "rs": 158, "re": 158, "so": 0, "eo": 0, "d": Datum.SEQ_START, "cs": 158-cds, "ce": 158-cds},
# #{"gs": 94563185, "ge": 94563185, "rs": 159, "re": 159, "so": -2, "eo": -2, "d": Datum.SEQ_START, "cs": 159-cds, "ce": 159-cds},
#
# # beyond cds add 1 due to hgvs
# #{"gs": 94567118, "ge": 94567120, "rs": 316, "re": 316, "so": 0, "eo": 2, "d": Datum.SEQ_START, "cs": 316-cds+1, "ce": 316-cds+1},
# {"gs": 94567115, "ge": 94567118, "rs": 313, "re": 316, "so": 0, "eo": 0, "d": Datum.SEQ_START, "cs": 313-cds+1, "ce": 316-cds+1},
#
# # intron in the middle between exon 1 and 2
# #{"gs": 94555500, "ge": 94555501, "rs": 157, "re": 158, "so": 7686, "eo": -7685, "d": Datum.SEQ_START, "cs": 157-cds+1, "ce": 158-cds+1},
# #{"gs": 94555481, "ge": 94555501, "rs": 157, "re": 158, "so": 7686, "eo": -7685, "d": Datum.SEQ_START, "cs": 157-cds+1, "ce": 158-cds+1},
# ]
# self.run_cases(tm, test_cases)
# ANOTHER POSSIBLE TEST CASE ###
# reece=> select * from uta.tx_info where ac = "NM_145171.3";
# gene | strand | ac | cds_start_i | cds_end_i | descr | summary
# -------+--------+-------------+-------------+-----------+-----------------------------+-----------------------------------
# GPHB5 | -1 | NM_145171.3 | 57 | 450 | glycoprotein hormone beta 5 | GPHB5 is a cystine knot-forming...
#
# reece=> select * from uta.tx_exons where ac = "NM_145171.3" order by g_start_i;
# ac | ord | name | t_start_i | t_end_i | ref | g_start_i | g_end_i | cigar | g_seq_a
# -------------+-----+------+-----------+---------+------------+-----------+----------+-----------+-------------------------
# NM_145171.3 | 3 | 3 | 261 | 543 | GRCh37.p10 | 63779548 | 63779830 | 282M |
# NM_145171.3 | 2 | 2 | 56 | 261 | GRCh37.p10 | 63784360 | 63784564 | 156M1I48M | CATGAAGCTGGCATTCCTCTT...
# NM_145171.3 | 1 | 1 | 0 | 56 | GRCh37.p10 | 63785537 | 63785593 | 56M |
# def test_alignmentmapper_AlignmentMapper_GPHB5(self):
# ac = "NM_145171.3"
# tm = AlignmentMapper(self.hdp,ac,self.ref)
# pass
# <LICENSE>
# Copyright 2018 HGVS Contributors (https://github.com/biocommons/hgvs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# </LICENSE>
|
"""
Starting with a docstring in this
practice file
"""
# BUG FIX: the opening triple quote of the module docstring had been
# corrupted to `###`, which left the two prose lines above as bare
# (syntactically invalid) statements.

# Module-level greeting used by the demo call below.
a = "Hello, world!"


def print_var(a):
    """Print the given value to stdout."""
    print(a)


print_var(a)
|
from django.contrib.auth.models import Group
from .base import Component
class Groups(Component):
    """Bootstrap component that creates auth Groups and attaches users."""

    field_name = "groups"

    def parse(self):
        # For every entry in the raw bootstrap data: create the group,
        # add each listed user to it, then register it under its key.
        for key, payload in self.raw_data.items():
            created = Group.objects.create(name=payload["name"])
            for member in payload["users"]:
                self.bootstrap.users[member].groups.add(created)
            self.data[key] = created
|
from package.puzzle_generator import *
def main():
    """Build a collection of knights/knaves/monks logic puzzles and print
    the solutions for the last one (p27).

    Each ``Puzzle`` maps a speaker's name to the statement(s) that speaker
    makes; the statement combinators (``Biconditional``, ``IfConnective``,
    ``CountOfType``, ...) come from ``package.puzzle_generator``.

    NOTE(review): only p27 is actually solved/printed at the end; the
    earlier puzzles are constructed but otherwise unused here.
    """
    p1 = Puzzle({
        'A': [
            Biconditional(
                DisjunctiveStatement(  # uncertain
                    IsOfType('B', Knight),
                    IsOfType('C', Knight),
                    IsOfType('B', Monk),
                    IsOfType('C', Monk),
                ),
                IsOfType('E', Knave),
            ),
            IsOfType('A', Monk),
        ],
        'B': [
            CountOfTypes(Knight, Knave, operator.eq),
            IsSameAs('A', 'B'),
        ],
        'C': [
            Biconditional(
                IsOfType('C', Monk),
                IsSameAs('B', 'D'),
            ),
            ConjunctiveStatement(
                IsOfType('A', Knight),
                IsOfType('E', Knight),
            )
        ],
        'D': [
            IsOfType('D', Monk),
            IfConnective(
                Not(IsOfType('A', Monk)),
                IsOfType('B', Knight),
            ),
        ],
        'E': IfConnective(
            IsOfType('D', Knave),
            CountOfType(Monk, 2, operator.eq),
        ),
    })
    # p2: statements a/b/c are shared (aliased) between speakers; D claims
    # that at least two of them hold.
    a = AllTheSame()
    b = Honesty('A', 'E', operator.gt)
    c = IsSameAs('C', 'A')
    p2 = Puzzle({
        'A': a,
        'B': b,
        'C': c,
        'D': DisjunctiveStatement(
            ConjunctiveStatement(a, b),
            ConjunctiveStatement(a, c),
            ConjunctiveStatement(b, c),
        ),
        'E': IsOfType('E', Knave),
    })
    p4 = Puzzle({
        'A': [
            CountOfType(Knight, 2, operator.le),
            CountOfType(Knave, 2, operator.lt),
        ],
        'B': [
            Honesty('B', 'A', operator.eq),
            CountOfType(Knave, 1, operator.ge),
        ],
        'C': [
            IsOfType('B', Monk),
            DisjunctiveStatement(
                IsOfType('D', Monk),
                IsOfType('E', Monk),
            ),
        ],
        'D': Biconditional(
            IsOfType('D', Monk),
            IsOfType('E', Knave),
        ),
        'E': Biconditional(
            IsOfType('E', Monk),
            IsOfType('A', Knight),
        ),
    })
    p5 = Puzzle({
        'A': [
            CountOfType(Knight, 3, operator.eq),
            IsOfType('B', Knight),
        ],
        'B': [
            CountOfType(Monk, 1, operator.ge),
            Not(IsOfType('A', Knight)),
        ],
        'C': [
            CountOfType(Knave, 0, operator.eq),
            CountOfType(Monk, 2, operator.ge),
        ],
        'D': [
            ExclusiveOrConnective(
                IsOfType('D', Knight),
                IsOfType('B', Monk),
            ),
            Honesty('B', 'D', operator.lt),
        ],
        'E': CountOfType(Knave, 1, operator.eq),
        'F': CountOfType(Knight, 2, operator.le),  # uncertain
    })

    def remainder_by_2_equals(a, b):
        # Parity helper used as a CountOfType comparator:
        # True when a mod 2 == b.
        return operator.mod(a, 2) == b

    p6 = Puzzle({
        'A': ConjunctiveStatement(
            IsOfType('B', Knight),
            IsOfType('C', Knight),
        ),
        'B': [
            CountOfType(Knight, 0, remainder_by_2_equals),
            IsOfType('A', Knave),
        ],
        'C': [
            Honesty('C', 'A', operator.gt),
            Honesty('B', 'A', operator.gt),
        ],
    })
    p8 = Puzzle({
        'Karen': [
            IfConnective(
                IsOfType('Thomas', Knave),
                Honesty('Karen', 'Perry', operator.gt),
            ),
            Not(IsSameAs('Perry', 'Thomas')),
        ],
        'Perry': [
            IfConnective(
                CountOfType(Monk, 1, operator.ge),
                CountOfType(Knight, 1, remainder_by_2_equals),
            ),
            CountOfTypes(Knave, Knight, operator.gt),
        ],
        'Thomas': IfConnective(
            CountOfType(Knave, 0, remainder_by_2_equals),
            Not(IsOfType('Thomas', Knave)),
        ),
    })
    # p9: statement c1 is shared by speakers C and D.
    c1 = IsSameAs('A', 'E')
    p9 = Puzzle({
        'A': [
            Biconditional(
                IsOfType('A', Monk),
                CountOfType(Monk, 0, remainder_by_2_equals),
            ),
        ],
        'B': [
            Biconditional(
                IsOfType('A', Knight),
                CountOfType(Knight, 0, remainder_by_2_equals),
            ),
            Honesty('C', 'A', operator.gt),
        ],
        'C': [
            c1,
            Honesty('A', 'B', operator.gt),
        ],
        'D': [
            c1,
            IfConnective(
                IsOfType('E', Knave),
                IsOfType('A', Knave),
            ),
        ],
        'E': [
            Biconditional(
                IsOfType('B', Knave),
                CountOfType(Knave, 0, remainder_by_2_equals),
            ),
            IfConnective(
                IsOfType('A', Knight),
                IsOfType('D', Monk),
            ),
        ],
    })
    p13 = Puzzle({
        'A': Biconditional(
            Honesty('A', 'D', operator.gt),
            Honesty('D', 'C', operator.gt),
        ),
        'B': IsOfType('D', Knight),
        'C': IfConnective(
            Honesty('A', 'C', operator.gt),
            CountOfType(Knave, 1, remainder_by_2_equals)
        ),
        'D': ConjunctiveStatement(
            Not(IsSameAs('D', 'B')),
            Not(IsOfType('B', Monk)),
        ),
    })
    p14 = Puzzle({
        'Ned': CountOfType(Knight, 0, remainder_by_2_equals),
        'Chandler': Honesty('Zoe', 'Chandler', operator.ge),
        'Zoe': CountOfType(Knight, 1, remainder_by_2_equals),
        'Ewa': Honesty('Ewa', 'Zoe', operator.gt),
    })
    p18 = Puzzle({
        'A': CountOfType(Monk, 0, operator.eq),
        'B': [
            ConjunctiveStatement(
                IfConnective(
                    IsOfType('B', Knight),
                    CountOfType(Knight, 1, operator.eq),
                ),
                IfConnective(
                    IsOfType('B', Monk),
                    CountOfType(Monk, 1, operator.eq),
                ),
                IfConnective(
                    IsOfType('B', Knave),
                    CountOfType(Knave, 1, operator.eq),
                ),
            ),
            Not(IsOfType('D', Monk)),
        ],
        'C': CountOfType(Knight, 0, operator.eq),
        'D': DisjunctiveStatement(
            IsOfType('A', Monk),
            IsOfType('D', Knave),
        )
    })
    p19 = Puzzle({
        'A': [
            Honesty('C', 'B', operator.gt),
            IfConnective(
                Honesty('B', 'A', operator.gt),
                IsOfType('B', Monk),
            ),
            Honesty('A', 'C', operator.gt),
        ],
        'B': [
            Honesty('B', 'A', operator.gt),
            Honesty('A', 'C', operator.gt),
            Not(IsOfType('C', Knave)),
        ],
        'C': [
            Honesty('A', 'B', operator.gt),
            Not(Honesty('B', 'A', operator.gt)),
        ],
    })
    p20 = Puzzle({
        'A': [
            CountOfType(Knave, 2, operator.eq),
            Not(IsOfType('B', Knave)),
        ],
        'B': [
            CountOfType(Knight, 2, operator.eq),
        ],
        'C': [
            Honesty('B', 'A', operator.gt),
            IsOfType('A', Knight),
        ]
    })
    p22 = Puzzle({
        'Deb': IfConnective(
            IsOfType('Deb', Knight),
            CountOfType(Knave, 1, operator.eq),  # uncertain "exactly"?
        ),
        'Jeb': IfConnective(
            Not(IsOfType('Jeb', Monk)),
            IsOfType('Bob', Monk)
        ),
        'Rob': IfConnective(
            IsOfType('Rob', Monk),
            CountOfType(Knave, 3, operator.eq)
        ),
        'Bob': [
            IfConnective(
                IsOfType('Bob', Knave),
                IsSameAs('Deb', 'Rob')
            ),
            CountOfType(Knave, 3, operator.eq),  # uncertain "exactly"?
        ],
    })
    p23 = Puzzle({
        'A': [
            Biconditional(
                IsOfType('B', Knight),
                IsOfType('C', Knight)
            ),
            IsOfType('C', Knave),
        ],
        'B': [
            Biconditional(
                IsOfType('A', Knight),
                IsOfType('C', Monk)
            ),
        ],
        'C': [
            Biconditional(
                IsOfType('A', Knave),
                IsOfType('D', Knight),
            ),
            IsOfType('B', Monk),
        ],
        'D': [
            Biconditional(
                IsOfType('A', Knave),
                IsOfType('B', Knave),
            ),
        ],
    })
    p24 = Puzzle({
        'A': [
            Honesty('B', 'C', operator.gt),
            IsOfType('C', Knave),
        ],
        'B': [
            Honesty('C', 'A', operator.gt),
            SumOfTypes((Knave, Knight), 2, operator.eq),
        ],
        'C': [
            IsSameAs('C', 'B'),
        ],
    })
    p25 = Puzzle({
        'A': [
            IsOfType('A', Knight),
            CountOfType(Knave, 0, remainder_by_2_equals),
        ],
        'B': [
            IsOfType('C', Knight),
            CountOfType(Monk, 0, operator.eq),
        ],
        'C': [
            CountOfType(Knight, 1, operator.eq),
            Biconditional(
                IsOfType('C', Knight),
                IsOfType('A', Knave)
            ),
        ],
    })
    p26 = Puzzle({
        'Antoine': [
            Biconditional(
                IsOfType('Bernardo', Knight),
                IsOfType('Antoine', Knave),
            ),
            CountOfType(Monk, 1, operator.ge),
        ],
        'Bernardo': CountOfType(Knight, 1, remainder_by_2_equals),
        'Campbell': ConjunctiveStatement(
            Not(IsOfType('Campbell', Monk)),
            IsOfType('Antoine', Monk),
        )
    })
    # p27: statement b1 is shared by A (negated) and B; statement e is
    # shared by C (negated) and E.
    b1 = Not(IsSameAs('E', 'B'))
    e = IsOfType('A', Knight)
    p27 = Puzzle({
        'A': [
            Biconditional(
                Not(b1),
                Honesty('D', 'A', operator.eq),
            ),
            CountOfType(Monk, 0, operator.eq),
        ],
        'B': [
            b1,
            CountOfType(Knave, 2, operator.ge),
        ],
        'C': [
            DisjunctiveStatement(
                IsOfType('D', Knight),
                CountOfType(Monk, 0, operator.eq),
            ),
            Not(e),
        ],
        'D': [
            IfConnective(
                Not(IsSameAs('D', 'B')),
                IsOfType('E', Knave)
            ),
        ],
        'E': [
            e,
        ],
    })
    p27.print_puzzle_with_solutions()
    # p.print_puzzle_statistics()
if __name__ == '__main__':
    # Run the puzzle demo only when executed as a script.
    main()
|
from actions.demo_ban_user import DemoBanUserAction
from godmode.views.list_view import BaseListView
from godmode.models.base import BaseAdminModel
from godmode.widgets.base import BaseWidget
from groups.main_group import MainGroup
from database.db import User, pg_database
from widgets.boolean import BooleanReverseWidget
class NameWidget(BaseWidget):
    """Admin-list widget that renders a user's full name in bold."""

    # This column cannot be used as a list filter.
    filterable = False

    def render_list(self, item):
        # Wrap the full name in <b> tags for the HTML list view.
        return f"<b>{item.fullname}</b>"
class UsersAdminModel(BaseAdminModel):
    """Admin model exposing the User table in the Main group of the UI."""

    db = pg_database
    name = "users"
    title = "Users"
    icon = "icon-user"  # sidebar icon CSS class
    group = MainGroup
    index = 100  # ordering weight inside the group
    table = User
    # widgets = {
    #     "is_locked": BooleanReverseWidget
    # }

    class PUsersListView(BaseListView):
        """List-view configuration for users."""

        title = "User list"
        # sorting = ["id", "name"]
        sorting = ["id", "fullname"]  # columns that allow sorting
        default_sorting = User.registered_at.desc()  # newest users first
        fields = [
            "id",
            "fullname",
            "registered_at",
            "avatar_path"
        ]
        # object_actions = [DemoBanUserAction]
        # batch_actions = [DemoBanUserAction]
        widgets = {
            "fullname": NameWidget  # render the full name in bold
        }

    list_view = PUsersListView
|
from setuptools import setup, find_packages
# Package metadata for the deepART library (adaptive resonance theory NNs).
setup(name='deepART',
      version='0.0.0',
      description='A library containing adaptive resonance theory neural networks',
      url='',
      author='Nicholas Cheng Xue Law, Chun Ping Lim',
      author_email='nlaw8@gatech.edu, lim.chunping@gmail.com',
      license='MIT',
      # Ship every package except the test packages.
      packages=find_packages(exclude=['*.tests']),
      # Pinned runtime dependencies.
      install_requires=[
          'numpy==1.20.3',
          'joblib==1.0.1',
          'nltk==3.6.2',
          'pandas==1.2.4',
          'gensim==4.0.1',
          'spacy==3.0.6',
          "scikit-learn==0.24.2",
          "torch==1.8.1",
          "torchvision==0.9.1"
      ],
      include_package_data=True,
      test_suite='nose.collector',
      tests_require=['nose'],
      zip_safe=False
      )
|
# -*- coding:utf-8 -*-
import subprocess
import os
class questionType:
    """Expand question keyword lists with the knowledge-base attribute names
    implied by the question word (who/when/where/...), and classify a
    question type via an external Java pattern matcher (pattern.jar).

    NOTE: the methods keep the historical parameter name ``list`` (which
    shadows the builtin) so existing keyword callers keep working.
    """

    def Person_type(self, list):
        # "谁" (who) implies the attribute "职业" (occupation).
        # Iterate over a snapshot so appends never affect the traversal.
        for word in list[:]:
            if word == u'谁':
                list.append(u'职业')
        return list

    def Number_type(self, list):
        # "生日" (birthday) -> "出生日期" (date of birth);
        # "时候" (when)     -> "日期"/"时间" (date/time).
        for word in list[:]:
            if word == u'生日':
                list.append(u'出生日期')
            if word == u'时候':
                list.append(u'日期')
                list.append(u'时间')
        return list

    def Location_type(self, list):
        # "哪" (where) -> location attributes; "国人" -> "国籍" (nationality).
        for word in list[:]:
            if word == u'哪':
                list.append(u'地理位置')
                list.append(u'所属地区')
                list.append(u'地点')
            if word == u'国人':
                list.append(u'国籍')
        return list

    def object_type(self, list):
        # "干什么" (do what) -> occupation plus the generic Baidu tag.
        for word in list[:]:
            if word == u'干什么':
                list.append(u'职业')
                list.append('BaiduTAG')
        return list

    def null_type(self, list):
        # "属" (zodiac "belongs to") -> "生肖" (Chinese zodiac).
        for word in list[:]:
            if word == u'属':
                list.append(u'生肖')
        return list

    def ques_type_list(self, list, types):
        """Strip stopwords from *list*, then expand it according to *types*.

        Returns the (mutated) list in every case.
        """
        # BUG FIX: the original called list.remove(word) while iterating the
        # same list, which silently skips the element following each removal.
        # Rebuild in place instead so every word is inspected. Also use a
        # context manager so the stopword file handle is actually closed.
        with open('./QA1/stopword.txt', 'r') as f:
            stopword = set(line.strip() for line in f)
        list[:] = [word for word in list if word not in stopword]
        # Enough keywords already: no expansion needed.
        if len(list) >= 3:
            return list
        if types == 'person':
            return self.Person_type(list)
        elif types == 'location':
            return self.Location_type(list)
        elif types == 'object':
            # Previously this branch could fall through without returning.
            return self.object_type(list)
        elif types == 'null':
            return self.null_type(list)
        return list

    def get_type(self, question):
        """Run pattern.jar on *question* and return the type it prints."""
        command = subprocess.Popen(
            ['java', '-jar', 'pattern.jar', question],
            shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # stdout is bytes; str() renders it as "b'...\n'", so slice off the
        # b' prefix and the trailing \n' (historical behaviour preserved).
        quesType = (str(command.stdout.read()))[2:-3]
        return quesType
if __name__ == '__main__':
    # Manual smoke test: classify a sample question
    # ("What is Yao Ming's weight?"); requires java + pattern.jar on PATH.
    question = u'姚明的体重是多少'
    qtype = questionType()
    print(qtype.get_type(question))
|
# -*- coding: utf-8 -*-
import io
import re
from setuptools import setup
# PyPI long description comes straight from the README.
with io.open("README.md") as f:
    long_description = f.read()
# Single-source the version: parse __version__ out of the package __init__.
with io.open("fastapi_camelcase/__init__.py", "rt", encoding="utf8") as f:
    version = re.search(r'__version__ = "(.*?)"', f.read()).group(1)
setup(
    name="fastapi_camelcase",
    version=version,
    description="Package provides an easy way to have camelcase request/response bodies for Pydantic",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://nf1s.github.io/fastapi-camelcase/",
    author="Ahmed Nafies",
    author_email="ahmed.nafies@gmail.com",
    license="MIT",
    packages=["fastapi_camelcase"],
    install_requires=["pydantic", "pyhumps"],
    project_urls={
        "Documentation": "https://nf1s.github.io/fastapi-camelcase/",
        "Source": "https://github.com/nf1s/fastapi-camelcase",
    },
    classifiers=[
        "Intended Audience :: Developers",
        "Topic :: Utilities",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    zip_safe=False,
    python_requires=">=3.6",
)
|
#!/usr/bin/env python
from setuptools import setup,find_packages
setup(
name="fum-po",
version='0.1',
packages=find_packages(),
include_package_data=True,
install_requires=[
'click',
'pyyaml',
'couchdb',
],
entry_points='''
[console_scripts]
dream=fum_po.dreamcatcher:cli
''',
)
|
from django.conf.urls import url
from . import views
# URL routes for the student app.
urlpatterns = [
    url(r'^index/$', views.index),
    url(r'^grades/$', views.grades),
    url(r'^students/$', views.students),
    url(r'^students1/$', views.students1),
    url(r'^grades/(\d+)$', views.grade),
    url(r'^students/(\d+)$', views.student),
    url(r'^upfile/$', views.upfile),
    url(r'^savefile/$', views.savefile),
    # BUG FIX: the next three patterns were missing the leading '^' anchor.
    # Django matches url() regexes with a search, so e.g. r'edit/$' would
    # match ANY path ending in 'edit/' and shadow unrelated URLs.
    url(r'^studentpage/(\d+)$', views.studentPage),
    url(r'^ajaxstudents/$', views.ajaxstudens),
    url(r'^edit/$', views.edit),
    url(r'^main/$', views.main),
    url(r'^detail/$', views.detail),
    url(r'^postfile/$', views.postfile),
    url(r'^showinfo/$', views.showinfo),
]
|
import speech_recognition as sr
import gtts
from playsound import playsound
import sys
from typing import Union, Any
from .AISayListenException import AISayListenAttributeTypeError
from .AISayListenException import AISayListenAttributeValueError
from .AISayListenException import AISayListenGttsError
from .AISayListenException import AISayListenMicrophoneError
from .AISayListenException import AISayListenSpeakEnablingError
class AISayListen:
    """Text-to-speech (gTTS + playsound) and speech-to-text
    (speech_recognition / Google) helper with validated configuration.

    Attributes:
        LANG: language code used for both TTS and STT.
        MIC_NAME: microphone device name ("N/A" when speaking is disabled).
        SAMPLE_RATE / CHUNK_SIZE: microphone capture parameters.
        can_speak: whether microphone-dependent features are enabled.
        DEVICE_ID: index of MIC_NAME in the system mic list (-1 if none).
    """

    def __check_self_attributes_types(self) -> None:
        # Verify every configuration attribute has the expected type; the
        # name of the first offending attribute is reported in the error.
        err = "NA"
        try:
            err = "self.LANG"
            assert isinstance(self.LANG, str)
            err = "self.MIC_NAME"
            assert isinstance(self.MIC_NAME, str)
            err = "self.SAMPLE_RATE"
            assert isinstance(self.SAMPLE_RATE, int)
            err = "self.CHUNK_SIZE"
            assert isinstance(self.CHUNK_SIZE, int)
            err = "self.can_speak"
            assert isinstance(self.can_speak, bool)
            err = "self.DEVICE_ID"
            assert isinstance(self.DEVICE_ID, int)
        except AssertionError:
            # BUG FIX: was a bare `except:`, which would also convert
            # KeyboardInterrupt/SystemExit into attribute-type errors.
            raise AISayListenAttributeTypeError(err, self)

    def __init__(self,
                 LANG: str = "fr",
                 MIC_NAME: str = "default",
                 SAMPLE_RATE: int = 48000,
                 CHUNK_SIZE: int = 2048,
                 can_speak: bool = True):
        """Configure language, microphone and capture parameters.

        Raises:
            AISayListenAttributeValueError: non-positive rate/chunk size.
            AISayListenMicrophoneError: MIC_NAME cannot be found.
            AISayListenAttributeTypeError: an attribute has the wrong type.
        """
        self.can_speak, self.LANG, self.SAMPLE_RATE, self.CHUNK_SIZE = can_speak, LANG, SAMPLE_RATE, CHUNK_SIZE
        if self.SAMPLE_RATE <= 0 or CHUNK_SIZE <= 0:
            raise AISayListenAttributeValueError("assert not(self.SAMPLE_RATE <= 0 or CHUNK_SIZE <= 0)")
        if not can_speak:
            # Microphone disabled: install the "no microphone" sentinels.
            self.DEVICE_ID = -1
            self.MIC_NAME = "N/A"
        else:
            self.change_microphone(MIC_NAME)
        self.recognizer = sr.Recognizer()
        self.__check_self_attributes_types()

    def say(self, speech: str) -> None:
        """Synthesize *speech* with gTTS and play it aloud."""
        if speech is None:
            print("WARNING: Got None value for AISayListen.say", file=sys.stderr)
            return
        if isinstance(speech, str) is False:
            raise AISayListenAttributeTypeError("AISayListen.say(speech) speech was not of type str", self)
        try:
            tts = gtts.gTTS(speech, lang=self.LANG)
            tts.save("/tmp/answer.mp3")
            playsound("/tmp/answer.mp3")
        except Exception as exc:
            # BUG FIX: was a bare `except:`; narrowed and chained so the
            # underlying gTTS/playsound error is preserved for debugging.
            raise AISayListenGttsError(speech) from exc

    def listen(self) -> Union[str, None]:
        """Record from the configured microphone and return the recognized
        text, or None when capture/recognition fails."""
        if self.can_speak is not True:
            raise AISayListenSpeakEnablingError()
        try:
            with sr.Microphone(self.DEVICE_ID, self.SAMPLE_RATE, self.CHUNK_SIZE) as source:
                # Calibrate the energy threshold against background noise.
                self.recognizer.adjust_for_ambient_noise(source)
                return self.recognizer.recognize_google(
                    self.recognizer.listen(source),
                    language=self.LANG)
        except Exception as e:
            print(f'Listen Exception: {e}', file=sys.stderr)
            return None

    def change_microphone(self, mic_name: str) -> None:
        """Select the microphone named *mic_name*.

        Resets DEVICE_ID/MIC_NAME to their sentinel values and raises
        AISayListenMicrophoneError when the device cannot be found.
        """
        if isinstance(mic_name, str) is False:
            raise AISayListenAttributeTypeError("AISayListen.change_microphone(mic_name) mic_name was not of type str", self)
        if self.can_speak is not True:
            raise AISayListenSpeakEnablingError()
        self.MIC_NAME = mic_name
        mic_list = sr.Microphone.list_microphone_names()
        try:
            if not len(mic_list):
                raise AISayListenMicrophoneError("No mics are available on this device")
            elif self.MIC_NAME not in mic_list:
                raise AISayListenMicrophoneError(f'[{self.MIC_NAME}] Microphone could not be found in: {mic_list}')
        except AISayListenMicrophoneError:
            # Reset to the "no microphone" sentinels before propagating.
            self.DEVICE_ID = -1
            self.MIC_NAME = "N/A"
            # BUG FIX: was `raise AISayListenMicrophoneError(e)`, which
            # wrapped the exception inside a new one and lost the original
            # traceback; re-raise the original instead.
            raise
        self.DEVICE_ID = mic_list.index(self.MIC_NAME)
        print(f'Successfully changed microphone to {self.MIC_NAME}:{self.DEVICE_ID}')

    def set_canspeak_status(self, status: bool) -> None:
        """Enable or disable microphone-dependent features."""
        if isinstance(status, bool) is False:
            raise AISayListenAttributeTypeError("AISayListen.set_canspeak_status(lang) status was not of type bool", self)
        self.can_speak = status

    def get_canspeak_status(self) -> bool:
        return self.can_speak

    def set_language(self, lang: str) -> None:
        """Set the language code used for both TTS and STT."""
        if isinstance(lang, str) is False:
            raise AISayListenAttributeTypeError("AISayListen.set_language(lang) lang was not of type str", self)
        self.LANG = lang

    def get_language(self) -> str:
        return self.LANG

    def __str__(self) -> str:
        return f'[ DeviceID: {self.DEVICE_ID}, Microphone Name: {self.MIC_NAME}, Sample Rate: {self.SAMPLE_RATE}, Chunk Size: {self.CHUNK_SIZE}, Can Speak: {self.can_speak}, Language: {self.LANG} ]'

    __repr__ = __str__
|
# Generated by Django 3.2.3 on 2021-07-22 17:17
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Profile.id to Profile.user (follows the app's initial migration)."""

    dependencies = [
        ('pythons_auth', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='profile',
            old_name='id',
            new_name='user',
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.