content stringlengths 5 1.05M |
|---|
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.views.generic.edit import CreateView
from authentication_service.user_migration import views
# Single wizard view instance shared by both "migrate" routes below.
# url_name tells the wizard which named URL to reverse when building
# links to its individual steps.
migration_wizard = views.MigrateUserWizard.as_view(
    url_name="migrate_user_step"
)
urlpatterns = [
    # Migrate user wizard
    url(
        # Token plus an explicit wizard step in the path.
        r"^migrate/(?P<token>[\w:-]+)/(?P<step>.+)/$",
        migration_wizard,
        name="migrate_user_step"
    ),
    # Wizard entry point without a step component.
    url(r"^migrate/(?P<token>[\w:-]+)/$", migration_wizard, name="migrate_user"),
    # Security-question gate guarding the migration flow.
    url(
        r"^question-gate/(?P<token>[\w:-]+)/$",
        views.QuestionGateView.as_view(),
        name="question_gate"
    ),
    # Token-based password reset for migrated users.
    url(
        r"^password-reset/(?P<token>[\w:-]+)/$",
        views.PasswordResetView.as_view(),
        name="password_reset"
    ),
]
|
import os
import numpy as np
import scipy.stats as stats
from scipy.stats import norm
class limitfunc:
    """Evaluator for a named limit state function.

    The instance stores a function name and ``evaluate_func`` dispatches
    on it. Currently only the 'borehole_simcenter' water-flow model is
    implemented.
    """

    def __init__(self, func_name=None):
        """Store the limit state function name.

        :param func_name: identifier of the limit state function
            (currently only ``'borehole_simcenter'``).
        :raises ValueError: if no name is provided.
        """
        if func_name is None:
            raise ValueError('Please provide a limit state function name.')
        self.func_name = func_name

    # BUG FIX: the original was a SyntaxError — the ``def`` line and the
    # ``if`` line below were both missing their trailing colon.
    def evaluate_func(self, params):
        """Evaluate the limit state function at ``params``.

        :param params: model inputs; for 'borehole_simcenter' the order is
            [rw, r, Tu, Hu-Hl, Tl, L, Kw].
        :return: the function value (borehole water flow rate).
        :raises ValueError: for an unknown function name.
        """
        if self.func_name == 'borehole_simcenter':
            # Borehole flow model:
            #   2*pi*Tu*(Hu-Hl) /
            #   (ln(r/rw) * (1 + Tu/Tl + 2*L*Tu / (ln(r/rw) * rw^2 * Kw)))
            rw, r, tu, delta_h, tl, length, kw = params
            log_ratio = np.log(r / rw)
            denominator = log_ratio * (
                1 + tu / tl
                + (2 * length * tu) / (log_ratio * np.power(rw, 2) * kw)
            )
            return (2 * np.pi * tu * delta_h) / denominator
        else:
            raise ValueError('Invalid limit state function name.')
|
import datetime
import matplotlib.pyplot as plt
import pytest
from sitdown.filters import FilterSet, StringFilter
from sitdown.core import MutationSet
from sitdown.views import MonthSet, MonthMatrix, Month, MonthBin, MonthSeries
from tests.factories import MutationFactory
@pytest.fixture
def mock_filtered_data(long_mutation_sequence):
    """A MutationSet wrapping the shared long mutation sequence fixture."""
    mutation_set = MutationSet(mutations=long_mutation_sequence)
    return mutation_set
@pytest.fixture
def a_month_set():
    """MonthSet with ten 10-amount mutations in Jan 2018 and ten
    30-amount mutations in Apr 2018 (Feb/Mar deliberately empty)."""
    january = datetime.date(2018, 1, 1)
    april = datetime.date(2018, 4, 15)
    mutations = []
    for _ in range(10):
        mutations.append(MutationFactory(amount=10, date=january))
    for _ in range(10):
        mutations.append(MutationFactory(amount=30, date=april))
    return MonthSet(mutations)
@pytest.fixture
def shop_a_b_filtered_data_set():
    """FilteredDataSet containing mutations for shops A, B and C.

    Used to exercise filtering; shop B also gets one dated mutation so
    the series span more than a single month.
    """
    mutations = set()
    for _ in range(20):
        mutations.add(MutationFactory(description="shop A"))
    for _ in range(20):
        mutations.add(MutationFactory(description="shop B"))
    mutations.add(
        MutationFactory(description="shop B",
                        date=datetime.date(year=2017, month=9, day=1))
    )
    mutations.add(MutationFactory(description="shop C"))
    shop_filters = [
        StringFilter(string_to_match=shop)
        for shop in ("shop A", "shop B", "shop C")
    ]
    filter_set = FilterSet(filters=shop_filters)
    return filter_set.get_filtered_data_set(mutations)
def test_month():
    """Months build from either a 'YYYY/MM' string or a date; anything
    else raises ValueError."""
    nov_1999 = datetime.date(year=1999, month=11, day=1)
    assert Month("1999/11") == Month(nov_1999)
    # The day component of a date is ignored and normalized to 1.
    assert Month("1999/11") == Month(nov_1999.replace(day=10))
    assert Month("2018/01").date == datetime.date(year=2018, month=1, day=1)
    with pytest.raises(ValueError):
        Month(2018)
def test_data_per_month(a_month_set):
    """Only the two populated months produce bins, with correct totals."""
    month_bins = a_month_set.bins()
    assert len(month_bins) == 2
    # 10 mutations of 10 in January, 10 mutations of 30 in April.
    assert month_bins[0].sum() == 100
    assert month_bins[1].sum() == 300
def test_month_series(a_month_set):
    """get_series() spans Jan-Apr, padding silent months with empty bins."""
    month_series = a_month_set.get_series()
    assert len(month_series) == 4
    expected_counts = {"2018/03": 0, "2018/04": 10}
    for label, count in expected_counts.items():
        assert len(month_series[Month(label)]) == count
    assert month_series.sums() == [100, 0, 0, 300]
def test_month_set_accessor(a_month_set):
    """Indexing by Month yields a MonthBin; empty months raise KeyError."""
    april = Month(datetime.date(year=2018, month=4, day=1))
    assert type(a_month_set[april]) == MonthBin
    march = Month(datetime.date(year=2018, month=3, day=1))
    with pytest.raises(KeyError):
        a_month_set[march]
def test_month_set_cast_to_series(a_month_set):
    """Casting a sparse MonthSet to a MonthSeries fills the gap months."""
    source_bins = a_month_set.bins()
    assert len(source_bins) == 2
    assert len(list(source_bins)[0].mutations) == 10
    series = MonthSeries.from_month_set(a_month_set)
    series_bins = series.bins()
    # Two empty months (Feb, Mar) are now represented as bins too.
    assert len(series_bins) == 4
    assert len(list(series_bins)[0].mutations) == 10
def test_month_set_plotting(long_mutation_sequence):
    """Smoke test: plotting a MonthSet must not raise."""
    month_set = MonthSet(long_mutation_sequence)
    month_set.plot()
    # plt.show()  # enable for interactive inspection
def test_month_matrix(shop_a_b_filtered_data_set):
    """The matrix exposes one aligned MonthSeries per filter category."""
    matrix = MonthMatrix(filtered_data_list=shop_a_b_filtered_data_set)
    # One category per filter in the fixture's FilterSet.
    assert list(matrix.keys()) == ['shop A', 'shop B', 'shop C']
    # Month series should be accessible as keys.
    assert type(matrix['shop A']) == MonthSeries
    # All series share a common date range so rows line up month-for-month.
    shop_a = matrix['shop A']
    shop_b = matrix['shop B']
    assert shop_a.min_month == shop_b.min_month
    assert shop_a.max_month == shop_b.max_month
def test_month_matrix_plotting(shop_a_b_filtered_data_set):
    """Smoke test: plotting a MonthMatrix must not raise."""
    month_matrix = MonthMatrix(filtered_data_list=shop_a_b_filtered_data_set)
    month_matrix.plot()
    # plt.show()  # enable for interactive inspection
|
# Read two opposite corners of an axis-aligned rectangle from stdin
# (x1, y1, x2, y2, one value per line), then print its area and perimeter.
x1, y1, x2, y2 = (float(input()) for _ in range(4))
width = max(x1, x2) - min(x1, x2)
height = max(y1, y2) - min(y1, y2)
area = width * height
perimeter = 2 * (width + height)
print(area)
print(perimeter)
# coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from intersight.configuration import Configuration
class IamPrivilegeSetAllOf(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> OpenAPI type string; drives to_dict() serialization.
    openapi_types = {
        'description': 'str',
        'name': 'str',
        'privilege_names': 'list[str]',
        'account': 'IamAccount',
        'associated_privilege_sets': 'list[IamPrivilegeSet]',
        'privileges': 'list[IamPrivilege]',
        'system': 'IamSystem'
    }
    # Attribute name -> JSON key used in the Intersight API payload.
    attribute_map = {
        'description': 'Description',
        'name': 'Name',
        'privilege_names': 'PrivilegeNames',
        'account': 'Account',
        'associated_privilege_sets': 'AssociatedPrivilegeSets',
        'privileges': 'Privileges',
        'system': 'System'
    }

    def __init__(self,
                 description=None,
                 name=None,
                 privilege_names=None,
                 account=None,
                 associated_privilege_sets=None,
                 privileges=None,
                 system=None,
                 local_vars_configuration=None):  # noqa: E501
        """IamPrivilegeSetAllOf - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        # Backing fields for the properties below.
        self._description = None
        self._name = None
        self._privilege_names = None
        self._account = None
        self._associated_privilege_sets = None
        self._privileges = None
        self._system = None
        self.discriminator = None
        # Only explicitly supplied values pass through the property setters;
        # omitted arguments leave the backing field as None.
        if description is not None:
            self.description = description
        if name is not None:
            self.name = name
        if privilege_names is not None:
            self.privilege_names = privilege_names
        if account is not None:
            self.account = account
        if associated_privilege_sets is not None:
            self.associated_privilege_sets = associated_privilege_sets
        if privileges is not None:
            self.privileges = privileges
        if system is not None:
            self.system = system

    @property
    def description(self):
        """Gets the description of this IamPrivilegeSetAllOf. # noqa: E501
        Description of the privilege set. # noqa: E501
        :return: The description of this IamPrivilegeSetAllOf. # noqa: E501
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this IamPrivilegeSetAllOf.
        Description of the privilege set. # noqa: E501
        :param description: The description of this IamPrivilegeSetAllOf. # noqa: E501
        :type: str
        """
        self._description = description

    @property
    def name(self):
        """Gets the name of this IamPrivilegeSetAllOf. # noqa: E501
        Name of the privilege set. # noqa: E501
        :return: The name of this IamPrivilegeSetAllOf. # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this IamPrivilegeSetAllOf.
        Name of the privilege set. # noqa: E501
        :param name: The name of this IamPrivilegeSetAllOf. # noqa: E501
        :type: str
        """
        self._name = name

    @property
    def privilege_names(self):
        """Gets the privilege_names of this IamPrivilegeSetAllOf. # noqa: E501
        :return: The privilege_names of this IamPrivilegeSetAllOf. # noqa: E501
        :rtype: list[str]
        """
        return self._privilege_names

    @privilege_names.setter
    def privilege_names(self, privilege_names):
        """Sets the privilege_names of this IamPrivilegeSetAllOf.
        :param privilege_names: The privilege_names of this IamPrivilegeSetAllOf. # noqa: E501
        :type: list[str]
        """
        self._privilege_names = privilege_names

    @property
    def account(self):
        """Gets the account of this IamPrivilegeSetAllOf. # noqa: E501
        :return: The account of this IamPrivilegeSetAllOf. # noqa: E501
        :rtype: IamAccount
        """
        return self._account

    @account.setter
    def account(self, account):
        """Sets the account of this IamPrivilegeSetAllOf.
        :param account: The account of this IamPrivilegeSetAllOf. # noqa: E501
        :type: IamAccount
        """
        self._account = account

    @property
    def associated_privilege_sets(self):
        """Gets the associated_privilege_sets of this IamPrivilegeSetAllOf. # noqa: E501
        A reference to a iamPrivilegeSet resource. When the $expand query parameter is specified, the referenced resource is returned inline. A privilege set can be associated to other privilege sets. # noqa: E501
        :return: The associated_privilege_sets of this IamPrivilegeSetAllOf. # noqa: E501
        :rtype: list[IamPrivilegeSet]
        """
        return self._associated_privilege_sets

    @associated_privilege_sets.setter
    def associated_privilege_sets(self, associated_privilege_sets):
        """Sets the associated_privilege_sets of this IamPrivilegeSetAllOf.
        A reference to a iamPrivilegeSet resource. When the $expand query parameter is specified, the referenced resource is returned inline. A privilege set can be associated to other privilege sets. # noqa: E501
        :param associated_privilege_sets: The associated_privilege_sets of this IamPrivilegeSetAllOf. # noqa: E501
        :type: list[IamPrivilegeSet]
        """
        self._associated_privilege_sets = associated_privilege_sets

    @property
    def privileges(self):
        """Gets the privileges of this IamPrivilegeSetAllOf. # noqa: E501
        A reference to a iamPrivilege resource. When the $expand query parameter is specified, the referenced resource is returned inline. Reference to the privileges. Privilege represents an action which can be performed in Intersight such as creating server profile, deleting a user etc. Privileges are assigned to a user using privilege sets and roles. # noqa: E501
        :return: The privileges of this IamPrivilegeSetAllOf. # noqa: E501
        :rtype: list[IamPrivilege]
        """
        return self._privileges

    @privileges.setter
    def privileges(self, privileges):
        """Sets the privileges of this IamPrivilegeSetAllOf.
        A reference to a iamPrivilege resource. When the $expand query parameter is specified, the referenced resource is returned inline. Reference to the privileges. Privilege represents an action which can be performed in Intersight such as creating server profile, deleting a user etc. Privileges are assigned to a user using privilege sets and roles. # noqa: E501
        :param privileges: The privileges of this IamPrivilegeSetAllOf. # noqa: E501
        :type: list[IamPrivilege]
        """
        self._privileges = privileges

    @property
    def system(self):
        """Gets the system of this IamPrivilegeSetAllOf. # noqa: E501
        :return: The system of this IamPrivilegeSetAllOf. # noqa: E501
        :rtype: IamSystem
        """
        return self._system

    @system.setter
    def system(self, system):
        """Sets the system of this IamPrivilegeSetAllOf.
        :param system: The system of this IamPrivilegeSetAllOf. # noqa: E501
        :type: IamSystem
        """
        self._system = system

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models (anything exposing to_dict),
        # including elements of lists and values of dicts.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(
                    map(lambda x: x.to_dict()
                        if hasattr(x, "to_dict") else x, value))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, IamPrivilegeSetAllOf):
            return False
        # Equality is by serialized content, not identity.
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, IamPrivilegeSetAllOf):
            return True
        return self.to_dict() != other.to_dict()
|
import numpy
import pandas.testing
from ..adapters.dataframe import DataFrameAdapter
from ..adapters.mapping import MapAdapter
from ..client import from_tree
# In-memory catalogue shared by every test below: two identical 100-row
# dataframes served through DataFrameAdapter, differing only in their
# dask partition count.
tree = MapAdapter(
    {
        # a dataframe divided into three partitions
        "basic": DataFrameAdapter.from_pandas(
            pandas.DataFrame(
                {
                    "x": 1 * numpy.ones(100),
                    "y": 2 * numpy.ones(100),
                    "z": 3 * numpy.ones(100),
                }
            ),
            npartitions=3,
        ),
        # a dataframe with just one partition
        "single_partition": DataFrameAdapter.from_pandas(
            pandas.DataFrame(
                {
                    "x": 1 * numpy.ones(100),
                    "y": 2 * numpy.ones(100),
                    "z": 3 * numpy.ones(100),
                }
            ),
            npartitions=1,
        ),
    }
)
def test_dataframe_basic():
    """A partitioned dataframe round-trips through the client unchanged."""
    client = from_tree(tree)
    node = client["basic"]
    assert node.structure().macro.npartitions == 3
    pandas.testing.assert_frame_equal(node.read(), tree["basic"].read())
def test_dataframe_column_access(): 
    """Each column is addressable by name and matches the source series."""
    client = from_tree(tree)
    source_df = tree["basic"].read()
    for column in source_df.columns:
        client_series = client["basic"][column]
        pandas.testing.assert_series_equal(client_series, source_df[column])
def test_dataframe_single_partition():
    """A one-partition dataframe also round-trips through the client."""
    client = from_tree(tree)
    node = client["single_partition"]
    assert node.structure().macro.npartitions == 1
    pandas.testing.assert_frame_equal(node.read(), tree["single_partition"].read())
|
#!/usr/bin/env python
"""A simple parser for power tool output"""
from __future__ import print_function
from collections import namedtuple
import argparse
import re
import sys
# One parsed power-tool result per input file; time fields are seconds,
# energy fields joules, power watts, frequency hertz.
PowerOutput = namedtuple('PowerOutput', ['filename', 'frequency', 'realtime',
                                         'systime', 'usrtime', 'pkg_energy',
                                         'pp0_energy', 'pkg_power'])


def process_arguments():
    """Returns the parsed command line arguments (argparse Namespace)."""
    parser = argparse.ArgumentParser(description='Power Output Formatter')
    parser.add_argument('-if', '--infiles', required=True, nargs='+',
                        type=argparse.FileType('r'),
                        help='Output files to consider')
    return parser.parse_args()


# Matches a non-negative integer or decimal number, e.g. "5", "0.73",
# "43.785278".
_NUMBER_RE = r"(\d+(?:\.\d+)?)"


def parse_file(infile):
    """Parses a single power-tool output file into a PowerOutput record.

    The file name must embed the CPU frequency (e.g. "run_2.5GHz.txt")
    and the body contains lines of the form:
        Real Time: 1m 00s 628us
        Sys Time: 0s 0us
        User Time: 0s 0us
        PKG Energy: 43.785278J
        PP0 Energy: 0.000000J
        PKG Power: 0.729747W

    Any field that never appears is returned as None. Exits the program
    on a missing frequency or a duplicated field.
    """
    frequency_re = re.compile(r"(\d+\.\d+|\d+)\s?(GHz|MHz)")
    frequency_match = re.search(frequency_re, infile.name)
    if frequency_match is None:
        sys.exit("ERROR! Filename {} lacks CPU frequency".format(infile.name))
    frequency_suffix = frequency_match.group(2)
    if frequency_suffix == 'GHz':
        frequency_unit = 1e9
    elif frequency_suffix == 'MHz':
        frequency_unit = 1e6
    else:
        # Unreachable with the regex above, but kept defensively. BUG FIX:
        # the original referenced frequency_unit here before it was ever
        # assigned, which would have raised NameError instead of exiting.
        sys.exit('Unknown frequency unit: {}'.format(frequency_suffix))
    frequency_hz = float(frequency_match.group(1)) * frequency_unit
    realtime_re = re.compile(r"^Real Time:\s*(\d+)m\s*(\d+)s\s*(\d+)us$")
    systime_re = re.compile(r"^Sys Time:\s*(\d+)s\s*(\d+)us$")
    usrtime_re = re.compile(r"^User Time:\s*(\d+)s\s*(\d+)us$")
    # BUG FIX: the original pattern (\d+\.?\d+?) required at least two
    # digits, so single-digit values such as "5J" or "0J" were silently
    # skipped and came back as None.
    pkg_energy_re = re.compile(r"^PKG Energy:\s*" + _NUMBER_RE + r"J$")
    pp0_energy_re = re.compile(r"^PP0 Energy:\s*" + _NUMBER_RE + r"W?J$".replace("W?", ""))
    pp0_energy_re = re.compile(r"^PP0 Energy:\s*" + _NUMBER_RE + r"J$")
    pkg_power_re = re.compile(r"^PKG Power:\s*" + _NUMBER_RE + r"W$")
    realtime = None
    systime = None
    usrtime = None
    pkg_energy = None
    pp0_energy = None
    pkg_power = None
    for line in infile:
        match = re.match(realtime_re, line)
        if match is not None:
            print(match.group(0))
            if realtime is not None:
                sys.exit("Duplicate Real Time value in {}".format(infile.name))
            realtime = int(match.group(1)) * 60 \
                + int(match.group(2)) \
                + int(match.group(3)) * 1e-6  # Mins, Secs, Microsecs
        match = re.match(systime_re, line)
        if match is not None:
            print(match.group(0))
            if systime is not None:
                sys.exit("Duplicate Sys Time value in {}".format(infile.name))
            systime = int(match.group(1)) + int(match.group(2)) * 1e-6
        match = re.match(usrtime_re, line)
        if match is not None:
            print(match.group(0))
            if usrtime is not None:
                sys.exit("Duplicate User Time value in {}".format(infile.name))
            usrtime = int(match.group(1)) + int(match.group(2)) * 1e-6
        match = re.match(pkg_energy_re, line)
        if match is not None:
            print(match.group(0))
            if pkg_energy is not None:
                sys.exit("Duplicate PKG Energy in {}".format(infile.name))
            pkg_energy = float(match.group(1))
        match = re.match(pp0_energy_re, line)
        if match is not None:
            print(match.group(0))
            if pp0_energy is not None:
                sys.exit("Duplicate PP0 Energy in {}".format(infile.name))
            pp0_energy = float(match.group(1))
        match = re.match(pkg_power_re, line)
        if match is not None:
            print(match.group(0))
            if pkg_power is not None:
                sys.exit("Duplicate PKG Power in {}".format(infile.name))
            pkg_power = float(match.group(1))
    return PowerOutput(filename=infile.name,
                       frequency=frequency_hz,
                       realtime=realtime,
                       systime=systime,
                       usrtime=usrtime,
                       pkg_energy=pkg_energy,
                       pp0_energy=pp0_energy,
                       pkg_power=pkg_power)
def main():
    """Program Entry Point: parse every input file and print the records."""
    args = process_arguments()
    results = []
    for source in args.infiles:
        results.append(parse_file(source))
        source.close()
    print(results)


if __name__ == '__main__':
    main()
|
import rain.lexer as L
import rain.token as K
import pytest
def test_factory():
    """factory() maps keywords to keyword tokens and other text to names."""
    cases = [
        ('return', K.keyword_token('return')),
        ('this', K.name_token('this')),
        # Underscores are dropped and the text lowercased for name tokens.
        ('Multi_Word', K.name_token('multiword')),
    ]
    for text, expected in cases:
        assert L.factory(text) == expected
def test_keywords():
    """Every known keyword lexes to the matching keyword token."""
    source = ' '.join(L.KEYWORDS)
    for token, keyword in zip(L.stream(source), L.KEYWORDS):
        assert token == K.keyword_token(keyword)
def test_operators():
    """Keyword operators and symbolic operators both lex to operator tokens."""
    for operator_group in (L.KW_OPERATORS, L.OPERATORS):
        stream = L.stream(' '.join(operator_group))
        for token, operator in zip(stream, operator_group):
            assert token == K.operator_token(operator)
def test_literals():
    """Int, float, bool, string, null and table literals all tokenize."""
    stream = L.stream('0 10 0.0 0.1 0.12 1.23 12.34 true false "string" "escaped \\"string\\"" null table')
    expected_tokens = [
        K.int_token(0),
        K.int_token(10),
        K.float_token(0.0),
        K.float_token(0.1),
        K.float_token(0.12),
        K.float_token(1.23),
        K.float_token(12.34),
        K.bool_token('true'),
        K.bool_token('false'),
        K.string_token('"string"'),
        K.string_token('"escaped \\"string\\""'),
        K.null_token('null'),
        K.table_token('table'),
        K.end_token(),
    ]
    for expected in expected_tokens:
        assert next(stream) == expected
def test_whitespace():
    """Blank lines and trailing spaces are ignored; indentation changes
    emit indent/dedent tokens (input string preserved from the original)."""
    stream = L.stream('1\n'
                      '\n' # extra blank lines
                      ' \n'
                      '2\n'
                      ' 3\n' # indent
                      '\n' # blank lines in a block
                      '\n'
                      ' 4\n'
                      ' 5 \n'
                      '\n' # all sorts of whitespace
                      ' \n'
                      ' \n'
                      ' \n'
                      ' \n'
                      ' 6 \n' # trailing whitespace
                      ' 7\n' # dedent
                      ' 8\n'
                      '9\n' # multiple simultaneous dedents
                      ' 10\n'
                      ' 11\n') # ending dedent
    expected_tokens = [
        K.int_token(1), K.newline_token(),
        K.int_token(2), K.indent_token(),
        K.int_token(3), K.newline_token(),
        K.int_token(4), K.indent_token(),
        K.int_token(5), K.newline_token(),
        K.int_token(6), K.newline_token(),
        K.dedent_token(), K.newline_token(),
        K.int_token(7), K.indent_token(),
        K.int_token(8), K.newline_token(),
        K.dedent_token(), K.newline_token(),
        K.dedent_token(), K.newline_token(),
        K.int_token(9), K.indent_token(),
        K.int_token(10), K.indent_token(),
        K.int_token(11), K.newline_token(),
        K.dedent_token(), K.newline_token(),
        K.dedent_token(), K.newline_token(),
        K.end_token(),
    ]
    for expected in expected_tokens:
        assert next(stream) == expected
def test_comments():
    """Comments are stripped everywhere: full lines, line tails, block
    ends, and end of program."""
    stream = L.stream('# full line\n'
                      '1 # end of line\n'
                      '2 # end of line\n'
                      ' 3 # end of line\n'
                      '# end of block\n'
                      '4 # end of line\n'
                      '# end of program')
    expected_tokens = [
        K.int_token(1), K.newline_token(),
        K.int_token(2), K.indent_token(),
        K.int_token(3), K.newline_token(),
        K.dedent_token(), K.newline_token(),
        K.int_token(4), K.newline_token(),
        K.end_token(),
    ]
    for expected in expected_tokens:
        assert next(stream) == expected
def test_prints():
    """str/repr are stable for token classes and instances."""
    # The EOF token class and an instance of it print identically.
    for subject in (K.end_token, K.end_token()):
        assert str(subject) == 'EOF'
        assert repr(subject) == '<EOF>'
    five = K.int_token(5)
    assert str(five) == 'int 5'
    assert repr(five) == '<int 5>'
|
import json
from stability_label_algorithm.modules.dataset_generator.dataset import Dataset
from stability_label_algorithm.modules.dataset_generator.dataset_item import DatasetItem
class DatasetJsonReader:
    """Deserializes a ``Dataset`` from its JSON representation."""

    def __init__(self):
        pass

    @staticmethod
    def from_json(json_str: str):
        """Build a Dataset (with its items) from a JSON string."""
        parsed = json.loads(json_str)
        items = [DatasetItem.from_str(item_str)
                 for item_str in parsed['dataset_items']]
        dataset = Dataset(parsed['name'], parsed['argumentation_system_name'])
        dataset.dataset_items = items
        return dataset

    def read_from_json(self, file_path: str) -> Dataset:
        """Read the JSON file at ``file_path`` and parse it into a Dataset."""
        with open(file_path, 'r') as reader:
            return self.from_json(reader.read())
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" BVT tests for Hosts Test
"""
# Import Local Modules
from marvin.codes import FAILED
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
from marvin.sshClient import SshClient
import time
from time import sleep
_multiprocess_shared_ = False
class TestHostHA(cloudstackTestCase):
    """BVT tests for host high availability (HA) behaviour."""

    def setUp(self):
        # Per-test wiring: logger, API/DB clients, and the zone/pod under test.
        self.logger = logging.getLogger('TestHM')
        self.stream_handler = logging.StreamHandler()
        self.logger.setLevel(logging.DEBUG)
        self.logger.addHandler(self.stream_handler)
        self.apiclient = self.testClient.getApiClient()
        self.hypervisor = self.testClient.getHypervisorInfo()
        self.mgtSvrDetails = self.config.__dict__["mgtSvr"][0].__dict__
        self.dbclient = self.testClient.getDbConnection()
        self.services = self.testClient.getParsedTestDataConfig()
        self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
        self.pod = get_pod(self.apiclient, self.zone.id)
        self.cleanup = []
        # NOTE(review): this replaces the parsed test-data config assigned
        # just above with a hand-rolled dict — presumably intentional, but
        # confirm nothing relied on the parsed config.
        self.services = {
            "service_offering": {
                "name": "Ultra Tiny Instance",
                "displaytext": "Ultra Tiny Instance",
                "cpunumber": 1,
                "cpuspeed": 100,
                "memory": 128,
            },
            "service_offering_local": {
                "name": "Ultra Tiny Local Instance",
                "displaytext": "Ultra Tiny Local Instance",
                "cpunumber": 1,
                "cpuspeed": 100,
                "memory": 128,
                "storagetype": "local"
            },
            "vm": {
                "username": "root",
                "password": "password",
                "ssh_port": 22,
                # Hypervisor type should be same as
                # hypervisor type of cluster
                "privateport": 22,
                "publicport": 22,
                "protocol": 'TCP',
            },
            "natrule": {
                "privateport": 22,
                "publicport": 22,
                "startport": 22,
                "endport": 22,
                "protocol": "TCP",
                "cidrlist": '0.0.0.0/0',
            },
            "ostype": 'CentOS 5.3 (64-bit)',
            "sleep": 60,
            "timeout": 10,
        }

    def tearDown(self):
        try:
            # Clean up, terminate the created templates
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def checkHostDown(self, fromHostIp, testHostIp):
        # SSH to fromHostIp and ping testHostIp once; "down" means the
        # single probe reported 100% packet loss. Returns (bool, 1) — the
        # tuple shape is what the polling helper (wait_until) expects.
        try:
            ssh = SshClient(fromHostIp, 22, "root", "password")
            res = ssh.execute("ping -c 1 %s" % testHostIp)
            result = str(res)
            if result.count("100% packet loss") == 1:
                return True, 1
            else:
                return False, 1
        except Exception as e:
            self.logger.debug("Got exception %s" % e)
            return False, 1

    def checkHostUp(self, fromHostIp, testHostIp):
        # Mirror of checkHostDown: "up" means 0% packet loss. The leading
        # space in " 0%" avoids matching the "100%" substring.
        try:
            ssh = SshClient(fromHostIp, 22, "root", "password")
            res = ssh.execute("ping -c 1 %s" % testHostIp)
            result = str(res)
            if result.count(" 0% packet loss") == 1:
                return True, 1
            else:
                return False, 1
        except Exception as e:
            self.logger.debug("Got exception %s" % e)
        return False, 1

    def checkHostStateInCloudstack(self, state, hostId):
        # Compare the management server's view of the host's state against
        # the expected `state`; returns (matched, 1) for polling helpers.
        try:
            listHost = Host.list(
                self.apiclient,
                type='Routing',
                zoneid=self.zone.id,
                podid=self.pod.id,
                id=hostId
            )
            self.assertEqual(
                isinstance(listHost, list),
                True,
                "Check if listHost returns a valid response"
            )
            self.assertEqual(
                len(listHost),
                1,
                "Check if listHost returns a host"
            )
            self.logger.debug(" Host state is %s " % listHost[0].state)
            if listHost[0].state == state:
                return True, 1
            else:
                return False, 1
        except Exception as e:
            self.logger.debug("Got exception %s" % e)
            return False, 1

    def updateConfigurAndRestart(self, name, value):
        # Apply a global config change, then restart the management server
        # so it takes effect; sleep to let the server come back up.
        Configurations.update(self.apiclient, name, value)
        self.RestartServers()
        time.sleep(self.services["sleep"])

    def RestartServers(self):
        """ Restart management
        server and usage server """
        sshClient = SshClient(self.mgtSvrDetails["mgtSvrIp"],
                              22,
                              self.mgtSvrDetails["user"],
                              self.mgtSvrDetails["passwd"]
                              )
        command = "service cloudstack-management restart"
        sshClient.execute(command)
        return

    @attr(
        tags=[
            "advanced",
            "advancedns",
            "smoke",
            "basic",
            "eip",
            "sg"],
        required_hardware="true")
    def test_01_host_ha_with_nfs_storagepool_with_vm(self):
        # Shorten the HA ping timeout/interval, then verify the first
        # routing host in the pod is reported "Up" by the management server.
        Configurations.update(self.apiclient, "ping.timeout", "150")
        self.updateConfigurAndRestart("ping.interval", "150")
        listHost = Host.list(
            self.apiclient,
            type='Routing',
            zoneid=self.zone.id,
            podid=self.pod.id,
        )
        for host in listHost:
            self.logger.debug('Hypervisor = {}'.format(host.id))
        hostToTest = listHost[0]
        hostUpInCloudstack = wait_until(40, 10, self.checkHostStateInCloudstack, "Up", hostToTest.id)
        if not(hostUpInCloudstack):
            # NOTE(review): self.fail() raises on its own; the ``raise`` is
            # redundant but harmless.
            raise self.fail("Host is not up %s, in cloudstack so failing test " % (hostToTest.ipaddress))
        return
|
"""
Aries Cloud Agent
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v0.7.2
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from acapy_client.api_client import ApiClient, Endpoint as _Endpoint
from acapy_client.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types,
)
from acapy_client.model.attribute_mime_types_result import AttributeMimeTypesResult
from acapy_client.model.cred_info_list import CredInfoList
from acapy_client.model.cred_revoked_result import CredRevokedResult
from acapy_client.model.indy_cred_info import IndyCredInfo
from acapy_client.model.vc_record import VCRecord
from acapy_client.model.vc_record_list import VCRecordList
from acapy_client.model.w3_c_credentials_list_request import W3CCredentialsListRequest
class CredentialsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.credential_credential_id_delete_endpoint = _Endpoint(
settings={
"response_type": (
bool,
date,
datetime,
dict,
float,
int,
list,
str,
none_type,
),
"auth": ["AuthorizationHeader"],
"endpoint_path": "/credential/{credential_id}",
"operation_id": "credential_credential_id_delete",
"http_method": "DELETE",
"servers": None,
},
params_map={
"all": [
"credential_id",
],
"required": [
"credential_id",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"credential_id": (str,),
},
"attribute_map": {
"credential_id": "credential_id",
},
"location_map": {
"credential_id": "path",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
)
self.credential_credential_id_get_endpoint = _Endpoint(
settings={
"response_type": (IndyCredInfo,),
"auth": ["AuthorizationHeader"],
"endpoint_path": "/credential/{credential_id}",
"operation_id": "credential_credential_id_get",
"http_method": "GET",
"servers": None,
},
params_map={
"all": [
"credential_id",
],
"required": [
"credential_id",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"credential_id": (str,),
},
"attribute_map": {
"credential_id": "credential_id",
},
"location_map": {
"credential_id": "path",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
)
self.credential_mime_types_credential_id_get_endpoint = _Endpoint(
settings={
"response_type": (AttributeMimeTypesResult,),
"auth": ["AuthorizationHeader"],
"endpoint_path": "/credential/mime-types/{credential_id}",
"operation_id": "credential_mime_types_credential_id_get",
"http_method": "GET",
"servers": None,
},
params_map={
"all": [
"credential_id",
],
"required": [
"credential_id",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"credential_id": (str,),
},
"attribute_map": {
"credential_id": "credential_id",
},
"location_map": {
"credential_id": "path",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
)
self.credential_revoked_credential_id_get_endpoint = _Endpoint(
settings={
"response_type": (CredRevokedResult,),
"auth": ["AuthorizationHeader"],
"endpoint_path": "/credential/revoked/{credential_id}",
"operation_id": "credential_revoked_credential_id_get",
"http_method": "GET",
"servers": None,
},
params_map={
"all": [
"credential_id",
"_from",
"to",
],
"required": [
"credential_id",
],
"nullable": [],
"enum": [],
"validation": [
"_from",
"to",
],
},
root_map={
"validations": {
("_from",): {
"regex": {
"pattern": r"^[0-9]*$", # noqa: E501
},
},
("to",): {
"regex": {
"pattern": r"^[0-9]*$", # noqa: E501
},
},
},
"allowed_values": {},
"openapi_types": {
"credential_id": (str,),
"_from": (str,),
"to": (str,),
},
"attribute_map": {
"credential_id": "credential_id",
"_from": "from",
"to": "to",
},
"location_map": {
"credential_id": "path",
"_from": "query",
"to": "query",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
)
self.credential_w3c_credential_id_delete_endpoint = _Endpoint(
settings={
"response_type": (
bool,
date,
datetime,
dict,
float,
int,
list,
str,
none_type,
),
"auth": ["AuthorizationHeader"],
"endpoint_path": "/credential/w3c/{credential_id}",
"operation_id": "credential_w3c_credential_id_delete",
"http_method": "DELETE",
"servers": None,
},
params_map={
"all": [
"credential_id",
],
"required": [
"credential_id",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"credential_id": (str,),
},
"attribute_map": {
"credential_id": "credential_id",
},
"location_map": {
"credential_id": "path",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
)
self.credential_w3c_credential_id_get_endpoint = _Endpoint(
settings={
"response_type": (VCRecord,),
"auth": ["AuthorizationHeader"],
"endpoint_path": "/credential/w3c/{credential_id}",
"operation_id": "credential_w3c_credential_id_get",
"http_method": "GET",
"servers": None,
},
params_map={
"all": [
"credential_id",
],
"required": [
"credential_id",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"credential_id": (str,),
},
"attribute_map": {
"credential_id": "credential_id",
},
"location_map": {
"credential_id": "path",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
)
self.credentials_get_endpoint = _Endpoint(
settings={
"response_type": (CredInfoList,),
"auth": ["AuthorizationHeader"],
"endpoint_path": "/credentials",
"operation_id": "credentials_get",
"http_method": "GET",
"servers": None,
},
params_map={
"all": [
"count",
"start",
"wql",
],
"required": [],
"nullable": [],
"enum": [],
"validation": [
"count",
"start",
],
},
root_map={
"validations": {
("count",): {
"regex": {
"pattern": r"^[1-9][0-9]*$", # noqa: E501
},
},
("start",): {
"regex": {
"pattern": r"^[0-9]*$", # noqa: E501
},
},
},
"allowed_values": {},
"openapi_types": {
"count": (str,),
"start": (str,),
"wql": (str,),
},
"attribute_map": {
"count": "count",
"start": "start",
"wql": "wql",
},
"location_map": {
"count": "query",
"start": "query",
"wql": "query",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
)
self.credentials_w3c_post_endpoint = _Endpoint(
settings={
"response_type": (VCRecordList,),
"auth": ["AuthorizationHeader"],
"endpoint_path": "/credentials/w3c",
"operation_id": "credentials_w3c_post",
"http_method": "POST",
"servers": None,
},
params_map={
"all": [
"count",
"start",
"wql",
"body",
],
"required": [],
"nullable": [],
"enum": [],
"validation": [
"count",
"start",
],
},
root_map={
"validations": {
("count",): {
"regex": {
"pattern": r"^[1-9][0-9]*$", # noqa: E501
},
},
("start",): {
"regex": {
"pattern": r"^[0-9]*$", # noqa: E501
},
},
},
"allowed_values": {},
"openapi_types": {
"count": (str,),
"start": (str,),
"wql": (str,),
"body": (W3CCredentialsListRequest,),
},
"attribute_map": {
"count": "count",
"start": "start",
"wql": "wql",
},
"location_map": {
"count": "query",
"start": "query",
"wql": "query",
"body": "body",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
)
def credential_credential_id_delete(self, credential_id, **kwargs):
"""Remove credential from wallet by id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credential_credential_id_delete(credential_id, async_req=True)
>>> result = thread.get()
Args:
credential_id (str): Credential identifier
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
bool, date, datetime, dict, float, int, list, str, none_type
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["credential_id"] = credential_id
return self.credential_credential_id_delete_endpoint.call_with_http_info(
**kwargs
)
def credential_credential_id_get(self, credential_id, **kwargs):
"""Fetch credential from wallet by id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credential_credential_id_get(credential_id, async_req=True)
>>> result = thread.get()
Args:
credential_id (str): Credential identifier
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
IndyCredInfo
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["credential_id"] = credential_id
return self.credential_credential_id_get_endpoint.call_with_http_info(**kwargs)
def credential_mime_types_credential_id_get(self, credential_id, **kwargs):
"""Get attribute MIME types from wallet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credential_mime_types_credential_id_get(credential_id, async_req=True)
>>> result = thread.get()
Args:
credential_id (str): Credential identifier
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
AttributeMimeTypesResult
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["credential_id"] = credential_id
return (
self.credential_mime_types_credential_id_get_endpoint.call_with_http_info(
**kwargs
)
)
def credential_revoked_credential_id_get(self, credential_id, **kwargs):
"""Query credential revocation status by id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credential_revoked_credential_id_get(credential_id, async_req=True)
>>> result = thread.get()
Args:
credential_id (str): Credential identifier
Keyword Args:
_from (str): Earliest epoch of revocation status interval of interest. [optional]
to (str): Latest epoch of revocation status interval of interest. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
CredRevokedResult
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["credential_id"] = credential_id
return self.credential_revoked_credential_id_get_endpoint.call_with_http_info(
**kwargs
)
def credential_w3c_credential_id_delete(self, credential_id, **kwargs):
"""Remove W3C credential from wallet by id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credential_w3c_credential_id_delete(credential_id, async_req=True)
>>> result = thread.get()
Args:
credential_id (str): Credential identifier
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
bool, date, datetime, dict, float, int, list, str, none_type
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["credential_id"] = credential_id
return self.credential_w3c_credential_id_delete_endpoint.call_with_http_info(
**kwargs
)
def credential_w3c_credential_id_get(self, credential_id, **kwargs):
"""Fetch W3C credential from wallet by id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credential_w3c_credential_id_get(credential_id, async_req=True)
>>> result = thread.get()
Args:
credential_id (str): Credential identifier
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VCRecord
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["credential_id"] = credential_id
return self.credential_w3c_credential_id_get_endpoint.call_with_http_info(
**kwargs
)
def credentials_get(self, **kwargs):
"""Fetch credentials from wallet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credentials_get(async_req=True)
>>> result = thread.get()
Keyword Args:
count (str): Maximum number to retrieve. [optional]
start (str): Start index. [optional]
wql (str): (JSON) WQL query. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
CredInfoList
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
return self.credentials_get_endpoint.call_with_http_info(**kwargs)
def credentials_w3c_post(self, **kwargs):
"""Fetch W3C credentials from wallet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credentials_w3c_post(async_req=True)
>>> result = thread.get()
Keyword Args:
count (str): Maximum number to retrieve. [optional]
start (str): Start index. [optional]
wql (str): (JSON) WQL query. [optional]
body (W3CCredentialsListRequest): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VCRecordList
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
return self.credentials_w3c_post_endpoint.call_with_http_info(**kwargs)
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Tests for cublas-functions benchmark."""
import numbers
from tests.helper import decorator
from superbench.benchmarks import BenchmarkRegistry, BenchmarkType, ReturnCode, Platform
def _check_cublas_benchmark_basics(benchmark):
    """Shared checks: registration info and parsed CLI parameters."""
    assert (benchmark)
    assert (benchmark.name == 'cublas-function')
    assert (benchmark.type == BenchmarkType.MICRO)
    assert (benchmark._args.num_warmup == 10)
    assert (benchmark._args.num_steps == 10)
    assert (benchmark._args.num_in_step == 100)


def _check_cublas_benchmark_results(benchmark):
    """Shared checks: run status, raw output and per-metric result shapes."""
    assert (benchmark.run_count == 1)
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    assert ('raw_output_0' in benchmark.raw_data)
    assert (len(benchmark.raw_data['raw_output_0']) == 1)
    assert (isinstance(benchmark.raw_data['raw_output_0'][0], str))
    for metric in list(benchmark.result.keys()):
        assert (len(benchmark.result[metric]) == 1)
        assert (isinstance(benchmark.result[metric][0], numbers.Number))
        assert (len(benchmark.raw_data[metric][0]) == benchmark._args.num_steps)


@decorator.cuda_test
def test_cublas_functions():
    """Test cublas-function benchmark with default and custom configurations.

    Previously the default-config and custom-config halves duplicated the
    same assertion block verbatim; the shared checks now live in the two
    private helpers above.
    """
    # Default configuration: all built-in cublas functions are measured,
    # so at least 19 metrics are expected.
    context = BenchmarkRegistry.create_benchmark_context(
        'cublas-function', platform=Platform.CUDA, parameters='--num_warmup 10 --num_steps 10 --num_in_step 100'
    )
    assert (BenchmarkRegistry.is_benchmark_context_valid(context))
    benchmark = BenchmarkRegistry.launch_benchmark(context)
    _check_cublas_benchmark_basics(benchmark)
    _check_cublas_benchmark_results(benchmark)
    assert (19 <= len(benchmark.result))

    # Custom configuration: a single cublasCgemm case, so exactly 1 metric.
    custom_config_str = '{"name":"cublasCgemm","m":512,"n":512,"k":32,"transa":1,"transb":0}'
    context = BenchmarkRegistry.create_benchmark_context(
        'cublas-function',
        platform=Platform.CUDA,
        parameters='--num_warmup 10 --num_steps 10 --num_in_step 100 --config_json_str ' + custom_config_str
    )
    assert (BenchmarkRegistry.is_benchmark_context_valid(context))
    benchmark = BenchmarkRegistry.launch_benchmark(context)
    _check_cublas_benchmark_basics(benchmark)
    _check_cublas_benchmark_results(benchmark)
    assert (1 == len(benchmark.result))
|
# Example 5.1 from "Test and roll: profit maximizing A/B tests"
# https://pubsonline.informs.org/doi/abs/10.1287/mksc.2019.1194
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
def optimal_sample_size(N, s, sigma):
    """Profit-maximizing test size per arm (eqn 10 of the test-and-roll paper).

    N is the deployable population, s the response noise std-dev and sigma
    the prior std-dev of the difference in arm means.
    """
    noise_ratio = (s / sigma) ** 2
    return np.sqrt(0.25 * N * noise_ratio + (0.75 * noise_ratio) ** 2) - 0.75 * noise_ratio
def prob_error(n1, n2, s, sigma):
    """Probability of deploying the wrong arm (eqn 12).

    Doubled because the error can occur in either direction
    (m1 < m2 or m1 > m2).
    """
    precision = np.sqrt(2) * sigma / s * np.sqrt(n1 * n2 / (n1 + n2))
    one_sided = 0.25 - np.arctan(precision) / (2 * np.pi)
    return 2 * one_sided
def eprofit_deploy(N, n1, n2, s, mu, sigma):
    """Expected profit from rolling out the winning arm to the remaining
    N - n1 - n2 customers (eqn 9)."""
    posterior_var = 2 * sigma ** 2 + (n1 + n2) / (n1 * n2) * (s ** 2)
    lift = np.sqrt(2) * sigma ** 2 / (np.sqrt(np.pi) * np.sqrt(posterior_var))
    return (N - n1 - n2) * (mu + lift)


def eprofit_test(N, n1, n2, s, mu, sigma):
    """Expected profit earned during the test phase itself (eqn 7)."""
    return (n1 + n2) * mu


def eprofit_total(N, n1, n2, s, mu, sigma):
    """Total expected profit: test phase plus deployment phase."""
    return eprofit_test(N, n1, n2, s, mu, sigma) + eprofit_deploy(N, n1, n2, s, mu, sigma)
# Parameters for Example 5.1: mean conversion rate mu, prior std-dev sigma
# of the difference between arms, and total audience size N.
mu = 0.68
sigma = 0.03
N = 100000
# Bernoulli response noise: s = sqrt(mu * (1 - mu)).
s = np.sqrt(mu*(1-mu))
# Profit-maximizing test size per arm (eqn 10).
nopt = optimal_sample_size(N, s, sigma)
print(nopt) # 2283.9
n1 = nopt
n2 = nopt
# Error rate and expected profits at the optimal test size.
p = prob_error(n1, n2, s, sigma)
print(p) # 0.10
print(eprofit_test(N, n1, n2, s, mu, sigma)) # 3106
print(eprofit_deploy(N, n1, n2, s, mu, sigma)) # 66429.9
eprofit_opt = eprofit_total(N, n1, n2, s, mu, sigma)
error_rate_opt = prob_error(n1, n2, s, sigma)
# Sweep equal test sizes n1 = n2 = n over a grid so profit and error rate
# can be plotted as functions of test size.
ns = np.linspace(0, 50000, 1000)
K = len(ns)
eprofit = np.zeros(K)
error_rate = np.zeros(K)
for k, n in enumerate(ns):
    n1 = n; n2 = n
    eprofit[k] = eprofit_total(N, n1, n2, s, mu, sigma)
    error_rate[k] = prob_error(n1, n2, s, sigma)
# Expected total profit vs test size, with the optimum marked.
plt.figure();
plt.plot(ns, eprofit)
plt.xlabel('Test size')
plt.ylabel('Expected #conversions')
plt.axvline(nopt)
plt.axhline(eprofit_opt)
plt.text(nopt, eprofit_opt, 'n*={:0.1f}'.format(nopt))
pml.savefig('ab_profit.pdf')
plt.show()
# Expected error rate vs test size, with the optimum marked.
plt.figure();
plt.plot(ns, error_rate)
plt.xlabel('Test size')
plt.ylabel('Expected error rate')
plt.axvline(nopt)
plt.axhline(error_rate_opt)
plt.text(nopt, error_rate_opt, 'n*={:0.1f}'.format(nopt))
pml.savefig('ab_error.pdf')
plt.show()
from kubernetes import client
from django.conf import settings
from libs.utils import get_list
from polyaxon_k8s import constants as k8s_constants
from scheduler.spawners.templates import constants
from scheduler.spawners.templates.env_vars import get_resources_env_vars
from scheduler.spawners.templates.gpu_volumes import get_gpu_volumes_def
from scheduler.spawners.templates.project_jobs.labels import get_labels
from scheduler.spawners.templates.resources import get_resources
def get_project_pod_spec(volume_mounts,
                         volumes,
                         image,
                         command,
                         args,
                         ports,
                         env_vars=None,
                         container_name=None,
                         resources=None,
                         node_selector=None,
                         restart_policy=None,
                         use_service_account=False):
    """Pod spec to be used to create pods for project: tensorboard, notebooks."""
    # Normalize the optional list arguments, then fold in the GPU
    # mounts/volumes derived from the requested resources.
    all_mounts = get_list(volume_mounts)
    all_volumes = get_list(volumes)
    gpu_mounts, gpu_vols = get_gpu_volumes_def(resources)
    all_mounts += gpu_mounts
    all_volumes += gpu_vols
    container_ports = [client.V1ContainerPort(container_port=port) for port in ports]
    all_env = get_list(env_vars) + get_resources_env_vars(resources=resources)
    container = client.V1Container(name=container_name,
                                   image=image,
                                   command=command,
                                   args=args,
                                   ports=container_ports,
                                   env=all_env,
                                   resources=get_resources(resources),
                                   volume_mounts=all_mounts)
    # Only attach a service account when RBAC is enabled cluster-wide.
    service_account_name = None
    if use_service_account and settings.K8S_RBAC_ENABLED:
        service_account_name = settings.K8S_SERVICE_ACCOUNT_NAME
    return client.V1PodSpec(restart_policy=restart_policy,
                            service_account_name=service_account_name,
                            containers=[container],
                            volumes=all_volumes,
                            node_selector=node_selector)
def get_pod(namespace,
            app,
            name,
            project_name,
            project_uuid,
            job_name,
            job_uuid,
            volume_mounts,
            volumes,
            image,
            command,
            args,
            ports,
            container_name,
            env_vars=None,
            resources=None,
            node_selector=None,
            type=None,  # pylint:disable=redefined-builtin
            role=None,
            restart_policy=None,
            use_service_account=False):
    """Assemble a V1Pod for a project workload (tensorboard/notebook)."""
    spec = get_project_pod_spec(
        volume_mounts=volume_mounts,
        volumes=volumes,
        image=image,
        container_name=container_name,
        command=command,
        args=args,
        resources=resources,
        node_selector=node_selector,
        ports=ports,
        env_vars=env_vars,
        use_service_account=use_service_account,
        restart_policy=restart_policy)
    pod_labels = get_labels(app=app,
                            project_name=project_name,
                            project_uuid=project_uuid,
                            job_name=job_name,
                            job_uuid=job_uuid,
                            role=role,
                            type=type)
    pod_metadata = client.V1ObjectMeta(
        name=constants.JOB_NAME.format(name=name, job_uuid=job_uuid),
        labels=pod_labels,
        namespace=namespace)
    return client.V1Pod(api_version=k8s_constants.K8S_API_VERSION_V1,
                        kind=k8s_constants.K8S_POD_KIND,
                        metadata=pod_metadata,
                        spec=spec)
|
import os
import classification.data_loading as dl
from classification.lstm_model import SmelLSTM
from classification.util import get_classes_list
from e_nose.measurements import DataType
# Evaluation script: load a trained SmelLSTM checkpoint and measure its
# classification accuracy on the held-out measurement set.
# specify model to test
model_name = 'LSTMTrainable_15750966_1740_batch_size=128,dim_hidden=6,lr=0.004831,return_sequences=True_2020-03-05_08-08-45fs4p25pg'
checkpoint = 200
path = './models/rnn/'
# model configuration
# get this from model_name
dim_hidden = 6
# configure input data
batch_size = 1 # Does not need to match batch_size in model_name. Set this to 1 in order to analyse one sample at a time.
sequence_length = 45
# If return_sequences = True (which is usually the case), specifying the exact sequence length is not necessary to load the model.
# However, to obtain good results the sequence to predict should be similar to the training sequences (similar starting point and sequence length).
dim = 34 # Dimensions of input data (the number of channels model was trained on). Must equal the number of working channels of data to test on.
input_shape = (batch_size, sequence_length, dim)
masking_value = 100. # Values to be ignored.
# data loading
data_path_train = os.path.join(os.getcwd(), '../data_train')
data_path_test = os.path.join(os.getcwd(), '../data_test')
measurements_tr, measurements_te, correct_func = dl.get_measurements_train_test_from_dir(data_path_train, data_path_test)
print('correct_func', correct_func)
classes_list = get_classes_list(measurements_te)
print(classes_list)
lstm = SmelLSTM(input_shape=input_shape, dim_hidden=dim_hidden, num_classes=len(classes_list), masking_value=masking_value)
lstm.summary()
lstm.load_weights(model_name=model_name, checkpoint=checkpoint, path=path)
counter = 0
counter_correct = 0
# loop over measurements to be classified
for m in measurements_te:
    # NOTE(review): these two labels are excluded from the evaluation --
    # presumably classes the model was not trained on; confirm.
    if m.label == 'raisin' or m.label == 'coffee_powder':
        continue
    #max_length = m.data.shape[0]
    print('Ground Truth: ', m.label)
    data = m.get_data_as(DataType.HIGH_PASS)[:sequence_length]
    print(data.shape)
    prediction = lstm.predict_from_batch(data)
    lstm.reset_states()
    print('prediction: ', prediction)
    #input(' ')
    if prediction == m.label:
        counter_correct += 1
    counter += 1
# NOTE(review): raises ZeroDivisionError if every measurement was skipped.
print('accuracy: ', counter_correct/counter)
import discord
import os
import requests
from discord.ext import commands, tasks
from nerdlandbot.commands.GuildData import get_all_guilds_data, get_guild_data, GuildData
from nerdlandbot.helpers.channel import get_channel
from nerdlandbot.helpers.log import info, fatal
from nerdlandbot.helpers.TranslationHelper import get_culture_from_context as culture
from nerdlandbot.scheduler.YoutubeScheduler import get_latest_video
from nerdlandbot.translations.Translations import get_text as translate
class Youtube(commands.Cog, name="Youtube_lists"):
    """Cog with admin commands to manage YouTube upload notifications.

    NOTE(review): ``setup`` instantiates this cog as ``Youtube(bot)`` but
    there is no ``__init__`` storing the bot; the argument is silently
    discarded by ``Cog.__new__`` -- confirm whether a bot reference is
    actually needed here.
    """
    @commands.command(
        name="add_youtube", usage="add_youtube_usage", help="add_youtube_help",
    )
    async def add_youtube_channel(
        self, ctx: commands.Context, youtube_channel_id: str, text_channel: str
    ):
        """
        Add a Youtube channel to be notified
        :param ctx: The current context. (discord.ext.commands.Context)
        :param youtube_channel_id: The Youtube channel to be notified of (str)
        :param text_channel: The text channel that will receive the notification (str)
        """
        guild_data = await get_guild_data(ctx.message.guild.id)
        # Error if not admin
        if not guild_data.user_is_admin(ctx.author):
            gif = translate("not_admin_gif", await culture(ctx))
            return await ctx.send(gif)
        # TODO: throw specific error with message when channel ID is wrong
        # Fetching the latest video doubles as validation of the channel id
        # and seeds the "last seen" marker for the scheduler.
        latest_video = await get_latest_video(youtube_channel_id)
        # Get the channel
        channel = get_channel(ctx, text_channel)
        # TODO: Give information to the user when the text channel does not exist
        if not channel:
            await ctx.channel.send(translate("membercount_channel_nonexistant", await culture(ctx)))
            raise Exception("Invalid text channel provided")
        if isinstance(channel, discord.VoiceChannel):
            await ctx.channel.send(translate("channel_is_voice", await culture(ctx)))
            return
        add_response = await guild_data.add_youtube_channel(
            youtube_channel_id, channel, latest_video["video_id"]
        )
        msg = ""
        # add_response is falsy when the channel was already registered.
        if add_response:
            msg = translate("youtube_added", await culture(ctx)).format(
                youtube_channel_id, channel
            )
        else:
            msg = translate("youtube_exists", await culture(ctx)).format(
                youtube_channel_id
            )
        info(msg)
        await ctx.send(msg)
    @commands.command(
        name="remove_youtube", usage="remove_youtube_usage", help="remove_youtube_help",
    )
    async def remove_youtube_channel(
        self, ctx: commands.Context, youtube_channel_id: str):
        """
        Remove a Youtube channel that was being notified
        :param ctx: The current context. (discord.ext.commands.Context)
        :param youtube_channel_id: The Youtube channel to be notified of (str)
        """
        guild_data = await get_guild_data(ctx.message.guild.id)
        # Error if not admin
        if not guild_data.user_is_admin(ctx.author):
            gif = translate("not_admin_gif", await culture(ctx))
            return await ctx.send(gif)
        remove_response = await guild_data.remove_youtube_channel(youtube_channel_id)
        msg = ""
        # remove_response is falsy when the channel was not registered.
        if remove_response:
            msg = translate("youtube_removed", await culture(ctx)).format(
                youtube_channel_id
            )
        else:
            msg = translate("youtube_no_exists", await culture(ctx)).format(
                youtube_channel_id
            )
        info(msg)
        await ctx.send(msg)
    @commands.command(
        name="list_youtube", help="list_youtube_help",
    )
    async def list_youtube_channels(self, ctx: commands.Context):
        """
        List all Youtube channels that are being monitored
        """
        guild_data = await get_guild_data(ctx.message.guild.id)
        msg = translate("youtube_list_title", await culture(ctx))
        for channel_id, channel_data in guild_data.youtube_channels.items():
            msg = (
                msg
                + f"\n - Channel `{channel_id}` posts in <#{channel_data['text_channel_id']}>, last video ID: `{channel_data['latest_video_id']}`"
            )
        await ctx.send(msg)
def setup(bot: commands.bot):
    """Entry point used by discord.py to register this cog on the bot."""
    bot.add_cog(Youtube(bot))
|
"""
Copyright 2016 Platform9 Systems Inc.(http://www.platform9.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .nova_lease_handler import NovaLeaseHandler
from .fake_lease_handler import FakeLeaseHandler
def get_lease_handler(conf):
    """Select the lease handler implementation from configuration.

    Returns a FakeLeaseHandler when the DEFAULT section's
    ``lease_handler`` option is "test"; otherwise the real
    NovaLeaseHandler.
    """
    handler_cls = (FakeLeaseHandler
                   if conf.get("DEFAULT", "lease_handler") == "test"
                   else NovaLeaseHandler)
    return handler_cls(conf)
|
#
# Copyright (C) Open End AB 2007-2009, All rights reserved
# See LICENSE.txt
#
import os
import mimetypes
from BaseHTTPServer import BaseHTTPRequestHandler
from wsgiref import headers
from wsgiref.util import shift_path_info
def status_string(code):
    """Format an HTTP status line such as "200 OK" from a numeric code."""
    phrase = BaseHTTPRequestHandler.responses[code][0]
    return "%d %s" % (code, phrase)
class Serve(object):
    """WSGI application wrapping a simple ``serve(environ, data)`` callable.

    POST bodies are read and passed as ``data``; other methods pass None.
    ``serve`` may return response data, a (data, mimetype[, cache]) tuple,
    or a bare integer HTTP status code.
    """
    def __init__(self, servefunc=None):
        # Allow the handler to be injected instead of subclassed.
        if servefunc:
            self.serve = servefunc
    def __call__(self, environ, start_response):
        if environ['REQUEST_METHOD'] == 'POST':
            length = environ.get('CONTENT_LENGTH')
            if length is None:
                data = ''
            else:
                data = environ['wsgi.input'].read(int(length))
        else:
            data = None
        resp = self.serve(environ, data)
        # Normalize a bare return value into the respond() argument tuple.
        if type(resp) is not tuple:
            resp = (resp,)
        return self.respond(start_response, *resp)
    def respond(self, start_response, data, mimetype='text/plain', cache=True):
        # An integer response is treated as a bare HTTP status code.
        if type(data) is int:
            status = status_string(data)
            start_response(status, [])
            return [status + '\n']
        respHeaders = headers.Headers([])
        # mimetype may also be a (type, charset) pair.
        if type(mimetype) is tuple:
            mimetype, charset = mimetype
            respHeaders.add_header('content-type', mimetype,
                                   charset=charset)
        else:
            respHeaders.add_header('content-type', mimetype)
        if not cache:
            respHeaders.add_header('cache-control', 'no-cache')
        start_response(status_string(200), respHeaders.items())
        return [data]
    def serve(self, path, data):
        # Bug fix: was "raise NontImplementedError" (a typo), which raised
        # NameError instead of the intended NotImplementedError.
        raise NotImplementedError
class Dispatch(object):
    """WSGI app that routes requests to sub-apps by URL prefix.

    ``appmap`` maps path prefixes to WSGI applications.  Prefixes are
    kept reverse-sorted so that longer (more specific) prefixes are
    tried before shorter ones.
    """
    def __init__(self, appmap):
        self.appmap = sorted(appmap.items(), reverse=True)
    def notFound(self, start_response):
        # Shared 404 response for unmatched paths.
        status = status_string(404)
        start_response(status, [])
        return [status + '\n']
    def __call__(self, environ, start_response):
        path = environ['PATH_INFO']
        for prefix, app in self.appmap:
            if path.startswith(prefix):
                slashes = prefix.count('/')
                if prefix[-1] != '/':
                    # A prefix without a trailing slash must match the path
                    # exactly; otherwise matching is aborted entirely.
                    if path != prefix:
                        break
                    slashes += 1
                # Consume the matched prefix segments so the sub-app sees a
                # PATH_INFO relative to its mount point.
                for i in range(slashes - 1):
                    shift_path_info(environ)
                return app(environ, start_response)
        # no match
        return self.notFound(start_response)
class ServeFiles(Serve):
    """Serve regular files from the directory tree rooted at ``root``."""
    def __init__(self, root, cache=True):
        self.root = root
        self.cache = cache
    def find(self, path):
        """Resolve ``path`` under root; None if outside, missing or unreadable."""
        root_abs = os.path.abspath(self.root)
        candidate = os.path.abspath(os.path.join(self.root, path))
        if not candidate.startswith(root_abs):
            return None
        if not os.path.isfile(candidate):
            return None
        if not os.access(candidate, os.R_OK):
            return None
        return candidate
    def serve(self, env, data):
        # Only GET-style requests (no body) are supported.
        if data is not None:
            return 405
        path = env['PATH_INFO']
        if not path or not path.startswith('/'):
            return 404
        path = path[1:]
        # Reject empty, traversal, absolute and directory-style paths.
        if (not path or '..' in path or path.startswith('/') or
            path.endswith('/')):
            return 404
        resolved = self.find(path)
        if resolved is None:
            return 404
        f = open(resolved, 'rb')
        try:
            body = f.read()
        finally:
            f.close()
        mimetype, _ = mimetypes.guess_type(resolved, True)
        return body, mimetype, self.cache
|
import os
import mg_server # register components
from mg_server.morphable_graph_state_machine import DEFAULT_CONFIG
# Maps the config "connection_type" value to a server component name.
# NOTE(review): the values look swapped relative to the keys ("tcp" ->
# "animation_websocket_server", "websocket" -> "animation_server") --
# confirm against the registered mg_server component names.
SERVER_TYPE_MAP = dict()
SERVER_TYPE_MAP["tcp"] = "animation_websocket_server"
SERVER_TYPE_MAP["websocket"] = "animation_server"
DATA_DIR = r".\data"  # Windows-style relative data directory
IN_CONFIG = dict()    # default (empty) set of config overrides
def setup_scene(app, model_path, port=8888, in_config=IN_CONFIG, visualize=False):
    """Load a morphable-graph state machine into the scene and start its server.

    :param app: application whose ``scene`` receives the new objects.
    :param model_path: path to an mg.zip model file, or a database model id.
    :param port: port for the animation server component.
    :param in_config: overrides merged on top of DEFAULT_CONFIG.
    :param visualize: whether to show the skeleton.
    :return: the state-machine component, or None when loading failed.
    """
    import copy
    # Bug fix: the original did ``config = DEFAULT_CONFIG`` and then mutated
    # it, so overrides from one call leaked into every later call (and into
    # the nested "algorithm" dict).  Deep-copy the defaults instead.
    config = copy.deepcopy(DEFAULT_CONFIG)
    config.update(in_config)
    if "n_tree_search_candidates" in config:
        config["algorithm"]["n_cluster_search_candidates"] = config["n_tree_search_candidates"]
    use_all_joints = False
    if "use_all_joints" in config:
        use_all_joints = config["use_all_joints"]
    # Pick the server component type from the config, defaulting to "tcp".
    server_type = SERVER_TYPE_MAP["tcp"]
    if "connection_type" in config:
        connection_type = config["connection_type"]
        if connection_type in SERVER_TYPE_MAP:
            server_type = SERVER_TYPE_MAP[connection_type]
    # A file path loads an mg.zip model; anything else is treated as a DB id.
    if os.path.isfile(model_path):
        o = app.scene.object_builder.create_object_from_file("mg.zip", model_path, use_all_joints=use_all_joints, config=config)
    else:
        o = app.scene.object_builder.create_object("mg_from_db",model_path, use_all_joints=use_all_joints, config=config)
    if o is None or "morphablegraph_state_machine" not in o._components:
        print("Error: Could not load model", model_path)
        return
    c = o._components["morphablegraph_state_machine"]
    c.show_skeleton = visualize
    c.activate_emit = False
    server = app.scene.object_builder.create_component(server_type, o, "morphablegraph_state_machine", port)
    o.name = "morphablegraph_state_machine"+str(port)
    if "search_for_message_header" in config:
        server.search_message_header = config["search_for_message_header"]
    c.update_transformation()
    # NOTE(review): this always starts the "animation_server" component even
    # when server_type selected a different one -- confirm component naming.
    o._components["animation_server"].start()
    if "scene_interface_port" in config:
        o = app.scene.object_builder.create_object("scene_interface", config["scene_interface_port"])
        o._components["scene_interface"].start()
    return c
|
#! /usr/bin/env python3
"""Continuously echo lines read from a serial device to stdout."""
import serial
import sys

ser = serial.Serial('/dev/ttyACM0', 9600)
while True:
    # Bug fix: the original used the Python 2 print statement
    # ("print tmp,"), which is a SyntaxError under the python3 shebang.
    # readline() returns bytes on Python 3, so decode before writing.
    line = ser.readline()
    sys.stdout.write(line.decode(errors='replace'))
    sys.stdout.flush()
|
# Sample fixture paths shared by the (not yet implemented) tests below.
sample_replay_filepath = './Game_20210408T225110.slp'
sample_replay_video = './Game_20210408T225110.avi'
def test_record_replay():
    # TODO: implement -- should record the .slp replay to video.
    pass
def test_generate_commfile():
    # TODO: implement -- should generate a comm file for the replay.
    pass
def test_video_trim():
    # TODO: implement -- should trim the recorded video.
    pass
def test_video_num_frames():
    # TODO: implement -- should verify the recorded frame count.
    pass
|
from django.shortcuts import render
# Create your views here.
def index(req):
    """Render the static landing page."""
    return render(req, 'landing/index.html')
"""
Faça um Programa que leia um vetor de 10 caracteres, e diga quantas consoantes foram lidas. Imprima as consoantes.
"""
vetor = list()
vogais = ['A', 'E', 'I', 'O', 'U']
consoantes = list()
for i in range(0,10):
vetor.append(input('Letra: ').upper())
if vetor[i] not in vogais:
pass
else:
consoantes.append(vetor[i])
for i in consoantes:
print(i, end = ' ')
|
"""
Class for retrieving data from several datasets
"""
import importlib
import os.path
from .metadatareader.xray_image_metadata import XRayImageMetadata
class DatasetBaseInterface:  # pylint: disable=too-few-public-methods
    """Common interface implemented by every dataset reader."""
    # Dataset name; concrete subclasses override this.
    dataset = None
    def get_data(self):
        """Return the images and metadata of the dataset."""
        raise NotImplementedError
class DatasetBase(DatasetBaseInterface):  # pylint: disable=too-few-public-methods
    """Dataset reader that locates metadata and image folders under a path."""
    # Concrete subclasses set all three of these class attributes.
    dataset = None
    metadata_folder = None
    images_folder = None
    def __init__(self, **kwargs):
        self.path = kwargs.get("path")
        self.metadata_path = os.path.join(self.path,
                                          self.metadata_folder)
        self.images_path = os.path.join(self.path,
                                        self.images_folder)
    def get_data(self):
        """Collect images and parsed metadata via the dataset's reader modules."""
        metadata_reader = importlib.import_module(
            f"xrayreader.metadatareader.{self.dataset}").Reader
        images_reader = importlib.import_module(
            f"xrayreader.images.{self.dataset}").Reader
        result = {"data": {"dataset": self.dataset}}
        if self.images_folder:
            result['data']['images'] = images_reader(path=self.images_path).get_images()
        if self.metadata_folder:
            result['data']['metadata'] = metadata_reader(path=self.metadata_path).parse_files()
        return result
class ChinaDataset(DatasetBase):#pylint: disable=too-few-public-methods
    """
    Get metadata and images from China.
    """
    # Sub-folder names inside the dataset's root directory.
    dataset = 'china'
    metadata_folder = 'ClinicalReadings'
    images_folder = 'CXR_png'
class MontgomeryDataset(DatasetBase): #pylint: disable=too-few-public-methods
    """
    Get metadata and images from Montgomery.
    """
    # Sub-folder names inside the dataset's root directory.
    dataset = 'montgomery'
    metadata_folder = 'ClinicalReadings'
    images_folder = 'CXR_png'
class IndiaDataset(DatasetBaseInterface):
    """Reader for the India collection, which has no metadata folder;
    metadata is derived from the image file names instead."""
    dataset = 'india'
    def __init__(self, **kwargs):
        self.path = kwargs.get("path")
        # The collection is split into sub-folders; default to DatasetA.
        self.images_folder = kwargs.get("folder", "DatasetA")
    @staticmethod
    def get_metadata(imagename, filename):
        """Pair *imagename* with metadata derived from its file name.

        NOTE(review): ``check_normality`` is set when the image name
        starts with 'p' -- confirm whether 'p' marks normal or
        TB-positive scans.
        :param: imagename
        :type: string
        :return: tuple of (imagename, metadata object)
        :rtype: tuple
        """
        starts_with_p = imagename[0] == 'p'
        return (imagename, XRayImageMetadata(
            filename=filename,
            check_normality=starts_with_p
        ))
    def get_image_reader_module(self):
        """Instantiate the India image reader for the configured folder."""
        reader_cls = importlib.import_module(
            f"xrayreader.images.{self.dataset}").Reader
        return reader_cls(path=self.path,
                          dataset=self.images_folder)
    def get_data(self):
        result = {"data": {"dataset": self.dataset}}
        if self.images_folder:
            images = self.get_image_reader_module().get_images()
            result['data']['images'] = images
            result['data']['metadata'] = dict(
                self.get_metadata(imagename, img.filename)
                for imagename, img in images.items()
            )
        return result
class Dataset:  # pylint: disable=too-few-public-methods
    """Facade returning the data of one named dataset
    (india, china, or montgomery)."""
    _datasets = {
        "india": IndiaDataset,
        "montgomery": MontgomeryDataset,
        "china": ChinaDataset
    }
    def __init__(self, name, path):
        self.name = name
        self.path = path
    def _get_dataset(self):
        """Instantiate the reader for ``self.name``.

        :raises ValueError: when the name is not a known dataset.
        """
        if self.name not in self._datasets:
            raise ValueError("Dataset not found")
        reader_cls = self._datasets.get(self.name)
        return reader_cls(path=self.path)
    def get_data(self):
        """Return the images/metadata of the configured dataset."""
        return self._get_dataset().get_data()
|
# Copyright 2017 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mdts.lib.bindings import BindingManager
from mdts.lib.vtm_neutron import NeutronTopologyManager
from mdts.utils.utils import bindings
import time
# +------------------+
# | network X |
# | |
# | |
# +-+--------------+-+
# | |
# |router |router
# |interface |interface
# | AX_UP_IP | BX_UP_IP
# | |
# +---------------+ +---------+-+ +-+---------+ +---------------+
# | BGP speaker A | | router A | | router B | | BGP speaker B |
# | AS 64512 +--+ | | +--+ AS 64513 |
# | | | | | | | |
# | | | | | | | |
# +------+--------+ +----+------+ +----+------+ +---------+-----+
# | | | |
# +---+-------+ |router |router +--+--------+
# | BGP peer | |interface |interface | BGP peer |
# | BX_UP_IP | | A_PRIV_UP_IP | B_PRIV_UP_IP | AX_UP_IP |
# | AS 64513 | | | | AS 64512 |
# | | +----+------+ +----+------+ | |
# +-----------+ | network A | | network B | +-----------+
# | | | |
# | | | |
# | | | |
# +----+------+ +----+------+
# | |
# | |
# PLEFT PRIGHT
# A_VM_IP B_VM_IP
PLEFT = 'port_left'
PRIGHT = 'port_right'
A_PRIV_UP_IP = "10.0.0.1"
B_PRIV_UP_IP = "20.0.0.1"
AX_UP_IP = "30.0.0.1"
BX_UP_IP = "30.0.0.2"
A_VM_IP = "10.0.0.2"
B_VM_IP = "20.0.0.2"
class BgpIpTopologyManager(NeutronTopologyManager):
    """Builds the two-router BGP topology drawn in the diagram above.

    Router A (AS 64512) and router B (AS 64513) each own a private
    tenant network and peer with each other over the shared network X.
    """
    def build(self, binding_data=None):
        a_as = 64512
        b_as = 64513
        # Tenant network, router and BGP speaker for side A.
        a_net = self.create_network("A")
        self.create_subnet("A", a_net, '10.0.0.0/24')
        a_router = self.create_router("A")
        a_iface = self.create_port(
            "A_IFACE", a_net, fixed_ips=[A_PRIV_UP_IP],
            port_security_enabled=False)
        self.add_router_interface(a_router, port=a_iface)
        a_speaker = self.create_bgp_speaker(
            "A", a_as, a_router['id'])
        # Tenant network, router and BGP speaker for side B.
        b_net = self.create_network("B")
        self.create_subnet("B", b_net, '20.0.0.0/24')
        b_router = self.create_router("B")
        b_iface = self.create_port(
            "B_IFACE", b_net, fixed_ips=[B_PRIV_UP_IP],
            port_security_enabled=False)
        self.add_router_interface(b_router, port=b_iface)
        b_speaker = self.create_bgp_speaker(
            "B", b_as, b_router['id'])
        # Shared network X connecting both routers' uplink interfaces.
        x_net = self.create_network("X")
        self.create_subnet("X", x_net, '30.0.0.0/24')
        ax_iface = self.create_port(
            "AX", x_net, fixed_ips=[AX_UP_IP], port_security_enabled=False)
        self.add_router_interface(a_router, port=ax_iface)
        bx_iface = self.create_port(
            "BX", x_net, fixed_ips=[BX_UP_IP], port_security_enabled=False)
        self.add_router_interface(b_router, port=bx_iface)
        # VM-facing ports on each tenant network.
        self.create_port(PLEFT, a_net, port_security_enabled=False,
                         fixed_ips=[A_VM_IP])
        self.create_port(PRIGHT, b_net, port_security_enabled=False,
                         fixed_ips=[B_VM_IP])
        # Cross-register each speaker's peer on the other router.
        a_to_b_peer = self.create_bgp_peer("AtoB", BX_UP_IP, b_as)
        b_to_a_peer = self.create_bgp_peer("BtoA", AX_UP_IP, a_as)
        self.add_bgp_speaker_peer(a_speaker['id'], a_to_b_peer['id'])
        self.add_bgp_speaker_peer(b_speaker['id'], b_to_a_peer['id'])
# Both VMs bound to midolman1.
binding_onehost_intra_tenant_mm1 = {
    'description': 'on single MM (intra tenant)',
    'bindings': [
        {'vport': PLEFT,
         'interface': {
             'definition': {'ipv4_gw': A_PRIV_UP_IP},
             'hostname': 'midolman1',
             'type': 'vmguest'
         }},
        {'vport': PRIGHT,
         'interface': {
             'definition': {'ipv4_gw': B_PRIV_UP_IP},
             'hostname': 'midolman1',
             'type': 'vmguest'
         }},
    ],
    'config': {
        'tenants': ('tenant_left', 'tenant_left', 'tenant_left')
    }
}
# Same layout with both VMs on midolman2.
# NOTE(review): the description string is identical to the mm1 variant --
# consider distinguishing them so test reports can tell the two apart.
binding_onehost_intra_tenant_mm2 = {
    'description': 'on single MM (intra tenant)',
    'bindings': [
        {'vport': PLEFT,
         'interface': {
             'definition': {'ipv4_gw': A_PRIV_UP_IP},
             'hostname': 'midolman2',
             'type': 'vmguest'
         }},
        {'vport': PRIGHT,
         'interface': {
             'definition': {'ipv4_gw': B_PRIV_UP_IP},
             'hostname': 'midolman2',
             'type': 'vmguest'
         }},
    ],
    'config': {
        'tenants': ('tenant_left', 'tenant_left', 'tenant_left')
    }
}
VTM = BgpIpTopologyManager()
BM = BindingManager(None, VTM)
@bindings(binding_onehost_intra_tenant_mm1, binding_onehost_intra_tenant_mm2, binding_manager=BM)
def test_bgp_ip_basic():
    """Ping in both directions across the BGP-peered routers."""
    # We need time for the bgpd instances to peer up.
    time.sleep(60)
    for vport, peer_ip in ((PLEFT, B_VM_IP), (PRIGHT, A_VM_IP)):
        iface = BM.get_interface_on_vport(vport)
        cmd = 'ping -c 10 -i 0.5 %s' % peer_ip
        (result, exec_id) = iface.do_execute(cmd, stream=True)
        retcode = iface.compute_host.check_exit_status(
            exec_id, result, timeout=10)
        assert(retcode == 0)
|
#!/usr/bin/env python
"""
Setup file.
"""
from setuptools import setup, find_packages
# NOTE(review): the distribution name 'wx' collides with the wxPython
# package name on PyPI -- confirm this is intentional (the Private
# classifier below suggests it is never uploaded).
setup(
    name='wx',
    version='0.1',
    description='',
    long_description=open('README.md').read(),
    author='Kyle Marek-Spartz',
    author_email='kyle.marek.spartz@gmail.com',
    py_modules=['wx'],
    url='',
    include_package_data=True,
    packages=find_packages(exclude=['tests*']),
    install_requires=['flask-peewee'],
    test_suite='nose.collector',
    classifiers=["Private :: Do Not Upload"], # TODO
    license='' # TODO
)
|
import numpy as np
from utils.transforms.transform_base import Transform
class Mixup(Transform):
    """Mixup augmentation: convex-combine each array with a paired array.

    Assumes the arrays are one-hot (or otherwise safe to combine convexly).
    """
    def __init__(self, mix_labels=True, *args, alpha=1., **kwargs):
        """
        :param mix_labels: when False, arrays are passed through unchanged.
        :param alpha: Beta(alpha, alpha) concentration for the mixing
            coefficient (keyword-only so existing positional callers are
            unaffected; the default 1.0 matches the previously hard-coded
            value in ``mix``).  alpha <= 0 disables mixing.
        """
        super().__init__(*args, **kwargs)
        self.mix_labels = mix_labels
        self.alpha = alpha
    def forward(self, array_list: list[np.ndarray], mix_array_list: list[np.ndarray],
                *args, **kwargs):
        return [self.mix(array, mix_array, self.alpha) if self.mix_labels else array
                for array, mix_array in zip(array_list, mix_array_list)]
    backward = forward
    # assumes arrays are one-hot
    @staticmethod
    def mix(array, mix_array, alpha=1.):
        """Return lambda*array + (1-lambda)*mix_array with lambda ~ Beta(alpha, alpha).

        When alpha <= 0 the mixing coefficient is fixed to 1 (no mixing).
        """
        _lambda = np.random.beta(alpha, alpha) if alpha > 0 else 1
        output_array = _lambda * array + (1 - _lambda) * mix_array
        return output_array
|
from micropython import const
PACKETCONFIG1 = const(0x37) |
import os
import shutil
import tempfile
from asgiref.sync import sync_to_async
from channels.routing import URLRouter
from channels.testing import WebsocketCommunicator
from django.contrib.auth.models import Permission, User
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http import HttpResponse
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.urls import reverse
from django_htmx.middleware import HtmxMiddleware
from boards.models import IMAGE_TYPE, Board, BoardPreferences, Image, Post, Topic
from boards.routing import websocket_urlpatterns
from boards.views import BoardView
def dummy_request(request):
    """Minimal view used as the get_response callable for middleware in tests."""
    return HttpResponse("Hello!")
class IndexViewTest(TestCase):
    """Tests for the boards index view and its board-search form."""
    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        Board.objects.create(title="Test Board", description="Test Board Description", owner=test_user1)
    def test_anonymous_permissions(self):
        # The index page is public.
        response = self.client.get(reverse("boards:index"))
        self.assertEqual(response.status_code, 200)
    def test_board_search_success(self):
        board = Board.objects.get(title="Test Board")
        response = self.client.post(reverse("boards:index"), {"board_slug": board.slug})
        # A valid slug redirects to the board page.
        self.assertEqual(response.status_code, 302)
    def test_board_search_invalid(self):
        response = self.client.post(reverse("boards:index"), {"board_slug": "invalid"})
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, "form", "board_slug", "ID format needs to be ######.")
    def test_board_search_not_found(self):
        board = Board.objects.get(title="Test Board")
        # Pick a syntactically valid slug guaranteed not to match the board.
        bad_slug = "000000" if board.slug != "000000" else "111111"
        response = self.client.post(reverse("boards:index"), {"board_slug": bad_slug})
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, "form", "board_slug", "Board does not exist.")
    def test_board_search_no_slug(self):
        response = self.client.post(reverse("boards:index"))
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, "form", "board_slug", "This field is required.")
class IndexAllBoardsViewTest(TestCase):
    """The all-boards index is restricted to staff users."""
    @classmethod
    def setUpTestData(cls):
        User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        User.objects.create_user(username="testuser2", password="1X<ISRUkw+tuK", is_staff=True)
    def test_anonymous_all_boards(self):
        # Anonymous users are redirected (to login).
        response = self.client.get(reverse("boards:index-all"))
        self.assertEqual(response.status_code, 302)
    def test_board_non_staff_all_boards(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        response = self.client.get(reverse("boards:index-all"))
        self.assertEqual(response.status_code, 403)
    def test_board_staff_all_boards(self):
        self.client.login(username="testuser2", password="1X<ISRUkw+tuK")
        response = self.client.get(reverse("boards:index-all"))
        self.assertEqual(response.status_code, 200)
class BoardViewTest(TestCase):
    """Tests for BoardView, including HTMX partial-template selection."""
    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        User.objects.create_user(username="testuser2", password="2HJ1vRV0Z&3iD")
        Board.objects.create(title="Test Board", description="Test Description", owner=test_user1, slug="000001")
    def setUp(self):
        self.factory = RequestFactory()
        self.htmx_middleware = HtmxMiddleware(dummy_request)
        super().setUp()
    def _htmx_board_response(self, user, kwargs, current_url=None):
        # Helper extracted from test_htmx_requests: build an HTMX GET
        # request (optionally carrying HX-Current-URL), run the session
        # and htmx middleware, and return the BoardView response.
        extra = {"HTTP_HX_REQUEST": "true"}
        if current_url is not None:
            extra["HTTP_HX_CURRENT_URL"] = current_url
        request = self.factory.get(reverse("boards:board", kwargs=kwargs), **extra)
        session_middleware = SessionMiddleware(request)
        session_middleware.process_request(request)
        request.session.save()
        request.user = user
        self.htmx_middleware(request)
        return BoardView.as_view()(request, **kwargs)
    def test_anonymous_permissions(self):
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:board", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 200)
    def test_htmx_requests(self):
        board = Board.objects.get(title="Test Board")
        user = User.objects.get(username="testuser1")
        kwargs = {"slug": board.slug}
        # request with no current_url
        response = self._htmx_board_response(user, kwargs)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.template_name[0], "boards/board_index.html")
        # request from index
        response = self._htmx_board_response(user, kwargs, current_url=reverse("boards:index"))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.template_name[0], "boards/board_index.html")
        # request from index-all
        response = self._htmx_board_response(user, kwargs, current_url=reverse("boards:index-all"))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.template_name[0], "boards/board_index.html")
        # request from the board's own URL renders only the partial
        response = self._htmx_board_response(
            user, kwargs, current_url=reverse("boards:board", kwargs=kwargs))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.template_name[0], "boards/components/board.html")
        # request from another board URL
        response = self._htmx_board_response(
            user, kwargs, current_url=reverse("boards:board", kwargs={"slug": "000000"}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.template_name[0], "boards/board_index.html")
class BoardPreferencesViewTest(TestCase):
    """Permission and websocket-notification tests for board preferences."""
    # Populated in setUpTestData with the preferences URL of the test board.
    board_preferences_changed_url = ""
    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        User.objects.create_user(username="testuser2", password="2HJ1vRV0Z&3iD")
        board = Board.objects.create(title="Test Board", description="Test Description", owner=test_user1)
        cls.board_preferences_changed_url = reverse("boards:board-preferences", kwargs={"slug": board.slug})
    def test_board_preferences_anonymous_permissions(self):
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:board-preferences", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, f"/accounts/login/?next=/boards/{board.slug}/preferences/")
    # NOTE(review): "references" in the method name looks like a typo for
    # "preferences"; renaming would change the reported test id.
    def test_board_references_other_user_permissions(self):
        self.client.login(username="testuser2", password="2HJ1vRV0Z&3iD")
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:board-preferences", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 403)
    def test_board_preferences_owner_permissions(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:board-preferences", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 200)
    def test_board_preferences_nonexistent_preferences(self):
        # Visiting the page recreates preferences that were deleted.
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        board = Board.objects.get(title="Test Board")
        board.preferences.delete()
        self.assertRaises(BoardPreferences.DoesNotExist, BoardPreferences.objects.get, board=board)
        response = self.client.get(reverse("boards:board-preferences", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 200)
        preferences = BoardPreferences.objects.get(board=board)
        self.assertEqual(preferences.board, board)
    async def test_preferences_changed_websocket_message(self):
        # Posting new preferences should broadcast to board subscribers.
        application = URLRouter(websocket_urlpatterns)
        board = await sync_to_async(Board.objects.get)(title="Test Board")
        communicator = WebsocketCommunicator(application, f"/ws/boards/{board.slug}/")
        connected, _ = await communicator.connect()
        self.assertTrue(connected, "Could not connect")
        await sync_to_async(self.client.login)(username="testuser1", password="1X<ISRUkw+tuK")
        message = await communicator.receive_from()
        self.assertIn("session_connected", message)
        response = await sync_to_async(self.client.post)(
            self.board_preferences_changed_url,
            data={
                "background_type": "c",
                "background_color": "#ffffff",
                "background_image": "",
                "background_opacity": "0.5",
                "require_approval": True,
                "enable_latex": True,
                "reaction_type": "v",
            },
        )
        message = await communicator.receive_from()
        self.assertIn("board_preferences_changed", message)
        self.assertEqual(response.status_code, 204)
class CreateBoardViewTest(TestCase):
    """Exercise the board-create view for anonymous and logged-in users,
    covering success, blank-form, and over-length validation paths."""

    @classmethod
    def setUpTestData(cls):
        User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")

    def test_anonymous_permissions(self):
        # Unauthenticated visitors are redirected away from the create form.
        resp = self.client.get(reverse("boards:board-create"))
        self.assertEqual(resp.status_code, 302)

    def test_user_permissions(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        resp = self.client.get(reverse("boards:board-create"))
        self.assertEqual(str(resp.context["user"]), "testuser1")
        self.assertEqual(resp.status_code, 200)

    def test_board_create_success(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        form_data = {"title": "Test Board", "description": "Test Board Description"}
        resp = self.client.post(reverse("boards:board-create"), form_data)
        self.assertEqual(resp.status_code, 200)
        created = Board.objects.get(title="Test Board")
        self.assertEqual(created.description, "Test Board Description")

    def test_board_create_blank(self):
        # Both fields are required; an empty submission re-renders with errors.
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        resp = self.client.post(reverse("boards:board-create"), {"title": "", "description": ""})
        self.assertEqual(resp.status_code, 200)
        for field in ("title", "description"):
            self.assertFormError(resp, "form", field, "This field is required.")

    def test_board_create_invalid(self):
        # One character over each field's max length triggers validation errors.
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        resp = self.client.post(reverse("boards:board-create"), {"title": "x" * 51, "description": "x" * 101})
        self.assertEqual(resp.status_code, 200)
        self.assertFormError(resp, "form", "title", "Ensure this value has at most 50 characters (it has 51).")
        self.assertFormError(
            resp, "form", "description", "Ensure this value has at most 100 characters (it has 101)."
        )
class UpdateBoardViewTest(TestCase):
    """Exercise the board-update view: who may edit a board, and how the
    form handles valid, blank, and over-length submissions."""

    @classmethod
    def setUpTestData(cls):
        owner = User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        User.objects.create_user(username="testuser2", password="2HJ1vRV0Z&3iD")
        Board.objects.create(title="Test Board", description="Test Description", owner=owner)

    def _update_url(self):
        # Resolve the update URL for the single board created in fixtures.
        board = Board.objects.get(title="Test Board")
        return reverse("boards:board-update", kwargs={"slug": board.slug})

    def test_anonymous_permissions(self):
        resp = self.client.get(self._update_url())
        self.assertEqual(resp.status_code, 302)

    def test_other_user_permissions(self):
        self.client.login(username="testuser2", password="2HJ1vRV0Z&3iD")
        resp = self.client.get(self._update_url())
        self.assertEqual(resp.status_code, 403)

    def test_owner_permissions(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        resp = self.client.get(self._update_url())
        self.assertEqual(resp.status_code, 200)

    def test_staff_permissions(self):
        # Staff accounts may edit boards they do not own.
        User.objects.create_user(username="staff", password="83jKJ+!fdjP", is_staff=True)
        self.client.login(username="staff", password="83jKJ+!fdjP")
        resp = self.client.get(self._update_url())
        self.assertEqual(resp.status_code, 200)

    def test_board_update_success(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        board = Board.objects.get(title="Test Board")
        resp = self.client.post(
            reverse("boards:board-update", kwargs={"slug": board.slug}),
            {"title": "Test Board NEW", "description": "Test Board Description NEW"},
        )
        self.assertEqual(resp.status_code, 200)
        refreshed = Board.objects.get(id=board.id)
        self.assertEqual(refreshed.title, "Test Board NEW")
        self.assertEqual(refreshed.description, "Test Board Description NEW")

    def test_board_update_blank(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        resp = self.client.post(self._update_url(), {"title": "", "description": ""})
        self.assertEqual(resp.status_code, 200)
        for field in ("title", "description"):
            self.assertFormError(resp, "form", field, "This field is required.")

    def test_board_update_invalid(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        resp = self.client.post(self._update_url(), {"title": "x" * 51, "description": "x" * 101})
        self.assertEqual(resp.status_code, 200)
        self.assertFormError(resp, "form", "title", "Ensure this value has at most 50 characters (it has 51).")
        self.assertFormError(
            resp, "form", "description", "Ensure this value has at most 100 characters (it has 101)."
        )
class DeleteBoardViewTest(TestCase):
    """Tests for the board-delete view: anonymous users are redirected,
    non-owners are forbidden, and both owners and staff may delete."""

    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        User.objects.create_user(username="testuser2", password="2HJ1vRV0Z&3iD")
        Board.objects.create(title="Test Board", description="Test Description", owner=test_user1)

    def test_anonymous_permissions(self):
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:board-delete", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 302)

    def test_other_user_permissions(self):
        self.client.login(username="testuser2", password="2HJ1vRV0Z&3iD")
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:board-delete", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 403)

    def test_owner_permissions(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:board-delete", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 200)
        response = self.client.post(reverse("boards:board-delete", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 302)
        # count() issues COUNT(*) in SQL instead of fetching every row
        # (was len(Board.objects.all())).
        self.assertEqual(Board.objects.count(), 0)

    def test_staff_permissions(self):
        # Staff accounts may delete boards they do not own.
        User.objects.create_user(username="staff", password="83jKJ+!fdjP", is_staff=True)
        self.client.login(username="staff", password="83jKJ+!fdjP")
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:board-delete", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 200)
        response = self.client.post(reverse("boards:board-delete", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Board.objects.count(), 0)
class TopicCreateViewTest(TestCase):
    """Tests for topic creation: permissions, form validation, and the
    "topic_created" websocket broadcast."""

    topic_created_url = ""

    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        User.objects.create_user(username="testuser2", password="2HJ1vRV0Z&3iD")
        board = Board.objects.create(title="Test Board", description="Test Description", owner=test_user1)
        cls.topic_created_url = reverse("boards:topic-create", kwargs={"slug": board.slug})

    def test_anonymous_permissions(self):
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:topic-create", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 302)

    def test_other_user_permissions(self):
        self.client.login(username="testuser2", password="2HJ1vRV0Z&3iD")
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:topic-create", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 403)

    def test_owner_permissions(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:topic-create", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 200)

    def test_staff_permissions(self):
        User.objects.create_user(username="staff", password="83jKJ+!fdjP", is_staff=True)
        self.client.login(username="staff", password="83jKJ+!fdjP")
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:topic-create", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 200)

    def test_topic_create_success(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        board = Board.objects.get(title="Test Board")
        response = self.client.post(
            reverse("boards:topic-create", kwargs={"slug": board.slug}),
            data={"subject": "Test Topic"},
        )
        self.assertEqual(response.status_code, 204)
        self.assertIsNotNone(Topic.objects.get(subject="Test Topic"))

    def test_topic_create_blank(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        board = Board.objects.get(title="Test Board")
        response = self.client.post(
            reverse("boards:topic-create", kwargs={"slug": board.slug}),
            data={"subject": ""},
        )
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, "form", "subject", "This field is required.")

    def test_topic_create_invalid(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        board = Board.objects.get(title="Test Board")
        response = self.client.post(
            reverse("boards:topic-create", kwargs={"slug": board.slug}),
            data={"subject": "x" * 100},
        )
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, "form", "subject", "Ensure this value has at most 50 characters (it has 100).")

    async def test_topic_created_websocket_message(self):
        # Creating a topic should broadcast "topic_created" with the new pk.
        application = URLRouter(websocket_urlpatterns)
        board = await sync_to_async(Board.objects.get)(title="Test Board")
        communicator = WebsocketCommunicator(application, f"/ws/boards/{board.slug}/")
        connected, _ = await communicator.connect()
        self.assertTrue(connected, "Could not connect")
        try:
            await sync_to_async(self.client.login)(username="testuser1", password="1X<ISRUkw+tuK")
            message = await communicator.receive_from()
            self.assertIn("session_connected", message)
            await sync_to_async(self.client.post)(self.topic_created_url, data={"subject": "Test Topic"})
            topic = await sync_to_async(Topic.objects.get)(subject="Test Topic")
            message = await communicator.receive_from()
            self.assertIn("topic_created", message)
            self.assertIn(f'"topic_pk": {topic.id}', message)
        finally:
            # Close the websocket so the test event loop shuts down cleanly.
            await communicator.disconnect()
class TopicUpdateViewTest(TestCase):
    """Tests for topic editing: permissions, form validation, and the
    "topic_updated" websocket broadcast."""

    topic_updated_url = ""

    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        User.objects.create_user(username="testuser2", password="2HJ1vRV0Z&3iD")
        board = Board.objects.create(title="Test Board", description="Test Description", owner=test_user1)
        topic = Topic.objects.create(subject="Test Topic", board=board)
        cls.topic_updated_url = reverse("boards:topic-update", kwargs={"slug": board.slug, "pk": topic.pk})

    def test_anonymous_permissions(self):
        topic = Topic.objects.get(subject="Test Topic")
        response = self.client.get(reverse("boards:topic-update", kwargs={"slug": topic.board.slug, "pk": topic.id}))
        self.assertEqual(response.status_code, 302)

    def test_other_user_permissions(self):
        self.client.login(username="testuser2", password="2HJ1vRV0Z&3iD")
        topic = Topic.objects.get(subject="Test Topic")
        response = self.client.get(reverse("boards:topic-update", kwargs={"slug": topic.board.slug, "pk": topic.id}))
        self.assertEqual(response.status_code, 403)

    def test_owner_permissions(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        topic = Topic.objects.get(subject="Test Topic")
        response = self.client.get(reverse("boards:topic-update", kwargs={"slug": topic.board.slug, "pk": topic.id}))
        self.assertEqual(response.status_code, 200)

    def test_staff_permissions(self):
        User.objects.create_user(username="staff", password="83jKJ+!fdjP", is_staff=True)
        self.client.login(username="staff", password="83jKJ+!fdjP")
        topic = Topic.objects.get(subject="Test Topic")
        response = self.client.get(reverse("boards:topic-update", kwargs={"slug": topic.board.slug, "pk": topic.id}))
        self.assertEqual(response.status_code, 200)

    def test_topic_update_success(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        topic = Topic.objects.get(subject="Test Topic")
        response = self.client.post(
            reverse("boards:topic-update", kwargs={"slug": topic.board.slug, "pk": topic.id}),
            data={"subject": "Test Topic NEW"},
        )
        self.assertEqual(response.status_code, 204)
        self.assertIsNotNone(Topic.objects.get(subject="Test Topic NEW"))

    def test_topic_update_blank(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        topic = Topic.objects.get(subject="Test Topic")
        response = self.client.post(
            reverse("boards:topic-update", kwargs={"slug": topic.board.slug, "pk": topic.id}),
            data={"subject": ""},
        )
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, "form", "subject", "This field is required.")

    def test_topic_update_invalid(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        topic = Topic.objects.get(subject="Test Topic")
        response = self.client.post(
            reverse("boards:topic-update", kwargs={"slug": topic.board.slug, "pk": topic.id}),
            data={"subject": "x" * 100},
        )
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, "form", "subject", "Ensure this value has at most 50 characters (it has 100).")

    async def test_topic_updated_websocket_message(self):
        # Updating a topic should broadcast "topic_updated" with its pk.
        application = URLRouter(websocket_urlpatterns)
        board = await sync_to_async(Board.objects.get)(title="Test Board")
        communicator = WebsocketCommunicator(application, f"/ws/boards/{board.slug}/")
        connected, _ = await communicator.connect()
        self.assertTrue(connected, "Could not connect")
        try:
            await sync_to_async(self.client.login)(username="testuser1", password="1X<ISRUkw+tuK")
            message = await communicator.receive_from()
            self.assertIn("session_connected", message)
            topic = await sync_to_async(Topic.objects.get)(subject="Test Topic")
            await sync_to_async(self.client.post)(self.topic_updated_url, data={"subject": "Test Topic NEW"})
            message = await communicator.receive_from()
            self.assertIn("topic_updated", message)
            self.assertIn(f'"topic_pk": {topic.id}', message)
        finally:
            # Close the websocket so the test event loop shuts down cleanly.
            await communicator.disconnect()
class TopicDeleteViewTest(TestCase):
    """Tests for topic deletion: permissions and the "topic_deleted"
    websocket broadcast."""

    topic_deleted_url = ""

    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        User.objects.create_user(username="testuser2", password="2HJ1vRV0Z&3iD")
        board = Board.objects.create(title="Test Board", description="Test Description", owner=test_user1)
        topic = Topic.objects.create(subject="Test Topic", board=board)
        cls.topic_deleted_url = reverse("boards:topic-delete", kwargs={"slug": board.slug, "pk": topic.id})

    def test_anonymous_permissions(self):
        topic = Topic.objects.get(subject="Test Topic")
        response = self.client.get(reverse("boards:topic-delete", kwargs={"slug": topic.board.slug, "pk": topic.id}))
        self.assertEqual(response.status_code, 302)

    def test_other_user_permissions(self):
        self.client.login(username="testuser2", password="2HJ1vRV0Z&3iD")
        topic = Topic.objects.get(subject="Test Topic")
        response = self.client.get(reverse("boards:topic-delete", kwargs={"slug": topic.board.slug, "pk": topic.id}))
        self.assertEqual(response.status_code, 403)

    def test_owner_permissions(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        topic = Topic.objects.get(subject="Test Topic")
        response = self.client.get(reverse("boards:topic-delete", kwargs={"slug": topic.board.slug, "pk": topic.id}))
        self.assertEqual(response.status_code, 200)
        response = self.client.post(reverse("boards:topic-delete", kwargs={"slug": topic.board.slug, "pk": topic.id}))
        self.assertRaises(Topic.DoesNotExist, Topic.objects.get, id=topic.id)
        self.assertEqual(response.status_code, 204)

    def test_staff_permissions(self):
        User.objects.create_user(username="staff", password="83jKJ+!fdjP", is_staff=True)
        self.client.login(username="staff", password="83jKJ+!fdjP")
        topic = Topic.objects.get(subject="Test Topic")
        response = self.client.get(reverse("boards:topic-delete", kwargs={"slug": topic.board.slug, "pk": topic.id}))
        self.assertEqual(response.status_code, 200)
        response = self.client.post(reverse("boards:topic-delete", kwargs={"slug": topic.board.slug, "pk": topic.id}))
        self.assertRaises(Topic.DoesNotExist, Topic.objects.get, id=topic.id)
        self.assertEqual(response.status_code, 204)

    async def test_topic_deleted_websocket_message(self):
        # Deleting a topic should broadcast "topic_deleted" with its pk.
        application = URLRouter(websocket_urlpatterns)
        board = await sync_to_async(Board.objects.get)(title="Test Board")
        communicator = WebsocketCommunicator(application, f"/ws/boards/{board.slug}/")
        connected, _ = await communicator.connect()
        self.assertTrue(connected, "Could not connect")
        try:
            await sync_to_async(self.client.login)(username="testuser1", password="1X<ISRUkw+tuK")
            message = await communicator.receive_from()
            self.assertIn("session_connected", message)
            topic = await sync_to_async(Topic.objects.get)(subject="Test Topic")
            await sync_to_async(self.client.post)(self.topic_deleted_url)
            message = await communicator.receive_from()
            self.assertIn("topic_deleted", message)
            self.assertIn(f'"topic_pk": {topic.id}', message)
        finally:
            # Close the websocket so the test event loop shuts down cleanly.
            await communicator.disconnect()
class PostCreateViewTest(TestCase):
    """Tests for post creation: anonymous posting, session-key tracking,
    the approval workflow, and the "post_created" websocket broadcast."""

    post_create_url = ""

    @classmethod
    def setUpTestData(cls):
        # testuser1 owns the board, testuser3 moderates it, testuser2 is
        # an unrelated user.
        test_user1 = User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        User.objects.create_user(username="testuser2", password="2HJ1vRV0Z&3iD")
        test_user3 = User.objects.create_user(username="testuser3", password="3y6d0A8sB?5")
        board = Board.objects.create(title="Test Board", description="Test Description", owner=test_user1)
        board.preferences.moderators.add(test_user3)
        board.preferences.save()
        topic = Topic.objects.create(subject="Test Topic", board=board)
        cls.post_create_url = reverse("boards:post-create", kwargs={"slug": board.slug, "topic_pk": topic.id})

    def test_anonymous_permissions(self):
        # Anonymous users may both view the form and create posts.
        topic = Topic.objects.get(subject="Test Topic")
        response = self.client.get(
            reverse("boards:post-create", kwargs={"slug": topic.board.slug, "topic_pk": topic.id})
        )
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse("boards:post-create", kwargs={"slug": topic.board.slug, "topic_pk": topic.id}),
            data={"content": "Test Message anon"},
        )
        topic = Topic.objects.prefetch_related("posts").get(subject="Test Topic")
        self.assertEqual(response.status_code, 204)
        self.assertEqual(topic.posts.first().content, "Test Message anon")

    def test_post_session_key(self):
        # A created post records the creating session's key for ownership.
        topic = Topic.objects.get(subject="Test Topic")
        self.client.post(
            reverse("boards:post-create", kwargs={"slug": topic.board.slug, "topic_pk": topic.id}),
            data={"content": "Test Post"},
        )
        post = Post.objects.get(content="Test Post")
        self.assertEqual(self.client.session.session_key, post.session_key)

    def test_post_approval(self):
        # With require_approval on: anonymous and ordinary users need
        # approval; the owner and moderators are auto-approved.
        board = Board.objects.get(title="Test Board")
        board.preferences.require_approval = True
        board.preferences.save()
        topic = Topic.objects.get(subject="Test Topic")
        self.client.post(
            reverse("boards:post-create", kwargs={"slug": board.slug, "topic_pk": topic.pk}),
            data={"content": "Test Post"},
        )
        post = Post.objects.get(content="Test Post")
        self.assertEqual(post.approved, False)
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        self.client.post(
            reverse("boards:post-create", kwargs={"slug": board.slug, "topic_pk": topic.pk}),
            data={"content": "Test Post user1"},
        )
        post = Post.objects.get(content="Test Post user1")
        self.assertEqual(post.approved, True)  # Board owner can post without approval
        self.client.login(username="testuser2", password="2HJ1vRV0Z&3iD")
        self.client.post(
            reverse("boards:post-create", kwargs={"slug": board.slug, "topic_pk": topic.pk}),
            data={"content": "Test Post user2"},
        )
        post = Post.objects.get(content="Test Post user2")
        self.assertEqual(post.approved, False)  # Normal user needs approval
        self.client.login(username="testuser3", password="3y6d0A8sB?5")
        self.client.post(
            reverse("boards:post-create", kwargs={"slug": board.slug, "topic_pk": topic.pk}),
            data={"content": "Test Post user3"},
        )
        post = Post.objects.get(content="Test Post user3")
        self.assertEqual(post.approved, True)  # Moderator can post without approval

    async def test_post_created_websocket_message(self):
        # Creating a post should broadcast "post_created" with the topic pk.
        application = URLRouter(websocket_urlpatterns)
        board = await sync_to_async(Board.objects.get)(title="Test Board")
        communicator = WebsocketCommunicator(application, f"/ws/boards/{board.slug}/")
        connected, _ = await communicator.connect()
        self.assertTrue(connected, "Could not connect")
        try:
            await sync_to_async(self.client.login)(username="testuser1", password="1X<ISRUkw+tuK")
            message = await communicator.receive_from()
            self.assertIn("session_connected", message)
            await sync_to_async(self.client.post)(self.post_create_url, data={"content": "Test Post"})
            post = await sync_to_async(Post.objects.get)(content="Test Post")
            self.assertIsNotNone(post)
            topic = await sync_to_async(Topic.objects.get)(subject="Test Topic")
            message = await communicator.receive_from()
            self.assertIn("post_created", message)
            self.assertIn(f'"topic_pk": {topic.id}', message)
        finally:
            # Close the websocket so the test event loop shuts down cleanly.
            await communicator.disconnect()
class PostUpdateViewTest(TestCase):
    """Tests for post editing: anonymous session-based ownership,
    moderator/owner access, and the "post_updated" websocket broadcast."""

    post_updated_url = ""

    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        User.objects.create_user(username="testuser2", password="2HJ1vRV0Z&3iD")
        test_user3 = User.objects.create_user(username="testuser3", password="3y6d0A8sB?5")
        board = Board.objects.create(title="Test Board", description="Test Description", owner=test_user1)
        board.preferences.moderators.add(test_user3)
        board.preferences.save()
        topic = Topic.objects.create(subject="Test Topic", board=board)
        post = Post.objects.create(
            content="Test Post",
            topic=topic,
        )
        cls.post_updated_url = reverse("boards:post-update", kwargs={"slug": board.slug, "pk": post.id})

    def test_anonymous_permissions(self):
        # An anonymous user may edit a post created in their own session.
        topic = Topic.objects.get(subject="Test Topic")
        response = self.client.post(
            reverse("boards:post-create", kwargs={"slug": "test-board", "topic_pk": topic.pk}),
            data={"content": "Test Post anon"},
        )
        post = Post.objects.get(content="Test Post anon")
        self.assertEqual(response.status_code, 204)
        self.assertEqual(self.client.session.session_key, post.session_key)
        response = self.client.post(
            reverse("boards:post-update", kwargs={"slug": post.topic.board.slug, "pk": post.id}),
            data={"content": "Test Post anon NEW"},
        )
        self.assertEqual(response.status_code, 204)
        self.assertEqual(Post.objects.get(id=post.id).content, "Test Post anon NEW")

    def test_other_user_permissions(self):
        # A user unrelated to the post or board may not edit it.
        self.client.login(username="testuser2", password="2HJ1vRV0Z&3iD")
        post = Post.objects.get(content="Test Post")
        response = self.client.get(
            reverse("boards:post-update", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertEqual(response.status_code, 403)

    def test_board_moderator_permissions(self):
        self.client.login(username="testuser3", password="3y6d0A8sB?5")
        post = Post.objects.get(content="Test Post")
        response = self.client.get(
            reverse("boards:post-update", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse("boards:post-update", kwargs={"slug": post.topic.board.slug, "pk": post.id}),
            data={"content": "Test Post NEW"},
        )
        self.assertEqual(response.status_code, 204)
        self.assertEqual(Post.objects.get(id=post.id).content, "Test Post NEW")

    def test_owner_permissions(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        post = Post.objects.get(content="Test Post")
        response = self.client.get(
            reverse("boards:post-update", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse("boards:post-update", kwargs={"slug": post.topic.board.slug, "pk": post.id}),
            data={"content": "Test Post NEW"},
        )
        self.assertEqual(response.status_code, 204)
        self.assertEqual(Post.objects.get(id=post.id).content, "Test Post NEW")

    async def test_post_updated_websocket_message(self):
        # Editing a post should broadcast "post_updated" with the post pk.
        application = URLRouter(websocket_urlpatterns)
        board = await sync_to_async(Board.objects.get)(title="Test Board")
        communicator = WebsocketCommunicator(application, f"/ws/boards/{board.slug}/")
        connected, _ = await communicator.connect()
        self.assertTrue(connected, "Could not connect")
        try:
            await sync_to_async(self.client.login)(username="testuser1", password="1X<ISRUkw+tuK")
            message = await communicator.receive_from()
            self.assertIn("session_connected", message)
            post = await sync_to_async(Post.objects.get)(content="Test Post")
            await sync_to_async(self.client.post)(self.post_updated_url, data={"content": "Test Post NEW"})
            message = await communicator.receive_from()
            self.assertIn("post_updated", message)
            self.assertIn(f'"post_pk": {post.id}', message)
        finally:
            # Close the websocket so the test event loop shuts down cleanly.
            await communicator.disconnect()
class PostDeleteViewTest(TestCase):
    """Tests for post deletion: session-based anonymous ownership,
    moderator/owner access, and the "post_deleted" websocket broadcast."""

    post_deleted_url = ""

    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        User.objects.create_user(username="testuser2", password="2HJ1vRV0Z&3iD")
        test_user3 = User.objects.create_user(username="testuser3", password="3y6d0A8sB?5")
        board = Board.objects.create(title="Test Board", description="Test Description", owner=test_user1)
        board.preferences.moderators.add(test_user3)
        board.preferences.save()
        topic = Topic.objects.create(subject="Test Topic", board=board)
        post = Post.objects.create(content="Test Post", topic=topic)
        cls.post_deleted_url = reverse("boards:post-delete", kwargs={"slug": board.slug, "pk": post.id})

    def test_anonymous_permissions(self):
        # Anonymous users may delete only posts from their own session.
        post = Post.objects.get(content="Test Post")
        self.client.post(reverse("boards:post-delete", kwargs={"slug": "test-board", "pk": post.pk}))
        self.assertEqual(Post.objects.count(), 1)
        self.client.post(
            reverse("boards:post-create", kwargs={"slug": "test-board", "topic_pk": post.topic.pk}),
            data={"content": "Test Post anon"},
        )
        post2 = Post.objects.get(content="Test Post anon")
        self.assertEqual(Post.objects.count(), 2)
        self.client.post(reverse("boards:post-delete", kwargs={"slug": "test-board", "pk": post2.pk}))
        self.assertEqual(Post.objects.count(), 1)

    def test_other_user_permissions(self):
        self.client.login(username="testuser2", password="2HJ1vRV0Z&3iD")
        post = Post.objects.get(content="Test Post")
        response = self.client.post(
            reverse("boards:post-delete", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertEqual(response.status_code, 403)
        self.assertEqual(Post.objects.count(), 1)

    def test_board_moderator_permissions(self):
        self.client.login(username="testuser3", password="3y6d0A8sB?5")
        post = Post.objects.get(content="Test Post")
        response = self.client.post(
            reverse("boards:post-delete", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertEqual(response.status_code, 204)
        self.assertEqual(Post.objects.count(), 0)

    def test_owner_permissions(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        post = Post.objects.get(content="Test Post")
        response = self.client.post(
            reverse("boards:post-delete", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertEqual(response.status_code, 204)
        self.assertEqual(Post.objects.count(), 0)

    async def test_post_deleted_websocket_message(self):
        # Deleting a post should broadcast "post_deleted" with the post pk.
        application = URLRouter(websocket_urlpatterns)
        board = await sync_to_async(Board.objects.get)(title="Test Board")
        communicator = WebsocketCommunicator(application, f"/ws/boards/{board.slug}/")
        connected, _ = await communicator.connect()
        self.assertTrue(connected, "Could not connect")
        try:
            await sync_to_async(self.client.login)(username="testuser1", password="1X<ISRUkw+tuK")
            message = await communicator.receive_from()
            self.assertIn("session_connected", message)
            post = await sync_to_async(Post.objects.get)(content="Test Post")
            await sync_to_async(self.client.post)(self.post_deleted_url)
            message = await communicator.receive_from()
            self.assertIn("post_deleted", message)
            self.assertIn(f'"post_pk": {post.id}', message)
        finally:
            # Close the websocket so the test event loop shuts down cleanly.
            await communicator.disconnect()
class BoardListViewTest(TestCase):
    """Exercise the board-list partial: own-boards listing versus the
    all-boards listing gated by the can_view_all_boards permission."""

    @classmethod
    def setUpTestData(cls):
        regular = User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        admin = User.objects.create_superuser(username="testuser2", password="2HJ1vRV0Z&3iD")
        # Three boards per user, interleaved to match the original fixture order.
        for n in range(3):
            for owner in (regular, admin):
                Board.objects.create(
                    title=f"Test Board {n} - {owner.username}", description="Test Description", owner=owner
                )

    def test_anonymous_permissions(self):
        resp = self.client.get(reverse("boards:board-list"))
        self.assertEqual(resp.status_code, 302)

    def test_user_index(self):
        # From the personal index, only the user's own boards are listed.
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        resp = self.client.get(reverse("boards:board-list"), {}, HTTP_REFERER=reverse("boards:index"))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, "boards/components/board_list.html")
        self.assertEqual(len(resp.context["boards"]), 3)

    def test_user_no_perm_all_boards(self):
        # Without the permission, the "all boards" index still shows only
        # the user's own boards.
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        resp = self.client.get(reverse("boards:board-list"), {}, HTTP_REFERER=reverse("boards:index-all"))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, "boards/components/board_list.html")
        self.assertEqual(len(resp.context["boards"]), 3)

    def test_user_perm_all_boards(self):
        # With can_view_all_boards, all six boards appear, paginated
        # five-per-page (hence two pages, five boards on the first).
        viewer = User.objects.get(username="testuser1")
        viewer.user_permissions.add(Permission.objects.get(codename="can_view_all_boards"))
        viewer.save()
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        resp = self.client.get(reverse("boards:board-list"), {}, HTTP_REFERER=reverse("boards:index-all"))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, "boards/components/board_list.html")
        self.assertEqual(len(resp.context["boards"]), 5)
        self.assertEqual(len(resp.context["page_obj"].paginator.page_range), 2)
class TopicFetchViewTest(TestCase):
    """Exercise the topic-fetch partial view."""

    @classmethod
    def setUpTestData(cls):
        owner = User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        User.objects.create_user(username="testuser2", password="2HJ1vRV0Z&3iD")
        board = Board.objects.create(title="Test Board", description="Test Description", owner=owner)
        Topic.objects.create(subject="Test Topic", board=board)

    def test_topic_fetch(self):
        # Fetching a topic renders successfully and exposes it in context.
        topic = Topic.objects.get(subject="Test Topic")
        fetch_url = reverse("boards:topic-fetch", kwargs={"slug": topic.board.slug, "pk": topic.id})
        resp = self.client.get(fetch_url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.context["topic"], topic)
class PostFetchViewTest(TestCase):
    """Visibility rules for fetching a single post.

    Unapproved posts on approval-required boards are hidden from anonymous
    visitors and unrelated users, but visible to the board owner, board
    moderators, and holders of the ``can_approve_posts`` permission.
    """

    @classmethod
    def setUpTestData(cls):
        # Owner (testuser1), unrelated user (testuser2), moderator (testuser3);
        # one post created unapproved under a known session key.
        test_user1 = User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        User.objects.create_user(username="testuser2", password="2HJ1vRV0Z&3iD")
        test_user3 = User.objects.create_user(username="testuser3", password="3y6d0A8sB?5")
        board = Board.objects.create(title="Test Board", description="Test Description", owner=test_user1)
        board.preferences.moderators.add(test_user3)
        board.preferences.save()
        topic = Topic.objects.create(subject="Test Topic", board=board)
        Post.objects.create(content="Test Post", topic=topic, session_key="testing_key", approved=False)

    def test_post_fetch(self):
        # Approved posts are visible to everyone.
        post = Post.objects.get(content="Test Post")
        post.approved = True
        post.save()
        response = self.client.get(
            reverse("boards:post-fetch", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context["post"], post)
        self.assertContains(response, "Test Post", html=True)

    def test_post_fetch_content_anonymous_not_approved(self):
        # Anonymous visitor (different session) must not see unapproved content.
        board = Board.objects.get(title="Test Board")
        board.preferences.require_approval = True
        board.preferences.save()
        # Visiting the board first establishes a session for this client.
        self.client.get(reverse("boards:board", kwargs={"slug": board.slug}))
        post = Post.objects.get(content="Test Post")
        response = self.client.get(
            reverse("boards:post-fetch", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertNotContains(response, "Test Post", html=True)

    def test_post_fetch_content_other_user_not_approved(self):
        # Unrelated authenticated user must not see unapproved content either.
        self.client.login(username="testuser2", password="2HJ1vRV0Z&3iD")
        board = Board.objects.get(title="Test Board")
        board.preferences.require_approval = True
        board.preferences.save()
        self.client.get(reverse("boards:board", kwargs={"slug": board.slug}))
        post = Post.objects.get(content="Test Post")
        response = self.client.get(
            reverse("boards:post-fetch", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertNotContains(response, "Test Post", html=True)

    def test_post_fetch_content_board_moderator_not_approved(self):
        # Board moderators can see unapproved content.
        self.client.login(username="testuser3", password="3y6d0A8sB?5")
        post = Post.objects.get(content="Test Post")
        response = self.client.get(
            reverse("boards:post-fetch", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertContains(response, "Test Post", html=True)

    def test_post_fetch_content_owner_not_approved(self):
        # The board owner can see unapproved content.
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        post = Post.objects.get(content="Test Post")
        response = self.client.get(
            reverse("boards:post-fetch", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertContains(response, "Test Post", html=True)

    def test_post_fetch_content_can_approve_not_approved(self):
        # Holders of the global can_approve_posts permission can see it too.
        test_user4 = User.objects.create_user(username="testuser4", password="2HJ1vRV0Z&3iD")
        test_user4.user_permissions.add(Permission.objects.get(codename="can_approve_posts"))
        self.client.login(username="testuser4", password="2HJ1vRV0Z&3iD")
        post = Post.objects.get(content="Test Post")
        response = self.client.get(
            reverse("boards:post-fetch", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertContains(response, "Test Post", html=True)
class PostToggleApprovalViewTest(TestCase):
    """Permission matrix for toggling a post's ``approved`` flag, plus the
    websocket notification emitted on each toggle."""

    # Populated in setUpTestData once the fixture post exists.
    post_approval_url = ""

    @classmethod
    def setUpTestData(cls):
        # Owner (testuser1), unrelated user (testuser2), moderator (testuser3);
        # the board requires approval and holds one unapproved post.
        test_user1 = User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        User.objects.create_user(username="testuser2", password="2HJ1vRV0Z&3iD")
        test_user3 = User.objects.create_user(username="testuser3", password="3y6d0A8sB?5")
        board = Board.objects.create(title="Test Board", description="Test Description", owner=test_user1)
        board.preferences.moderators.add(test_user3)
        board.preferences.require_approval = True
        board.preferences.save()
        topic = Topic.objects.create(subject="Test Topic", board=board)
        post = Post.objects.create(content="Test Post", topic=topic, session_key="testing_key", approved=False)
        cls.post_approval_url = reverse("boards:post-toggle-approval", kwargs={"slug": board.slug, "pk": post.id})

    def test_post_toggle_approval_anonymous(self):
        # Anonymous users are redirected to the login page.
        post = Post.objects.get(content="Test Post")
        self.assertFalse(post.approved)
        response = self.client.post(
            reverse("boards:post-toggle-approval", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(
            response.url, f"/accounts/login/?next=/boards/{post.topic.board.slug}/posts/{post.id}/approval/"
        )

    def test_post_toggle_approval_other_user(self):
        # Unrelated authenticated users are forbidden.
        self.client.login(username="testuser2", password="2HJ1vRV0Z&3iD")
        post = Post.objects.get(content="Test Post")
        self.assertFalse(post.approved)
        response = self.client.post(
            reverse("boards:post-toggle-approval", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertEqual(response.status_code, 403)

    def test_post_toggle_approval_board_moderator(self):
        # Moderators can toggle approval both ways; endpoint replies 204.
        self.client.login(username="testuser3", password="3y6d0A8sB?5")
        post = Post.objects.get(content="Test Post")
        self.assertFalse(post.approved)
        response = self.client.post(
            reverse("boards:post-toggle-approval", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertEqual(response.status_code, 204)
        self.assertTrue(Post.objects.get(content="Test Post").approved)
        response = self.client.post(
            reverse("boards:post-toggle-approval", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertEqual(response.status_code, 204)
        self.assertFalse(Post.objects.get(content="Test Post").approved)

    def test_post_toggle_approval_owner(self):
        # The board owner can toggle approval both ways as well.
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        post = Post.objects.get(content="Test Post")
        self.assertFalse(post.approved)
        response = self.client.post(
            reverse("boards:post-toggle-approval", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertEqual(response.status_code, 204)
        self.assertTrue(Post.objects.get(content="Test Post").approved)
        response = self.client.post(
            reverse("boards:post-toggle-approval", kwargs={"slug": post.topic.board.slug, "pk": post.id})
        )
        self.assertEqual(response.status_code, 204)
        self.assertFalse(Post.objects.get(content="Test Post").approved)

    async def test_post_toggle_websocket_message(self):
        # Each toggle must push a "post_updated" event to the board's channel.
        application = URLRouter(websocket_urlpatterns)
        board = await sync_to_async(Board.objects.get)(title="Test Board")
        communicator = WebsocketCommunicator(application, f"/ws/boards/{board.slug}/")
        connected, _ = await communicator.connect()
        self.assertTrue(connected, "Could not connect")
        await sync_to_async(self.client.login)(username="testuser1", password="1X<ISRUkw+tuK")
        # First frame on connect announces the session.
        message = await communicator.receive_from()
        self.assertIn("session_connected", message)
        post = await sync_to_async(Post.objects.get)(content="Test Post")
        await sync_to_async(self.client.post)(self.post_approval_url)
        message = await communicator.receive_from()
        self.assertIn("post_updated", message)
        self.assertIn(f'"post_pk": {post.id}', message)
        # Toggling back emits the same event again.
        await sync_to_async(self.client.post)(self.post_approval_url)
        message = await communicator.receive_from()
        self.assertIn("post_updated", message)
        self.assertIn(f'"post_pk": {post.id}', message)
# Isolated media root so uploaded fixture images never touch real storage;
# removed again in tearDownClass.
MEDIA_ROOT = tempfile.mkdtemp()


@override_settings(MEDIA_ROOT=MEDIA_ROOT)
class ImageSelectViewTest(TestCase):
    """The image-select view requires login and lists images of one type."""

    @classmethod
    def setUpTestData(cls):
        # Five fixture images per IMAGE_TYPE, all built from the same PNG.
        User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        module_dir = os.path.dirname(__file__)
        image_path = os.path.join(module_dir, "images/white_horizontal.png")
        for type, text in IMAGE_TYPE:
            for i in range(5):
                with open(image_path, "rb") as image_file:
                    image = Image(
                        type=type,
                        image=SimpleUploadedFile(
                            name=f"{type}-{i}.png",
                            content=image_file.read(),
                            content_type="image/png",
                        ),
                        title=f"{text} {i}",
                    )
                    image.save()

    @classmethod
    def tearDownClass(cls):
        # Clean up the temporary media directory created above.
        shutil.rmtree(MEDIA_ROOT, ignore_errors=True)
        super().tearDownClass()

    def test_image_select_anonymous(self):
        # Every image type redirects anonymous users to login.
        for type, _ in IMAGE_TYPE:
            response = self.client.get(reverse("boards:image-select", kwargs={"type": type}))
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response.url, f"/accounts/login/?next=/boards/image_select/{type}/")

    def test_image_select_logged_in(self):
        # Logged-in users see exactly the images stored for that type.
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        for type, _ in IMAGE_TYPE:
            response = self.client.get(reverse("boards:image-select", kwargs={"type": type}))
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.context["images"].count(), Image.objects.filter(type=type).count())
class QrViewTest(TestCase):
    """Access control for the board QR-code view: owner, moderators and staff
    get an inline base64 PNG; everyone else is redirected or forbidden."""

    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username="testuser1", password="1X<ISRUkw+tuK")
        User.objects.create_user(username="testuser2", password="2HJ1vRV0Z&3iD")
        test_user3 = User.objects.create_user(username="testuser3", password="3y6d0A8sB?5")
        board = Board.objects.create(title="Test Board", description="Test Description", owner=test_user1)
        board.preferences.moderators.add(test_user3)
        board.preferences.save()

    def test_qr_anonymous(self):
        # Anonymous users are redirected to login.
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:board-qr", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, f"/accounts/login/?next=/boards/{board.slug}/qr/")

    def test_qr_other_user(self):
        # Unrelated users are forbidden.
        self.client.login(username="testuser2", password="2HJ1vRV0Z&3iD")
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:board-qr", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 403)

    def test_qr_board_moderator(self):
        self.client.login(username="testuser3", password="3y6d0A8sB?5")
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:board-qr", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 200)
        # The QR image is embedded as a base64 data URI.
        self.assertIn("data:image/png;base64", response.content.decode("utf-8"))

    def test_qr_owner(self):
        self.client.login(username="testuser1", password="1X<ISRUkw+tuK")
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:board-qr", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 200)
        self.assertIn("data:image/png;base64", response.content.decode("utf-8"))

    def test_qr_staff(self):
        # Staff accounts may view any board's QR code.
        User.objects.create_user(username="testuser4", password="2HJ1vRV0Z&3iD", is_staff=True)
        self.client.login(username="testuser4", password="2HJ1vRV0Z&3iD")
        board = Board.objects.get(title="Test Board")
        response = self.client.get(reverse("boards:board-qr", kwargs={"slug": board.slug}))
        self.assertEqual(response.status_code, 200)
        self.assertIn("data:image/png;base64", response.content.decode("utf-8"))
|
import io
import json
import logging
from vortex.restful.RestfulResource import HTTP_REQUEST
from vortex.restful.RestfulResource import PluginRestfulResource
from twisted.trial import unittest
from twisted.web.test.requesthelper import DummyRequest
from vortex.Tuple import Tuple
from vortex.Tuple import TupleField
from vortex.Tuple import addTupleType
logger = logging.getLogger(__name__)
@addTupleType
class DummyTuple(Tuple):
    """Minimal vortex Tuple used as the payload type in the REST tests."""
    __tupleType__ = "DummyTuple"
    id = TupleField()    # numeric identifier echoed back by the handler
    text = TupleField()  # free-form text echoed back by the handler
class TestHandlerController:
    """Echo handler: serializes the received tuple back as a JSON dict."""

    def testMethod(self, tuple_: DummyTuple):
        # Prefer the newer serialization API when present.
        if hasattr(tuple_, "tupleToRestfulJsonDict"):
            # vortexpy > 2.1.3
            return tuple_.tupleToRestfulJsonDict()
        else:
            return tuple_.tupleToSmallJsonDict()
class RestfulServerTest(unittest.TestCase):
    """Exercises PluginRestfulResource routing and JSON round-tripping."""

    def setUp(self):
        # Register the same echo handler at three nested paths so routing
        # depth can be tested.
        handlerController = TestHandlerController()
        self.pluginAResource = PluginRestfulResource()
        self.pluginAResource.registerMethod(
            handlerController.testMethod,
            DummyTuple,
            b"test",
            [HTTP_REQUEST.GET, HTTP_REQUEST.POST],
        )
        self.pluginAResource.registerMethod(
            handlerController.testMethod,
            DummyTuple,
            b"test/test",
            [HTTP_REQUEST.GET, HTTP_REQUEST.POST],
        )
        self.pluginAResource.registerMethod(
            handlerController.testMethod,
            DummyTuple,
            b"test/test/test",
            [HTTP_REQUEST.GET, HTTP_REQUEST.POST],
        )

    def _dictToBytes(self, dictionary: dict) -> bytes:
        """Encode a dict as UTF-8 JSON for use as a request body."""
        return bytes(json.dumps(dictionary), "utf-8")

    def _bytesToDict(self, bytes_: bytes) -> dict:
        """Decode the first chunk written to the DummyRequest as JSON."""
        string = bytes_[0].decode("utf-8")
        return json.loads(string)

    def _check(self, expected: dict, actual: dict) -> bool:
        # expected dict should be equal or a subset of actual dict
        return expected.items() <= actual.items()

    def testValidJsonRequest(self):
        # Well-formed JSON is echoed back with a 200.
        requestDict = {"id": 1, "text": "text"}
        request = DummyRequest([])
        request.content = io.BytesIO(self._dictToBytes(requestDict))
        jsonResource = self.pluginAResource.getChild(b"test", request)
        jsonResource.render(request)
        self.assertEqual(request.responseCode, 200)
        responseDict = self._bytesToDict(request.written)
        self.assertTrue(self._check(requestDict, responseDict))

    def testInvalidJsonRequest(self):
        # Malformed JSON yields a server error.
        request = DummyRequest([])
        request.content = io.BytesIO(b"##invalid json}")
        jsonResource = self.pluginAResource.getChild(b"test", request)
        jsonResource.render(request)
        self.assertEqual(request.responseCode, 500)

    def testPath(self):
        # All three registered paths resolve to the same echo handler.
        requestDict = {"id": 1, "text": "text"}
        for path in [b"test", b"test/test", b"test/test/test"]:
            request = DummyRequest([])
            request.content = io.BytesIO(self._dictToBytes(requestDict))
            jsonResource = self.pluginAResource.getChild(path, request)
            jsonResource.render(request)
            self.assertEqual(request.responseCode, 200)
            responseDict = self._bytesToDict(request.written)
            self.assertTrue(self._check(requestDict, responseDict))
|
import logging
from fractions import Fraction as F
from functools import cmp_to_key
from typing import Dict, List, Tuple
from .config import Config
from .order import Order
from .order_util import IntegerTraits
logger = logging.getLogger(__name__)
def sorted_orders_by_exec_priority(orders):
    """Sorts orders by decreasing xrate, breaking ties with larger orders first,
    breaking ties with order id.
    """
    # A tuple key expresses the same ordering as the original comparator:
    # highest max_xrate first, then largest max_sell_amount, then id ascending.
    def priority_key(order):
        return (-order.max_xrate, -order.max_sell_amount, order.id)

    return sorted(orders, key=priority_key)
def compute_solution_metrics(prices, accounts_updated, orders, fee):
    """Compute objective function values and other metrics.

    Args:
        prices: token -> price mapping; None for tokens without a price.
        accounts_updated: account_id -> {token: balance} AFTER execution.
        orders: executed orders; each touched order is annotated in place
            with ``utility`` and ``utility_disreg``.
        fee: fee object exposing at least ``token``.

    Returns:
        Dict with keys 'volume', 'utility', 'utility_disreg',
        'utility_disreg_touched', 'fees', 'orders_touched'.
    """
    # Init objective values.
    obj = {'volume': 0,
           'utility': 0,
           'utility_disreg': 0,
           'utility_disreg_touched': 0,
           'fees': 0,
           'orders_touched': 0}
    for order in orders:
        # Orders on unpriced tokens must not have executed at all.
        if prices[order.buy_token] is None or prices[order.sell_token] is None:
            assert order.buy_amount == 0 and order.sell_amount == 0
            continue
        else:
            sell_token_price = prices[order.sell_token]
            buy_token_price = prices[order.buy_token]
        # Volume (referring to sell amount).
        obj['volume'] += order.sell_amount * sell_token_price
        xrate = F(buy_token_price, sell_token_price)
        # Utility at current prices.
        u = IntegerTraits.compute_utility_term(
            order=order,
            xrate=xrate,
            buy_token_price=buy_token_price,
            fee=fee
        )
        # Compute maximum possible utility analogously to the smart contract
        # (i.e., depending on the remaining token balance after order execution).
        if order.account_id is not None:
            balance_updated = accounts_updated[order.account_id].get(order.sell_token, 0)
        else:
            balance_updated = 0
        umax = IntegerTraits.compute_max_utility_term(
            order=order,
            xrate=xrate,
            buy_token_price=buy_token_price,
            fee=fee,
            balance_updated=balance_updated
        )
        # u > umax signals an inconsistency with the contract model; only
        # logged, not fatal.
        if u > umax:
            logger.warning(
                "Computed utility of <%s> larger than maximum utility:", order.index
            )
            logger.warning("u = %d", u)
            logger.warning("umax = %d", umax)
        obj['utility'] += u
        # Disregarded utility is clamped at zero in the aggregate.
        obj['utility_disreg'] += max(umax - u, 0)
        if order.sell_amount > 0:
            obj['orders_touched'] += 1
            obj['utility_disreg_touched'] += (umax - u)
        order.utility = u
        order.utility_disreg = (umax - u)
        # Fee amount as net difference of fee token sold/bought.
        if order.sell_token == fee.token:
            obj['fees'] += order.sell_amount
        elif order.buy_token == fee.token:
            obj['fees'] -= order.buy_amount
    return obj
def filter_orders_tokenpair(
    orders: List[Order],
    token_pair: Tuple[str, str]
) -> List[Order]:
    """Find all orders on a single given token pair (either direction).

    Args:
        orders: List of orders.
        token_pair: Tuple of two token IDs.

    Returns:
        The filtered orders.

    Note:
        BUGFIX: return annotation corrected from ``List[Dict]`` to
        ``List[Order]`` — the function returns the order objects themselves.
    """
    return [
        order for order in orders
        if set(token_pair) == {order.sell_token, order.buy_token}
    ]
def restrict_order_sell_amounts_by_balances(
    orders: List[Order],
    accounts: Dict[str, Dict[str, int]]
) -> List[Order]:
    """Restrict order sell amounts to available account balances.

    This method also filters out orders that end up with a sell amount of zero.
    Orders are processed best-priced first, so higher-priority orders consume
    the shared balance before lower-priority ones. Orders are mutated in place
    (``max_sell_amount`` is overwritten).

    Args:
        orders: List of orders.
        accounts: Dict of accounts and their token balances.

    Returns:
        The capped orders.
    """
    orders_capped = []
    # Init dict for remaining balance per account and token pair.
    remaining_balances = {}
    # Iterate over orders sorted by limit price (best -> worse).
    for order in sorted_orders_by_exec_priority(orders):
        aID, tS, tB = order.account_id, order.sell_token, order.buy_token
        # Init remaining balance for new token pair on some account.
        # NOTE: the balance is tracked per (account, sell, buy) triple, so the
        # same sell-token balance can be spent once per buy token.
        if (aID, tS, tB) not in remaining_balances:
            sell_token_balance = F(accounts.get(aID, {}).get(tS, 0))
            remaining_balances[(aID, tS, tB)] = sell_token_balance
        # Get sell amount (capped by available account balance).
        sell_amount_old = order.max_sell_amount
        sell_amount_new = min(sell_amount_old, remaining_balances[aID, tS, tB])
        # Skip orders with zero sell amount.
        if sell_amount_new == 0:
            continue
        else:
            assert sell_amount_old > 0
        # Update remaining balance.
        remaining_balances[aID, tS, tB] -= sell_amount_new
        assert remaining_balances[aID, tS, tB] >= 0
        order.max_sell_amount = sell_amount_new
        # Append capped order.
        orders_capped.append(order)
    return orders_capped
def count_nr_exec_orders(orders):
    """Return the number of executed orders (those with a positive buy amount)."""
    executed = 0
    for order in orders:
        if order.buy_amount > 0:
            executed += 1
    return executed
def compute_objective(prices, accounts_updated, orders, fee):
    """Compute objective function value of solution.

    The objective is ``2 * total_utility - total_max_utility``, i.e. utility
    minus disregarded utility.

    Args:
        prices: token -> price mapping; None for unpriced tokens.
        accounts_updated: account_id -> {token: balance} AFTER execution.
        orders: executed orders.
        fee: fee object used by the IntegerTraits utility terms.

    Returns:
        The scalar objective value.
    """
    # Init objective values.
    total_u = 0
    total_umax = 0
    for order in orders:
        # Orders on unpriced tokens must not have executed at all.
        if prices[order.buy_token] is None or prices[order.sell_token] is None:
            assert order.buy_amount == 0 and order.sell_amount == 0
            continue
        else:
            sell_token_price = prices[order.sell_token]
            buy_token_price = prices[order.buy_token]
        xrate = F(buy_token_price, sell_token_price)
        # Utility at current prices.
        u = IntegerTraits.compute_utility_term(
            order=order,
            xrate=xrate,
            buy_token_price=buy_token_price,
            fee=fee
        )
        # Compute maximum possible utility analogously to the smart contract
        # (i.e., depending on the remaining token balance after order execution).
        if order.account_id is not None:
            balance_updated = accounts_updated[order.account_id].get(order.sell_token, 0)
        else:
            balance_updated = 0
        umax = IntegerTraits.compute_max_utility_term(
            order=order,
            xrate=xrate,
            buy_token_price=buy_token_price,
            fee=fee,
            balance_updated=balance_updated
        )
        # Clamp so rounding artifacts never make umax < u.
        umax = max(u, umax)
        total_u += u
        total_umax += umax
    return 2 * total_u - total_umax
def update_accounts(accounts, orders):
    """Apply executed orders to account balances in place.

    For every order, credits ``buy_amount`` of the buy token and debits
    ``sell_amount`` of the sell token on the order's account. Balances are
    normalized to ``int`` first (they may arrive as non-int numerics).

    BUGFIX: the original initialized only a missing *buy*-token balance and
    raised KeyError when the *sell*-token balance was absent; both are now
    initialized symmetrically.

    Args:
        accounts: Dict account_id -> {token: balance}; mutated in place.
        orders: Iterable of executed orders.
    """
    for order in orders:
        balances = accounts[order.account_id]
        buy_token, sell_token = order.buy_token, order.sell_token
        balances.setdefault(buy_token, 0)
        balances.setdefault(sell_token, 0)
        balances[buy_token] = int(balances[buy_token]) + order.buy_amount
        balances[sell_token] = int(balances[sell_token]) - order.sell_amount
def compute_connected_tokens(orders, fee_token):
    """Return the set of tokens connected to fee_token through traded pairs.

    Only tokens that are both sold and bought somewhere (plus the fee token
    itself) participate in the connectivity graph.
    """
    sold = {order.sell_token for order in orders}
    bought = {order.buy_token for order in orders}
    eligible = (sold & bought) | {fee_token}

    # Undirected adjacency restricted to eligible tokens.
    adjacency = {token: set() for token in eligible}
    for order in orders:
        if order.sell_token in adjacency and order.buy_token in adjacency:
            adjacency[order.sell_token].add(order.buy_token)
            adjacency[order.buy_token].add(order.sell_token)

    # Graph traversal from the fee token; visitation order does not matter
    # since only the resulting set is returned.
    visited = {fee_token}
    frontier = [fee_token]
    while frontier:
        current = frontier.pop()
        for neighbor in adjacency[current]:
            if neighbor not in visited:
                visited.add(neighbor)
                frontier.append(neighbor)
    return visited
def compute_total_fee(orders, prices, fee, arith_traits):
    """Compute total fee in the solution.

    ``arith_traits`` is unused here but kept for API symmetry with the other
    fee helpers.
    """
    total = 0
    for order in orders:
        total += order.fee(prices, fee)
    return total
def compute_average_order_fee(orders, prices, fee, arith_traits):
    """Average fee per executed order.

    Raises ZeroDivisionError if no order was executed; callers guard for the
    trivial solution before calling.
    """
    total_fee = compute_total_fee(orders, prices, fee, arith_traits)
    executed = count_nr_exec_orders(orders)
    return total_fee / executed
def is_economic_viable(orders, prices, fee, arith_traits):
    """Check the solution against the configured fee-viability thresholds.

    Returns True when (a) nothing executed, (b) both thresholds are disabled,
    or (c) every non-fee-token order meets the absolute minimum fee AND the
    average fee per executed order meets the configured average minimum.
    """
    # Trivial solution is economically viable.
    if count_nr_exec_orders(orders) == 0:
        return True
    # Shortcut to avoid computing fees.
    if Config.MIN_AVERAGE_ORDER_FEE == 0 and Config.MIN_ABSOLUTE_ORDER_FEE == 0:
        return True
    # Check minimum absolute order fee.
    # Orders selling the fee token itself are exempt from this check.
    if any(
        o.fee(prices, fee) < Config.MIN_ABSOLUTE_ORDER_FEE
        for o in orders
        if o.sell_token != fee.token and o.buy_amount > 0
    ):
        return False
    # Check minimum average order fee.
    average_order_fee = compute_average_order_fee(orders, prices, fee, arith_traits)
    return average_order_fee >= Config.MIN_AVERAGE_ORDER_FEE
def is_trivial(orders):
    """True iff the solution executes no order at all."""
    return not any(order.buy_amount > 0 for order in orders)
# Note: this is an approximation, there is no guarantee that the returned
# subset is economically viable (or even feasible) at all.
def compute_approx_economic_viable_subset(orders, prices, fee, arith_traits):
    """Greedy approximation of a maximal fee-viable subset of *orders*.

    Filters out orders failing the absolute-fee minimum, then keeps a
    largest-volume-first prefix whose average fee stays above the configured
    minimum. Returns the trivial (empty) subset when only one side of the
    market survives.
    """
    # Shortcut.
    if Config.MIN_AVERAGE_ORDER_FEE == 0 and Config.MIN_ABSOLUTE_ORDER_FEE == 0:
        return orders
    # Compute maximal subset of orders that satisfy the minimum economic
    # viability constraint (but may fail to satisfy other constraints)
    # 1. Minimum absolute fee per order.
    orders = [
        o for o in orders
        if o.sell_token == fee.token
        or o.fee(prices, fee) >= Config.MIN_ABSOLUTE_ORDER_FEE
    ]
    # 2. Minimum average fee per order.
    # Remove empty orders.
    orders_by_dec_volume = [o for o in orders if o.buy_amount > 0]
    # Sort orders by decreasing volume
    orders_by_dec_volume = sorted(
        orders_by_dec_volume,
        key=lambda o: o.volume(prices),
        reverse=True
    )
    # Grow the prefix while its average fee stays above threshold.
    i = 1
    while i < len(orders_by_dec_volume) and \
        compute_average_order_fee(
            orders_by_dec_volume[:i], prices, fee, arith_traits
        ) >= Config.MIN_AVERAGE_ORDER_FEE:
        i += 1
    orders = orders_by_dec_volume[:i]
    # If there are only buy orders or only sell orders in the subset then
    # the subset can be further reduced to the trivial solution.
    if len({o.buy_token for o in orders}) == 1:
        return []
    return orders
|
from nr.utils.re import MatchAllError, match_all
import pytest
def test_match_all():
    """match_all yields a match object per hit and raises MatchAllError with
    position info when the input is not fully consumable."""
    matches = [x.groups() for x in match_all(r'(\d+)([A-Z]+)', '30D2A53BO')]
    assert matches == [('30', 'D'), ('2', 'A'), ('53', 'BO')]

    matches = [x.group(0) for x in match_all(r'\d{2}', '1234567890')]
    assert matches == ['12', '34', '56', '78', '90']

    # Odd-length input cannot be tiled by two-digit matches: the error
    # records the original string and where matching stopped.
    with pytest.raises(MatchAllError) as excinfo:
        list(match_all(r'\d{2}', '123456789'))
    assert excinfo.value.string == '123456789'
    assert excinfo.value.endpos == 8
|
from enum import IntEnum, unique
@unique
class SupportedLanguage(IntEnum):
    """
    Language supported by the app.
    """
    EN = 1

    @staticmethod
    def get(ietf_tag: str) -> int:
        """
        Return language by IETF language tag.
        - "EN" will be returned, if specified
        language not supported.
        """
        known_tags = dict.fromkeys(("en", "en-us", "en-gb"), SupportedLanguage.EN)
        return known_tags.get(ietf_tag.lower(), SupportedLanguage.EN)
|
import socket
from threading import *
from _thread import *
import wave, pyaudio, time
# Sentinel substring marking handshake packets in the text protocol.
HANDSHAKE_MESSAGE = "mysecretfornow"
BUFFER_SIZE = 1024
BROADCASTING_PORT = 11067
TEXT_LISTENING_PORT = 3480
VOICE_LISTENING_PORT = 3481
VOICE_SENDING_PORT = 11067

# Determine this host's outbound IP by "connecting" a UDP socket to a public
# address (no packet is actually sent for a UDP connect).
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
HOST_IP = s.getsockname()[0]
s.close()

# Text Server
text_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
text_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
text_server.bind((HOST_IP, TEXT_LISTENING_PORT))
text_server.listen(100)

list_of_clients = []      # live TCP connections of chat clients
client_names = []         # usernames registered via handshake
received_frames = {}      # sender IP -> queue of pending voice datagrams
list_of_client_ips = []   # client IPs used for voice fan-out

# Voice server
voice_server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
voice_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
voice_server.bind((HOST_IP, VOICE_LISTENING_PORT))
def clientthread(conn, addr):
    """Per-client TCP loop: register handshakes and relay chat messages.

    A message containing HANDSHAKE_MESSAGE registers the sender's username
    and broadcasts the updated user list; any other message is relayed to
    every connected client. Runs until the connection drops.

    BUGFIXES vs original:
    - `connected_user != client_names` compared a string against the whole
      list (always True), so the same name was appended on every handshake;
      replaced with a membership test.
    - An empty recv() (peer closed) now also ends the loop instead of
      busy-looping on a dead socket.
    """
    keepAlive = 1
    while keepAlive:
        try:
            message = conn.recv(BUFFER_SIZE)
            if message:
                if HANDSHAKE_MESSAGE in message.decode():
                    connected_user = message.decode().split(HANDSHAKE_MESSAGE)[0].strip()
                    if connected_user not in client_names:
                        client_names.append(connected_user)
                    # Announce the (possibly unchanged) user list to everyone.
                    userlist = ';'.join(client_names) + HANDSHAKE_MESSAGE
                    broadcast_message(userlist)
                else:
                    print("<" + addr[0] + ">" + message.decode())
                    message_to_send = message.decode()
                    broadcast_message(message_to_send)
            else:
                # Peer performed an orderly shutdown.
                remove(conn)
                keepAlive = 0
        except Exception:
            conn.close()
            remove(conn)
            keepAlive = 0
def client_thread_voice(conn, addr):
    """Stream the local "test.wav" file to one client over UDP (demo path).

    NOTE(review): `conn` is unused; the sleep pacing formula
    `60/framerate*frames/48` looks ad hoc — confirm intended playback rate.
    """
    CHUNK = 50*1024
    wf = wave.open("test.wav")
    sample_rate = wf.getframerate()
    # Number of frames that fit in CHUNK bytes for this sample width/channels.
    number_of_frames = int(CHUNK/wf.getsampwidth()/wf.getnchannels())
    data = wf.readframes(number_of_frames)
    while data:
        try:
            voice_server.sendto(data, (addr[0],11067))
            time.sleep(60/wf.getframerate()*number_of_frames/48)
            data = wf.readframes(int(CHUNK/wf.getsampwidth()/wf.getnchannels()))
        except Exception as e:
            # NOTE(review): on a send error `data` is unchanged, so this loop
            # retries the same chunk forever — confirm that is intended.
            print(e)
def broadcast_message(message):
    """Send a text message to every connected client, pruning dead sockets."""
    for client in list_of_clients:
        try:
            client.send(message.encode())
        except Exception as e:
            print(e)
            client.close()
            # if the link is broken, we remove the client
            remove(client)
def remove(connection):
    """Forget a dropped client connection; silently ignore unknown ones."""
    try:
        list_of_clients.remove(connection)
    except ValueError:
        pass
# PyAudio playback parameters: 16-bit stereo at 44.1 kHz, 1 KiB chunks.
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
p = pyaudio.PyAudio()
def udp_receive():
    """Receive voice datagrams and queue them per sender IP.

    Runs forever; broadcast_voice() drains the per-sender queues.

    BUGFIXES vs original:
    - The first datagram from a new sender was discarded (an empty list was
      created instead of appending the frame); setdefault now queues it.
    - Removed the unreachable `udp.close()` line, which referenced an
      undefined name.
    """
    while True:
        # Buffer sized as CHUNK frames * 2 bytes/sample * 2 channels.
        soundData, addr = voice_server.recvfrom(CHUNK * 2 * 2)
        received_frames.setdefault(addr[0], []).append(soundData)
def broadcast_voice():
    """Drain queued voice frames, fanning each one out to every client IP.

    Each frame is preceded by a "user:<ip>" datagram identifying the sender.
    NOTE(review): busy-polls the queues; consider a blocking queue.
    """
    while True:
        for sending_client in received_frames.keys():
            if len(received_frames[sending_client]) > 0:
                frame = received_frames[sending_client].pop(0)
                for client in list_of_client_ips:
                    # Loopback to the sender is currently enabled on purpose.
                    if True:#client != sending_client:
                        voice_server.sendto(str("user:" + sending_client).encode(), (client, VOICE_SENDING_PORT))
                        voice_server.sendto(frame, (client, VOICE_SENDING_PORT))
if __name__ == '__main__':
    # Local playback stream for received audio (opened up-front; the worker
    # threads currently write to sockets, not to this stream).
    stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, output=True, frames_per_buffer=CHUNK)
    Ts = Thread(target=udp_receive, args=())
    Tr = Thread(target=broadcast_voice, args=())
    # Daemon threads die with the main thread (Thread.setDaemon is deprecated;
    # assign the attribute instead).
    Tr.daemon = True
    Ts.daemon = True
    Ts.start()
    Tr.start()
    # Accept loop: register each text client and serve it on its own thread.
    while True:
        conn, addr = text_server.accept()
        list_of_clients.append(conn)
        list_of_client_ips.append(addr[0])
        print(addr[0] + " connected")
        start_new_thread(clientthread, (conn, addr))
    # Unreachable cleanup kept for documentation. BUGFIX: the original called
    # close() on an undefined name `server`.
    conn.close()
    text_server.close()
#!/usr/bin/env python
from qiskit import QuantumRegister, QuantumCircuit, Aer, execute
# One qubit, no gates: simulating an empty circuit leaves the state at |0>.
q = QuantumRegister(1)
circuit = QuantumCircuit(q)
simulator = Aer.get_backend("statevector_simulator")
job = execute(circuit, simulator)
result = job.result()
statevector = result.get_statevector()
print(statevector)
print(circuit.draw())
|
#!/usr/bin/env python3
import sports_book_manager.model_probability as mp
import unittest
class TestMP(unittest.TestCase):
    """
    Tests valid inputs for model_probability return results.
    """

    def test_valid_tests(self):
        """
        Tests to make sure outputs are occurring with valid inputs.
        """
        # Only truthiness is asserted, not numeric correctness of the model.
        self.assertTrue(mp.model_probability(-0.5, 0.5, 0.5))
        self.assertTrue(mp.model_probability(4, 0.86, -2.6))
|
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
import numpy
import pandas as pd
import psycopg2
import semver
import yaml
from dash.dependencies import Input, Output
from dotenv import load_dotenv
from plotly import express as px
from plotly import graph_objects as go
# for debugging dataframes printed to console
pd.set_option('min_rows', 10)
pd.set_option('max_rows', 500)
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 1098)

# Database credentials come from the environment (.env supported via dotenv).
load_dotenv()
pg_host = os.getenv('PGHOST')
pg_port = os.getenv('PGPORT')
pg_database = os.getenv('PGDATABASE')
pg_user = os.getenv('PGUSER')
pg_password = os.getenv('PGPASSWORD')
conn = psycopg2.connect(
    host=pg_host,
    port=pg_port,
    database=pg_database,
    user=pg_user,
    password=pg_password
)

# namespace -> component-group mapping consumed by assign_groupings().
with open('component-mappings.yaml', 'r') as file:
    group_config = yaml.load(file, Loader=yaml.FullLoader)
    # NOTE(review): redundant — the `with` block already closes the file.
    file.close()

# Metric value columns stored as NUMERIC in the database.
value_columns = ['q95_value', 'avg_value', 'min_value', 'max_value']
def db_numeric_to_float(df):
    """Cast the NUMERIC value columns to float and attach group labels.

    Uses the module-level `value_columns` list and delegates grouping to
    assign_groupings().
    """
    for v in value_columns:
        df[v] = df[v].astype('float')
    df = assign_groupings(df)
    return df
def order_versions():
    """Build the ordered version list: 4.6.0 through 4.6.20, then 4.7.0."""
    versions = []
    try:
        current = semver.VersionInfo.parse('4.6.0')
        versions.append(str(current))
        # inc minor ver
        for _ in range(1):
            # inc patch ver
            for _ in range(20):
                current = current.bump_patch()
                versions.append(str(current))
            current = current.replace(patch=0).bump_minor()
            versions.append(str(current))
    except Exception as exc:
        print(f"exception: {exc}")
    return versions
def df_mem_bytes_to_gigabytes(df):
    """Convert the metric value columns from bytes to gigabytes in place."""
    for column in value_columns:
        df[column] = df[column] / 1e9
    return df
def assign_groupings(df=pd.DataFrame()):
    """Add a 'group' column mapping each row's namespace to a component group.

    The mapping comes from the module-level `group_config`
    (component-mappings.yaml); unmapped namespaces keep a None group.
    """
    groups = numpy.empty(len(df), dtype=object)
    try:
        df.insert(len(df.columns), 'group', groups)
    except KeyError as e:
        # NOTE(review): inserting a duplicate column raises ValueError, not
        # KeyError — confirm which failure this is meant to tolerate.
        print(f"dataframe exception: {e}")
    for grp, namespaces in group_config.items():
        for ns in namespaces:
            df.loc[df['namespace'] == ns, ['group']] = grp
    return df
def executeQuery(query):
    """Run *query* on the module-level connection and return a typed DataFrame.

    Column names are taken from the cursor description; NUMERIC value columns
    are converted to float and group labels attached via db_numeric_to_float().
    """
    cur = conn.cursor()
    cur.execute(query)
    desc = cur.description
    columns = [col[0] for col in desc]
    rows = [row for row in cur.fetchall()]
    df = pd.DataFrame([[c for c in r] for r in rows])
    # Columns start out as positional integers; rename to the SQL names.
    df.rename(inplace=True, columns=dict(enumerate(columns)))
    df = db_numeric_to_float(df)
    return df
def sort_by_version(df=pd.DataFrame()) -> pd.DataFrame:
    """Sort rows by semantic-version order (not lexicographic).

    The 'version' column is round-tripped through semver.VersionInfo so that
    e.g. '4.10.0' sorts after '4.9.0'; it is returned as strings again.
    """
    df['version'] = df['version'].map(semver.parse_version_info)
    df.sort_values(by='version', inplace=True)
    df['version'] = df['version'].astype(dtype=str)
    return df
def get_mem_metrics():
    """Load container memory metrics from the database, converted to GB."""
    query_mem = """
    SELECT * FROM caliper_metrics WHERE metric = 'container_memory_bytes';
    """
    df = executeQuery(query_mem)
    df = df_mem_bytes_to_gigabytes(df)
    return df
def get_cpu_metrics():
    """Load CPU usage metrics from the database, converted to percent."""
    query_cpu = """
    SELECT * FROM caliper_metrics WHERE metric = 'cpu_usage_ratio';
    """
    df = executeQuery(query_cpu)
    # Ratios (0..1) become percentages for display.
    for v in value_columns:
        df[v] = df[v] * 100
    return df
def trim_and_group(df, op=''):
    """Sum metrics per (version, group), then order rows within each version
    by the *op* value column."""
    df = df.groupby(by=['version', 'group'], sort=True, as_index=False).sum()
    df = df.groupby(by=['version'], sort=True, as_index=False).apply(
        lambda frame: frame.sort_values(by=[op], inplace=False))
    # Flatten the (group, row) MultiIndex produced by the apply above.
    df.reset_index(inplace=True)
    return df
def get_max_bar_height(df=pd.DataFrame()):
    """Tallest stacked bar: the maximum per-version sum over float columns."""
    per_version_totals = df.groupby(by=['version']).sum()
    column_maxima = per_version_totals.select_dtypes(include='float64').max()
    return max(column_maxima)
def pad_range(r=0):
    """Pad an axis range value with 10% headroom."""
    return 1.1 * r
def color_map(df=pd.DataFrame(), by='') -> dict:
    """Assign a stable qualitative palette color to each value of *by*.

    Groups are enumerated in sorted order so a given group name always maps
    to the same color across figures built from the same data.
    """
    palette = px.colors.qualitative.G10
    try:
        grouped = df.groupby(by=by, as_index=True, sort=True)
    except Exception as e:
        raise KeyError(f'color_map.dataframe.groupby: {type(e)}: input value {e} raised exception')
    return {name: palette[index] for index, name in enumerate(grouped.groups)}
def pod_max(df=pd.DataFrame(), op='', by='') -> pd.DataFrame:
    """Per-(version, *by*, *op*) maxima of the remaining numeric columns.

    BUGFIX: the return annotation previously *called* ``pd.DataFrame()``,
    constructing a throwaway frame at definition time; it now annotates the
    type itself. Runtime behavior of the body is unchanged.
    """
    return df.groupby(by=['version', by, op], sort=True, as_index=True).max(numeric_only=True).reset_index()
# Placeholder aggregators — not yet implemented; each currently returns None.
def pod_min(df=pd.DataFrame()) -> pd.DataFrame(): return
def pod_avg(df=pd.DataFrame()) -> pd.DataFrame(): return
def pod_q95(df=pd.DataFrame()) -> pd.DataFrame(): return

# Dispatch table from operation name to its aggregator function.
op_map = {
    'max': pod_max,
    'min': pod_min,
    'avg': pod_avg,
    'q95': pod_q95,
}

# Placeholder for operator metrics — not yet implemented.
def operators(df=pd.DataFrame()) -> pd.DataFrame(): return
def bar_fig(df=pd.DataFrame(), op='', y_max=0.0, title='', y_title='', x_title='', suffix='', legend_title=''):
    """Build a stacked bar chart of column `op` per group, versions on x.

    NOTE: mutates the caller's frame — the 'version' column is rewritten
    in place while sorting.
    """
    # Sort semantically ("1.10.0" after "1.9.0"), then render back to str
    # so plotly treats versions as categorical labels.
    df['version'] = df['version'].map(semver.parse_version_info)
    df.sort_values(by=['version'], inplace=True)
    df['version'] = df['version'].astype(dtype=str)
    fig = px.bar(
        data_frame=df,
        x='version',
        # NOTE(review): passing ['group', op] as y alongside color='group'
        # looks suspicious — y is normally just the value column. Confirm.
        y=['group', op],
        color='group',
        title=title,
        color_discrete_map=color_map(df, by='group'),
    )
    fig.update_yaxes(
        go.layout.YAxis(
            title=y_title,
            ticksuffix=suffix,
            range=[0, y_max],  # fixed ceiling shared across radio options
            fixedrange=True,
        ))
    fig.update_xaxes(go.layout.XAxis(
        title=x_title,
    ))
    fig.update_layout(
        go.Layout(
            legend=go.layout.Legend(
                title=legend_title,
                traceorder='reversed',
            ),
        )
    )
    return fig
def line_fig(df=None, op='', y_max=0.0, title='', y_title='', x_title='', tick_suffix=''):
    """Build one scatter trace per group showing column `op` across
    semver-sorted versions.

    BUG FIX: the blanket except re-raised a flat Exception and discarded the
    original traceback; it now chains with `from e`. The shared mutable
    default frame is replaced with None.
    """
    if df is None:
        df = pd.DataFrame()
    fig = go.Figure()
    fig.update_layout({
        "title": title,
        "legend": {
            "traceorder": 'grouped+reversed',
        },
    })
    fig.update_yaxes({
        "title": y_title,
        "ticksuffix": tick_suffix,
        "fixedrange": True,
        "range": [0, y_max]
    })
    fig.update_xaxes({
        "title": x_title
    })
    try:
        cm = color_map(df, by='group')
        groups = df.groupby(by='group', sort=True)
        for name, g in groups:
            # Sort each group's rows in true semantic-version order, then
            # render versions back to strings for the categorical x axis.
            g['version'] = g['version'].map(semver.parse_version_info)
            g.sort_values(by='version', inplace=True)
            g['version'] = g['version'].astype(str)
            fig.add_trace(
                go.Scatter(
                    name=name,
                    x=g['version'],
                    y=g[op],
                    legendgroup=1,
                    marker={'color': cm[name]},
                )
            )
    except Exception as e:
        # Chain the cause so the root traceback is preserved for debugging.
        raise Exception(f'line_fig: {e}') from e
    return fig
def bar_group_fig(df=pd.DataFrame(), op='', y_max=0.0, title='', y_title='', x_title='', tick_suffix=''):
    """Build a grouped (side-by-side) bar chart of per-(version, group)
    maxima of column `op`.

    NOTE(review): the y_max parameter is ignored — it is immediately
    recomputed from the grouped data below. Confirm whether callers'
    y_max should take precedence.
    """
    # Keep only the identifying columns plus the selected metric, then
    # take the max per (version, group).
    df = df[['version', 'group', 'namespace', 'pod', op]]
    df = df.groupby(by=['version', 'group']).max().reset_index()
    y_max = pad_range(df[op].max())
    fig = go.Figure()
    fig.update_layout({
        'title': title,
        'barmode': 'group'
    })
    fig.update_yaxes(
        {
            'title': y_title,
            'ticksuffix': tick_suffix,
            'fixedrange': True,
            'range': [0, y_max]
        }
    )
    fig.update_xaxes({
        'title': x_title
    })
    try:
        cm = color_map(df, by='group')
        versions = pd.unique(df['version'])
        # One bar trace per group; plotly interleaves them per version.
        for name, group in df.groupby(by='group', sort=True):
            fig.add_trace(
                go.Bar(
                    name=name,
                    x=versions,
                    y=group[op],
                    legendgroup=1,
                    marker={'color': cm[name]}
                )
            )
    except Exception as e:
        # Best-effort: log and return the (possibly empty) figure.
        print(f'bar_group_fig: {e}')
    return fig
# Aggregate selector options shared by every radio group below; each
# `value` is a column name used as `op` on the metrics frames
# (e.g. df['max_value'] in the line callbacks).
radio_options = [
    {'label': '95th-%', 'value': 'q95_value'},
    {'label': 'Average', 'value': 'avg_value'},
    {'label': 'Min', 'value': 'min_value'},
    {'label': 'Max', 'value': 'max_value'},
]
# Dash application.
# NOTE(review): external_stylesheets takes URLs; './style.css' is a relative
# path — confirm the stylesheet actually loads (Dash normally serves from
# an assets/ directory automatically).
app = dash.Dash(__name__, external_stylesheets=['./style.css'])
# Page layout: one graph per metric view, each paired with a radio selector
# that picks the aggregate column to plot (see radio_options above).
app.layout = html.Div(children=[
    html.H1(children='Caliper - Basic Dashboard'),
    html.H2(children='Net Resource Usage by an Idle 6 Node Cluster, Span 10min'),
    html.Div(children=[
        dcc.Graph(id='mem-group'),
        dcc.RadioItems(id='memory-group-op-radio', value='q95_value', options=radio_options),
    ]),
    html.Div(children=[
        dcc.Graph(id='memory-graph'),
        dcc.RadioItems(id='memory-op-radio', value='q95_value', options=radio_options),
    ]),
    html.Div(children=[
        dcc.Graph(id='cpu-graph'),
        dcc.RadioItems(id='cpu-op-radio', value='q95_value', options=radio_options)
    ]),
    html.Div(children=[
        dcc.Graph(id='mem-line'),
        dcc.RadioItems(id='mem-line-input', value='q95_value', options=radio_options)
    ]),
    html.Div(children=[
        dcc.Graph(id='cpu-line'),
        dcc.RadioItems(id='cpu-line-input', value='q95_value', options=radio_options)
    ])
])
@app.callback(
    Output(component_id='mem-group', component_property='figure'),
    Input(component_id='memory-group-op-radio', component_property='value')
)
def mem_group(op):
    """Render the grouped memory bar chart for the selected aggregate op."""
    try:
        df_mem = get_mem_metrics()
        y_max = pad_range(get_max_bar_height(df_mem))
        # BUG FIX: trim_and_group returns a new frame (it does not mutate
        # its argument); the result was previously discarded. Assign it,
        # matching the sibling callbacks (mem_response, cpu_response).
        df_mem = trim_and_group(df_mem, op)
        return bar_group_fig(df=df_mem, op=op, y_max=y_max, title='test grouping', tick_suffix='Gb',
                             y_title='memory', x_title='OCP Version')
    except Exception as e:
        # Dash swallows callback exceptions; log to stdout for debugging.
        print(f'mem_group: got exception type {type(e)}:\n{e}')
@app.callback(
    Output(component_id='memory-graph', component_property='figure'),
    Input(component_id='memory-op-radio', component_property='value')
)
def mem_response(op):
    """Render the stacked memory bar chart for the selected aggregate op."""
    try:
        df_mem = get_mem_metrics()
        # Compute the y-axis ceiling before grouping so stacked totals fit.
        y_max = pad_range(get_max_bar_height(df_mem))
        df_mem = trim_and_group(df_mem, op=op)
        return bar_fig(df=df_mem, op=op, y_max=y_max, title='Net Memory Usage By Version', suffix='Gb',
                       y_title='Memory (Gb)',
                       x_title='OCP Version')
    except Exception as e:
        # Dash swallows callback exceptions; log to stdout for debugging.
        print(f'mem_response: got exception type {type(e)}:\n{e}')
@app.callback(
    Output(component_id='cpu-graph', component_property='figure'),
    Input(component_id='cpu-op-radio', component_property='value')
)
def cpu_response(op):
    """Render the stacked CPU bar chart for the selected aggregate op."""
    try:
        df_cpu = get_cpu_metrics()
        y_max = pad_range(get_max_bar_height(df_cpu))
        df_cpu = trim_and_group(df_cpu, op)
        # NOTE(review): y_title says "Hours" while the tick suffix is '%';
        # one of the two labels looks stale — confirm.
        return bar_fig(df_cpu, op, y_max, title='CPU % by OCP Version', suffix='%',
                       y_title='Net CPU Time in Hours', x_title='OCP Versions', legend_title='')
    except Exception as e:
        # Dash swallows callback exceptions; log to stdout for debugging.
        print(f'cpu_response: got exception type {type(e)}:\n{e}')
@app.callback(
    Output(component_id='mem-line', component_property='figure'),
    Input(component_id='mem-line-input', component_property='value')
)
def mem_line_response(op):
    """Render the memory trend line chart for the selected aggregate op."""
    try:
        df_mem = get_mem_metrics()
        df_mem = trim_and_group(df_mem, op)
        # NOTE(review): the y ceiling always comes from 'max_value', even
        # when another op column is plotted — confirm this is intentional
        # (it does guarantee every op fits under the same ceiling).
        y_max = pad_range(df_mem['max_value'].max())
        return line_fig(df=df_mem, op=op, y_max=y_max, tick_suffix='Gb', title='Memory Trends by Version',
                        y_title='Net Memory Consumed in Gigabytes', x_title='OCP Version')
    except Exception as e:
        # Dash swallows callback exceptions; log to stdout for debugging.
        print(f'mem_line_response: got exception type {type(e)}:\n{e}')
@app.callback(
    Output(component_id='cpu-line', component_property='figure'),
    Input(component_id='cpu-line-input', component_property='value')
)
def cpu_line_response(op):
    """Render the CPU trend line chart for the selected aggregate op."""
    try:
        df_cpu = get_cpu_metrics()
        df_cpu = trim_and_group(df_cpu, op)
        upper = pad_range(df_cpu['max_value'].max())
        return line_fig(df=df_cpu, op=op, y_max=upper, tick_suffix='%', title='CPU % Trends by Version',
                        y_title='Net CPU Time in Hours', x_title='OCP Version')
    except Exception as e:
        print(f'cpu_line_response: got exception type {type(e)}:\n{e}')
# Serve the dashboard on all interfaces (debug reloader enabled) when the
# module is executed directly.
if __name__ == '__main__':
    app.run_server(debug=True, port=8050, host='0.0.0.0')
|
"""
Note: The high-level Python interface is currently experimental and may change in a future release.
``CodesFile`` class that implements a file that is readable by ecCodes and
closes itself and its messages when it is no longer needed.
Author: Daniel Lee, DWD, 2016
"""
from .. import eccodes
import io
class CodesFile(io.FileIO):
    """
    An abstract class to specify and/or implement common behaviour that files
    read by ecCodes should implement.

    A {prod_type} file handle meant for use in a context manager.
    Individual messages can be accessed using the ``next`` method. Of course,
    it is also possible to iterate over each message in the file::

        >>> with {classname}(filename) as {alias}:
        ...     # Print number of messages in file
        ...     len({alias})
        ...     # Open all messages in file
        ...     for msg in {alias}:
        ...         print(msg[key_name])
        ...     len({alias}.open_messages)
        >>> # When the file is closed, any open messages are closed
        >>> len({alias}.open_messages)
    """
    #: Type of messages belonging to this file; set by concrete subclasses
    #: and instantiated per message in next().
    MessageClass = None
    def __init__(self, filename, mode="rb"):
        """Open file and receive codes file handle."""
        # NOTE(review): io.FileIO.__init__ is never invoked; the class keeps
        # its own separate handle instead — confirm this is intended.
        #: File handle for working with actual file on disc
        #: The class holds the file it works with because ecCodes'
        # typechecking does not allow using inherited classes.
        self.file_handle = open(filename, mode)
        #: Number of message in file currently being read
        self.message = 0
        #: Open messages
        self.open_messages = []
        self.name = filename
    def __exit__(self, exception_type, exception_value, traceback):
        """Close all open messages, release file handle and close file."""
        while self.open_messages:
            # Note: if the message was manually closed, this has no effect
            self.open_messages.pop().close()
        self.file_handle.close()
    def __len__(self):
        """Return total number of messages in file."""
        return eccodes.codes_count_in_file(self.file_handle)
    def __enter__(self):
        """Enter the context manager; the object itself is the resource."""
        return self
    def close(self):
        """Possibility to manually close file."""
        self.__exit__(None, None, None)
    def __iter__(self):
        """This object is its own iterator (see __next__)."""
        return self
    def next(self):
        """Return the next message, raising StopIteration at end of file."""
        try:
            return self.MessageClass(self)
        except IOError:
            # MessageClass signals end-of-file by raising IOError.
            raise StopIteration()
    def __next__(self):
        """Python 3 iterator protocol; delegates to next()."""
        return self.next()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Python -*-
__author__ = "Ivo Woltring"
__copyright__ = "Copyright (c) 2013 Ivo Woltring"
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__doc__ = """
Recipe 6.17. Implementing the Null Object Design Pattern
Credit: Dinu C. Gherman, Holger Krekel
from:
Python Cookbook, 2nd Edition
By David Ascher, Alex Martelli, Anna Ravenscroft
Publisher : O'Reilly
Pub Date : March 2005
ISBN : 0-596-00797-3
Pages : 844
Problem:
You want to reduce the need for conditional statements in your
code, particularly the need to keep checking for special cases.
Solution:
The usual placeholder object for "there's nothing here" is None,
but we may be able to do better than that by defining a class
meant exactly to act as such a placeholder
Discussion
You can use an instance of the Null class instead of the primitive value None.
By using such an instance as a placeholder, instead of None, you can avoid many
conditional statements in your code and can often express algorithms with little
or no checking for special values. This recipe is a sample implementation of the
Null Object Design Pattern. (See B. Woolf, "The Null Object Pattern" in Pattern
Languages of Programming [PLoP 96, September 1996].)
This recipe's Null class ignores all parameters passed when constructing or
calling instances, as well as any attempt to set or delete attributes. Any call
or attempt to access an attribute (or a method, since Python does not
distinguish between the two, calling __getattr__ either way) returns the same
Null instance (i.e., self — there is no reason to create a new instance). For example, if
you have a computation such as:
def compute(x, y):
try:
lots of computation here to return some appropriate object
except SomeError:
return None
and you use it like this:
for x in xs:
for y in ys:
obj = compute(x, y)
if obj is not None:
obj.somemethod(y, x)
you can usefully change the computation to:
def compute(x, y):
try:
lots of computation here to return some appropriate object
except SomeError:
return Null( )
and thus simplify its use down to:
for x in xs:
for y in ys:
compute(x, y).somemethod(y, x)
The point is that you don't need to check whether compute has returned a real
result or an instance of Null: even in the latter case, you can safely and
innocuously call on it whatever method you want. Here is another, more specific
use case:
log = err = Null( )
if verbose:
log = open('/tmp/log', 'w')
err = open('/tmp/err', 'w')
log.write('blabla')
err.write('blabla error')
This obviously avoids the usual kind of "pollution" of your code from guards
such as if verbose: strewn all over the place. You can now call
log.write('bla'), instead of having to express each such call as if log is not
None: log.write('bla').
In the new object model, Python does not call __getattr__ on an instance for
any special methods needed to perform an operation on the instance (rather, it
looks up such methods in the instance class' slots). You may have to take care
and customize Null to your bootstrap's needs regarding operations on null
objects, and therefore special methods of the null objects' class, either
directly in the class' sources or by subclassing it appropriately. For example,
with this recipe's Null, you cannot index Null instances, nor take their length,
nor iterate on them. If this is a problem for your purposes, you can add all the
special methods you need (in Null itself or in an appropriate subclass) and
implement them appropriately — for example:
class SeqNull(Null):
def __len__(self): return 0
def __iter__(self): return iter(( ))
def __getitem__(self, i): return self
def __delitem__(self, i): return self
def __setitem__(self, i, v): return self
Similar considerations apply to several other operations.
The key goal of Null objects is to provide an intelligent replacement for the
often-used primitive value None in Python. (Other languages represent the lack
of a value using either null or a null pointer.) These nobody-lives-here
markers/placeholders are used for many purposes, including the important case in
which one member of a group of otherwise similar elements is special. This usage
usually results in conditional statements all over the place to distinguish
between ordinary elements and the primitive null (e.g., None) value, but Null
objects help you avoid that.
Among the advantages of using Null objects are the following:
Superfluous conditional statements can be avoided by providing a first-class
object alternative for the primitive value None, thereby improving code
readability.
Null objects can act as placeholders for objects whose behavior is not yet
implemented.
Null objects can be used polymorphically with instances of just about any other
class (perhaps needing suitable subclassing for special methods, as previously
mentioned).
Null objects are very predictable.
The one serious disadvantage of Null is that it can hide bugs. If a function
returns None, and the caller did not expect that return value, the caller most
likely will soon thereafter try to call a method or perform an operation that
None doesn't support, leading to a reasonably prompt exception and traceback. If
the return value that the caller didn't expect is a Null, the problem might stay
hidden for a longer time, and the exception and traceback, when they eventually
happen, may therefore be harder to reconnect to the location of the defect in
the code. Is this problem serious enough to make using Null inadvisable? The
answer is a matter of opinion. If your code has halfway decent unit tests, this
problem will not arise; while, if your code lacks decent unit tests, then using
Null is the least of your problems. But, as I said, it boils down to a matter of
opinions. I use Null very widely, and I'm extremely happy with the effect it has
had on my productivity.
The Null class as presented in this recipe uses a simple variant of the
"Singleton" pattern (shown earlier in Recipe 6.15), strictly for optimization
purposes — namely, to avoid the creation of numerous passive objects that do
nothing but take up memory. Given all the previous remarks about customization
by subclassing, it is, of course, crucial that the specific implementation of
"Singleton" ensures a separate instance exists for each subclass of Null that
gets instantiated. The number of subclasses will no doubt never be so high as to
eat up substantial amounts of memory, and anyway this per-subclass distinction
can be semantically crucial.
"""
from Singleton import Singleton
# noinspection PyUnusedLocal
class _Null(Singleton):
    """ Null objects always and reliably "do nothing." """
    def __init__(self, *args, **kwargs):
        # Accept and ignore any constructor arguments.
        pass
    def __call__(self, *args, **kwargs):
        # Calling a Null yields the same Null, so call chains collapse.
        return self
    def __repr__(self):
        # Render as the empty string so Nulls vanish in output.
        return ""
    def __str__(self):
        return ""
    def __nonzero__(self):
        # Python 2 truth protocol: Null is always falsy.
        # NOTE(review): Python 3 uses __bool__ instead — confirm the
        # intended interpreter version (the module tail uses py2 prints).
        return False
    def __getattr__(self, name):
        # Any attribute or method lookup yields the Null itself.
        return self
    def __setattr__(self, name, value):
        # Silently ignore attribute assignment.
        return self
    def __delattr__(self, name):
        # Silently ignore attribute deletion.
        return self
    def __len__(self):
        # Behaves as an empty container.
        return 0
    def __iter__(self):
        # Iterating a Null yields nothing.
        return iter(( ))
    def __getitem__(self, i):
        # Indexing a Null yields the Null itself.
        return self
    def __delitem__(self, i):
        return self
    def __setitem__(self, i, v):
        return self
if not __name__ == "__main__":
    import sys
    # Replace this module object in sys.modules with the Null singleton, so
    # importing the module yields a callable null object directly instead of
    # a module namespace.
    sys.modules[__name__] = _Null()
else:
    # Smoke test when run as a script (Python 2 print syntax).
    Null = _Null()
    Test = Null
    print Test['a']
    for x in range(10):
        print Test[x]
    Test.helloworld(1)
    # No error even though the method does not exist
|
# coding: utf-8
# In[1]:
import tensorflow as tf
import os
from tensorflow import keras
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants, signature_constants, signature_def_utils_impl
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif,chi2
import numpy as np
import pandas as pd
import pickle
from keras.callbacks import EarlyStopping
from tensorflow.python.keras import models
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.layers import Dropout
import keras.preprocessing.text as kpt
from keras.preprocessing.text import Tokenizer
from json import dumps
# NOTE(review): TF1-style session; `sess` is never used in the rest of this
# script — confirm whether it can be removed.
sess = tf.Session()
print(tf.__version__)
# In[2]:
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Flatten, LSTM, Conv1D, MaxPooling1D, Dropout, Activation
from keras.layers.embeddings import Embedding
import nltk
nltk.download('stopwords')
import re
import string
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from sklearn.manifold import TSNE
# In[3]:
# Path to dataset
df = pd.read_csv('/home/exacon02/Desktop/XSent/Sentiment Analysis Dataset.csv', nrows=10000, sep=',',
                 usecols=['SentimentText', 'Sentiment'], error_bad_lines=False)
# Drop missing and empty rows so labels and texts line up one-to-one.
df = df.dropna()
df = df[df.Sentiment.apply(lambda x: x != "")]
df = df[df.SentimentText.apply(lambda x: x != "")]
labels = df['Sentiment']

# Tokenize to integer sequences over the 20k most frequent words and
# pad/truncate every sequence to length 50.
vocabulary_size = 20000
tokenizer = Tokenizer(num_words=vocabulary_size)
tokenizer.fit_on_texts(df['SentimentText'])
sequences = tokenizer.texts_to_sequences(df['SentimentText'])
data = pad_sequences(sequences, maxlen=50)

# BUG FIX: the pickle handle was opened early and never closed, so the
# tokenizer pickle could be left unflushed on disk; a context manager
# guarantees the file is closed after the dump.
pickle_path = 'vec.pkl'
with open(pickle_path, 'wb') as tok_pickle:
    pickle.dump(tokenizer, tok_pickle)

# Two stacked LSTMs over a learned 128-d embedding, sigmoid head for
# binary sentiment classification.
model = Sequential()
model.add(Embedding(20000, 128, input_length=50))
model.add(LSTM(128, dropout=0.5, return_sequences=True))
model.add(LSTM(128, dropout=0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
## Fit train data; early stopping halts after one epoch without val_loss
## improvement.
model.fit(data, np.array(labels),
          batch_size=512,
          epochs=20,
          verbose=1,
          validation_split=0.2,
          callbacks=[EarlyStopping(monitor='val_loss', patience=1)],
          shuffle=True)

# In[32]:
model.save("model.h5")
#
# # In[6]:
#
#
# def clean_text(text):
#
# ## Remove puncuation
# text = text.translate(string.punctuation)
#
# ## Convert words to lower case and split them
# text = text.lower().split()
#
# ## Remove stop words
# stops = set(stopwords.words("english"))
# text = [w for w in text if not w in stops and len(w) >= 3]
#
# text = " ".join(text)
# ## Clean the text
# text = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", text)
# text = re.sub(r"what's", "what is ", text)
# text = re.sub(r"\'s", " ", text)
# text = re.sub(r"\'ve", " have ", text)
# text = re.sub(r"n't", " not ", text)
# text = re.sub(r"i'm", "i am ", text)
# text = re.sub(r"\'re", " are ", text)
# text = re.sub(r"\'d", " would ", text)
# text = re.sub(r"\'ll", " will ", text)
# text = re.sub(r",", " ", text)
# text = re.sub(r"\.", " ", text)
# text = re.sub(r"!", " ! ", text)
# text = re.sub(r"\/", " ", text)
# text = re.sub(r"\^", " ^ ", text)
# text = re.sub(r"\+", " + ", text)
# text = re.sub(r"\-", " - ", text)
# text = re.sub(r"\=", " = ", text)
# text = re.sub(r"'", " ", text)
# text = re.sub(r"(\d+)(k)", r"\g<1>000", text)
# text = re.sub(r":", " : ", text)
# text = re.sub(r" e g ", " eg ", text)
# text = re.sub(r" b g ", " bg ", text)
# text = re.sub(r" u s ", " american ", text)
# text = re.sub(r"\0s", "0", text)
# text = re.sub(r" 9 11 ", "911", text)
# text = re.sub(r"e - mail", "email", text)
# text = re.sub(r"j k", "jk", text)
# text = re.sub(r"\s{2,}", " ", text)
# ## Stemming
# text = text.split()
# stemmer = nltk.stem.SnowballStemmer('english')
# stemmed_words = [stemmer.stem(word) for word in text]
# text = " ".join(stemmed_words)
# return text
#
#
# # In[7]:
#
#
# df['SentimentText'] = df['SentimentText'].map(lambda x: clean_text(x))
#
#
# # In[8]:
#
#
# labels=df['Sentiment']
#
#
# # In[17]:
#
#
# pickle_path = '/home/exacon02/git/KerasPOC-SentimentAnalysis/vec.pkl'
# tok_pickle = open(pickle_path, 'wb')
#
#
# # In[9]:
#
#
# # sel_pickle_path = '/home/exacon03/Jupyter/sel_pickle.pkl'
# # sel_pickle = open(sel_pickle_path, 'wb')
# vocabulary_size = 20000
# tokenizer = Tokenizer(num_words= vocabulary_size)
# tokenizer.fit_on_texts(df['SentimentText'])
# sequences = tokenizer.texts_to_sequences(df['SentimentText'])
# # print(sequences)
# data = pad_sequences(sequences, maxlen=50)
#
#
# # In[18]:
#
#
# pickle.dump(tokenizer, tok_pickle)
#
#
# # In[10]:
#
#
# embeddings_index = dict()
# f = open('/home/exacon02/glove.6B.100d.txt')
# for line in f:
# values = line.split()
# word = values[0]
# coefs = np.asarray(values[1:], dtype='float32')
# embeddings_index[word] = coefs
# f.close()
#
#
# # In[11]:
#
#
# embedding_matrix = np.zeros((vocabulary_size, 100))
# for word, index in tokenizer.word_index.items():
# if index > vocabulary_size - 1:
# break
# else:
# embedding_vector = embeddings_index.get(word)
# if embedding_vector is not None:
# embedding_matrix[index] = embedding_vector
#
#
# # In[ ]:
#
#
# model_glove = Sequential()
# model_glove.add(Embedding(vocabulary_size, 100, input_length=50, weights=[embedding_matrix], trainable=False))
# model_glove.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
# model_glove.add(Dense(1, activation='sigmoid'))
# model_glove.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# ## Fit train data
# model_glove.fit(data, np.array(labels),
# batch_size=512,
# epochs=20,
# verbose=1,
# validation_split=0.2,
# callbacks = [EarlyStopping(monitor='val_loss', patience=1)],
# shuffle=True)
#
#
# # In[16]:
#
#
# model_glove.save("/home/exacon02/git/KerasPOC-SentimentAnalysis/plug.h5")
|
# Content autogenerated. Do not edit.
syscalls_mips64n32 = {
"_newselect": 6022,
"_sysctl": 6152,
"accept": 6042,
"accept4": 6297,
"access": 6020,
"acct": 6158,
"add_key": 6243,
"adjtimex": 6154,
"alarm": 6037,
"bind": 6048,
"bpf": 6319,
"brk": 6012,
"cachectl": 6198,
"cacheflush": 6197,
"capget": 6123,
"capset": 6124,
"chdir": 6078,
"chmod": 6088,
"chown": 6090,
"chroot": 6156,
"clock_adjtime": 6305,
"clock_adjtime64": 6405,
"clock_getres": 6227,
"clock_getres_time64": 6406,
"clock_gettime": 6226,
"clock_gettime64": 6403,
"clock_nanosleep": 6228,
"clock_nanosleep_time64": 6407,
"clock_settime": 6225,
"clock_settime64": 6404,
"clone": 6055,
"clone3": 6435,
"close": 6003,
"close_range": 6436,
"connect": 6041,
"copy_file_range": 6324,
"creat": 6083,
"create_module": 6167,
"delete_module": 6169,
"dup": 6031,
"dup2": 6032,
"dup3": 6290,
"epoll_create": 6207,
"epoll_create1": 6289,
"epoll_ctl": 6208,
"epoll_pwait": 6276,
"epoll_pwait2": 6441,
"epoll_wait": 6209,
"eventfd": 6282,
"eventfd2": 6288,
"execve": 6057,
"execveat": 6320,
"exit": 6058,
"exit_group": 6205,
"faccessat": 6263,
"faccessat2": 6439,
"fadvise64": 6216,
"fallocate": 6283,
"fanotify_init": 6300,
"fanotify_mark": 6301,
"fchdir": 6079,
"fchmod": 6089,
"fchmodat": 6262,
"fchown": 6091,
"fchownat": 6254,
"fcntl": 6070,
"fcntl64": 6212,
"fdatasync": 6073,
"fgetxattr": 6185,
"finit_module": 6312,
"flistxattr": 6188,
"flock": 6071,
"fork": 6056,
"fremovexattr": 6191,
"fsconfig": 6431,
"fsetxattr": 6182,
"fsmount": 6432,
"fsopen": 6430,
"fspick": 6433,
"fstat": 6005,
"fstatfs": 6135,
"fstatfs64": 6218,
"fsync": 6072,
"ftruncate": 6075,
"futex": 6194,
"futex_time64": 6422,
"futex_waitv": 6449,
"futimesat": 6255,
"get_kernel_syms": 6170,
"get_mempolicy": 6232,
"get_robust_list": 6273,
"getcpu": 6275,
"getcwd": 6077,
"getdents": 6076,
"getdents64": 6299,
"getegid": 6106,
"geteuid": 6105,
"getgid": 6102,
"getgroups": 6113,
"getitimer": 6035,
"getpeername": 6051,
"getpgid": 6119,
"getpgrp": 6109,
"getpid": 6038,
"getpmsg": 6174,
"getppid": 6108,
"getpriority": 6137,
"getrandom": 6317,
"getresgid": 6118,
"getresuid": 6116,
"getrlimit": 6095,
"getrusage": 6096,
"getsid": 6122,
"getsockname": 6050,
"getsockopt": 6054,
"gettid": 6178,
"gettimeofday": 6094,
"getuid": 6100,
"getxattr": 6183,
"init_module": 6168,
"inotify_add_watch": 6248,
"inotify_init": 6247,
"inotify_init1": 6292,
"inotify_rm_watch": 6249,
"io_cancel": 6204,
"io_destroy": 6201,
"io_getevents": 6202,
"io_pgetevents": 6332,
"io_pgetevents_time64": 6416,
"io_setup": 6200,
"io_submit": 6203,
"io_uring_enter": 6426,
"io_uring_register": 6427,
"io_uring_setup": 6425,
"ioctl": 6015,
"ioprio_get": 6278,
"ioprio_set": 6277,
"kcmp": 6311,
"kexec_load": 6274,
"keyctl": 6245,
"kill": 6060,
"landlock_add_rule": 6445,
"landlock_create_ruleset": 6444,
"landlock_restrict_self": 6446,
"lchown": 6092,
"lgetxattr": 6184,
"link": 6084,
"linkat": 6259,
"listen": 6049,
"listxattr": 6186,
"llistxattr": 6187,
"lookup_dcookie": 6206,
"lremovexattr": 6190,
"lseek": 6008,
"lsetxattr": 6181,
"lstat": 6006,
"madvise": 6027,
"mbind": 6231,
"membarrier": 6322,
"memfd_create": 6318,
"migrate_pages": 6250,
"mincore": 6026,
"mkdir": 6081,
"mkdirat": 6252,
"mknod": 6131,
"mknodat": 6253,
"mlock": 6146,
"mlock2": 6323,
"mlockall": 6148,
"mmap": 6009,
"mount": 6160,
"mount_setattr": 6442,
"move_mount": 6429,
"move_pages": 6271,
"mprotect": 6010,
"mq_getsetattr": 6239,
"mq_notify": 6238,
"mq_open": 6234,
"mq_timedreceive": 6237,
"mq_timedreceive_time64": 6419,
"mq_timedsend": 6236,
"mq_timedsend_time64": 6418,
"mq_unlink": 6235,
"mremap": 6024,
"msgctl": 6069,
"msgget": 6066,
"msgrcv": 6068,
"msgsnd": 6067,
"msync": 6025,
"munlock": 6147,
"munlockall": 6149,
"munmap": 6011,
"name_to_handle_at": 6303,
"nanosleep": 6034,
"newfstatat": 6256,
"nfsservctl": 6173,
"open": 6002,
"open_by_handle_at": 6304,
"open_tree": 6428,
"openat": 6251,
"openat2": 6437,
"pause": 6033,
"perf_event_open": 6296,
"personality": 6132,
"pidfd_getfd": 6438,
"pidfd_open": 6434,
"pidfd_send_signal": 6424,
"pipe": 6021,
"pipe2": 6291,
"pivot_root": 6151,
"pkey_alloc": 6328,
"pkey_free": 6329,
"pkey_mprotect": 6327,
"poll": 6007,
"ppoll": 6265,
"ppoll_time64": 6414,
"prctl": 6153,
"pread64": 6016,
"preadv": 6293,
"preadv2": 6325,
"prlimit64": 6302,
"process_madvise": 6440,
"process_mrelease": 6448,
"process_vm_readv": 6309,
"process_vm_writev": 6310,
"pselect6": 6264,
"pselect6_time64": 6413,
"ptrace": 6099,
"pwrite64": 6017,
"pwritev": 6294,
"pwritev2": 6326,
"query_module": 6171,
"quotactl": 6172,
"quotactl_fd": 6443,
"read": 6000,
"readahead": 6179,
"readlink": 6087,
"readlinkat": 6261,
"readv": 6018,
"reboot": 6164,
"recvfrom": 6044,
"recvmmsg": 6298,
"recvmmsg_time64": 6417,
"recvmsg": 6046,
"remap_file_pages": 6210,
"removexattr": 6189,
"rename": 6080,
"renameat": 6258,
"renameat2": 6315,
"request_key": 6244,
"restart_syscall": 6214,
"rmdir": 6082,
"rseq": 6331,
"rt_sigaction": 6013,
"rt_sigpending": 6125,
"rt_sigprocmask": 6014,
"rt_sigqueueinfo": 6127,
"rt_sigreturn": 6211,
"rt_sigsuspend": 6128,
"rt_sigtimedwait": 6126,
"rt_sigtimedwait_time64": 6421,
"rt_tgsigqueueinfo": 6295,
"sched_get_priority_max": 6143,
"sched_get_priority_min": 6144,
"sched_getaffinity": 6196,
"sched_getattr": 6314,
"sched_getparam": 6140,
"sched_getscheduler": 6142,
"sched_rr_get_interval": 6145,
"sched_rr_get_interval_time64": 6423,
"sched_setaffinity": 6195,
"sched_setattr": 6313,
"sched_setparam": 6139,
"sched_setscheduler": 6141,
"sched_yield": 6023,
"seccomp": 6316,
"semctl": 6064,
"semget": 6062,
"semop": 6063,
"semtimedop": 6215,
"semtimedop_time64": 6420,
"sendfile": 6039,
"sendfile64": 6219,
"sendmmsg": 6307,
"sendmsg": 6045,
"sendto": 6043,
"set_mempolicy": 6233,
"set_robust_list": 6272,
"set_thread_area": 6246,
"set_tid_address": 6213,
"setdomainname": 6166,
"setfsgid": 6121,
"setfsuid": 6120,
"setgid": 6104,
"setgroups": 6114,
"sethostname": 6165,
"setitimer": 6036,
"setns": 6308,
"setpgid": 6107,
"setpriority": 6138,
"setregid": 6112,
"setresgid": 6117,
"setresuid": 6115,
"setreuid": 6111,
"setrlimit": 6155,
"setsid": 6110,
"setsockopt": 6053,
"settimeofday": 6159,
"setuid": 6103,
"setxattr": 6180,
"shmat": 6029,
"shmctl": 6030,
"shmdt": 6065,
"shmget": 6028,
"shutdown": 6047,
"sigaltstack": 6129,
"signalfd": 6280,
"signalfd4": 6287,
"socket": 6040,
"socketpair": 6052,
"splice": 6267,
"stat": 6004,
"statfs": 6134,
"statfs64": 6217,
"statx": 6330,
"swapoff": 6163,
"swapon": 6162,
"symlink": 6086,
"symlinkat": 6260,
"sync": 6157,
"sync_file_range": 6268,
"syncfs": 6306,
"sysfs": 6136,
"sysinfo": 6097,
"syslog": 6101,
"sysmips": 6199,
"tee": 6269,
"tgkill": 6229,
"timer_create": 6220,
"timer_delete": 6224,
"timer_getoverrun": 6223,
"timer_gettime": 6222,
"timer_gettime64": 6408,
"timer_settime": 6221,
"timer_settime64": 6409,
"timerfd": 6281,
"timerfd_create": 6284,
"timerfd_gettime": 6285,
"timerfd_gettime64": 6410,
"timerfd_settime": 6286,
"timerfd_settime64": 6411,
"times": 6098,
"tkill": 6192,
"truncate": 6074,
"umask": 6093,
"umount2": 6161,
"uname": 6061,
"unlink": 6085,
"unlinkat": 6257,
"unshare": 6266,
"userfaultfd": 6321,
"ustat": 6133,
"utime": 6130,
"utimensat": 6279,
"utimensat_time64": 6412,
"utimes": 6230,
"vhangup": 6150,
"vmsplice": 6270,
"wait4": 6059,
"waitid": 6241,
"write": 6001,
"writev": 6019,
}
|
#!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""
A test harness for the logging module. Tests HTTPHandler.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import sys, string, logging, logging.handlers
def main():
    """Send one INFO and one DEBUG record to a local HTTP logging server
    through both a GET and a POST HTTPHandler, then detach the handlers.
    """
    # BUG FIX: removed an unused `import pdb` left over from debugging.
    host = "localhost:%d" % logging.handlers.DEFAULT_HTTP_LOGGING_PORT
    gh = logging.handlers.HTTPHandler(host, '/log', 'GET')
    ph = logging.handlers.HTTPHandler(host, '/log', 'POST')
    logger = logging.getLogger("log_test12")
    # Keep records out of the root logger's handlers (False, not 0).
    logger.propagate = False
    logger.addHandler(gh)
    logger.addHandler(ph)
    # The logger has no level of its own, so the root's DEBUG level is the
    # effective level and both records below are emitted.
    logging.getLogger("").setLevel(logging.DEBUG)
    logger.info("Jackdaws love my big %s of %s", "sphinx", "quartz")
    logger.debug("Pack my %s with twelve dozen %s", "box", "liquor jugs")
    gh.close()
    ph.close()
    logger.removeHandler(gh)
    logger.removeHandler(ph)
# Run the HTTPHandler smoke test when executed as a script.
if __name__ == "__main__":
    main()
|
"""Tests for evaluator.py."""
import os
import unittest
from bfv.bfv_decryptor import BFVDecryptor
from bfv.bfv_encryptor import BFVEncryptor
from bfv.bfv_evaluator import BFVEvaluator
from bfv.bfv_key_generator import BFVKeyGenerator
from bfv.bfv_parameters import BFVParameters
from util.plaintext import Plaintext
from util.polynomial import Polynomial
from util.random_sample import sample_uniform
TEST_DIRECTORY = os.path.dirname(__file__)
class TestEvaluator(unittest.TestCase):
    """Round-trip tests for BFVEvaluator's homomorphic add and multiply."""
    def setUp(self):
        """Build keys, encryptor, decryptor and evaluator for each test."""
        self.degree = 512
        self.plain_modulus = 256
        self.ciph_modulus = 0x3fffffff000001
        params = BFVParameters(poly_degree=self.degree,
                               plain_modulus=self.plain_modulus,
                               ciph_modulus=self.ciph_modulus)
        key_generator = BFVKeyGenerator(params)
        public_key = key_generator.public_key
        secret_key = key_generator.secret_key
        # Relinearization key is needed by multiply to reduce ciphertext size.
        self.relin_key = key_generator.relin_key
        self.encryptor = BFVEncryptor(params, public_key)
        self.decryptor = BFVDecryptor(params, secret_key)
        self.evaluator = BFVEvaluator(params)
    def run_test_add(self, message1, message2):
        """Encrypt both messages, add homomorphically, and check that the
        decryption equals the plaintext sum mod plain_modulus."""
        poly1 = Polynomial(self.degree, message1)
        poly2 = Polynomial(self.degree, message2)
        plain1 = Plaintext(poly1)
        plain2 = Plaintext(poly2)
        plain_sum = Plaintext(poly1.add(poly2, self.plain_modulus))
        ciph1 = self.encryptor.encrypt(plain1)
        ciph2 = self.encryptor.encrypt(plain2)
        ciph_sum = self.evaluator.add(ciph1, ciph2)
        decrypted_sum = self.decryptor.decrypt(ciph_sum)
        # Compare string forms to avoid relying on Plaintext equality.
        self.assertEqual(str(plain_sum), str(decrypted_sum))
    def test_add_01(self):
        # Random coefficient vectors in [0, plain_modulus).
        vec1 = sample_uniform(0, self.plain_modulus, self.degree)
        vec2 = sample_uniform(0, self.plain_modulus, self.degree)
        self.run_test_add(vec1, vec2)
    def run_test_multiply(self, message1, message2):
        """Encrypt both messages, multiply homomorphically (with
        relinearization), and check against the plaintext product."""
        poly1 = Polynomial(self.degree, message1)
        poly2 = Polynomial(self.degree, message2)
        plain1 = Plaintext(poly1)
        plain2 = Plaintext(poly2)
        plain_prod = Plaintext(poly1.multiply(poly2, self.plain_modulus))
        ciph1 = self.encryptor.encrypt(plain1)
        ciph2 = self.encryptor.encrypt(plain2)
        ciph_prod = self.evaluator.multiply(ciph1, ciph2, self.relin_key)
        decrypted_prod = self.decryptor.decrypt(ciph_prod)
        self.assertEqual(str(plain_prod), str(decrypted_prod))
    def test_multiply_01(self):
        # Random coefficient vectors in [0, plain_modulus).
        vec1 = sample_uniform(0, self.plain_modulus, self.degree)
        vec2 = sample_uniform(0, self.plain_modulus, self.degree)
        self.run_test_multiply(vec1, vec2)
if __name__ == '__main__':
    # exit=False keeps the interpreter alive after the run and exposes the
    # TestProgram result object for inspection.
    res = unittest.main(verbosity=3, exit=False)
|
from django.db import models
class Student(models.Model):
    """Model for Awana Students"""
    MALE = 'M'
    FEMALE = 'F'
    GENDER_CHOICES = (
        (MALE, 'Male'),
        (FEMALE, 'Female'),
    )
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=40)
    # Optional; stored as the single-character codes defined above.
    gender = models.CharField(
        max_length=1,
        choices=GENDER_CHOICES,
        blank=True
    )
    birthday = models.DateField()
    # Bookkeeping timestamps maintained automatically by Django.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        # "First Last" display name.
        return "{first} {last}".format(first=self.first_name,
                                       last=self.last_name)
class Verse(models.Model):
    """Model for Awana memory verses"""
    # Awana handbook the verse belongs to (code -> display name).
    AWANA_BOOK_CHOICES = (
        ('C-HON', 'Cubbies Honeycomb'),
        ('C-APP', 'Cubbies Appleseed')
    )
    text = models.TextField()
    # The Bible reference is optional (all three parts may be left blank).
    bible_book = models.CharField(max_length=30, blank=True)
    bible_chapter = models.IntegerField(null=True, blank=True)
    bible_verse = models.IntegerField(null=True, blank=True)
    awana_book = models.CharField(max_length=5, choices=AWANA_BOOK_CHOICES)
    awana_verse = models.IntegerField()

    def __str__(self):
        return self.text
class Recitation(models.Model):
    """Model for a student's attempt to recite an Awana memory verse"""
    RESULT_CHOICES = (
        ('F', 'Failed'),
        ('H', 'Success with help'),
        ('S', 'Success'),
    )
    result = models.CharField(
        max_length=1,
        choices=RESULT_CHOICES,
        default='F',
    )
    # NOTE(review): ForeignKey without on_delete implies a pre-2.0 Django
    # (CASCADE is the implicit behaviour there).
    student = models.ForeignKey(Student)
    verse = models.ForeignKey(Verse)
    recited_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.result

    def was_successful(self):
        # Both unaided ('S') and helped ('H') recitations count as success.
        return self.result in ('H', 'S')
|
# -*- coding: utf-8 -*-
"""
sphinxnotes.strike
~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2021 Shengyu Zhang.
:license: BSD, See LICENSE
"""
from __future__ import annotations
from os import path
from typing import TYPE_CHECKING, List, Dict, Tuple
from docutils import nodes, utils
if TYPE_CHECKING:
from docutils.nodes import Node, system_message
from docutils.parsers.rst.states import Inliner
if TYPE_CHECKING:
from sphinx.application import Sphinx
from sphinx.config import Config
__title__ = 'sphinxnotes-strike'
__package__ = 'strike'
__author__ = 'Shengyu Zhang'
__description__ = 'Sphinx extension for HTML strikethrough text support'
__license__ = 'BSD'
__version__ = '1.0'
__url__ = 'https://sphinx-notes.github.io/strike'
__keywords__ = 'documentation, sphinx, extension'
class strike_node(nodes.Inline, nodes.TextElement): pass
def strike_role(typ:str, rawtext:str, text:str, lineno:int,
                inliner:Inliner, options:Dict={}, content:List[str]=[]
                ) -> Tuple[List[Node],List[system_message]]:
    """Role handler for :strike:/:del:: wraps *text* in a strike_node.

    The mutable default arguments follow the standard docutils role
    signature and are never mutated here.
    """
    node = strike_node(text=utils.unescape(text))
    # Record where the role was used; stored as node attributes.
    node['docname'] = inliner.document.settings.env.docname
    node['rawtext'] = rawtext
    # Second element is the (empty) list of system messages.
    return [node], []
def html_visit_strike_node(self, node:strike_node) -> None:
    # Open a styled <span>; the class is defined in sphinxnotes-strike.css.
    self.body.append(self.starttag(node, 'span', '', CLASS='sphinxnotes-strike'))
def html_depart_strike_node(self, node:strike_node) -> None:
    # Close the <span> opened by html_visit_strike_node.
    self.body.append('</span>')
def on_config_inited(app:Sphinx, cfg:Config) -> None:
    # Expose the bundled _static directory and register the stylesheet once
    # the Sphinx configuration is fully initialized.
    static_path = path.abspath(path.join(path.dirname(__file__), '_static'))
    cfg.html_static_path.append(static_path)
    app.add_css_file('sphinxnotes-strike.css')
def setup(app:Sphinx):
    """Sphinx extension entry point: register node, roles and CSS hook."""
    app.add_node(strike_node,
                 html=(html_visit_strike_node, html_depart_strike_node))
    # Both role names produce the same strikethrough node.
    for role_name in ('strike', 'del'):
        app.add_role(role_name, strike_role)
    # Add static path and include css file
    app.connect("config-inited", on_config_inited)
    metadata = {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
    return metadata
|
# -*- coding: utf-8 -*-
from requests import Session
import json
from .storage import nodes
from time import sleep
from pprint import pprint
from itertools import cycle
class Http():
    """Base class providing a single shared requests Session."""
    # Class attribute: all subclass instances share one Session and thus one
    # HTTP connection pool.
    http = Session()
class RpcClient(Http):
    """ Simple Steem JSON-RPC API
    This class serves as an abstraction layer for easy use of the Steem API.
    rpc = RpcClient(nodes = nodes) or rpc = RpcClient()
    Args:
        nodes (list): A list of Steem HTTP RPC nodes to connect to.
        any call available to that port can be issued using the instance
        rpc.call('command', *parameters)
    """
    headers = {'User-Agent': 'thallid', 'content-type': 'application/json'}

    def __init__(self, report = False, **kwargs):
        self.report = report
        # Number of connection attempts before rotating to the next node.
        self.num_retries = kwargs.get("num_retries", 5)
        # Endless round-robin iterator over the configured nodes.
        self.nodes = cycle(kwargs.get("nodes", nodes))
        self.url = next(self.nodes)

    def get_response(self, payload):
        """POST the JSON-RPC payload to the current node.

        Tries up to ``num_retries`` times with a linear back-off (0, 2, 4, ...
        seconds).  On total failure, rotates ``self.url`` to the next node
        (used by subsequent calls) and returns False.
        """
        data = json.dumps(payload, ensure_ascii = False).encode('utf8')
        if self.report:
            print("Trying to connect to node %s" % self.url)
        # Fixed off-by-one: the old "while n < num_retries" loop only made
        # num_retries - 1 attempts.
        for n in range(1, self.num_retries + 1):
            try:
                return self.http.post(self.url, data = data, headers = self.headers)
            # Narrowed from a bare "except:" which also swallowed
            # KeyboardInterrupt/SystemExit.
            except Exception:
                sleeptime = (n - 1) * 2
                if self.report:
                    print("Lost connection to node during rpcconnect(): %s (%d/%d) " % (self.url, n, self.num_retries))
                    print("Retrying in %d seconds" % sleeptime)
                sleep(sleeptime)
        self.url = next(self.nodes)  # switch to the next node for future calls
        return False

    def call(self, name, *params):
        """Invoke a condenser_api method; returns the result or False."""
        payload = {"method": 'condenser_api.' + name, "params": params, "id": 1, "jsonrpc": '2.0'}
        response = self.get_response(payload)
        # get_response returns False when every retry failed; previously this
        # fell through and crashed on False.status_code.
        if response is False:
            return False
        if response.status_code != 200:
            if self.report:
                print('ERROR status_code', response.text)
            return False
        res = response.json()
        if 'error' in res:
            if self.report:
                pprint(res["error"]["message"])
            return False
        return(res["result"])
#----- main -----
# Library module: nothing to run when executed directly.
if __name__ == '__main__':
    pass
# ------------------------------------------------------------------------------
# Copyright 2020 Forschungszentrum Jülich GmbH
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor
# license agreements; and to You under the Apache License, Version 2.0. "
#
# Forschungszentrum Jülich
# Institute: Institute for Advanced Simulation (IAS)
# Section: Jülich Supercomputing Centre (JSC)
# Division: High Performance Computing in Neuroscience
# Laboratory: Simulation Laboratory Neuroscience
# Team: Multi-scale Simulation and Design
#
# ------------------------------------------------------------------------------
import os
import sys
import placeholders.Simulation_mock as mock
from python.Application_Companion.common_enums import INTEGRATED_SIMULATOR_APPLICATION as SIMULATOR
from common_enums import DIRECTION
class MockWrapper:
    '''Wrapper/Adapter for (NEST/TVB) simulation mocks.'''
    def __init__(self, args, simulator):
        '''
        initializes with mock simulator and set up their communication
        directions.

        args: data exchange direction (1: NEST to TVB, 2: TVB to NEST).
        simulator: mock simulator instance (mock.NestMock or mock.TvbMock).
        '''
        # get data exchange direction
        # 1 --> nest to Tvb
        # 2 --> tvb to nest
        self.__direction = int(args)  # NOTE: will be changed
        # get mock simulator
        self.__simulator = simulator  # e.g. NEST or TVB
        # initialize the simulator
        self.__execute_init_command()

    def __execute_init_command(self):
        '''
        Executes INIT steering command. Determines local minimum stepsize
        and sends it to the Application Manager.
        NOTE INIT is a system action and thus is done implicitly.
        '''
        # NOTE: Meanwhile...the InterscaleHub starts execution
        # Simulation connect
        self.__simulator.get_connection_details()
        self.__simulator.connect_to_hub()
        # send local minimum step size to Application Manager as a response to
        # INIT
        # NOTE Application Manager expects a string in the following format:
        # {'PID': <int>, 'LOCAL_MINIMUM_STEP_SIZE': <float>}
        pid_and_local_minimum_step_size = \
            {SIMULATOR.PID.name: os.getpid(),
             SIMULATOR.LOCAL_MINIMUM_STEP_SIZE.name: self.__simulator.get_min_delay()}
        # Application Manager will read the stdout stream via PIPE
        # NOTE the communication with Application Manager via PIPES will be
        # changed to some other mechanism
        print(f'{pid_and_local_minimum_step_size}')

    def execute_start_command(self):
        '''
        Executes START steering command.
        Depending on the direction, it simulates or receives.
        '''
        # create a dictionary of choices for the communication direction and
        # their corresponding executions based on the simulator.
        execution_choices = None
        # Case: NEST simulator
        if isinstance(self.__simulator, mock.NestMock):
            execution_choices = {
                DIRECTION.NEST_TO_TVB: self.__simulator.simulate,
                DIRECTION.TVB_TO_NEST: self.__simulator.receive}
        # Case: TVB simulator
        if isinstance(self.__simulator, mock.TvbMock):
            execution_choices = {
                DIRECTION.NEST_TO_TVB: self.__simulator.receive,
                DIRECTION.TVB_TO_NEST: self.__simulator.simulate}
        # start execution
        try:
            execution_choices[self.__direction]()  # TODO check return codes
        except TypeError:
            # execution_choices is NoneType and could not be initialized
            # because the simulator is not an instance of TVB or NEST
            # TODO log the exception with traceback and terminate with error
            print(f'{self.__simulator} is not an instance of the simulator',
                  file=sys.stderr)
            # Re-raise the ORIGINAL exception (a fresh "raise TypeError"
            # previously discarded the message and traceback).
            raise
        # NOTE InterscaleHub terminates execution with the simulation ends.
        # Thus, implicitly executes the END command.
        # 3) Stop signal --> disconnect from hub
        self.__simulator.disconnect_from_hub()
|
import numpy as np
import matplotlib.pyplot as plt
import astropy.constants as con
import utils as utl
import covariance as cov
import os
# Defining kappa
# Solar luminosity converted from W (astropy) to erg/s via the 1e7 factor.
sol_lum = (con.L_sun*1e7).value
# Presumably the UV-luminosity -> SFR conversion factor per erg/s -- TODO confirm.
kap_uv = 2.2e-10/sol_lum
# Range of Luminosities (or absolute magnitudes) used
mags_all = np.linspace(-24, -13, 10)
lums_all = utl.m_to_l_wave(mags_all, 1500)
# Location of new data
p1 = os.getcwd() + '/data/New_UV/'
# To save the results
p22 = os.getcwd() + '/Results/Diff_lim/'
f22 = open(p22 + 'sfrd_uv_new000001_new.dat', 'w')
# NOTE(review): this header names 4 columns, but the loop below writes 5
# (paper, z_up, z_down, SFRD, SFRD error) -- consider adding the error column.
f22.write('#Name_of_the_paper\tZ_up\tZ_down\tSFRD\n')
# List of data files
list_uv = os.listdir(p1)
plt.figure(figsize=(16,9))
# One pass per luminosity-function data file: read the Schechter parameters
# for each redshift bin, integrate to an SFRD (with errors), write the values
# out and plot the bin against redshift.
for i in range(len(list_uv)):
    z1_uv, z2_uv, mst_uv, msterr_uv, phi_uv, phierr_uv, alp_uv, alperr_uv = np.loadtxt(p1 + list_uv[i], usecols=(0,1,2,3,4,5,6,7), unpack=True)
    ppr_n = np.loadtxt(p1 + list_uv[i], usecols=8, dtype=str, unpack=True)
    #
    # This is because some of the data file has only one rows
    # and numpy read them as numpy.float64 object, not as numpy.ndarray
    #
    if type(mst_uv) == np.float64:
        lngth = 1
        z1_uv, z2_uv, mst_uv, msterr_uv, phi_uv, phierr_uv, alp_uv, alperr_uv, ppr_n\
        = np.array([z1_uv]), np.array([z2_uv]), np.array([mst_uv]), np.array([msterr_uv]),\
        np.array([phi_uv]), np.array([phierr_uv]), np.array([alp_uv]), np.array([alperr_uv]), np.array([ppr_n])
    else:
        lngth = len(mst_uv)
    #
    print('-------------------------------------------------------------')
    print('Working on: ' + ppr_n[0])
    print('-------------------------------------------------------------')
    #
    # Calculating SFRD
    #
    sfrd_uv = np.zeros(len(z1_uv))
    sfrd_uv_err = np.zeros(len(z1_uv))
    for j in range(len(z1_uv)):
        # Computing parameters array
        logphi, logphi_err = utl.log_err(phi_uv[j], phierr_uv[j])
        mean_all = np.array([mst_uv[j], logphi, alp_uv[j]])
        err_all = np.array([msterr_uv[j], logphi_err, alperr_uv[j]])
        # Bin centre used as the representative redshift.
        zcen = (z1_uv[j] + z2_uv[j])/2
        #lst11 = utl.m_to_l_wave(mean_all[0], 1500)
        # Presumably the luminosity limit equivalent to SFR = 1e-5 -- TODO confirm.
        lt1 = 0.00001/kap_uv
        sfr2, sfr2e = cov.sfrd_w_err(lum=lums_all, z=zcen, mean2=mean_all, err2=err_all, kappa=kap_uv, limit=lt1)
        sfrd_uv[j], sfrd_uv_err[j] = sfr2, sfr2e
        f22.write(ppr_n[0] + '\t' + str(z1_uv[j]) + '\t' + str(z2_uv[j]) + '\t' + str(sfr2) + '\t' + str(sfr2e) + '\n')
    #
    # log sfrd and error in it
    log_sfr_uv, log_sfr_uv_err = utl.log_err(sfrd_uv, sfrd_uv_err)
    #
    # Plotting the results
    zcen1 = (z1_uv + z2_uv)/2
    zup, zdown = np.abs(z1_uv - zcen1), np.abs(zcen1-z2_uv)
    plt.errorbar(x=zcen1, xerr=[zup, zdown], y=log_sfr_uv, yerr= log_sfr_uv_err, label=ppr_n[0], fmt='.')
# Close the output table and finish the figure.
f22.close()
plt.xlabel('Redshift')
# Fixed: the closing parenthesis of the unit annotation was missing.
plt.ylabel(r'SFRD (in $M_\odot year^{-1} Mpc^{-3}$)')
plt.grid()
plt.legend(loc='best')
plt.show()
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, Div, HTML
from dimagi.utils.decorators.memoized import memoized
from .models import (CustomDataFieldsDefinition, is_system_key,
CUSTOM_DATA_FIELD_PREFIX)
def add_prefix(field_dict):
    """
    Prefix all keys in the dict with the defined
    custom data prefix (such as data-field-whatevs).
    """
    # .items() (not the py2-only .iteritems()) keeps this consistent with
    # get_prefixed() below and works on both Python 2 and 3.
    return {
        "{}-{}".format(CUSTOM_DATA_FIELD_PREFIX, k): v
        for k, v in field_dict.items()
    }
def get_prefixed(field_dict):
    """
    The inverse of add_prefix.
    Returns all prefixed elements of a dict with the prefices stripped.
    """
    strip = len(CUSTOM_DATA_FIELD_PREFIX) + 1  # prefix plus the joining "-"
    stripped = {}
    for key, value in field_dict.items():
        if key.startswith(CUSTOM_DATA_FIELD_PREFIX):
            stripped[key[strip:]] = value
    return stripped
def _make_field(field):
    """Build the django form field mirroring a custom-data field definition."""
    if not field.choices:
        return forms.CharField(label=field.label, required=field.is_required)
    # Prepend an empty "Select one" option so nothing is pre-selected.
    options = [('', _('Select one'))]
    options.extend((choice, choice) for choice in field.choices)
    return forms.ChoiceField(
        label=field.label,
        required=field.is_required,
        choices=options,
    )
class CustomDataEditor(object):
    """
    Tool to edit the data for a particular entity, like for an individual user.
    """
    def __init__(self, field_view, domain, existing_custom_data=None,
                 post_dict=None, required_only=False):
        self.field_view = field_view
        self.domain = domain
        self.existing_custom_data = existing_custom_data
        self.required_only = required_only
        # Builds the form immediately; also stored on self by init_form().
        self.form = self.init_form(post_dict)

    @property
    @memoized
    def model(self):
        # Field definitions for this domain/field type; memoized per instance.
        definition = CustomDataFieldsDefinition.get_or_create(
            self.domain,
            self.field_view.field_type,
        )
        return definition or CustomDataFieldsDefinition()

    def is_valid(self):
        return self.form.is_valid()

    @property
    def errors(self):
        # is_valid() populates form.errors as a side effect.
        self.form.is_valid()
        return self.form.errors

    def get_data_to_save(self):
        """Return cleaned form data merged with preserved system keys.

        Side effect: rebuilds self.form without the uncategorized data.
        """
        cleaned_data = self.form.cleaned_data
        # System keys are carried over untouched from the existing data.
        system_data = {
            k: v for k, v in self.existing_custom_data.items()
            if is_system_key(k)
        } if self.existing_custom_data else {}
        # reset form to clear uncategorized data
        self.existing_custom_data = None
        self.form = self.init_form(add_prefix(cleaned_data))
        self.form.is_valid()
        return dict(cleaned_data, **system_data)

    def init_form(self, post_dict=None):
        """Build (and store) a crispy-forms form for the defined fields.

        Data precedence: post_dict, then existing_custom_data, then unbound.
        """
        fields = {
            field.slug: _make_field(field)
            for field in self.model.get_fields(required_only=self.required_only)
        }
        field_names = fields.keys()
        # Dynamically build a Form subclass with one entry per defined field.
        CustomDataForm = type('CustomDataForm', (forms.Form,), fields)
        CustomDataForm.helper = FormHelper()
        CustomDataForm.helper.form_tag = False
        CustomDataForm.helper.layout = Layout(
            Fieldset(
                _("Additional Information"),
                *field_names
            ) if field_names else '',
            self.get_uncategorized_form(),
        )
        CustomDataForm._has_uncategorized = bool(
            self.get_uncategorized_form()
        )
        if post_dict:
            fields = post_dict
        elif self.existing_custom_data is not None:
            fields = add_prefix(self.existing_custom_data)
        else:
            fields = None
        self.form = CustomDataForm(fields, prefix=CUSTOM_DATA_FIELD_PREFIX)
        return self.form

    def get_uncategorized_form(self):
        """Read-only fieldset showing existing data not matching any defined
        field; returns empty HTML when there is nothing to show."""
        def FakeInput(val):
            return HTML(u'<span class="input-xlarge uneditable-input">{}</span>'
                        .format(val))

        def Label(val):
            return HTML('<label class="control-label">{}</label>'.format(val))

        def _make_field_div(slug, val):
            return Div(
                Label(slug),
                Div(
                    FakeInput(val),
                    css_class="controls",
                ),
                css_class="control-group",
            )
        # Slugs with a field definition; everything else (non-system) is
        # considered uncategorized.
        fields = [f.slug for f in self.model.get_fields()]
        help_div = [
            _make_field_div(slug, val)
            for slug, val in self.existing_custom_data.items()
            if (slug not in fields and not is_system_key(slug))
        ] if self.existing_custom_data is not None else []
        msg = """
            <strong>Warning!</strong>
            This data is not part of the specified user fields and will be
            deleted if you save.
            You can add them <a href="{}">here</a> to prevent this.
        """.format(reverse(
            self.field_view.urlname, args=[self.domain]
        ))
        return Fieldset(
            _("Unrecognized Information"),
            Div(
                HTML(msg),
                css_class="alert alert-error",
            ),
            *help_div
        ) if len(help_div) else HTML('')
|
import json
from asgiref.sync import async_to_sync
from channels.generic.websocket import WebsocketConsumer
from chat import views
from chat.serializers import MessageSerializerCreate
class ChatConsumer(WebsocketConsumer):
    """Synchronous websocket consumer relaying private chat messages
    through a per-room channel-layer group."""
    def connect(self):
        self.user = self.scope["user"]
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        self.room_group_name = 'chat_%s' % self.room_name
        # Join room group
        async_to_sync(self.channel_layer.group_add)(
            self.room_group_name,
            self.channel_name
        )
        self.accept()

    def disconnect(self, close_code):
        # Leave room group
        async_to_sync(self.channel_layer.group_discard)(
            self.room_group_name,
            self.channel_name
        )

    # Receive message from WebSocket
    def receive(self, text_data=None, bytes_data=None):
        text_data_json = json.loads(text_data)
        message = text_data_json['message']
        sender = int(text_data_json['fromUserId'])
        receiver = int(text_data_json['toUserId'])
        #receiver = views.PrivChannelView.get_other_id(self.room_name, sender)
        data = {'from_user': sender,
                'to_user': receiver,
                'message': message}
        # Persist the message; NOTE(review): when the serializer is invalid
        # nothing is saved, yet the message is still broadcast below --
        # confirm this is intended.
        new_message = MessageSerializerCreate(data=data)
        if new_message.is_valid():
            new_message.save()
        # Send message to room group
        async_to_sync(self.channel_layer.group_send)(
            self.room_group_name,
            {
                'type': 'chat_message',
                'sender' : sender,
                'receiver' : receiver,
                'message': message
            }
        )

    # Receive message from room group
    def chat_message(self, event):
        message = event['message']
        sender = int(event['sender'])
        receiver = int(event['receiver'])
        # Send message to WebSocket
        self.send(text_data=json.dumps({
            'senderId': sender,
            'receiverId': receiver,
            'message': message
        }))
|
# Initializers
#
# SECURITY NOTE(review): this module hard-codes OAuth client secrets, S3
# credentials and database passwords in source control. They should be
# rotated and loaded from environment variables or a secrets manager.
import os
# Heroku vs. local deployment switch; selects the credential/URL set below.
ON_HEROKU = 'ON_HEROKU' in os.environ
# Facebook App pymultiposter
# FACEBOOK_CLIENT_ID = "101206834030831"
# FACEBOOK_CLIENT_SECRET = "9be8d03bb48f86245d2bad7269831f51"
IMGUR_CLIENT_ID = "3f4360ea159fad4"
IMGUR_CLIENT_SECRET = "be5c53e8daf8645f2f33c3b9d8c8ac9488396fb6"
S3_BUCKET = "pymultiposter-2"
S3_KEY = "AKIAJEYHDBZNM4XQTOHA"
S3_SECRET = "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"
if ON_HEROKU:
    # # Facebook App pymultiposter-2
    FACEBOOK_CLIENT_ID = "2061306277447865"
    FACEBOOK_CLIENT_SECRET = "965931cb788a2268bd5c2545335042a0"
    # LinkedIn
    LINKEDIN_RETURN_URL = 'https://pymultiposter-2.herokuapp.com/linkedin_redirect'
    LINKEDIN_CLIENT_ID = '81spnwn20ee6ve'
    LINKEDIN_CLIENT_SECRET = '0Yg845bXH8Z3K4Sf'
    # LINKEDIN_CLIENT_ID = '81yloqxu0xgoeo'
    # LINKEDIN_CLIENT_SECRET = 'wyElIY59DrZwTRIA'
    # Twitter
    TWITTER_CLIENT_ID = "ecf8Ygwl3Sr9te5dvHoknoq7h"
    TWITTER_CLIENT_SECRET = 'xM7G3WocNnSYRCsIsJw7yeRDasuJ3QzxdRlS7iLZoVr92gKtAg'
    TWITTER_REDIRECT_URL = 'https://pymultiposter-2.herokuapp.com/twitter_redirect'
    # Tumblr
    TUMBLR_CLIENT_ID = "h8QTvJw4B8xMDo9GAFXC8Ll7xbX99MUhDiIA7AFBIfH2cuNzy3"
    TUMBLR_CLIENT_SECRET = "g8Kgg8fIm8W8YadqqJy5mKR0dzUGYQXYwg1GvNHLofpgmohQoe"
    TUMBLR_REDIRECT_URL = 'https://pymultiposter-2.herokuapp.com/tumblr_redirect'
    UPLOAD_PATH = "/tmp/"
else:
    # Facebook App pymultiposter-local
    FACEBOOK_CLIENT_ID = "1817601901640361"
    FACEBOOK_CLIENT_SECRET = "ee3029327b955fac864c7d3eb1c139ae"
    # LinkedIn
    LINKEDIN_RETURN_URL = 'http://localhost:5000/linkedin_redirect'
    # LINKEDIN_CLIENT_ID = '81spnwn20ee6ve'
    # LINKEDIN_CLIENT_SECRET = '0Yg845bXH8Z3K4Sf'
    LINKEDIN_CLIENT_ID = '81yloqxu0xgoeo'
    LINKEDIN_CLIENT_SECRET = 'wyElIY59DrZwTRIA'
    # Twitter
    TWITTER_CLIENT_ID = "ecf8Ygwl3Sr9te5dvHoknoq7h"
    TWITTER_CLIENT_SECRET = 'xM7G3WocNnSYRCsIsJw7yeRDasuJ3QzxdRlS7iLZoVr92gKtAg'
    TWITTER_REDIRECT_URL = 'http://localhost:5000/twitter_redirect'
    # Tumblr
    TUMBLR_CLIENT_ID = "h8QTvJw4B8xMDo9GAFXC8Ll7xbX99MUhDiIA7AFBIfH2cuNzy3"
    TUMBLR_CLIENT_SECRET = "g8Kgg8fIm8W8YadqqJy5mKR0dzUGYQXYwg1GvNHLofpgmohQoe"
    TUMBLR_REDIRECT_URL = 'http://localhost:5000/tumblr_redirect'
    UPLOAD_PATH = "uploads/"
# Display names for the supported networks.
TWITTER_NAME = 'Twitter'
FACEBOOK_NAME = 'Facebook'
TUMBLR_NAME = 'Tumblr'
INSTAGRAM_NAME = 'Instagram'
LINKEDIN_NAME = 'Linkedin'
SUPPORTED_SOCIAL_NETWORKS = [FACEBOOK_NAME,
                             INSTAGRAM_NAME,
                             TUMBLR_NAME,
                             LINKEDIN_NAME,
                             TWITTER_NAME]
AWS_RDS_URL = "postgresql://administrator:cybertech83@pymultiposter-common-queue.c44vnyfhjrjn.us-east-1.rds.amazonaws.com:5432/postgres"
LOCALHOST_POSTGRES = "postgresql://postgres:cybertech83@localhost/postgres"
|
from __future__ import unicode_literals, division
from django.db import models
from django.utils import timezone
from django.utils.timezone import utc
from django.contrib import auth
from datetime import datetime, timedelta, time
import colorsys
import math
import pytz
from autoslug import AutoSlugField
class Context(models.Model):
    """Top-level grouping for goals; `color` is a hue (degrees) used to
    derive the themed header/footer colors below."""
    name = models.CharField(max_length=255)
    slug = AutoSlugField(populate_from='name')
    owner = models.ForeignKey(auth.models.User, related_name='contexts', null=False, default=1)
    color = models.IntegerField(null=True, blank=True, default=0)

    def __str__(self):
        return self.name

    def as_hex(self, rgb):
        """ Returns representation of color in hex. """
        channels = [int(math.ceil(component * 255)) for component in rgb[:3]]
        return "#{:02x}{:02x}{:02x}".format(*channels)

    def _shade(self, saturation, value):
        # Combine the stored hue with the given saturation/value into hex.
        return self.as_hex(colorsys.hsv_to_rgb(self.color / 360, saturation, value))

    def header_bg(self):
        return self._shade(.46, .45)

    def footer_bg(self):
        return self._shade(.36, .25)

    def header_text(self):
        return self._shade(.26, .98)

    def footer_text(self):
        return self._shade(.26, .98)

    def footer_head(self):
        return self._shade(.46, .65)
class Goal(models.Model):
    """A recurring target (e.g. 15 minutes/day) tracked via Entry records."""
    STATUS = (
        ('active', 'Active'),
        ('inactive', 'Inactive'),
    )
    name = models.CharField(max_length=255)
    slug = AutoSlugField(populate_from='name')
    status = models.CharField(max_length=255, choices=STATUS, default='active')
    priority = models.PositiveSmallIntegerField(null=False, default=30)
    owner = models.ForeignKey(auth.models.User, related_name='goals', null=False, default=1)
    context = models.ForeignKey('Context', null=True, blank=True, related_name='goals')
    folder = models.ForeignKey('Folder', null=True, blank=True, related_name='goals')
    # Examples:
    # target_amount=15, type=minutes, period=day
    # target_amount=1, type=times, period=day
    # target_amount=10, type=times, period=year
    # target_amount=50000, type=words
    target_amount = models.PositiveSmallIntegerField(default=1)
    type = models.CharField(max_length=255, default='minutes') # times/minutes/hours/words/pages/etc.
    period = models.CharField(max_length=255, blank=True, null=True, default='day') # day/week/month/year, optional
    stale_period = models.PositiveSmallIntegerField(default=0)
    visibility_period = models.PositiveSmallIntegerField(default=0)
    last_entry_date = models.DateField(null=True, blank=True, default=timezone.now)
    last_completed_date = models.DateField(null=True, blank=True, default=timezone.now)
    def __str__(self):
        # "context/name (folder)" -- folder suffix only when set.
        response = '{}/{}'.format(self.context.slug, self.name)
        if self.folder:
            response += ' ({})'.format(self.folder.slug)
        return response
    def in_progress(self):
        """True when a timed goal has a running (not yet stopped) entry."""
        if self.type in ['words', 'times']:
            return False
        if self.entries.count() > 0:
            last_entry = self.entries.last()
            if not last_entry.stop_time:
                return True
            else:
                return False
        else:
            return False
    def done_today(self):
        """True when today's target is met, or within the visibility window."""
        if self.get_current_amount_converted() >= self.target_amount and not self.in_progress():
            return True
        # NOTE(review): last_completed_date is nullable; this subtraction
        # raises TypeError if it is ever None -- confirm it is always set.
        if self.visibility_period > 0 and (timezone.now().date() - self.last_completed_date).days < self.visibility_period:
            return True
        return False
    def convert_to_resolution(self, duration):
        """Convert a duration in seconds into this goal's display unit."""
        if self.type == "minutes":
            return (duration / 3600.0) * 60.0
        elif self.type == "hours":
            return duration / 3600.0
        else:
            return duration
    def get_current_elapsed_time(self):
        """ Returns current elapsed time in seconds. """
        if self.entries.count() > 0:
            now = timezone.now()
            last_entry = self.entries.last()
            # If we're stopped, there's no current elapsed time
            if last_entry.stop_time:
                return 0
            return (now - last_entry.time).total_seconds()
        return 0
    def get_current_elapsed_time_converted(self):
        return self.convert_to_resolution(self.get_current_elapsed_time())
    def get_current_amount(self):
        """Sum of entry amounts for the current period, in seconds/raw units."""
        # TODO: expand to week/month/year
        if self.period == "day":
            # Get all entries for this goal today and sum up the amounts
            now = timezone.localtime(timezone.now(), timezone.get_current_timezone())
            today = now.date()
            tomorrow = today + timedelta(1)
            today_start = datetime.combine(today, time())
            today_end = datetime.combine(tomorrow, time())
            # Convert back to UTC
            tz = timezone.get_current_timezone()
            d_tz = tz.normalize(tz.localize(today_start))
            today_start_utc = d_tz.astimezone(utc)
            d_tz = tz.normalize(tz.localize(today_end))
            today_end_utc = d_tz.astimezone(utc)
            entries = self.entries.filter(time__lte=today_end_utc, time__gte=today_start_utc)
            total_time = 0
            for entry in entries:
                if entry.amount:
                    total_time += entry.amount
                else:
                    # No stop time yet, so use now, converting to the resolution
                    total_time += self.get_current_elapsed_time()
            return total_time
        return 0
    def get_current_amount_converted(self, amt=None):
        if amt is not None:
            return self.convert_to_resolution(amt)
        else:
            return self.convert_to_resolution(self.get_current_amount())
    def get_current_amount_mm_ss(self):
        return self.seconds_to_mm_ss(self.get_current_amount())
    def get_current_elapsed_time_mm_ss(self):
        return self.seconds_to_mm_ss(self.get_current_elapsed_time())
    def get_current_percentage(self, amt=None):
        # Percentage toward the target, capped at 100.
        if amt is not None:
            return min((amt / self.target_amount) * 100.0, 100.0)
        else:
            return min((self.get_current_amount_converted() / self.target_amount) * 100.0, 100.0)
    def get_amount_for_day(self, day):
        """Sum of entry amounts for the given (local) calendar day."""
        # Get all entries for this goal on this day and sum up the amounts
        tomorrow = day + timedelta(1)
        today_start = datetime.combine(day, time())
        today_end = datetime.combine(tomorrow, time())
        # Convert back to UTC
        tz = timezone.get_current_timezone()
        d_tz = tz.normalize(tz.localize(today_start))
        today_start_utc = d_tz.astimezone(utc)
        d_tz = tz.normalize(tz.localize(today_end))
        today_end_utc = d_tz.astimezone(utc)
        entries = self.entries.filter(time__lte=today_end_utc, time__gte=today_start_utc)
        total_time = 0
        for entry in entries:
            if entry.amount:
                total_time += entry.amount
            else:
                # No stop time yet, so use now, converting to the resolution
                #total_time += self.get_current_elapsed_time()
                pass
        return total_time
    def get_amount_for_day_converted(self, day):
        return self.convert_to_resolution(self.get_amount_for_day(day))
    def seconds_to_mm_ss(self, seconds):
        # "m:ss" display string.
        m, s = divmod(seconds, 60)
        return '{:d}:{:02d}'.format(int(m), int(s))
    def get_percentage_for_day(self, day, target_amount=None):
        if not target_amount:
            target_amount = self.target_amount
        return min((self.get_amount_for_day_converted(day) / target_amount) * 100.0, 100.0)
    def get_current_metadata(self):
        """Bundle of current-progress values used by the views/templates."""
        current_amount = self.get_current_amount()
        current_amount_converted = self.get_current_amount_converted(current_amount)
        current_amount_mm_ss = self.seconds_to_mm_ss(current_amount)
        current_elapsed = self.get_current_elapsed_time()
        current_elapsed_mm_ss = self.seconds_to_mm_ss(current_elapsed)
        current_percentage = self.get_current_percentage(current_amount_converted)
        over = (current_amount_converted >= self.target_amount)
        response = {
            'current_amount': current_amount,
            'current_amount_converted': current_amount_converted,
            'current_amount_mm_ss': current_amount_mm_ss,
            'current_elapsed': current_elapsed,
            'current_elapsed_mm_ss': current_elapsed_mm_ss,
            'current_percentage': current_percentage,
            'over': over,
        }
        return response
    def daterange(self, start_date, end_date):
        # Yields each date from start_date (inclusive) to end_date (exclusive).
        for n in range(int ((end_date - start_date).days)):
            yield start_date + timedelta(n)
    def get_days(self):
        """ Returns list of days that have entries """
        entries = self.entries.all().order_by('time')
        if entries.count() > 0:
            start_date = timezone.localtime(entries[0].time, timezone.get_current_timezone()).date()
            end_date = timezone.now().date()
            return [x for x in self.daterange(start_date, end_date)]
        else:
            return []
    def get_entries_by_day(self):
        """Per-day history (most recent first) with amounts and percentages."""
        # Returns list of days with entries for each
        days = self.get_days()
        days.reverse()
        day_list = []
        for day in days:
            next_day = day + timedelta(1)
            this_day_start = datetime.combine(day, time())
            this_day_end = datetime.combine(next_day, time())
            entries = self.entries.filter(time__lte=this_day_end, time__gte=this_day_start)
            if entries:
                target_amount = entries[0].target_amount
            else:
                target_amount = self.target_amount
            amount = self.get_amount_for_day_converted(day)
            if self.type in ['minutes', 'hours']:
                display_amount = self.seconds_to_mm_ss(self.get_amount_for_day(day))
            else:
                display_amount = amount
            over = (amount >= target_amount)
            day_list.append({
                'date': day,
                'entries': entries,
                'over': over,
                'target_amount': target_amount,
                'amount': amount,
                'display_amount': display_amount,
                'percentage': self.get_percentage_for_day(day, target_amount),
            })
        return day_list
    def days_since_last_entry(self):
        """Whole days since the most recent entry (a large number when none)."""
        last_entry = self.entries.last()
        if last_entry:
            last_entry = last_entry.time.replace(tzinfo=utc).replace(hour=0, minute=0, second=0, microsecond=0)
            today = datetime.utcnow().replace(tzinfo=utc).replace(hour=0, minute=0, second=0, microsecond=0)
            days = (today - last_entry).days
        else:
            # Very far in the past, so it always shows up stale
            days = 100000
        return days
    def get_stale_period(self):
        from django.conf import settings
        # If the reading has a stale period, use it instead of the system default
        if self.stale_period > 0:
            stale_period = self.stale_period
        else:
            stale_period = settings.STALE_PERIOD
        return stale_period
    def get_display_type(self):
        # Singular unit name when the target is exactly 1.
        if self.type == 'minutes' and self.target_amount == 1:
            return 'minute'
        if self.type == 'words' and self.target_amount == 1:
            return 'word'
        return self.type
    def stale(self):
        """ Check to see if this goal is stale. """
        stale_period = self.get_stale_period()
        # Take the visibility period into account if it's there
        if self.visibility_period > 0:
            stale_period += self.visibility_period
        if self.status == 'active' and stale_period != 0 and self.days_since_last_entry() > stale_period:
            return True
        return False
    def width(self):
        if self.type == 'minutes':
            # Scale to the number of minutes, 10+ is full width
            return min(self.target_amount * 16, 160)
        return 160
    def type_truncated(self):
        # Shortened unit label for compact display.
        if self.type == 'minutes':
            return 'min'
        return self.type
class Entry(models.Model):
    """A single timed or counted entry recorded against a Goal."""
    goal = models.ForeignKey(Goal, related_name='entries')
    amount = models.PositiveSmallIntegerField(null=True, default=None, blank=True)
    target_amount = models.PositiveSmallIntegerField(null=True, default=0, blank=True)
    time = models.DateTimeField()
    stop_time = models.DateTimeField(null=True, default=None, blank=True)
    def save(self, *args, **kwargs):
        # Default the start time, and derive amount (seconds) once stopped.
        if not self.time:
            self.time = timezone.now()
        if self.stop_time:
            self.amount = (self.stop_time - self.time).total_seconds()
        return super(Entry, self).save(*args, **kwargs)
    def __str__(self):
        # Renamed from __unicode__ for consistency: every other model in this
        # module defines __str__ (same "Entry" string is returned).
        return "Entry"
    def convert_to_resolution(self, duration):
        """Convert a duration in seconds into the goal's display unit."""
        if self.goal.type == "minutes":
            return (duration / 3600.0) * 60.0
        elif self.goal.type == "hours":
            return duration / 3600.0
        else:
            return duration
    def get_elapsed_time(self):
        """Seconds covered by this entry; still running when no stop_time."""
        # If there's already a stop time then we're good
        if self.stop_time:
            return self.amount
        now = timezone.now()
        return (now - self.time).total_seconds()
    def get_elapsed_time_converted(self):
        return self.convert_to_resolution(self.get_elapsed_time())
    class Meta:
        verbose_name_plural = "entries"
class Folder(models.Model):
    """A named, orderable grouping of goals, owned by a user within a context."""
    name = models.CharField(max_length=100)
    slug = AutoSlugField(populate_from='name')
    order = models.PositiveSmallIntegerField(default=100)
    owner = models.ForeignKey(auth.models.User, related_name='folders', default=1)
    context = models.ForeignKey('Context', null=True, blank=True, related_name='folders')

    def __str__(self):
        # Bug fix: `context` is a nullable FK, so the original
        # `self.context.slug` raised AttributeError for folders without a
        # context. Fall back to the bare folder name in that case.
        if self.context is None:
            return self.name
        return '{}/{}'.format(self.context.slug, self.name)

    def active_goals(self):
        """All active goals in this folder, highest priority first."""
        goals = self.goals.filter(status='active').distinct().order_by('priority')
        return goals

    def active_goals_today(self):
        """Active goals not yet done today, with stale goals sorted first."""
        goals = [x for x in self.goals.filter(status='active').distinct().order_by('priority') if not x.done_today()]
        # stale() returns a bool; 1 - True == 0 sorts stale goals to the front.
        goals = sorted(goals, key=lambda k: 1 - k.stale())
        return goals

    class Meta:
        ordering = ['order']
|
from discord.ext import commands
from discord.utils import get
from datetime import datetime, timedelta
from unicodedata import normalize
from inspect import Parameter
from aiohttp import ClientSession
from typing import Union
async def get_json(link: str, headers: dict = None, json: bool = True) -> Union[dict, str]:
    """Fetch *link* and return the decoded JSON body (raw text when json=False)."""
    async with ClientSession() as session:
        async with session.get(link, headers=headers) as response:
            if json:
                return await response.json()
            return await response.text()
def normalize_string(s: str) -> str:
    """Strip accents/diacritics, returning the closest ASCII representation."""
    decomposed = normalize(u'NFKD', s)
    return decomposed.encode('ascii', 'ignore').decode('utf8')
def now(utc: bool = False, offset_hours: int = 2) -> datetime:
    """Return the current UTC time, optionally shifted to local time.

    :param utc: when True, return plain UTC with no offset applied
    :param offset_hours: hours added to UTC for the local time
        (default 2, matching the original hard-coded offset)
    """
    # Generalized: the local offset used to be the literal ``hours=2``;
    # it is now a backward-compatible keyword parameter.
    current = datetime.utcnow()
    if utc:
        return current
    return current + timedelta(hours=offset_hours)
def has_higher_perms():
    """Command check: the author must hold an elevated permission that the
    targeted member lacks.

    The target member is parsed from the second word of the message content
    (a mention such as ``<@!id>``).

    :raises commands.MissingRequiredArgument: mentioned member not in guild
    :raises commands.MissingPermissions: author holds no permission edge
    """
    # Fix: the original annotated the return type as None although the
    # function returns the commands.check decorator.
    async def extended_check(ctx: commands.Context) -> bool:
        args = ctx.message.content.split()
        member = get(ctx.guild.members, id=int(args[1].strip('<@!>')))
        if not member:
            raise commands.MissingRequiredArgument(
                Parameter('member', Parameter.POSITIONAL_ONLY))
        author_perms = ctx.author.guild_permissions
        member_perms = member.guild_permissions
        # Author qualifies if they hold any of these permissions that the
        # target lacks, checked in the original precedence order.
        for perm_name in ('administrator', 'manage_roles', 'manage_guild',
                          'ban_members', 'kick_members', 'manage_messages'):
            if getattr(author_perms, perm_name) and not getattr(member_perms, perm_name):
                return True
        raise commands.MissingPermissions('')
    return commands.check(extended_check)
def vc_check():
    """Command check: the invoker must be in a temporary voice channel they
    own (or hold manage_channels) within the current guild."""
    async def extended_check(ctx: commands.Context) -> bool:
        if not ctx.guild or not ctx.author.voice:
            raise commands.CommandInvokeError('channel')
        record = await ctx.bot.db.pending.find(
            {'guild_id': ctx.guild.id, 'voc_id': ctx.author.voice.channel.id})
        if not record:
            raise commands.CommandInvokeError('Not temp')
        owner = ctx.guild.get_member(record['owner'])
        if ctx.author == owner or ctx.author.guild_permissions.manage_channels:
            return True
        raise commands.CommandInvokeError('Not owner')
    return commands.check(extended_check)
|
import numpy as np
import pandas as pd
import os
from cost_prophet.utils.evaluation import test_error
from cost_prophet.utils.linear_alebra import split_tests_sets, get_known_indices
from dask import delayed, compute
from time import time
from dotenv import dotenv_values
from collections import OrderedDict
from copy import deepcopy
from matrix_completion import svt_solve
# Load environment configuration (.env file) once at import time.
config = dotenv_values()
# Directory where result CSVs are written; None if unset in the environment.
OUTPUT_DIR = config.get("OUTPUT_DIR")
class ImputeRunner:
    """Grid-search driver for evaluating a matrix-completion solver.

    For every parameter set and trial, a random subset of the known entries
    of X is held out, the solver is fit on the remainder, and the held-out
    test error is recorded. Trials are built lazily with dask.delayed and
    computed in a single batch, then saved to a timestamped CSV.
    """
    def __init__(self, solver_cls, solver_kwargs, params):
        # solver_cls: solver class exposing fit_transform()
        # solver_kwargs: fixed constructor kwargs shared by all grid points
        # params: list of OrderedDicts, one per grid point
        self.solver_cls = solver_cls
        self.solver_kwargs = solver_kwargs
        self.params = params
    def solve(self, X:np.ndarray, X_train: np.ndarray, param_set: OrderedDict, test_indices: list, trial:int) -> float:
        """Fit one solver instance on X_train and score it on the test cells."""
        # Merge the fixed kwargs with this grid point (dict union, 3.9+).
        solver_kwargs = deepcopy(self.solver_kwargs) | param_set
        solver = self.solver_cls(**solver_kwargs)
        outputs = solver.fit_transform(X_train)
        # NOTE: "tranform" (sic) — the typo is load-bearing; subclasses
        # override this exact name, so it must not be renamed here alone.
        results = self.tranform(outputs, X, param_set, test_indices, trial)
        return results
    def tranform(self, outputs: np.ndarray, X:np.ndarray, param_set: OrderedDict, test_indices:list, trial:int):
        """Build one result row: grid values + trial number + held-out error."""
        _test_error = test_error(outputs, X, test_indices)
        results = list(param_set.values()) +[trial, _test_error]
        return results
    def run_trial(self, X: np.ndarray, known_indices: list, param_set: OrderedDict, trial: int) -> list:
        """Hold out a random test split and evaluate one solver fit."""
        test_indices, train_indices, X_train = split_tests_sets(known_indices, X)
        _test_error_data = self.solve(X, X_train, param_set, test_indices, trial)
        return [_test_error_data]
    def run(self, X: np.ndarray, trials: int):
        """Schedule all grid points x trials lazily, compute, then save."""
        errors = []
        known_indices = get_known_indices(X)
        for param_set in self.params:
            for trial in range(trials):
                _test_error_data = delayed(self.run_trial)(X, known_indices, param_set, trial)
                # NOTE(review): += iterates the Delayed object; verify dask
                # can infer its length here, otherwise this raises TypeError.
                errors+=_test_error_data
        # dask.compute returns a tuple; save_results reads errors[0].
        errors = compute(errors)
        self.save_results(errors)
    def save_results(self, errors):
        """Write one row per trial to a timestamped CSV under OUTPUT_DIR."""
        columns = list(self.params[0].keys())
        columns += ['trial', 'test_error']
        df = pd.DataFrame(data=errors[0], columns=columns)
        df.to_csv(os.path.join(OUTPUT_DIR, f'{self.solver_cls.__name__}-{time()}'))
class SoftImputeRunner(ImputeRunner):
    """Runner for SoftImpute-style solvers whose fit_transform yields one
    completed matrix per shrinkage value."""

    def tranform(self, outputs: list, X:np.ndarray, param_set: OrderedDict, test_indices:list, trial:int):
        """Score every (shrinkage_value, completed matrix) pair on the
        held-out entries, producing one result row per pair.

        (Name keeps the base class's "tranform" typo so dispatch from
        ImputeRunner.solve still finds this override.)
        """
        results = []
        for shrinkage_value, X_out in outputs:
            _test_error = test_error(X_out, X, test_indices)
            results.append(list(param_set.values()) + [trial, shrinkage_value, _test_error])
        # Bug fix: removed a stray duplicate test_error() call that re-scored
        # the last iteration's matrix and discarded the result.
        return results

    def save_results(self, errors):
        """Persist results, including the extra 'shrinkage_value' column."""
        columns = list(self.params[0].keys())
        columns += ['trial', 'shrinkage_value', 'test_error']
        df = pd.DataFrame(data=errors[0], columns=columns)
        df.to_csv(os.path.join(OUTPUT_DIR, f'{self.solver_cls.__name__}-{time()}'))
class SVTImputeRunner(ImputeRunner):
    """Runner for SVT-style solvers invoked as plain functions:
    solver_cls(X_train, mask, **kwargs) -> completed matrix."""

    def solve(self, X:np.ndarray, X_train: np.ndarray, param_set: OrderedDict, test_indices: list) -> float:
        """Run the SVT solver on the masked training matrix and return the
        held-out test error."""
        observed_mask = np.logical_not(np.isnan(X_train))
        merged_kwargs = deepcopy(self.solver_kwargs) | param_set
        completed = self.solver_cls(X_train, observed_mask, **merged_kwargs)
        return test_error(completed, X, test_indices)

    def run_trial(self, X: np.ndarray, known_indices: list, param_set: OrderedDict, trial: int) -> list:
        """Split off a test set, solve, and package one result row."""
        test_indices, _train_indices, X_train = split_tests_sets(known_indices, X)
        error = self.solve(X, X_train, param_set, test_indices)
        return list(param_set.values()) + [trial, error]
|
#! /usr/bin/env python3
# -*-coding:utf-8 -*-
# @Time : 2019/06/19 18:57:07
# @Author : che
# @Email : ch1huizong@gmail.com
import ZODB
import ZODB.FileStorage
import transaction
from account import Account
# Open (or create) the file-backed object store and fetch the DB root mapping.
filestorage = ZODB.FileStorage.FileStorage("zodb_account.db")
db = ZODB.DB(filestorage)
conn = db.open()
root = conn.root()
# Accounts are looked up under their owners' names — presumably stored there
# by an earlier setup script (verify against the companion account module).
che = root["che"]
print("Before Withdraw")
print("=" * 60)
print(che)
wang = root["wang"]
print(wang)
print("-" * 60)
# Move 300 between the two accounts inside one explicit transaction so the
# withdraw/deposit pair commits atomically.
transaction.begin()
che.withdraw(300)
wang.deposit(300)
transaction.commit()
print("After Withdraw")
print("=" * 60)
print(che)
print(wang)
print("-" * 60)
conn.close()
|
"""
zoom.packages
Provide a simple shorthand way to include external components in projects.
"""
import json
import os
import zoom
# Built-in package registry: maps a package name to the JS libs and CSS
# styles that must be included on any page that `requires()` it.
default_packages = {
    'c3': {
        'libs': [
            'https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.17/d3.min.js',
            'https://cdnjs.cloudflare.com/ajax/libs/c3/0.4.15/c3.min.js'
        ],
        'styles': [
            'https://cdnjs.cloudflare.com/ajax/libs/c3/0.4.15/c3.min.css',
        ]
    },
    # NOTE(review): key is spelled 'fontawsome' (sic); left unchanged because
    # callers look packages up by this exact string.
    'fontawsome': {
        'libs': [
            'https://use.fontawesome.com/releases/v5.0.1/js/all.js'
        ]
    },
}
def load(pathname):
    """Read a JSON packages file, returning {} when the file does not exist."""
    if not os.path.isfile(pathname):
        return {}
    with open(pathname, 'r', encoding='utf-8') as data:
        return json.load(data)
def get_registered_packages():
    """Return the dict of packages known to the site.

    Site-defined packages override the framework defaults.

    >>> request = zoom.request.Request(dict(PATH_INFO='/'))
    >>> zoom.system.site = zoom.site.Site(request)
    >>> packages = get_registered_packages()
    >>> 'c3' in packages
    True
    """
    registered = {}
    sources = (
        default_packages,
        zoom.system.site.packages,
    )
    for source in sources:
        registered.update(source)
    return registered
def requires(*package_names):
    """Inform framework of the packages required for rendering

    Raises an Exception listing any requested packages that are not
    registered.

    >>> request = zoom.request.Request(dict(PATH_INFO='/'))
    >>> zoom.system.site = zoom.site.Site(request)
    >>> requires('c3')
    >>> libs = zoom.component.composition.parts.parts['libs']
    >>> list(libs)[0]
    'https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.17/d3.min.js'
    >>> try:
    ...     requires('d4')
    ... except Exception as e:
    ...     'Missing required' in str(e) and 'raised!'
    'raised!'
    """
    registered_packages = get_registered_packages()
    # Bug fix: membership (not truthiness) now decides whether a package is
    # registered, so a registered package with an empty spec ({}) no longer
    # raises. Unknown packages are reported all at once, up front.
    missing = set(package_names) - set(registered_packages)
    if missing:
        raise Exception('Missing required packages: {}'.format(missing))
    parts = zoom.Component()
    for name in package_names:
        parts += zoom.Component(**registered_packages[name])
    zoom.component.composition.parts += parts
|
import pcap
import dpkt

# Capture ARP frames from the default interface and print each one with its
# capture timestamp.
sniffer = pcap.pcap()
sniffer.setfilter('arp')
for timestamp, raw_frame in sniffer:
    frame = dpkt.ethernet.Ethernet(raw_frame)
    # Bug fix: the original passed the format string and the values as
    # separate print() arguments, so the tuple was printed unformatted
    # (and "%x" cannot format an Ethernet object anyway).
    print("%s %s" % (timestamp, frame))
"""
This examples loads a pre-trained model and evaluates it on the STSbenchmark, Sick-R and STS12-16 datasets.
Another function of this examples is to save the extracted word embeddings to `summary_path`
Usage:
python evaluation_stsbenchmark_save_embed.py --pooling aver --layer_index 1,12 --whitening --encoder_name bert-base-cased --summary_dir ./save_embeddings
"""
import sys
sys.path.append("../../")
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, evaluation, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SequentialEvaluator
from sentence_transformers.readers import STSBenchmarkDataReader
from sentence_transformers.evaluation.WhiteningEmbeddingSimilarityEvaluator import WhiteningEmbeddingSimilarityEvaluator
from sentence_transformers.models.MyPooling import EasyPooling, Layer2Pooling, LayerNPooling
from sentence_transformers.evaluation.SimilarityFunction import SimilarityFunction
import logging
import sys
import os
import torch
import numpy as np
import argparse
# Resolve data paths relative to this script so it can be launched from anywhere.
script_folder_path = os.path.dirname(os.path.realpath(__file__))
torch.set_num_threads(4)
logging.basicConfig(format='%(asctime)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    level=logging.INFO,
                    handlers=[LoggingHandler()])
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument("--whitening", action='store_true',
                    help="Whether to do whitening.")
parser.add_argument("--last2avg", action='store_true',
                    help="Whether to do avg-first-last.")
parser.add_argument("--wk", action='store_true',
                    help="Whether to do avg-first-last.")
parser.add_argument("--pooling", default="cls", type=str,
                    help="['cls', 'aver', 'max']")
parser.add_argument("--batch_size", default=256, type=int,)
parser.add_argument("--embed_dim", default=768, type=int,)
parser.add_argument("--encoder_name", default='bert-base-uncased', type=str,
                    help="['bert-base-uncased', ''roberta-base]")
parser.add_argument("--layer_index", default='12', type=str,
                    help="['bert-base-uncased', ''roberta-base]")
parser.add_argument("--sts_corpus", default="../datasets/stsbenchmark/", type=str,)
parser.add_argument("--summary_dir", default="./save_embeddings", type=str,)
args = parser.parse_args()
# Evaluation targets: one representative test file per STS task, plus the
# number of sentence pairs in each (consumed by the whitening evaluator).
target_eval_files = ['sts-b', 'sickr',
                     'sts12.MSRpar',
                     'sts13.FNWN',
                     'sts14.deft-forum',
                     'sts15.answers-forums',
                     'sts16.answer-answer',]
target_eval_tasks = ['sts-b', 'sickr', 'sts12', 'sts13', 'sts14', 'sts15', 'sts16']
target_eval_data_num = [1379, 4927,
                        750,
                        189,
                        450,
                        375,
                        254,]
layer_index = [int(i) for i in args.layer_index.split(',')]
if args.whitening:
    # Whitened corpora live in a "white/" subdirectory with "-white" suffixes.
    args.sts_corpus += "white/"
    target_eval_files = [f+"-white" for f in target_eval_files]
word_embedding_model = models.Transformer(args.encoder_name, model_args={'output_hidden_states': True, 'batch_size': args.batch_size})
if args.last2avg:
    # Average of the first (embedding) and last hidden layers.
    layer_index = [0, -1]
if args.wk:
    pooling_model = models.WKPooling(word_embedding_model.get_word_embedding_dimension())
    logger.info("wkpooling")
else:
    pooling_model = LayerNPooling(args.pooling, word_embedding_model.get_word_embedding_dimension(), layers=layer_index)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
logger.info("Pool:{}, Encoder:{}, Whitening:{}".format(args.pooling, args.encoder_name, args.whitening))
evaluators = {task: [] for task in target_eval_tasks} #evaluators has a list of different evaluator classes we call periodically
sts_reader = STSBenchmarkDataReader(os.path.join(script_folder_path, args.sts_corpus))
# target[:5] maps each file back to its task key (e.g. 'sts12.MSRpar' -> 'sts12').
for idx, target in enumerate(target_eval_files):
    output_filename_eval = os.path.join(script_folder_path, args.sts_corpus + target + "-test.csv")
    if args.whitening:
        # Only the first evaluator of each task writes the embedding summary log.
        if len(evaluators[target[:5]]) == 0:
            summary_path = "{}/{}".format(args.summary_dir, args.encoder_name.replace("/", "="))
            if not os.path.exists(summary_path):
                os.makedirs(summary_path)
            summary_path = os.path.join(summary_path, target[:5]+".log")
            evaluators[target[:5]].append(WhiteningEmbeddingSimilarityEvaluator.from_input_examples(sts_reader.get_examples(output_filename_eval), measure_data_num=target_eval_data_num[idx], embed_dim=args.embed_dim, name=target, main_similarity=SimilarityFunction.COSINE, summary_path=summary_path))
        else:
            evaluators[target[:5]].append(WhiteningEmbeddingSimilarityEvaluator.from_input_examples(sts_reader.get_examples(output_filename_eval), measure_data_num=target_eval_data_num[idx], embed_dim=args.embed_dim, name=target, main_similarity=SimilarityFunction.COSINE))
    else:
        evaluators[target[:5]].append(EmbeddingSimilarityEvaluator.from_input_examples(sts_reader.get_examples(output_filename_eval), name=target, main_similarity=SimilarityFunction.COSINE))
# Evaluate each task (averaging over its evaluators) and log one summary row.
all_results = []
logger_text = ""
for task, sequential_evaluator in evaluators.items():
    result = model.evaluate(SequentialEvaluator(sequential_evaluator, main_score_function=lambda scores: np.mean(scores)))
    logger_text += "%.2f \t"%(result*100)
    all_results.append(result*100)
logger.info(" \t".join(target_eval_tasks) + " \tOverall.")
logger.info(logger_text + "%.2f"%np.mean(all_results))
|
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__author__ = "d01"
__email__ = "jungflor@gmail.com"
__copyright__ = "Copyright (C) 2015-16, Florian JUNG"
__license__ = "MIT"
__version__ = "0.1.0"
__date__ = "2016-03-29"
# Created: 2015-03-21 24:00
import time
import threading
from .sensor import Sensor
from ..sensorInterface import SensorClientInterface, \
SensorUpdateException, SensorJoinException, SensorStartException
from .message import MsgType, Id, \
APPJoinMessage, APPUnjoinMessage, APPUpdateMessage, \
format_data
class SensorClient(Sensor, SensorClientInterface):
    """ APP sensor client """

    def __init__(self, settings=None):
        if settings is None:
            settings = {}
        super(SensorClient, self).__init__(settings)
        self._packet_timeout = settings.get('packet_wait_timeout', 1.5)
        """ Time to wait for new packet event """
        self._start_block_timeout = max(
            self._packet_timeout, self._select_timeout
        )
        """ Time to block in start """
        self._join_retry_timeout = settings.get('join_retry_timeout', 5.0)
        """ Time to wait before resending join packet """
        self._join_retry_count = settings.get('join_retry_number', 3)
        """ Number of times to try to join before failing """
        self._server_ip = None
        """ Ip of server
            :type : None | str """
        self._server_port = None
        """ Port of server
            :type : None | int """
        self._joined = threading.Event()
        """ If set, currently joined the audience """

    def join(self, people):
        """
        Join the local audience
        (a config message should be received on success)

        Validates that there are people to join and that each of them
        has a valid unique id

        :param people: Which people does this sensor have
        :type people: list[paps.person.Person]
        :rtype: None
        :raises SensorJoinException: Failed to join
        """
        tries = 0
        if not people:
            raise SensorJoinException("No people given")
        ids = set()
        for person in people:
            if not person.id and person.id != 0:
                raise SensorJoinException("Invalid id for one or more people")
            if person.id in ids:
                raise SensorJoinException(
                    u"Id {} not unique".format(person.id)
                )
            ids.add(person.id)
        while self._is_running and tries < self._join_retry_count:
            packet = APPJoinMessage(
                payload={'people': [person.to_dict() for person in people]}
            )
            self._send_packet(self._multicast_group, self._multicast_port,
                              packet)
            if self._joined.wait(self._join_retry_timeout):
                break
            with self._seq_ack_lock:
                # Got ack for packet?
                packet_ackd = packet.header.sequence_number \
                    not in self._seq_ack
            if packet_ackd and self._joined.wait(1.0):
                # Packet already got acked
                # -> wait another second for ._joined to clear
                break
            tries += 1
            self.warning(
                u"Unsuccessful attempt joining audience # {}".format(tries)
            )
        if not self._joined.is_set() or tries >= self._join_retry_count:
            # Failed to join (no config packet received)
            raise SensorJoinException("No config packet received")
        self.info("Joined the audience")

    def unjoin(self):
        """
        Leave the local audience

        :rtype: None
        :raises SensorJoinException: Failed to leave
        """
        self.debug("()")
        if self._joined.is_set():
            packet = APPUnjoinMessage(device_id=Id.NOT_SET)
            self._send_packet(self._server_ip, self._server_port, packet)
            self._joined.clear()
            self.info("Left the audience")

    def config(self, settings):
        """
        Configuration has changed - config this module and lower layers
        (calls on_config - if set)

        :param settings: New configuration
        :type settings: dict
        :rtype: None
        :raises SensorUpdateException: Failed to update
        """
        self.debug("()")
        # TODO synchronize access to vars
        try:
            self._device_id = settings['device_id']
            self._packet_timeout = settings.get(
                'packet_wait_timeout',
                self._packet_timeout
            )
            self._server_ip = settings.get('server_ip', self._server_ip)
            self._server_port = settings.get('server_port', self._server_port)
        except KeyError:
            raise SensorUpdateException("Key not in settings")
        if callable(self.on_config):
            try:
                self.on_config(settings)
            # Fix: narrowed from a bare except, which would also trap
            # SystemExit/KeyboardInterrupt (same change in the three
            # handlers below).
            except Exception:
                self.exception("Failed to update remote config")
                raise SensorUpdateException("Remote config failed")

    def person_update(self, people):
        """
        Update the status of people

        :param people: All people of this sensor
        :type people: list[paps.person.Person]
        :rtype: None
        :raises SensorUpdateException: Failed to update
        """
        packet = APPUpdateMessage(device_id=Id.NOT_SET, people=people)
        self._send_packet(
            self._server_ip, self._server_port, packet,
            acknowledge_packet=False
        )

    def _packet_loop(self):
        """
        Packet processing loop

        :rtype: None
        """
        while self._is_running:
            # Only wait if there are no more packets in the inbox
            if self.inbox.empty() \
                    and not self.new_packet.wait(self._packet_timeout):
                continue
            ip, port, packet = self.inbox.get()
            if self.inbox.empty():
                self.new_packet.clear()
            self.debug(u"{}".format(packet))
            if packet.header.message_type == MsgType.CONFIG:
                self._do_config_packet(packet, ip, port)

    def _do_config_packet(self, packet, ip, port):
        """
        Apply config to this instance

        :param packet: Packet with config
        :type packet: paps.si.app.message.APPMessage
        :param ip: Ip of server
        :type ip: str
        :param port: Port of server
        :type port: int
        :rtype: None
        """
        self.debug("()")
        if packet.header.device_id != Id.SERVER:
            # Only allow config packets from server
            self.warning("Config packets only allowed from server")
            return
        try:
            config = packet.payload
            self.debug(u"{}".format(config))
            if not isinstance(config, dict):
                self.error("Wrong payload type")
                raise RuntimeError("Wrong type")
            config.setdefault("server_ip", ip)
            config.setdefault("server_port", port)
            self.config(config)
            self._joined.set()
        except Exception:
            self.exception("Failed to configure")
            self.error(u"Faulty packet {}".format(format_data(packet.payload)))
            return

    def start(self, blocking=False):
        """
        Start the interface

        :param blocking: Should the call block until stop() is called
            (default: False)
        :type blocking: bool
        :rtype: None
        :raises SensorStartException: Failed to start
        """
        self.debug("()")
        super(SensorClient, self).start(blocking=False)
        try:
            a_thread = threading.Thread(
                target=self._thread_wrapper,
                args=(self._packet_loop,)
            )
            a_thread.daemon = True
            a_thread.start()
        except Exception:
            self.exception("Failed to run packet loop")
            raise SensorStartException("Packet loop failed")
        self.info("Started")
        # Blocking - call StartStopable.start
        super(Sensor, self).start(blocking)

    def stop(self):
        """
        Stop the interface

        :rtype: None
        """
        self.debug("()")
        try:
            self.unjoin()
            time.sleep(2)
        except Exception:
            self.exception("Failed to leave audience")
        super(SensorClient, self).stop()
|
# Areas list: alternating room name and area.
areas = ["hallway", 14.35,
         "kitchen", 15.0,
         "living room", 19.0,
         "bedroom", 12.5,
         "bathroom", 8.75]

# 1 Print out second element (the hallway area)
print(areas[1])

# 2 Print out bathroom (using a negative index)
print(areas[-2])

# 3 Print out the kitchen area
print(areas[3])
import requests
OH_BASE_URL = 'https://www.openhumans.org/'
def oh_get_member_data(token):
    """
    Exchange OAuth2 token for member data.

    :param token: OAuth2 access token issued by Open Humans
    :return: decoded JSON member data on HTTP 200
    :raises Exception: for any non-200 status code
    """
    req = requests.get(
        '{}api/direct-sharing/project/exchange-member/'.format(OH_BASE_URL),
        params={'access_token': token})
    if req.status_code == 200:
        return req.json()
    # Bug fix: removed the unreachable `return None` that followed this raise.
    raise Exception('Status code {}'.format(req.status_code))
|
"""
OBJECT RECOGNITION USING A SPIKING NEURAL NETWORK.
* The data preparation module.
@author: atenagm1375
"""
import os
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
import cv2
class CaltechDataset(Dataset):
    """Torch-compatible dataset view over one split of a CaltechDatasetLoader.

    Attributes
    ----------
    caltech_dataset_loader : utils.data.CaltechDatasetLoader
        An instance of CaltechDatasetLoader.
    train : bool, optional
        Whether to expose the train split (True, default) or the test split.

    Keyword Arguments
    -----------------
    size_low : int
        The size of the first GaussianBlur filter (DoG).
    size_high : int
        The size of the second GaussianBlur filter (DoG).
    """

    def __init__(self, caltech_dataset_loader, train=True, **kwargs):
        self._cdl = caltech_dataset_loader
        if kwargs:
            self._cdl.apply_DoG(kwargs.get("size_low", 0),
                                kwargs.get("size_high", 0))
        split_indices = self._cdl.train_idx if train else self._cdl.test_idx
        self.dataframe = self._cdl.data_frame.iloc[split_indices]

    def __len__(self):
        """Number of instances in this split."""
        return len(self.dataframe)

    def __getitem__(self, index):
        """Return (image matrix, one-hot label array) at *index*, as float32."""
        image = self.dataframe["x"].iloc[index].astype(np.float32)
        label = self.dataframe[self._cdl.classes].iloc[index].values.astype(
            np.float32)
        return image, label
class CaltechDatasetLoader:
    """
    Loads the Caltech dataset.

    Attributes
    ----------
    path : str
        Path to Caltech image folders.
    classes: list of str
        List of classes.
    image_size: tuple, optional
        The input image size. All images are resized to the specified size.
        The default is (100, 100).
    """

    def __init__(self, path, classes, image_size=(100, 100)):
        self.classes = classes
        self.n_classes = len(classes)
        self.data_frame = pd.DataFrame()
        self.train_idx = []
        self.test_idx = []
        x = []
        y = []
        for obj in classes:
            cls_path = path + ("/" if path[-1] != "/" else "") + obj + "/"
            for img_path in os.listdir(cls_path):
                # Read as grayscale and resize to a fixed shape.
                img = cv2.imread(cls_path + img_path, 0)
                img = cv2.resize(img, image_size,
                                 interpolation=cv2.INTER_CUBIC)
                x.append(img.reshape((1, *image_size)))
                y.append(obj)
        self.n_samples = len(y)
        self.data_frame = pd.DataFrame({"x": x, "y": y}, columns=["x", "y"])
        # One-hot encode the labels alongside the raw columns.
        enc = pd.get_dummies(self.data_frame["y"])
        self.data_frame = pd.concat([self.data_frame, enc], axis=1)

    def apply_DoG(self, size_low, size_high):
        """
        Apply DoG (difference of Gaussians) filter on input images.

        Parameters
        ----------
        size_low : int
            The size of first GaussianBlur filter.
        size_high : int
            The size of second GaussianBlur filter.

        Returns
        -------
        None.
        """
        try:
            s1, s2 = (size_low, size_low), (size_high, size_high)
            self.data_frame["x"] = self.data_frame.x.apply(
                lambda im: cv2.GaussianBlur(im, s1, 0) -
                cv2.GaussianBlur(im, s2, 0))
        except cv2.error:
            # Best-effort: leave images unfiltered if the kernel is invalid.
            print("DoG failed to apply")

    def split_train_test(self, test_ratio=0.3):
        """
        Split train and test samples (stratified per class, shuffled).

        Parameters
        ----------
        test_ratio : float, optional
            The ratio of test samples. The default is 0.3.

        Returns
        -------
        x_train : numpy.array
            Train image data.
        x_test : numpy.array
            Test image data.
        y_train : numpy.array
            Train class labels (one-hot).
        y_test : numpy.array
            Test class labels (one-hot).
        """
        train_df = pd.DataFrame(columns=["x", *self.classes])
        test_df = pd.DataFrame(columns=["x", *self.classes])
        for obj in self.classes:
            obj_df = self.data_frame[self.data_frame[obj] == 1]
            sub_df = obj_df.sample(frac=1 - test_ratio)
            # Bug fix: DataFrame.append was deprecated and removed in
            # pandas 2.0; pd.concat is the supported equivalent.
            train_df = pd.concat([train_df, sub_df], ignore_index=True)
            test_df = pd.concat(
                [test_df, obj_df[~obj_df.isin(sub_df)].dropna()],
                ignore_index=True)
        train_df = train_df.sample(frac=1)
        test_df = test_df.sample(frac=1)
        self.train_idx = list(train_df.index)
        self.test_idx = list(test_df.index)
        x_train = np.stack(np.array(train_df.x))
        x_test = np.stack(np.array(test_df.x))
        y_train = np.stack(np.array(train_df[self.classes]))
        y_test = np.stack(np.array(test_df[self.classes]))
        return x_train, x_test, y_train, y_test
|
import torch
class AccumulateKNN(torch.autograd.Function):
@staticmethod
def forward(ctx,
distances: torch.Tensor,
features: torch.Tensor,
indices: torch.Tensor,
n_moments: int = 1,
mean_and_max: bool = True):
output_feat_tensor, output_max_idxs_tensor = torch.ops.torch_cmspepr.accumulate_knn(distances = distances,
features = features,
indices = indices,
n_moments = n_moments,
mean_and_max = mean_and_max)
ctx.save_for_backward(distances, features, indices, output_feat_tensor, output_max_idx_tensor)
return output_feat_tensor, output_max_idxs_tensor
@staticmethod
def backward(ctx,
grad_from_out_features,
grad_max_idxs):
distances, features, indices, output_feat_tensor, output_max_idx_tensor = ctx.saved_tensors
grad_out_distances, grad_out_features = torch.ops.torch_cmspepr.accumulate_knn_grad(grad_from_out_features = grad_from_out_features,
distances = distances,
features = features,
neigh_indices = indices,
max_feat_indices = output_max_idx_tensor)
return grad_out_distances, grad_out_features
|
# CS 545 Machine Learning
# Homework 3 SVMs and Feature Selection
# Haomin He
"""
References:
Lecture Slides
https://archive.ics.uci.edu/ml/datasets/Spambase
http://scikit-learn.org/stable/modules/preprocessing.html
https://github.com/paolo215/CS445-ML
https://github.com/asayles
https://github.com/ZPdesu/Spam-Message-Classifier-sklearn
https://github.com/scottfabini/machine-learning-perceptron
"""
import pandas as pd
import numpy as np
from sklearn import model_selection
from sklearn import svm
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve
import matplotlib
matplotlib.use("Agg")
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Tahoma']
import matplotlib.pyplot as plt
import random
def split(features, labels, test_size=0.5):
    """Split data into stratified train/test sets and standardize features.

    Each split has approximately the same proportion of positive and
    negative examples as the entire set (stratify=labels).

    :param features: DataFrame of feature columns
    :param labels: array of class labels
    :param test_size: fraction of the data held out for testing
    :return: (trainingFeatures, testingFeatures, trainingLabels, testingLabels)
    """
    # Bug fix: the original ignored the `test_size` parameter and always
    # passed the literal 0.5 to train_test_split.
    trainingFeatures, testingFeatures, trainingLabels, testingLabels = \
        model_selection.train_test_split(
            features, labels, test_size=test_size, stratify=labels)

    # Standardize using statistics computed on the TRAINING set only.
    trainingMeanList = trainingFeatures.mean(axis=0)
    trainingStdList = trainingFeatures.std(axis=0)
    trainingFeatures -= trainingMeanList
    trainingFeatures /= trainingStdList

    # Test data is scaled with the training mean/std (never its own).
    testingFeatures -= trainingMeanList
    testingFeatures /= trainingStdList

    return trainingFeatures, testingFeatures, trainingLabels, testingLabels
def savefig(xlabel, ylabel, filename):
    """Finalize the current figure: label axes, add a legend, write the PNG,
    then clear the plot state for the next figure."""
    # Nudge the title slightly above the axes.
    plt.gca().title.set_position([0.5, 1.05])
    plt.ylabel(ylabel, fontsize=16)
    plt.xlabel(xlabel, fontsize=16)
    plt.legend(loc="lower right")
    plt.savefig(filename)
    plt.clf()
def experiment1(trainingFeatures, testingFeatures, trainingLabels, testingLabels):
    """Train a linear-kernel SVM and evaluate it on the test data.

    Reports accuracy, precision and recall, saves an ROC curve built from
    the decision-function scores, and returns the fitted classifier so
    experiment2 can reuse its weight vector.
    """
    # Instantiate SVM with linear kernel
    clf = svm.SVC(kernel="linear")
    # Train model
    clf.fit(trainingFeatures, trainingLabels)
    # Make a prediction for each instance
    predictions = clf.predict(testingFeatures)
    # Calculate recall, precision, and accuracy
    recall = recall_score(testingLabels, predictions)
    precision = precision_score(testingLabels, predictions)
    accuracy = accuracy_score(testingLabels, predictions)
    print("Experiment 1 accuracy", accuracy)
    print("Experiment 1 precision", precision)
    print("Experiment 1 recall", recall)
    # Plot ROC curve
    # Calculate decision score (distance of the instance to the separating hyperplane)
    scores = clf.decision_function(testingFeatures)
    # roc_curve sweeps thresholds over the scores itself.
    fpr, tpr, threshold = roc_curve(testingLabels, scores)
    # Design chart
    plt.plot(fpr, tpr, color="c", label="ROC Curve on the Test Data")
    plt.plot([0,1], [0,1], color="k", linestyle="--", label="Random Guess")
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.title("Experiment 1: ROC Curve")
    savefig("False Positive Rate or 1 - Specificity", "True Positive Rate or Sensitivity", "Experiment1roc.png")
    # Return classifier to use for experiment2
    return clf
def plotSolo(xAxis, exp, basename, label, title):
    """Plot a single accuracy curve and write it to <basename>.png."""
    plt.plot(xAxis, exp, color="c", label=label)
    plt.title(title)
    # savefig adds axis labels, the legend, and clears the figure.
    savefig("m Features", "Accuracy", basename + ".png")
def experiment2(exp1_clf, trainingFeatures, testingFeatures, trainingLabels, testingLabels):
    """Feature selection by SVM weight.

    Using the weight vector of experiment1's classifier, for m = 2..57 select
    the m features with the highest weights, retrain a linear SVM on those
    features, score it on the test set, and plot accuracy vs. m.
    """
    coefs = exp1_clf.coef_[0]
    # Feature column indices ordered by descending weight (stable for ties).
    ranked = sorted(range(len(coefs)), key=lambda idx: coefs[idx], reverse=True)
    nFeatures = len(trainingFeatures.columns)
    mValues = list(range(2, nFeatures + 1))
    testingAccuracies = []
    for m in mValues:
        selected = ranked[0:m]
        model = svm.SVC(kernel="linear")
        model.fit(trainingFeatures[selected], trainingLabels)
        accuracy = model.score(testingFeatures[selected], testingLabels)
        testingAccuracies.append(accuracy)
        print("Experiment 2 accuracy", accuracy)
    # Solo plot experiment 2 results
    plotSolo(mValues, testingAccuracies, "Experiment 2", "accuracy vs. m features",
             "Experiment 2: Feature selection with linear SVM")
    return testingAccuracies
def experiment3(trainingFeatures, testingFeatures, trainingLabels, testingLabels):
    """Grow a feature set by drawing columns uniformly at random (without
    replacement); for each growth step train/score a linear SVM on the
    accumulated random features. Serves as a baseline to check whether the
    SVM-weight selection of experiment2 beats random selection.
    """
    n_features = len(trainingFeatures.columns)
    remaining = list(range(n_features))
    chosen = []
    mRange = list(range(2, n_features + 1))
    testingAccuracies = []
    for _ in mRange:
        # Move one uniformly random, not-yet-used column into the chosen set.
        chosen.append(remaining.pop(random.randint(0, len(remaining) - 1)))
        clf = svm.SVC(kernel="linear")
        clf.fit(trainingFeatures[chosen], trainingLabels)
        accuracy = clf.score(testingFeatures[chosen], testingLabels)
        testingAccuracies.append(accuracy)
        print("Experiment 3 accuracy", accuracy)
    plotSolo(mRange, testingAccuracies, "Experiment 3", "accuracy vs. m random features",
             "Experiment 3: Random Feature Selection")
    return testingAccuracies
def main():
    """Load the spambase data, relabel targets to {-1, +1}, split it 50/50,
    and run the three SVM experiments in sequence."""
    print("Load dataset spambase.data")
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    df = pd.read_csv("spambase.data", dtype=float, header=None)
    # The label is the last column; map 0 -> -1 so labels are {-1, +1}.
    labelCol = len(df.columns) - 1
    # .loc avoids chained assignment, which silently writes to a copy (and
    # warns/raises) on newer pandas versions.
    df.loc[df[labelCol] == 0, labelCol] = -1
    # Separate label column from the feature columns.
    labels = np.array(df[labelCol])
    dfFeatures = df.drop(columns=labelCol)
    print("finished loading and modifying datasets")
    # Split into halves that preserve the positive/negative class proportions.
    trainingFeatures, testingFeatures, trainingLabels, testingLabels = split(dfFeatures, labels)
    print("running Experiment 1:")
    exp1_clf = experiment1(trainingFeatures, testingFeatures, trainingLabels, testingLabels)
    print("finished Experiment 1")
    print("running Experiment 2")
    exp2_testingAccuracies = experiment2(exp1_clf, trainingFeatures, testingFeatures, trainingLabels, testingLabels)
    print("finished Experiment 2")
    print("running Experiment 3")
    exp3_testingAccuracies = experiment3(trainingFeatures, testingFeatures, trainingLabels, testingLabels)
    print("finished Experiment 3")
# Script entry point: run all three experiments end to end.
if __name__ == "__main__":
    main()
    #print("Test if code can reach here")
|
import os
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tqdm import tqdm
def binarize_batch(batch_tuple):
    """Stochastically binarize every array in *batch_tuple* to float32 0/1.

    One uint8 noise array is drawn once and shared across all members, so two
    identical inputs binarize identically. A pixel becomes 1 when the noise
    falls below its value; 255-valued pixels are forced to 1 unconditionally.
    """
    target_shape = batch_tuple[0].shape
    noise = np.frombuffer(
        np.random.bytes(np.prod(target_shape)), dtype=np.uint8
    ).reshape(target_shape)
    binarized = []
    for member in batch_tuple:
        bits = (member == 255) | (noise < member)
        binarized.append(bits.astype(np.float32))
    return binarized
def batched_run(sess, data, target, get_feed_dict, batch_size, n_repeats=1, tqdm_desc=None):
    """Evaluate *target* over *data* in minibatches, yielding one
    sample-weighted running average per repeat. Shows a tqdm progress bar
    when a description is supplied.
    """
    progress = None
    if tqdm_desc is not None:
        progress = tqdm(total=data.num_examples * n_repeats, unit='sample', desc=tqdm_desc)
    for _ in range(n_repeats):
        running_mean = 0
        seen = 0
        for batch in batched_dataset(data, batch_size):
            bin_x, bin_y = binarize_batch(batch)
            n_in_batch = len(bin_x)
            value = sess.run(target, get_feed_dict(bin_x, bin_y))
            seen += n_in_batch
            # Incremental mean update, weighted by this batch's size.
            running_mean += (value - running_mean) * n_in_batch / seen
            if progress is not None:
                progress.update(n_in_batch)
        yield running_mean
    if progress is not None:
        progress.close()
def print_over(msg):
    """Print *msg* after a carriage return, padded with spaces to the
    terminal width so it overwrites the current line; fall back to a plain
    print when the width cannot be determined.

    The detected width is cached as an attribute on the function object, so
    `stty` is only invoked once per process.
    """
    field = 'tty_columns'
    if not hasattr(print_over, field):
        try:
            with os.popen('stty size', 'r') as fh:
                _, columns = map(int, fh.read().split())
        except (OSError, ValueError):
            # The original bare `except:` swallowed even KeyboardInterrupt.
            # Only OS failures and unparsable `stty` output are expected here
            # (empty output makes the tuple unpack raise ValueError).
            columns = None
        setattr(print_over, field, columns)
    columns = getattr(print_over, field)
    if columns is not None:
        print('\r' + msg + ' ' * (columns - len(msg)))
    else:
        print(msg)
def batched_dataset(data, batch_size):
    """Yield successive batches from *data* via its next_batch() API.

    The batch size is clamped to the dataset size, and the batch count is the
    ceiling of num_examples / batch_size so every example is covered once.
    """
    effective = min(data.num_examples, batch_size)
    n_batches = -(-data.num_examples // effective)  # ceiling division
    for _ in range(n_batches):
        yield data.next_batch(effective)
def save_weights(sess, save_path, suffix='', saver=None):
    """Persist the session's variables under *save_path*; a no-op when the
    path is None. The directory and a default Saver are created on demand.
    """
    if save_path is None:
        return
    if saver is None:
        saver = tf.train.Saver()
    os.makedirs(save_path, exist_ok=True)
    filename = 'model-weights' + ('-' + suffix if suffix else '')
    saved_file = saver.save(sess, os.path.join(save_path, filename))
    print_over('Saved model to ' + saved_file)
def dynamic_tile_to(x, y):
    # Tile `x` up to the rank of `y` by replicating it across y's extra
    # leading (batch-like) dimensions; returns `x` unchanged when the ranks
    # already match. `x` must not have MORE dimensions than `y`.
    if len(x.shape) == len(y.shape):
        return x
    assert len(x.shape) < len(y.shape), 'len(x.shape = {}) > len(y.shape = {})'.format(x.shape, y.shape)
    k = len(x.shape)
    # One tile per element of y's extra leading dims, and 1 for each of x's
    # own (kept) dims.
    multiples = tf.concat([tf.shape(y)[:-k], [1] * k], axis=0)
    # x[(None,) * ...] prepends unit axes so tf.tile can expand them.
    return tf.tile(x[(None,) * len(y.shape[:-k])], multiples)
def tile_to_common_shape(*args):
    """Tile every tensor in *args* up to the rank of the highest-rank one
    (via dynamic_tile_to) and return the results as a list."""
    reference = max(args, key=lambda t: len(t.shape))
    return [dynamic_tile_to(t, reference) for t in args]
def real_nvp_conditional_template(
        x_cond, hidden_layers, shift_only=False, activation=tf.nn.relu, name=None, *args, **kwargs):
    """Conditional variant of tfp's ``real_nvp_default_template``.

    Builds an MLP in which every dense layer additionally sees `x_cond`
    (tiled to a common rank and concatenated onto the layer input). Returns
    a `tf.make_template`-wrapped function producing (shift, log_scale), or
    (shift, None) when `shift_only` is set.
    """
    with tf.name_scope(name, "real_nvp_conditional_template"):
        # Near-zero kernel init keeps the flow close to the identity map at
        # the start of training.
        kernel_initializer = tf.initializers.random_normal(0., 1e-4)
        def _fn(x, output_units, **condition_kwargs):
            """Fully connected MLP parameterized via `real_nvp_template`."""
            if condition_kwargs:
                raise NotImplementedError(
                    "Conditioning not implemented in the default template.")
            # Match ranks so x_cond can be concatenated with x at each layer.
            x_cond_tiled, x = tile_to_common_shape(x_cond, x)
            for units in hidden_layers:
                x = tf.layers.dense(
                    inputs=tf.concat([x_cond_tiled, x], axis=-1),
                    units=units,
                    activation=activation,
                    kernel_initializer=kernel_initializer,
                    *args,
                    **kwargs
                )
            # Final linear layer emits shift only, or shift and log_scale
            # stacked along the last axis.
            x = tf.layers.dense(
                inputs=tf.concat([x_cond_tiled, x], axis=-1),
                units=(1 if shift_only else 2) * output_units,
                kernel_initializer=kernel_initializer,
                activation=None,
                *args,
                **kwargs
            )
            if shift_only:
                return x, None
            shift, log_scale = tf.split(x, 2, axis=-1)
            return shift, log_scale
        return tf.make_template("real_nvp_conditional_template", _fn)
def masked_autoregressive_conditional_template(
        x_cond, hidden_layers, shift_only=False, activation=tf.nn.relu,
        log_scale_min_clip=-5., log_scale_max_clip=3., log_scale_clip_gradient=False,
        name=None, *args, **kwargs):
    """Build the Masked Autoregressive Density Estimator (Germain et al., 2015).
    This will be wrapped in a make_template to ensure the variables are only
    created once. It takes the input and returns the `loc` ("mu" in [Germain et
    al. (2015)][1]) and `log_scale` ("alpha" in [Germain et al. (2015)][1]) from
    the MADE network.
    Warning: This function uses `masked_dense` to create randomly initialized
    `tf.Variables`. It is presumed that these will be fit, just as you would any
    other neural architecture which uses `tf.layers.dense`.
    #### About Hidden Layers
    Each element of `hidden_layers` should be greater than the `input_depth`
    (i.e., `input_depth = tf.shape(input)[-1]` where `input` is the input to the
    neural network). This is necessary to ensure the autoregressivity property.
    #### About Clipping
    This function also optionally clips the `log_scale` (but possibly not its
    gradient). This is useful because if `log_scale` is too small/large it might
    underflow/overflow making it impossible for the `MaskedAutoregressiveFlow`
    bijector to implement a bijection. Additionally, the `log_scale_clip_gradient`
    `bool` indicates whether the gradient should also be clipped. The default does
    not clip the gradient; this is useful because it still provides gradient
    information (for fitting) yet solves the numerical stability problem. I.e.,
    `log_scale_clip_gradient = False` means
    `grad[exp(clip(x))] = grad[x] exp(clip(x))` rather than the usual
    `grad[clip(x)] exp(clip(x))`.
    Args:
      x_cond: Tensor to condition on
      hidden_layers: Python `list`-like of non-negative integer, scalars
        indicating the number of units in each hidden layer. Default: `[512, 512].
      shift_only: Python `bool` indicating if only the `shift` term shall be
        computed. Default: `False`.
      activation: Activation function (callable). Explicitly setting to `None`
        implies a linear activation.
      log_scale_min_clip: `float`-like scalar `Tensor`, or a `Tensor` with the
        same shape as `log_scale`. The minimum value to clip by. Default: -5.
      log_scale_max_clip: `float`-like scalar `Tensor`, or a `Tensor` with the
        same shape as `log_scale`. The maximum value to clip by. Default: 3.
      log_scale_clip_gradient: Python `bool` indicating that the gradient of
        `tf.clip_by_value` should be preserved. Default: `False`.
      name: A name for ops managed by this function. Default:
        "masked_autoregressive_default_template".
      *args: `tf.layers.dense` arguments.
      **kwargs: `tf.layers.dense` keyword arguments.
    Returns:
      shift: `Float`-like `Tensor` of shift terms (the "mu" in
        [Germain et al. (2015)][1]).
      log_scale: `Float`-like `Tensor` of log(scale) terms (the "alpha" in
        [Germain et al. (2015)][1]).
    Raises:
      NotImplementedError: if rightmost dimension of `inputs` is unknown prior to
        graph execution.
    #### References
    [1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE:
         Masked Autoencoder for Distribution Estimation. In _International
         Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509
    """
    name = name or "masked_autoregressive_conditional_template"
    with tf.name_scope(name, values=[x_cond, log_scale_min_clip, log_scale_max_clip]):
        def _fn(x):
            # MADE needs the input depth statically; it sizes the masks.
            input_depth = x.shape.with_rank_at_least(1)[-1].value
            if input_depth is None:
                raise NotImplementedError("Rightmost dimension must be known prior to graph execution.")
            input_shape = np.int32(x.shape.as_list()) if x.shape.is_fully_defined() else tf.shape(x)
            for i, units in enumerate(hidden_layers):
                # First layer uses an exclusive mask so output i never sees
                # input i (strict autoregressivity).
                x = tfp.bijectors.masked_dense(
                    inputs=x, units=units, num_blocks=input_depth,
                    exclusive=True if i == 0 else False, activation=activation,
                    *args, **kwargs)
                # Inject the conditioning input additively; a plain dense
                # layer on x_cond cannot break autoregressivity in x.
                x += tf.layers.dense(inputs=x_cond, units=units, activation=activation)
            x = tfp.bijectors.masked_dense(
                inputs=x, units=(1 if shift_only else 2) * input_depth,
                num_blocks=input_depth, activation=None, *args, **kwargs)
            x += tf.layers.dense(inputs=x_cond, units=(1 if shift_only else 2) * input_depth, activation=None)
            if shift_only:
                x = tf.reshape(x, shape=input_shape)
                return x, None
            # Last axis of size 2 holds (shift, log_scale) interleaved.
            x = tf.reshape(x, shape=tf.concat([input_shape, [2]], axis=0))
            shift, log_scale = tf.unstack(x, num=2, axis=-1)
            which_clip = tf.clip_by_value if log_scale_clip_gradient else _clip_by_value_preserve_grad
            log_scale = which_clip(log_scale, log_scale_min_clip, log_scale_max_clip)
            return shift, log_scale
        return tf.make_template(name, _fn)
def _clip_by_value_preserve_grad(x, clip_value_min, clip_value_max, name=None):
    """Clip `x` to [clip_value_min, clip_value_max] in the forward pass while
    letting gradients flow through unclipped (straight-through estimator)."""
    with tf.name_scope(name, "clip_by_value_preserve_grad",
                       [x, clip_value_min, clip_value_max]):
        clipped = tf.clip_by_value(x, clip_value_min, clip_value_max)
        return x + tf.stop_gradient(clipped - x)
class StepSchedule:
    """Piecewise-constant schedule.

    `at(step)` returns the value of the latest (step, value) pair whose step
    does not exceed `step`, or the default when no pair applies yet.
    """

    def __init__(self, spec, default):
        self._default = default
        self._schedule = sorted(spec)
        assert all(len(entry) == 2 for entry in self._schedule), \
            'Malformed schedule: {}'.format(self._schedule)

    def at(self, cur_step):
        # Scan from the latest milestone backwards; the first one at or
        # below cur_step wins.
        for threshold, value in reversed(self._schedule):
            if threshold <= cur_step:
                return value
        return self._default
class CyclicTemperatureSchedule:
    """Sawtooth temperature schedule.

    Within each cycle the temperature ramps linearly from 0 to 1 over the
    first `annealing_fraction` of the cycle and stays at 1 afterwards. When
    `cycles <= 0` there is a single cycle with no annealing (always 1).
    """

    def __init__(self, total_epochs, cycles, annealing_fraction):
        if cycles > 0:
            self._cycle_size = total_epochs // cycles
            self._annealing_fraction = annealing_fraction
        else:
            self._cycle_size = total_epochs
            self._annealing_fraction = 0.0

    def at(self, cur_step):
        # Position within the current cycle, in [0, 1).
        phase = (cur_step % self._cycle_size) / self._cycle_size
        if phase < self._annealing_fraction:
            return phase / self._annealing_fraction
        return 1.
def make_typed_tuple(*types, rest=None):
    """Build a parser that splits a comma-separated string and converts each
    field with the corresponding converter from *types*.

    With ``rest=None`` the input must have exactly ``len(types)`` fields;
    otherwise extra fields are converted with *rest*. Intended as an argparse
    ``type=`` callable.

    Raises:
        ValueError: on a wrong field count. (The original used `assert`,
            which is stripped under ``python -O`` and raises AssertionError —
            an exception argparse does not translate into a usage error.)
    """
    def impl(x):
        x_vals = x.split(',')
        if rest is None:
            if len(x_vals) != len(types):
                raise ValueError('Wrong argument: "{}", expected {} values'.format(x, len(types)))
        elif len(x_vals) < len(types):
            raise ValueError('Wrong argument: "{}", expected at least {} values'.format(x, len(types)))
        return tuple((types[idx] if idx < len(types) else rest)(x_val) for idx, x_val in enumerate(x_vals))
    return impl
|
import pytest
from hyp3lib import DemError
from hyp3_autorift import geometry, io
from hyp3_autorift.process import DEFAULT_PARAMETER_FILE
def test_find_jpl_parameter_info():
    """Same coverage as before, expressed as data-driven cases."""
    named_cases = [
        ((55, 56), (40, 41), 'NPS'),
        ((54, 55), (40, 41), 'N37'),
        ((54, 55), (-40, -41), 'N24'),
        ((-54, -55), (-40, -41), 'S24'),
        ((-55, -56), (40, 41), 'S37'),
        ((-56, -57), (40, 41), 'SPS'),
    ]
    for lat_limits, lon_limits, expected_name in named_cases:
        polygon = geometry.polygon_from_bbox(x_limits=lat_limits, y_limits=lon_limits)
        parameter_info = io.find_jpl_parameter_info(polygon, DEFAULT_PARAMETER_FILE)
        assert parameter_info['name'] == expected_name

    # Bounding boxes outside the valid lat/lon range must raise a DemError.
    failing_cases = [
        ((-90, -91), (40, 41)),
        ((90, 91), (40, 41)),
        ((55, 56), (180, 181)),
        ((55, 56), (-180, -181)),
    ]
    for lat_limits, lon_limits in failing_cases:
        polygon = geometry.polygon_from_bbox(x_limits=lat_limits, y_limits=lon_limits)
        with pytest.raises(DemError):
            io.find_jpl_parameter_info(polygon, DEFAULT_PARAMETER_FILE)
|
from pydub import AudioSegment as AS
import numpy as np
from numpy.fft import fft
import matplotlib.pyplot as plt
# Load one spoken-word sample, plot the raw waveform, then plot the magnitude
# spectrum of its FFT (positive frequencies only).
# Path ("pfad") to one "marvin" utterance from the Speech Commands corpus.
pfad = "/home/noah/Schreibtisch/Bachelorarbeit/speechcommandCorpus/marvin/0a7c2a8d_nohash_0.wav"
test = AS.from_wav(pfad)
# Raw PCM bytes -> 16-bit signed samples ("rohDaten" = raw data).
rohDaten=test.raw_data
daten = np.frombuffer(rohDaten,dtype=np.int16)
# Time-domain waveform.
fig = plt.figure()
plt.plot(daten)
fig.savefig('roh.png', bbox_inches='tight')
# Magnitude spectrum; only the first half ("haelfte") of the FFT is unique
# for a real-valued signal.
frequenzen = fft(daten)
fig = plt.figure()
haelfte=int(len(frequenzen)/2)
plt.plot(abs(frequenzen[:haelfte]))
fig.savefig('fft.png', bbox_inches='tight')
|
import pandas as pd
import numpy as np
from sklearn.metrics import silhouette_score
from itertools import combinations
import Levenshtein
def levenshtein_metric(X, min_samples=0):
    """ Returns array of average levenshtein scores of each cluster by averaging the levenshtein scores of all pairwise strings in each cluster.
    :param X: Pandas DataFrame of clusters with their respective strings
    :param min_samples: Minimum number of elements in a cluster for the cluster to be counted for calculating the Levenshtein score.
    :return: Array of average Levenshtein scores of each cluster
    """
    # Drop clusters (columns) that are too small to score.
    keepcols = [c for c in X.columns if len(X[c].dropna()) >= min_samples]
    X = X[keepcols]
    averages = []
    for col in X.columns:
        # All pairwise distances between the string members of this cluster.
        distances = [
            Levenshtein.distance(a, b)
            for a, b in combinations(X[col].dropna(), 2)
            if isinstance(a, str) and isinstance(b, str)
        ]
        # np.mean of an empty array yields nan (with a RuntimeWarning), which
        # matches the previous single-expression behavior for tiny clusters.
        averages.append(np.mean(np.array(distances)))
    return np.array(averages)
def silhouette_metric(X, labels):
    """ Returns silhouette score of featurized Pandas DataFrame
    :param X: Pandas DataFrame of featurized data
    :param labels: The labeled cluster assigned to each string (array)
    :return: Silhouette Score (floating point value; sklearn's
        silhouette_score ranges over [-1, 1], not [0, 1])
    """
    # Thin wrapper over sklearn.metrics.silhouette_score.
    return silhouette_score(X=X, labels=labels)
import os.path
import numpy as np
import cv2
import argparse
# CLI: --frames points at a directory of image frames; --output is the video
# basename (".mp4" is appended when the writer is opened).
parser = argparse.ArgumentParser(description='Convert frames into a video')
parser.add_argument('--frames', default='/home/hteam/Documents/hao/Research/Detection/detectron2/result_images', type=str, help='video frames file')
parser.add_argument('--output', default='video', type=str, help='output video name')
def main():
    """Read every image in the frames directory (sorted by filename) and
    write them out as a 15 fps .mp4 video."""
    args = parser.parse_args()
    frames_name = os.listdir(args.frames)
    frames_name.sort()
    fps = 15
    out = None
    for frame_name in frames_name:
        print('Processing', frame_name, '...')
        path = os.path.join(args.frames, frame_name)
        frame = cv2.imread(path)
        if frame is None:
            # cv2.imread returns None for unreadable/non-image files; the
            # original crashed on frame.shape here.
            print('Skipping unreadable file', path)
            continue
        h, w, _ = frame.shape
        if out is None:
            # Lazily open the writer sized to the first readable frame.
            out = cv2.VideoWriter(args.output + '.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
        out.write(frame)
    # The original initialized `out = 0` and unconditionally called
    # out.release(), which raised AttributeError for an empty directory.
    if out is not None:
        out.release()
# Script entry point.
if __name__ == '__main__':
    main()
import urllib.request
import re
import os
import argparse
# CLI: conference name (CVPR/ICCV/ECCV) and year; both are used to build the
# openaccess.thecvf.com proceedings URL below.
parser=argparse.ArgumentParser(description='Input Conference (C) and Year (Y)')
parser.add_argument('C',type=str,default='CVPR',
                    help='a name of CVPR, ICCV or ECCV')
parser.add_argument('Y',type=int,default=2020,
                    help='the year of conference')
#parser.print_help()
args=parser.parse_args()
def getHtml(url):
    """Fetch *url* and return the response body decoded as UTF-8."""
    # Context manager guarantees the HTTP response is closed (the original
    # leaked the response object).
    with urllib.request.urlopen(url) as page:
        return page.read().decode('utf-8')
def download_file(download_url, file_name):
    """Download *download_url* and write the raw bytes to *file_name*."""
    # `with` closes both the HTTP response and the output file even on error
    # (the original leaked the response and relied on a manual close()).
    with urllib.request.urlopen(download_url) as response:
        with open(file_name, 'wb') as file:
            file.write(response.read())
    print("Completed")
# Build the proceedings URL for the requested conference/year, collect every
# paper-PDF link (directly or via per-day sub-pages), then download them all
# into ./<Conference><Year>/.
Conference = args.C  # e.g. 'ICCV'
Year = args.Y        # e.g. 2019
save_path = '%s%d' % (Conference, Year)
if not os.path.exists(save_path):
    os.mkdir(save_path)
url = 'http://openaccess.thecvf.com/%s%d.py' % (Conference, Year)
html = getHtml(url)
# Pattern 1: links to per-day sub-pages of the form "?day=YYYY-MM-DD".
compile_text = r'\b\?day=[0-9][0-9][0-9][0-9]\-[0-9][0-9]\-[0-9][0-9]\b'
# Pattern 2: links to the paper PDFs themselves.
compile_paper = r'\bcontent_.*paper\.pdf\b'
pattern = re.compile(compile_text)
url_list = pattern.findall(html)
paper_url_list = []
if not url_list:
    # No per-day pages: the PDFs are linked directly from the index page.
    paper_url_list.extend(re.findall(compile_paper, html))
else:
    for suburl in url_list:
        day_html = getHtml(url + suburl)
        paper_url_list.extend(re.findall(compile_paper, day_html))
for paper_url in paper_url_list:
    name = paper_url.split('/')[-1]
    file_name = os.path.join(save_path, name)
    download_file('http://openaccess.thecvf.com/' + paper_url, file_name)
import hashlib
import pickle
import os
from ..algorithms import librosa_analysis
from track import Track
class Song(Track):
    """A :py:class:`radiotool.composer.Track`
    subclass that wraps a music .wav file.
    Allows access to a musical analysis of the song.
    """
    def __init__(self, fn, name="Song name", cache_dir=None,
                 refresh_cache=False, labels=None, labels_in_file=False):
        # Lazily computed, memoized fields (see the properties below).
        self._analysis = None
        self._checksum = None
        # When True, any on-disk cached analysis is ignored and recomputed.
        self.refresh_cache = refresh_cache
        self.cache_dir = cache_dir
        Track.__init__(self, fn, name, labels=labels,
                       labels_in_file=labels_in_file)
    @property
    def analysis(self):
        """Get musical analysis of the song using the librosa library
        """
        if self._analysis is not None:
            return self._analysis
        if self.cache_dir is not None:
            # Cache files are keyed by the audio file's SHA-256 checksum.
            path = os.path.join(self.cache_dir, self.checksum)
            try:
                # Deliberate: raising IOError jumps to the except branch,
                # which recomputes the analysis and rewrites the cache.
                if self.refresh_cache: raise IOError
                with open(path + '.pickle', 'rb') as pickle_file:
                    self._analysis = pickle.load(pickle_file)
            except IOError:
                self._analysis = librosa_analysis.analyze_frames(self.all_as_mono(), self.samplerate)
                with open(path + '.pickle', 'wb') as pickle_file:
                    pickle.dump(self._analysis, pickle_file, pickle.HIGHEST_PROTOCOL)
        else:
            # No cache directory configured: compute without persisting.
            self._analysis = librosa_analysis.analyze_frames(self.all_as_mono(), self.samplerate)
        return self._analysis
    def features_cached(self):
        # True when a cached analysis pickle exists for this file (and a
        # refresh is not being forced); never computes anything.
        if self.cache_dir is not None:
            path = os.path.join(self.cache_dir, self.checksum)
            try:
                if self.refresh_cache: raise IOError
                with open(path + '.pickle', 'rb') as pickle_file:
                    return True
            except IOError:
                pass
        return False
    @property
    def checksum(self):
        # SHA-256 of the audio file contents, streamed in 64 KiB blocks and
        # memoized after the first computation.
        if self._checksum is not None:
            return self._checksum
        block_size = 65536
        hasher = hashlib.sha256()
        with open(self.filename, 'rb') as f:
            buf = f.read(block_size)
            while len(buf) > 0:
                hasher.update(buf)
                buf = f.read(block_size)
        self._checksum = hasher.hexdigest()
        return self._checksum
|
from django.contrib import admin
from .models import Message
# Register your models here.
@admin.register(Message)
class MessageAdmin(admin.ModelAdmin):
    # Admin list-page configuration for Message records.
    list_display = ('id', 'author', 'created', 'updated','content','is_public')
    list_filter = ('author', 'created',)
    # NOTE(review): 'category' is searched but never displayed -- confirm the
    # Message model actually defines a 'category' field.
    search_fields = ('category', 'content')
    date_hierarchy = 'created'
    ordering = ('-updated',)
|
from flask import Flask
from voice_api.blueprints.fs_api.views import fs_api
from voice_api.blueprints.web_api.views import web_api
from voice_api.models import db
import logging
from logging.handlers import TimedRotatingFileHandler
def creat_app():
    """Application factory: configure Flask, register the FreeSWITCH and web
    blueprints, attach daily-rotating file logging, and create the database
    tables.

    NOTE: the name is a historical typo kept for backward compatibility;
    prefer the `create_app` alias defined below.
    """
    app = Flask(__name__)
    app.config.from_object('config')
    app.config.from_object('setting')
    app.register_blueprint(fs_api)
    app.register_blueprint(web_api)
    # Logging: set the base level, then attach a handler that rotates the
    # log file daily (UTC) and keeps seven backups.
    logging.basicConfig(level=logging.DEBUG)
    formatter = logging.Formatter(
        "[%(asctime)s][%(filename)s:%(lineno)d][%(levelname)s][%(thread)d] - %(message)s")
    handler = TimedRotatingFileHandler(
        "voice.log", when="D", interval=1, backupCount=7,
        encoding="UTF-8", delay=False, utc=True)
    # Attach the formatter before the handler can emit anything (the original
    # added the handler first, which worked but read awkwardly).
    handler.setFormatter(formatter)
    app.logger.addHandler(handler)
    db.init_app(app)
    # Create the database tables inside an application context.
    with app.app_context():
        db.create_all()
    return app

# Correctly-spelled, backward-compatible alias for the factory.
create_app = creat_app
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters, generics
from rest_framework.pagination import CursorPagination
from app import models
from app import serializers
class ResultsPagination(CursorPagination):
    # Cursor pagination: 25 results per page by default; clients may request
    # up to 100 via the ?size= query parameter.
    page_size = 25
    page_size_query_param = 'size'
    max_page_size = 100
class YoutubeVideosViewSet(generics.ListAPIView):
    # Read-only listing of videos with text search over title/description,
    # filter/ordering backends, and newest-first default ordering.
    search_fields = ['title', 'description']
    filter_backends = [filters.SearchFilter, DjangoFilterBackend, filters.OrderingFilter]
    # filterset_fields = ['channel_id', 'channel_title']
    ordering = ['-published_at', ]
    queryset = models.Video.objects.all()
    serializer_class = serializers.VideoSerializer
    pagination_class = ResultsPagination
|
""" Unit tests for the basis polynomial computations """
# -------------------------------------------------------------------------------------------------------------------- #
# Importing packages
# -------------------------------------------------------------------------------------------------------------------- #
import sys
import os
import time
import pdb
import numpy as np
import nurbspy as nrb
# -------------------------------------------------------------------------------------------------------------------- #
# Prepare the test suite
# -------------------------------------------------------------------------------------------------------------------- #
def test_basis_function_example_1():
    """ Test the basis function value against a known example (Ex2.2 from the NURBS book) """

    def get_analytic_polynomials(u_values):
        # Closed-form piecewise expressions for the eight degree-2 basis
        # functions of the example's knot vector.
        N = np.zeros((8, u_values.size), dtype=u_values.dtype)
        for col, u in enumerate(u_values):
            N02 = (1 - u) ** 2 * (0 <= u < 1)
            N12 = (2 * u - 3 / 2 * u ** 2) * (0 <= u < 1) + (1 / 2 * (2 - u) ** 2) * (1 <= u < 2)
            N22 = (1 / 2 * u ** 2) * (0 <= u < 1) + (-3 / 2 + 3 * u - u ** 2) * (1 <= u < 2) + (1 / 2 * (3 - u) ** 2) * (2 <= u < 3)
            N32 = (1 / 2 * (u - 1) ** 2) * (1 <= u < 2) + (-11 / 2 + 5 * u - u ** 2) * (2 <= u < 3) + (1 / 2 * (4 - u) ** 2) * (3 <= u < 4)
            N42 = (1 / 2 * (u - 2) ** 2) * (2 <= u < 3) + (-16 + 10 * u - 3 / 2 * u ** 2) * (3 <= u < 4)
            N52 = (u - 3) ** 2 * (3 <= u < 4) + (5 - u) ** 2 * (4 <= u < 5)
            N62 = (2 * (u - 4) * (5 - u)) * (4 <= u < 5)
            N72 = (u - 4) ** 2 * (4 <= u <= 5)
            N[:, col] = np.asarray([N02, N12, N22, N32, N42, N52, N62, N72])
        return N

    # Eight basis functions (max index n = 7) of degree p = 2 on the book's
    # clamped knot vector, sampled on [0, 5].
    n, p = 7, 2
    U = np.asarray([0.00, 0.00, 0.00, 1.00, 2.00, 3.00, 4.00, 4.00, 5.00, 5.00, 5.00])
    uu = np.linspace(0, 5, 21)

    # Numeric and analytic evaluations must agree to round-off.
    N_basis = nrb.compute_basis_polynomials(n, p, U, uu)
    N_analytic = get_analytic_polynomials(uu)
    error = np.sum((N_analytic - N_basis) ** 2) ** (1 / 2)
    print('The two-norm of the evaluation error is : ', error)
    assert error < 1e-8
def test_partition_of_unity_property():
    """Check that the basis polynomials sum to one everywhere (partition of unity)."""
    # n = 4 (max index) basis functions of degree p = 3 on a clamped knot
    # vector: p zeros, n - p + 2 equispaced interior values, p ones.
    n, p = 4, 3
    U = np.concatenate((np.zeros(p), np.linspace(0, 1, n - p + 2), np.ones(p)))
    u = np.linspace(0, 1, 101)
    N_basis = nrb.compute_basis_polynomials(n, p, U, u)
    # At every parameter value the basis functions must sum to exactly one.
    error = np.sum((np.sum(N_basis, axis=0) - 1)**2) ** (1/2)
    print('The two-norm of the partition of unity error is : ', error)
    assert error < 1e-8
def test_basis_function_zeroth_derivative():
    """Verify that derivative_order=0 reproduces the plain basis evaluation."""
    # Clamped degree-3 spline with max basis index n = 4.
    n, p = 4, 3
    U = np.concatenate((np.zeros(p), np.linspace(0, 1, n - p + 2), np.ones(p)))
    u = np.linspace(0, 1, 101)
    N_basis = nrb.compute_basis_polynomials(n, p, U, u)
    dN_basis = nrb.compute_basis_polynomials_derivatives(n, p, U, u, derivative_order=0)
    error = np.sum((dN_basis - N_basis) ** 2) ** (1 / 2) / u.size
    print('The two-norm of the zeroth derivative error is : ', error)
    assert error < 1e-8
def test_basis_function_first_derivative_cs():
    """Check the analytic first derivative against the complex-step method."""
    # Clamped degree-3 spline with max basis index n = 4.
    n, p = 4, 3
    U = np.concatenate((np.zeros(p), np.linspace(0, 1, n - p + 2), np.ones(p)))
    # Nudge the endpoints inward so the limits [0, 1] also work when making
    # changes to the evaluation points.
    h = 1e-12
    hh = h + h ** 2
    Nu = 1000
    u = np.linspace(0.00 + hh, 1.00 - hh, Nu)
    dN_basis = nrb.compute_basis_polynomials_derivatives(n, p, U, u, derivative_order=1)
    # Complex step: Im f(u + ih) / h approximates f'(u) to machine precision.
    dN_fd = np.imag(nrb.compute_basis_polynomials(n, p, U, u + h*1j)) / h
    error = np.sum((dN_basis - dN_fd) ** 2) ** (1 / 2) / Nu
    print('The two-norm of the first derivative error is : ', error)
    assert error < 1e-12
def test_basis_function_first_derivative_cfd():
    """Check the analytic first derivative against central finite differences."""
    # Clamped degree-3 spline with max basis index n = 4.
    n, p = 4, 3
    U = np.concatenate((np.zeros(p), np.linspace(0, 1, n - p + 2), np.ones(p)))
    # Keep the sample points strictly inside [0, 1] so u +/- h stays valid.
    h = 1e-5
    hh = h + h ** 2
    Nu = 1000
    u = np.linspace(0.00 + hh, 1.00 - hh, Nu)
    dN_basis = nrb.compute_basis_polynomials_derivatives(n, p, U, u, derivative_order=1)
    # Second-order central difference: (f(u+h) - f(u-h)) / (2h).
    lower = -1 / 2 * nrb.compute_basis_polynomials(n, p, U, u - h)
    upper = +1 / 2 * nrb.compute_basis_polynomials(n, p, U, u + h)
    dN_fd = (lower + upper) / h
    error = np.sum((dN_basis - dN_fd) ** 2) ** (1 / 2) / Nu
    print('The two-norm of the first derivative error is : ', error)
    assert error < 1e-8
def test_basis_function_second_derivative_cfd():
    """Check the analytic second derivative against central finite differences."""
    # Clamped degree-3 spline with max basis index n = 4.
    n, p = 4, 3
    U = np.concatenate((np.zeros(p), np.linspace(0, 1, n - p + 2), np.ones(p)))
    # Keep the sample points strictly inside [0, 1] so u +/- h stays valid.
    h = 1e-4
    hh = h + h ** 2
    Nu = 1000
    u = np.linspace(0.00 + hh, 1.00 - hh, Nu)
    ddN_basis = nrb.compute_basis_polynomials_derivatives(n, p, U, u, derivative_order=2)
    # Central second difference: (f(u-h) - 2 f(u) + f(u+h)) / h^2.
    below = +1 * nrb.compute_basis_polynomials(n, p, U, u - h)
    center = -2 * nrb.compute_basis_polynomials(n, p, U, u)
    above = +1 * nrb.compute_basis_polynomials(n, p, U, u + h)
    ddN_fd = (below + center + above) / h ** 2
    error = np.sum((ddN_basis - ddN_fd) ** 2) ** (1 / 2) / Nu
    print('The two-norm of the second derivative error is : ', error)
    assert error < 1e-6
# -------------------------------------------------------------------------------------------------------------------- #
# Check the functions manually
# -------------------------------------------------------------------------------------------------------------------- #
# Guarded so that importing this module (e.g. during pytest collection) does
# not execute every test at import time and then again via the test runner.
if __name__ == '__main__':
    test_basis_function_example_1()
    test_partition_of_unity_property()
    test_basis_function_zeroth_derivative()
    test_basis_function_first_derivative_cs()
    test_basis_function_first_derivative_cfd()
    test_basis_function_second_derivative_cfd()
|
import unittest
from lab.upscaling.worker.Algorithm import *
if __name__ == "__main__":
    # Test code
    # NOTE(review): this instance is created but never used afterwards;
    # unittest.main() discovers TestCase classes by itself -- confirm whether
    # the explicit instantiation is needed at all.
    gscalertest = GScalerAlgorithmTest()
    unittest.main()
|
import sys
import os
import argparse
import shutil
import json
from riaps.gen.target.cpp import cppgen, sync_cpp
from riaps.gen.target.python import pygen, sync_python
from riaps.gen.target.capnp import capnpgen, sync_capnp
from multigen.jinja import JinjaTask, JinjaGenerator
def preprocess(model):
    """Flatten a RIAPS application model into the shape the generators expect.

    Returns a dict with the app name, the message names, and the component/
    device definitions split into 'cpp' and 'py' lists according to their
    declared implementation language ('c++'/'cpp'/'C++' vs anything else).
    """
    items = {
        "appname": model['name'],
        "py": [],
        "cpp": [],
        "messages": [m['name'] for m in model['messages']]
    }
    cppspec = ['c++', 'cpp', 'C++']
    for part in ['components', 'devices']:
        for comp_name, comp_params in model[part].items():
            # Copy so the caller's model dict is not mutated; the original
            # aliased comp_params and wrote 'is_device'/'appname' into it.
            newitem = dict(comp_params)
            newitem['is_device'] = (part == 'devices')
            newitem['appname'] = model['name']
            if comp_params['language'] in cppspec:
                items['cpp'].append(newitem)
            else:
                items['py'].append(newitem)
    return items
def main():
    """Entry point: parse arguments, load the JSON model, and run the
    C++/Python/capnp code generators."""
    # The f-string below (and the generators) require Python 3.6+; refuse to
    # continue on older interpreters instead of only printing a warning.
    if sys.version_info < (3, 6):
        print("riaps_gen requires python3.6 or above")
        sys.exit(1)
    model = {}
    parser = argparse.ArgumentParser()
    output_dir = ""
    parser.add_argument("-m", "--model", help="Model file path.", required=True)
    parser.add_argument("-o", "--output", help="Output directory. Default is the directory of the model file.")
    parser.add_argument("-w", "--overwrite", help="Overwrite the existing code.", action="store_true")
    args = parser.parse_args()
    # Load json model; the context manager guarantees the file is closed.
    try:
        with open(args.model, 'r') as fp:
            model = json.load(fp)
    except IOError as e:
        print("I/O error({0}): {1} {2}".format(e.errno, e.strerror, e.filename))
        os._exit(1)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt escape.
        print("Unexpected error:", sys.exc_info()[0])
        os._exit(1)
    model = preprocess(model)
    # If the output dir is not specified, the output is generated into the
    # {json file dir}/generated directory.
    if args.output:
        output_dir = args.output
    else:
        output_dir = os.path.join(os.path.abspath(os.path.dirname(args.model)), "generated")
    if output_dir == "./" or output_dir == ".":
        output_dir = os.path.dirname(os.path.abspath(args.model))
    # Back up the previously generated source so hand-written implementation
    # parts can be synced back after regeneration.
    if not args.overwrite:
        backup_dir = f"{output_dir}_bak"
        if os.path.isdir(backup_dir):
            shutil.rmtree(backup_dir)
        if os.path.isdir(output_dir):
            shutil.copytree(output_dir, backup_dir)
    if model['cpp']:
        gen = cppgen.CompGenerator()
        gen.generate(model, output_dir)
        if not args.overwrite:
            sync = sync_cpp.FileSync(model['cpp'])
            sync.sync_all(output_dir)
    if model['py']:
        gen = pygen.CompGenerator()
        gen.generate(model, output_dir)
        if not args.overwrite:
            sync = sync_python.FileSync(model['py'])
            sync.sync_code(output_dir)
    # always generate capnp
    gen = capnpgen.CapnpGenerator(model['cpp'], output_dir)
    gen.generate(model, output_dir)
    if not args.overwrite:
        sync = sync_capnp.FileSync(model)
        sync.sync_capnp(output_dir)
# Script entry point.
if __name__ == '__main__':
    main()
"""
Module with specialized instructions for matrix factorization, topic modeling and clustering activities.
By: Victor Pontes (victoraleff@gmail.com)
"""
from tqdm import tqdm
import numpy as np
import pandas as pd
from implicit.als import AlternatingLeastSquares
import faiss
from sklearn.metrics.pairwise import cosine_similarity
def sparse_info(matrix):
    """
    Print shape, number of stored values and memory usage of a scipy
    sparse matrix (CSR/CSC style, i.e. with data/indptr/indices buffers).

    Parameters
    ----------
    matrix: scipy sparse array
    """
    total_bytes = (matrix.data.nbytes
                   + matrix.indptr.nbytes
                   + matrix.indices.nbytes)
    megabytes = total_bytes / 1024 ** 2
    print('Shape: {} | Nonzero: {} | Memory Usage: {:.2f} Mb'.format(
        matrix.shape, matrix.nnz, megabytes))
def non_negative_als(X, n_components=300, alpha=1.0, n_iter=10, n_jobs=1):
    """
    Non-negative matrix factorization via Alternating Least Squares (ALS):
    runs single-iteration fits of the implicit-library ALS model
    (github.com/benfred/implicit) n_iter times, zeroing out any negative
    entries of both factor matrices between iterations.

    Parameters
    ----------
    X: matrix to be factored.
    n_components (int, optional): dimensionality of the latent space. Defaults to 300.
    alpha (float, optional): regularization strength. Defaults to 1.0.
    n_iter (int, optional): number of ALS iterations. Defaults to 10.
    n_jobs (int, optional): number of threads. Defaults to 1.

    Returns
    ----------
    item_factors: left factor (document_topics for a document_word input matrix)
    user_factors: right factor (word_topics for a document_word input matrix)
    """
    model = AlternatingLeastSquares(factors=n_components, regularization=alpha,
                                    iterations=1, num_threads=n_jobs)
    for _ in tqdm(range(n_iter)):
        model.fit(X, show_progress=False)
        # Project both factors back onto the non-negative orthant.
        np.clip(model.item_factors, 0.0, None, out=model.item_factors)
        np.clip(model.user_factors, 0.0, None, out=model.user_factors)
    return (model.item_factors, model.user_factors)
def nnmf_wals(X, n_components=300, alpha=1.0, n_jobs=4, n_iter=10, weight=50.0):
    """
    Non-negative matrix factorization via Weighted Alternating Least Squares
    (WALS): confidence weights 1 + weight * x are applied to the nonzero
    entries of X before running the non-negative ALS iterations.

    Parameters
    ----------
    X: sparse input matrix to be factored.
    n_components (int, optional): dimensionality of the latent space. Defaults to 300.
    alpha (float, optional): regularization strength. Defaults to 1.0.
    n_iter (int, optional): number of ALS iterations. Defaults to 10.
    n_jobs (int, optional): number of threads. Defaults to 4.
    weight (float, optional): confidence weight for observed entries. Defaults to 50.0.

    Returns
    ----------
    left factor: document_topics for a document_word input matrix
    right factor: word_topics for a document_word input matrix
    """
    wals_X = X.copy()
    wals_X.data = 1.0 + weight * wals_X.data
    # BUG FIX: `alpha` used to be accepted but never forwarded, so the
    # regularization argument was silently ignored.
    left_factor, right_factor = non_negative_als(
        wals_X, n_components=n_components, alpha=alpha,
        n_jobs=n_jobs, n_iter=n_iter)
    return left_factor, right_factor
def relevance_transform(pw_topic, lamb):
    """
    Transform word weights per topic into the relevance score proposed by
    Sievert & Shirley (2014):
    https://nlp.stanford.edu/events/illvi2014/papers/sievert-illvi2014.pdf

        relevance = lamb * log10(p(w|t)) + (1 - lamb) * log10(p(w|t) / p(w))

    Obs: can be used for any vector representation of words (linear map of
    the word space), e.g. a word-topic or word-cluster matrix.

    Parameters
    ----------
    pw_topic: the word-topics matrix (words on rows, topics on columns)
    lamb: the trade-off coefficient

    Returns
    -------
    word_topic_relevance: the transformed word-topics matrix
    """
    # Marginal word weight across all topics; epsilon guards the division.
    marginal = pw_topic.sum(axis=1) + 0.0000001
    lift = np.divide(pw_topic.T, marginal).T
    return lamb * np.log10(pw_topic) + (1 - lamb) * np.log10(lift)
def view_topics(word_topic, word_names, n_top_words=10, n_top_top_words=5, relevance=True, lamb=0.5):
    """
    View topics through their most important words.

    Obs: can be used for any vector representation of words (linear map of
    the word space), e.g. a word-topic or word-cluster matrix.

    Parameters
    ----------
    word_topic (numpy array or dense matrix): word embeddings from the topic model.
    word_names (list of strings): term names following the row indexing of word_topic.
    n_top_words (int, optional): number of top words in the dataframe. Defaults to 10.
    n_top_top_words (int, optional): number of top words in the resumed list.
        Defaults to 5. Must satisfy n_top_top_words < n_top_words.
    relevance (boolean, optional): if True rank words by relevance score,
        if False rank by their raw component weights.
    lamb (float, optional): relevance trade-off coefficient. Defaults to 0.5.

    Returns
    -------
    topwords_df: dataframe with n_top_words per topic
    top_top_words: pandas Series with n_top_top_words per topic
    """
    np.seterr(divide='ignore')  # log10(0) -> -inf is acceptable here
    if relevance:
        word_topic = relevance_transform(word_topic, lamb)
    ranked = np.argsort(word_topic, axis=0)        # ascending per topic
    top_idx = np.flipud(ranked[-n_top_words:])     # descending importance
    topwords_df = pd.DataFrame(top_idx).applymap(lambda i: word_names[i])
    top_top_words = (topwords_df.T.iloc[:, :n_top_top_words]
                     .fillna('')
                     .apply(lambda row: ' | '.join(row), axis=1))
    return topwords_df, top_top_words
def get_word_cluster_matrix(cluster_labels, tf_doc_word):
    """
    Build the word-cluster matrix whose entries are the frequency of each
    word over all documents assigned to each cluster.

    Parameters
    ----------
    cluster_labels (iterator): cluster label of each document, following the
        row indexing of tf_doc_word
    tf_doc_word (scipy sparse matrix): term-frequency matrix

    Returns
    -------
    word_cluster_tf (numpy array): word_cluster term-frequency matrix
    """
    index_frame = pd.DataFrame({
        'cluster_label': cluster_labels,
        'idx': np.arange(cluster_labels.shape[0]),
    })
    docs_per_cluster = index_frame.groupby('cluster_label')['idx'].apply(list)
    # Sum the term-frequency rows of every document in each cluster.
    tf_per_cluster = docs_per_cluster.apply(
        lambda doc_idx: tf_doc_word[doc_idx].sum(axis=0).tolist()[0])
    return np.array(tf_per_cluster.tolist()).T
class FaissNearestNeighbors:
    """
    Unsupervised learner for implementing neighbor searches.
    Implemented on top of the faiss library, following the
    sklearn.neighbors.NearestNeighbors scheme, as a faster alternative.
    """
    def __init__(self):
        "Initialize self."
        self.index = None
    def fit(self, X):
        """Fit the model using X as training data.

        Parameters
        ----------
        X (array-like): training data of shape (n_samples, n_features)
        """
        d = X.shape[1]
        self.index = faiss.IndexFlatL2(d)
        # faiss only accepts float32 input.
        self.index.add(X.astype(np.float32))
    def kneighbors(self, X=None, n_neighbors=5, return_distance=True):
        """
        Finds the K-neighbors of a point according to Euclidean distance.
        Returns indices of and distances to the neighbors of each point.

        Parameters
        ----------
        X (array-like): the query point or points.
        n_neighbors (int): number of neighbors to get. Defaults to 5.
        return_distance (boolean): if False, distances are not returned. Defaults to True.

        Returns
        -------
        distances (array): euclidean distances of the nearest points in the
            training matrix (only if return_distance is True)
        indices (array): indices of the nearest points in the training matrix.
        """
        distances, indices = self.index.search(X.astype(np.float32), n_neighbors)
        # BUG FIX: the original `return distances, indices if return_distance
        # else indices` bound the conditional only to `indices`, so the
        # distances tuple was returned even when return_distance was False.
        return (distances, indices) if return_distance else indices
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.1-9346c8cc45 (http://hl7.org/fhir/StructureDefinition/CapabilityStatement) on 2020-02-03.
# 2020, SMART Health IT.
import sys
from dataclasses import dataclass, field
from typing import ClassVar, Optional, List
from .backboneelement import BackboneElement
from .codeableconcept import CodeableConcept
from .coding import Coding
from .contactdetail import ContactDetail
from .domainresource import DomainResource
from .fhirdate import FHIRDate
from .fhirreference import FHIRReference
from .usagecontext import UsageContext
@dataclass
class CapabilityStatementMessagingEndpoint(BackboneElement):
    """ Where messages should be sent.
    An endpoint (network accessible address) to which messages and/or replies
    are to be sent.
    """
    resource_type: ClassVar[str] = "CapabilityStatementMessagingEndpoint"
    # Fields typed without Optional default to None only to satisfy dataclass
    # field ordering; presumably required per FHIR — TODO confirm against the
    # generator's validation layer.
    protocol: Coding = None
    address: str = None
@dataclass
class CapabilityStatementMessagingSupportedMessage(BackboneElement):
    """ Messages supported by this system.
    References to message definitions for messages this system can send or
    receive.
    """
    resource_type: ClassVar[str] = "CapabilityStatementMessagingSupportedMessage"
    # Non-Optional fields default to None only for dataclass ordering;
    # presumably required per FHIR — TODO confirm.
    mode: str = None
    definition: str = None
@dataclass
class CapabilityStatementRestResourceInteraction(BackboneElement):
    """ What operations are supported?.
    Identifies a restful operation supported by the solution.
    """
    resource_type: ClassVar[str] = "CapabilityStatementRestResourceInteraction"
    # `code` defaults to None only for dataclass ordering; presumably
    # required per FHIR — TODO confirm.
    code: str = None
    documentation: Optional[str] = None
@dataclass
class CapabilityStatementRestResourceSearchParam(BackboneElement):
    """ Search parameters supported by implementation.
    Search parameters for implementations to support and/or make use of -
    either references to ones defined in the specification, or additional ones
    defined for/by the implementation.
    """
    resource_type: ClassVar[str] = "CapabilityStatementRestResourceSearchParam"
    # `name` and `type` default to None only for dataclass ordering;
    # presumably required per FHIR — TODO confirm.
    name: str = None
    definition: Optional[str] = None
    type: str = None
    documentation: Optional[str] = None
@dataclass
class CapabilityStatementRestResourceOperation(BackboneElement):
    """ Definition of a resource operation.
    Definition of an operation or a named query together with its parameters
    and their meaning and type. Consult the definition of the operation for
    details about how to invoke the operation, and the parameters.
    """
    resource_type: ClassVar[str] = "CapabilityStatementRestResourceOperation"
    # `name` and `definition` default to None only for dataclass ordering;
    # presumably required per FHIR — TODO confirm.
    name: str = None
    definition: str = None
    documentation: Optional[str] = None
@dataclass
class CapabilityStatementRestSecurity(BackboneElement):
    """ Information about security of implementation.
    Information about security implementation from an interface perspective -
    what a client needs to know.
    """
    resource_type: ClassVar[str] = "CapabilityStatementRestSecurity"
    # All fields optional: absence means the aspect is unspecified.
    cors: Optional[bool] = None
    service: Optional[List[CodeableConcept]] = None
    description: Optional[str] = None
@dataclass
class CapabilityStatementRestResource(BackboneElement):
    """ Resource served on the REST interface.
    A specification of the restful capabilities of the solution for a specific
    resource type.
    """
    resource_type: ClassVar[str] = "CapabilityStatementRestResource"
    # `type` defaults to None only for dataclass ordering; presumably
    # required per FHIR — TODO confirm. All remaining fields are optional.
    type: str = None
    profile: Optional[str] = None
    supportedProfile: Optional[List[str]] = None
    documentation: Optional[str] = None
    interaction: Optional[List[CapabilityStatementRestResourceInteraction]] = None
    versioning: Optional[str] = None
    readHistory: Optional[bool] = None
    updateCreate: Optional[bool] = None
    conditionalCreate: Optional[bool] = None
    conditionalRead: Optional[str] = None
    conditionalUpdate: Optional[bool] = None
    conditionalDelete: Optional[str] = None
    referencePolicy: Optional[List[str]] = None
    searchInclude: Optional[List[str]] = None
    searchRevInclude: Optional[List[str]] = None
    searchParam: Optional[List[CapabilityStatementRestResourceSearchParam]] = None
    operation: Optional[List[CapabilityStatementRestResourceOperation]] = None
@dataclass
class CapabilityStatementRestInteraction(BackboneElement):
    """ What operations are supported?.
    A specification of restful operations supported by the system.
    """
    resource_type: ClassVar[str] = "CapabilityStatementRestInteraction"
    # `code` defaults to None only for dataclass ordering; presumably
    # required per FHIR — TODO confirm.
    code: str = None
    documentation: Optional[str] = None
@dataclass
class CapabilityStatementSoftware(BackboneElement):
    """ Software that is covered by this capability statement.
    Software that is covered by this capability statement. It is used when the
    capability statement describes the capabilities of a particular software
    version, independent of an installation.
    """
    resource_type: ClassVar[str] = "CapabilityStatementSoftware"
    # `name` defaults to None only for dataclass ordering; presumably
    # required per FHIR — TODO confirm.
    name: str = None
    version: Optional[str] = None
    releaseDate: Optional[FHIRDate] = None
@dataclass
class CapabilityStatementImplementation(BackboneElement):
    """ If this describes a specific instance.
    Identifies a specific implementation instance that is described by the
    capability statement - i.e. a particular installation, rather than the
    capabilities of a software program.
    """
    resource_type: ClassVar[str] = "CapabilityStatementImplementation"
    # `description` defaults to None only for dataclass ordering; presumably
    # required per FHIR — TODO confirm.
    description: str = None
    url: Optional[str] = None
    custodian: Optional[FHIRReference] = None
@dataclass
class CapabilityStatementRest(BackboneElement):
    """ If the endpoint is a RESTful one.
    A definition of the restful capabilities of the solution, if any.
    """
    resource_type: ClassVar[str] = "CapabilityStatementRest"
    # `mode` defaults to None only for dataclass ordering; presumably
    # required per FHIR — TODO confirm.
    mode: str = None
    documentation: Optional[str] = None
    security: Optional[CapabilityStatementRestSecurity] = None
    resource: Optional[List[CapabilityStatementRestResource]] = None
    interaction: Optional[List[CapabilityStatementRestInteraction]] = None
    searchParam: Optional[List[CapabilityStatementRestResourceSearchParam]] = None
    operation: Optional[List[CapabilityStatementRestResourceOperation]] = None
    compartment: Optional[List[str]] = None
@dataclass
class CapabilityStatementMessaging(BackboneElement):
    """ If messaging is supported.
    A description of the messaging capabilities of the solution.
    """
    resource_type: ClassVar[str] = "CapabilityStatementMessaging"
    # All fields optional: absence means the aspect is unspecified.
    endpoint: Optional[List[CapabilityStatementMessagingEndpoint]] = None
    reliableCache: Optional[int] = None
    documentation: Optional[str] = None
    supportedMessage: Optional[List[CapabilityStatementMessagingSupportedMessage]] = None
@dataclass
class CapabilityStatementDocument(BackboneElement):
    """ Document definition.
    A document definition.
    """
    resource_type: ClassVar[str] = "CapabilityStatementDocument"
    # `mode` and `profile` default to None only for dataclass ordering;
    # presumably required per FHIR — TODO confirm.
    mode: str = None
    documentation: Optional[str] = None
    profile: str = None
@dataclass
class CapabilityStatement(DomainResource):
    """ A statement of system capabilities.
    A Capability Statement documents a set of capabilities (behaviors) of a
    FHIR Server for a particular version of FHIR that may be used as a
    statement of actual server functionality or a statement of required or
    desired server implementation.
    """
    resource_type: ClassVar[str] = "CapabilityStatement"
    # Fields typed without Optional (status, date, kind, fhirVersion, format)
    # default to None only for dataclass ordering; presumably required per
    # FHIR — TODO confirm against the generator's validation layer.
    url: Optional[str] = None
    version: Optional[str] = None
    name: Optional[str] = None
    title: Optional[str] = None
    status: str = None
    experimental: Optional[bool] = None
    date: FHIRDate = None
    publisher: Optional[str] = None
    contact: Optional[List[ContactDetail]] = None
    description: Optional[str] = None
    useContext: Optional[List[UsageContext]] = None
    jurisdiction: Optional[List[CodeableConcept]] = None
    purpose: Optional[str] = None
    copyright: Optional[str] = None
    kind: str = None
    instantiates: Optional[List[str]] = None
    imports: Optional[List[str]] = None
    software: Optional[CapabilityStatementSoftware] = None
    implementation: Optional[CapabilityStatementImplementation] = None
    fhirVersion: str = None
    # A mutable default must go through default_factory.
    format: List[str] = field(default_factory=list)
    patchFormat: Optional[List[str]] = None
    implementationGuide: Optional[List[str]] = None
    rest: Optional[List[CapabilityStatementRest]] = None
    messaging: Optional[List[CapabilityStatementMessaging]] = None
    document: Optional[List[CapabilityStatementDocument]] = None
from typing import Optional, Tuple, Callable, Dict
import numpy as np
from pararealml.core.constrained_problem import ConstrainedProblem
from pararealml.core.constraint import apply_constraints_along_last_axis
from pararealml.core.differential_equation import Lhs
from pararealml.core.initial_value_problem import InitialValueProblem
from pararealml.core.operator import Operator, discretize_time_domain
from pararealml.core.operators.fdm.numerical_differentiator import \
NumericalDifferentiator
from pararealml.core.operators.fdm.fdm_symbol_mapper import FDMSymbolMapper, \
FDMSymbolMapArg
from pararealml.core.operators.fdm.numerical_integrator import \
NumericalIntegrator
from pararealml.core.solution import Solution
# Maps a time point t (None for the time-invariant case) to the pair of
# (y boundary constraints, d_y/d_n boundary constraints) arrays.
BoundaryConstraintsCache = Dict[
    Optional[float],
    Tuple[Optional[np.ndarray], Optional[np.ndarray]]
]
# Maps a time point t to the combined y vertex constraints array.
YConstraintsCache = Dict[Optional[float], Optional[np.ndarray]]
class FDMOperator(Operator):
    """
    A finite difference method based conventional differential equation solver.
    """

    def __init__(
            self,
            integrator: NumericalIntegrator,
            differentiator: NumericalDifferentiator,
            d_t: float):
        """
        :param integrator: the differential equation integrator to use
        :param differentiator: the differentiator to use
        :param d_t: the temporal step size to use
        """
        if d_t <= 0.:
            # BUG FIX: the message used to read 'time step sizemust'.
            raise ValueError('time step size must be greater than 0')

        self._integrator = integrator
        self._differentiator = differentiator
        self._d_t = d_t

    @property
    def d_t(self) -> float:
        return self._d_t

    @property
    def vertex_oriented(self) -> Optional[bool]:
        return True

    def solve(
            self,
            ivp: InitialValueProblem,
            parallel_enabled: bool = True) -> Solution:
        """
        Solves the initial value problem by stepping through the discretized
        time domain with the configured integrator and differentiator.

        :param ivp: the initial value problem to solve
        :param parallel_enabled: unused; the FDM solver is sequential in time
        :return: the vertex-oriented solution of the initial value problem
        """
        cp = ivp.constrained_problem
        t = discretize_time_domain(ivp.t_interval, self._d_t)
        y = np.empty((len(t) - 1,) + cp.y_vertices_shape)

        y_i = ivp.initial_condition.discrete_y_0(True)
        if not cp.are_all_boundary_conditions_static:
            # With dynamic boundary conditions, the initial condition must
            # also be constrained at the first time point.
            init_boundary_constraints = cp.create_boundary_constraints(
                True, t[0])
            init_y_constraints = cp.create_y_vertex_constraints(
                init_boundary_constraints[0])
            apply_constraints_along_last_axis(init_y_constraints, y_i)

        y_constraints_cache: YConstraintsCache = {}
        boundary_constraints_cache: BoundaryConstraintsCache = {}
        y_next = self._create_y_next_function(
            ivp, y_constraints_cache, boundary_constraints_cache)

        for i, t_i in enumerate(t[:-1]):
            y[i] = y_i = y_next(t_i, y_i)
            # Dynamic boundary conditions invalidate the caches every step.
            if not cp.are_all_boundary_conditions_static:
                y_constraints_cache.clear()
                boundary_constraints_cache.clear()

        return Solution(ivp, t[1:], y, vertex_oriented=True, d_t=self._d_t)

    def _create_y_next_function(
            self,
            ivp: InitialValueProblem,
            y_constraints_cache: YConstraintsCache,
            boundary_constraints_cache: BoundaryConstraintsCache
    ) -> Callable[[float, np.ndarray], np.ndarray]:
        """
        Creates a function that returns the value of y(t + d_t) given t and y.

        :param ivp: the initial value problem
        :param boundary_constraints_cache: a cache for boundary constraints for
            different t values
        :param y_constraints_cache: a cache for overall y constraints for
            different t values
        :return: the function defining the value of y at the next time point
        """
        cp = ivp.constrained_problem
        eq_sys = cp.differential_equation.symbolic_equation_system
        symbol_mapper = FDMSymbolMapper(cp, self._differentiator)

        d_y_over_d_t_eq_indices = \
            eq_sys.equation_indices_by_type(Lhs.D_Y_OVER_D_T)
        y_eq_indices = eq_sys.equation_indices_by_type(Lhs.Y)
        y_laplacian_eq_indices = \
            eq_sys.equation_indices_by_type(Lhs.Y_LAPLACIAN)

        y_constraint_func, d_y_constraint_func = \
            self._create_constraint_functions(
                cp, y_constraints_cache, boundary_constraints_cache)

        def d_y_over_d_t_function(t: float, y: np.ndarray) -> np.ndarray:
            # Only the D_Y_OVER_D_T equations contribute to the derivative;
            # all other components remain zero.
            d_y_over_d_t = np.zeros(y.shape)
            d_y_over_d_t_rhs = symbol_mapper.map_concatenated(
                FDMSymbolMapArg(t, y, d_y_constraint_func), Lhs.D_Y_OVER_D_T)
            d_y_over_d_t[..., d_y_over_d_t_eq_indices] = d_y_over_d_t_rhs
            return d_y_over_d_t

        def y_next_function(t: float, y: np.ndarray) -> np.ndarray:
            y_next = self._integrator.integral(
                y, t, self._d_t, d_y_over_d_t_function, y_constraint_func)

            # Algebraic (Lhs.Y) equations overwrite the integrated values.
            if len(y_eq_indices):
                y_constraint = y_constraint_func(t + self._d_t)
                y_constraint = None if y_constraint is None \
                    else y_constraint[y_eq_indices]

                y_rhs = symbol_mapper.map_concatenated(
                    FDMSymbolMapArg(t, y, d_y_constraint_func), Lhs.Y)

                y_next[..., y_eq_indices] = \
                    apply_constraints_along_last_axis(y_constraint, y_rhs)

            # Lhs.Y_LAPLACIAN equations are solved via the anti-Laplacian.
            if len(y_laplacian_eq_indices):
                y_constraint = y_constraint_func(t + self._d_t)
                y_constraint = None if y_constraint is None \
                    else y_constraint[y_laplacian_eq_indices]

                d_y_constraint = d_y_constraint_func(t + self._d_t)
                d_y_constraint = None if d_y_constraint is None \
                    else d_y_constraint[:, y_laplacian_eq_indices]

                y_laplacian_rhs = symbol_mapper.map_concatenated(
                    FDMSymbolMapArg(t, y, d_y_constraint_func),
                    Lhs.Y_LAPLACIAN)

                y_next[..., y_laplacian_eq_indices] = \
                    self._differentiator.anti_laplacian(
                        y_laplacian_rhs,
                        cp.mesh,
                        y_constraint,
                        d_y_constraint)

            return y_next

        return y_next_function

    @staticmethod
    def _create_constraint_functions(
            cp: ConstrainedProblem,
            y_constraints_cache: YConstraintsCache,
            boundary_constraints_cache: BoundaryConstraintsCache
    ) -> Tuple[
        Callable[[float], Optional[np.ndarray]],
        Callable[[float], Optional[np.ndarray]]
    ]:
        """
        Creates two functions that return the constraints on y and the boundary
        constraints on the spatial derivatives of y with respect to the normals
        of the boundaries respectively.

        :param cp: the constrained problems to create the constraint functions
            for
        :param boundary_constraints_cache: a cache for boundary constraints for
            different t values
        :param y_constraints_cache: a cache for overall y constraints for
            different t values
        :return: a tuple of two functions that return the two different
            constraints given t
        """
        # No spatial dimensions means no boundaries, hence no constraints.
        if not cp.differential_equation.x_dimension:
            return lambda _: None, lambda _: None

        if cp.are_all_boundary_conditions_static:
            return lambda _: cp.static_y_vertex_constraints, \
                lambda _: cp.static_boundary_vertex_constraints[1]

        def d_y_constraints_function(
                t: Optional[float]) -> Optional[np.ndarray]:
            if t in boundary_constraints_cache:
                return boundary_constraints_cache[t][1]

            boundary_constraints = cp.create_boundary_constraints(True, t)
            boundary_constraints_cache[t] = boundary_constraints
            return boundary_constraints[1]

        if not cp.are_there_boundary_conditions_on_y:
            return lambda _: cp.static_y_vertex_constraints, \
                d_y_constraints_function

        def y_constraints_function(
                t: Optional[float]) -> Optional[np.ndarray]:
            if t in y_constraints_cache:
                return y_constraints_cache[t]

            if t in boundary_constraints_cache:
                boundary_constraints = boundary_constraints_cache[t]
            else:
                boundary_constraints = \
                    cp.create_boundary_constraints(True, t)
                boundary_constraints_cache[t] = boundary_constraints

            y_constraints = \
                cp.create_y_vertex_constraints(boundary_constraints[0])
            y_constraints_cache[t] = y_constraints
            return y_constraints

        return y_constraints_function, d_y_constraints_function
|
from django import forms
from django.contrib.auth.models import User
from app.models import *
class LoginForm(forms.Form):
    """Login form: validates that the submitted username exists."""
    username = forms.CharField(max_length=32, min_length=5, label="用户名")
    password = forms.CharField(max_length=32, min_length=5, label="密码", widget=forms.PasswordInput())
    def clean(self):
        # BUG FIX: use super().clean() and .get() instead of indexing
        # cleaned_data directly — when the username field itself fails
        # validation, 'username' is absent and indexing raised KeyError.
        cleaned_data = super().clean()
        username = cleaned_data.get('username')
        if username and not User.objects.filter(username=username).exists():
            raise forms.ValidationError('用户名不存在,请检查您的用户名')
        return cleaned_data
class RegisterForm(forms.Form):
    """Registration form: rejects usernames that are already taken."""
    username = forms.CharField(max_length=32, min_length=5, label="用户名")
    password = forms.CharField(max_length=32, min_length=5, label="密码", widget=forms.PasswordInput())
    email = forms.EmailField(label="邮箱")
    def clean(self):
        # BUG FIX: use super().clean() and .get() instead of indexing
        # cleaned_data directly — when the username field itself fails
        # validation, 'username' is absent and indexing raised KeyError.
        cleaned_data = super().clean()
        username = cleaned_data.get('username')
        if username and User.objects.filter(username=username).exists():
            raise forms.ValidationError('用户名已存在')
        return cleaned_data
class FileForm(forms.Form):
    # Image upload form; the `accept` attribute only filters the browser's
    # file picker — no server-side content-type validation happens here.
    file = forms.FileField(label='图片文件', widget=forms.FileInput(attrs={'accept':'image/*'}))
class FilePathForm(forms.Form):
    # Accepts an image location as a plain string (presumably a URL or a
    # filesystem path — confirm against the consuming view).
    file_path = forms.CharField(max_length=256, label='图片地址')
from .enet.enet import ENet
# Public API of this package.
__all__ = ["ENet"]
|
# Generated by Django 3.1.3 on 2021-01-25 21:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: switches Profile.profile to an ImageField
    # storing uploads under MEDIA_ROOT/images/.
    dependencies = [
        ('awward_app', '0004_auto_20210125_2054'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='profile',
            field=models.ImageField(upload_to='images/'),
        ),
    ]
|
#!/usr/bin/env python2
import os
import re
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
ROOT = os.path.abspath(os.path.dirname(__file__))  # directory containing this setup.py
VERSION_RE = re.compile(r'''__version__ = ['"]([0-9.]+)['"]''')  # matches e.g. __version__ = '1.2.3'
class PyTest(TestCommand):
    # setuptools `python setup.py test` command that forwards the
    # --pytest-args option string to pytest.
    user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.pytest_args = ''
    def run_tests(self):
        import shlex
        # import here, cause outside the eggs aren't loaded
        import pytest
        # Exit with pytest's return code so CI sees test failures.
        errno = pytest.main(shlex.split(self.pytest_args))
        sys.exit(errno)
install_requires = ['pyside']
data_files = []
# Platform-specific extras: pywin32 on Windows, python-xlib on posix;
# the desktop entry, icon and license are only installed on posix.
if os.name == 'nt':
    install_requires.append('pywin32')
elif os.name == 'posix':
    install_requires.append('python-xlib')
    data_files.extend([
        ('/usr/share/applications', ['clipmanager.desktop']),
        ('/usr/share/pixmaps', ['data/clipmanager.png']),
        ('/usr/share/licenses/clipmanager', ['LICENSE']),
    ])
def get_version():
    """Return the package version parsed from clipmanager/__init__.py."""
    # Use a context manager so the file handle is closed deterministically
    # (the original left it open until garbage collection).
    with open(os.path.join(ROOT, 'clipmanager', '__init__.py')) as fp:
        return VERSION_RE.search(fp.read()).group(1)
# BUG FIX: the two adjacent string literals used to concatenate to
# '...clipmanagerarchive/...' — a separating '/' was missing.
download_url = 'https://github.com/scottwernervt/clipmanager/' \
               'archive/%s.tar.gz' % get_version()
# Package metadata and build configuration (Python 2 / PySide era project).
setup(
    name='clipmanager',
    version=get_version(),
    author='Scott Werner',
    author_email='scott.werner.vt@gmail.com',
    description="Manage the system's clipboard history.",
    long_description=open('README.rst').read(),
    license='BSD',
    platforms='Posix; Windows',
    keywords=' '.join([
        'clipboard',
        'manager',
        'history',
    ]),
    url='https://github.com/scottwernervt/clipmanager',
    download_url=download_url,
    scripts=['bin/clipmanager'],
    install_requires=install_requires,
    extras_require={
        'win32': [
            'PyInstaller',  # GPL
        ],
    },
    setup_requires=[
        'pytest-runner',  # MIT
    ],
    tests_require=[
        'pytest',  # MIT
        'pytest-qt',  # MIT
    ],
    test_suite='tests',
    packages=find_packages(exclude=['contrib', 'tests*']),
    include_package_data=True,
    data_files=data_files,
    cmdclass={'test': PyTest},
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Win32 (MS Windows)',
        'Environment :: X11 Applications',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Topic :: Utilities',
    ]
)
|
import tempfile, shutil, os, datetime, resource, sys, time
from s3ts.treestore import TreeStore
from s3ts.filestore import LocalFileStore
from s3ts.config import TreeStoreConfig
class TransferStats:
    # Accumulates byte counters for an upload/download run and reports
    # elapsed wall-clock plus CPU usage when done. (Python 2: note the
    # print statements.)
    def __init__(self):
        self.bytesUploaded = 0
        self.bytesCached = 0
        # Snapshot usage/time at construction; done() diffs against these.
        self.rusage0 = resource.getrusage(resource.RUSAGE_SELF)
        self.time0 = time.time()
    def progress(self, bytesUploaded, bytesCached):
        # Progress callback handed to TreeStore.upload/download.
        self.bytesUploaded += bytesUploaded
        self.bytesCached += bytesCached
    def done(self):
        self.rusage1 = resource.getrusage(resource.RUSAGE_SELF)
        self.time1 = time.time()
        print "{:,} bytes transferred".format(self.bytesUploaded)
        print "{:,} bytes cached".format(self.bytesCached)
        print "{} seconds elapsed time".format((self.time1 - self.time0))
        print "{} seconds user time".format((self.rusage1.ru_utime - self.rusage0.ru_utime))
        print "{} seconds system time".format((self.rusage1.ru_stime - self.rusage0.ru_stime))
class InstallStats:
    # Accumulates the byte counter for an install run and reports elapsed
    # time, throughput and CPU usage when done. (Python 2 print statements.)
    def __init__(self):
        self.bytesInstalled = 0
        # Snapshot usage/time at construction; done() diffs against these.
        self.rusage0 = resource.getrusage(resource.RUSAGE_SELF)
        self.time0 = time.time()
    def progress(self, bytesInstalled):
        # Progress callback handed to TreeStore.install.
        self.bytesInstalled += bytesInstalled
    def done(self):
        self.rusage1 = resource.getrusage(resource.RUSAGE_SELF)
        self.time1 = time.time()
        print "{:,} bytes copied".format(self.bytesInstalled)
        print "{} seconds elapsed time".format((self.time1 - self.time0))
        print "{:,} MB per second".format(int((self.bytesInstalled/(self.time1 - self.time0))/1e6))
        print "{} seconds user time".format((self.rusage1.ru_utime - self.rusage0.ru_utime))
        print "{} seconds system time".format((self.rusage1.ru_stime - self.rusage0.ru_stime))
def runtest(testContent):
    # Exercises a full TreeStore round trip (upload, repeat upload, download,
    # install) against temporary local stores, printing transfer statistics.
    print "----------------------------------------------------------------------"
    print "* path", testContent
    dir = tempfile.mkdtemp()
    try:
        store = LocalFileStore(os.path.join(dir, 'store'))
        cache = LocalFileStore(os.path.join(dir, 'cache'))
        # 1 MB chunk size; the boolean presumably toggles compression —
        # TODO confirm against TreeStoreConfig's signature.
        config = TreeStoreConfig(1000000, True)
        ts = TreeStore.create(store, cache, config)
        creationTime = datetime.datetime.now()
        print "* Initial upload"
        stats = TransferStats()
        ts.upload( "test", "", creationTime, testContent, stats.progress)
        stats.done()
        print
        # A second upload of identical content should mostly hit the cache.
        print "* Repeat upload"
        stats = TransferStats()
        ts.upload( "test", "", creationTime, testContent, stats.progress)
        stats.done()
        print
        print "* Download"
        stats = TransferStats()
        pkg = ts.find("test")
        ts.download(pkg, stats.progress)
        stats.done()
        print
        print "* Clean installation"
        installDir = os.path.join(dir,"install")
        os.makedirs(installDir)
        stats = InstallStats()
        pkg = ts.find("test")
        ts.install(pkg, installDir, stats.progress)
        stats.done()
    finally:
        # Always remove the scratch directory, even if a step fails.
        shutil.rmtree(dir)
def main():
    # Each command line argument is a content path to round-trip through
    # the tree store.
    for testContent in sys.argv[1:]:
        runtest(testContent)
# Script entry point.
if __name__ == '__main__':
    main()
|
from tqdm import *
import numpy
import dataset
from nets import simpleNet
# Reward-driven training loop over MNIST (Python 2 script — note the print
# statement at the end). mnist[0] is presumably the (inputs, targets)
# training split — TODO confirm against dataset.load_mnist().
mnist = dataset.load_mnist()
nn = simpleNet(architecture=numpy.array([784 ,100, 10]))
for epoch in range(10):
    success = numpy.zeros(shape=(mnist[0][0].shape[0],))
    for example in tqdm(range(mnist[0][0].shape[0])): #
        input = mnist[0][0][example,:]
        target = mnist[0][1][example]
        output = nn.forward(input)
        # Small negative reward by default; positive reward on a correct
        # prediction — presumably a reinforcement-style update in backward().
        reward = -0.00001
        if output == target:
            reward = 0.0001
            success[example] += 1
        nn.backward(reward)
    # Per-epoch accuracy.
    print numpy.sum(success) / success.shape[0]
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 16 16:25:21 2021
@author: Andreas Traut
Problem:
Assume that we have a bag of capacity c, and that we have n items, each of
which has a weight and a profit. We may choose items to be put
into this bag, and the aim is to maximize the total profit. At the same time we are
not allowed to breach the capacity of the bag, meaning that the sum of
the weights of the chosen items must be smaller than or equal to the capacity.
Solution:
This problem can be solved with dynamic programming.
Further references for learning more about this topic:
https://www.geeksforgeeks.org/0-1-knapsack-problem-dp-10/
https://medium.com/@fabianterh/how-to-solve-the-knapsack-problem-with-dynamic-programming-eb88c706d3cf
"""
#%% Define the test cases
# Each test case maps an 'input' (bag capacity, item weights, item profits)
# to the expected maximum achievable profit in 'output'.
test = {
    'input': {
        'capacity': 2000,
        'weights': [630, 780, 1400, 480],
        'profits': [8, 6, 12, 7]
    },
    'output': 21
}
tests = []
tests.append(test)
# Every item is heavier than the capacity: nothing fits, profit 0.
tests.append({
    'input': {
        'capacity': 3,
        'weights': [4, 5, 6],
        'profits': [1, 2, 3]
    },
    'output': 0
})
# Only the lightest item fits.
tests.append({
    'input': {
        'capacity': 4,
        'weights': [4, 5, 1],
        'profits': [1, 2, 3]
    },
    'output': 3
})
tests.append({
    'input': {
        'capacity': 170,
        'weights': [41, 50, 49, 59, 55, 57, 60],
        'profits': [442, 525, 511, 593, 546, 564, 617]
    },
    'output': 1735
})
# All items fit: the answer is the total profit.
tests.append({
    'input': {
        'capacity': 15,
        'weights': [4, 5, 6],
        'profits': [1, 2, 3]
    },
    'output': 6
})
tests.append({
    'input': {
        'capacity': 15,
        'weights': [4, 5, 1, 3, 2, 5],
        'profits': [2, 3, 1, 5, 4, 7]
    },
    'output': 19
})
tests.append({
    'input': {
        'capacity': 165,
        'weights': [23, 31, 29, 44, 53, 38, 63, 85, 89, 82],
        'profits': [92, 57, 49, 68, 60, 43, 67, 84, 87, 72]
    },
    'output': 309
})
#%% Define the algorithm
def knapsack_dp(capacity, weights, profits):
    """Solve the 0/1 knapsack problem with bottom-up dynamic programming.

    Uses a single rolling row of size ``capacity + 1`` instead of the full
    (n+1) x (capacity+1) table; iterating capacities in reverse ensures
    every item is used at most once.

    :param capacity: maximum total weight the bag can hold
    :param weights: weight of each item
    :param profits: profit of each item (same order as ``weights``)
    :return: the maximum achievable total profit
    """
    best = [0] * (capacity + 1)
    for weight, profit in zip(weights, profits):
        # Descend so that best[c - weight] still refers to the state
        # *without* the current item (0/1 semantics, not unbounded).
        for c in range(capacity, weight - 1, -1):
            best[c] = max(best[c], profit + best[c - weight])
    return best[capacity]
#%% Perform the tests
# On the first test case only
# NOTE: `jovian` is a third-party helper; evaluate_test_case runs the
# function against one {'input', 'output'} dict and prints the comparison.
from jovian.pythondsa import evaluate_test_case
evaluate_test_case(knapsack_dp, test)
#%%
# On all test cases
from jovian.pythondsa import evaluate_test_cases
evaluate_test_cases(knapsack_dp, tests)
# -*- coding: utf-8 -*-
"""Query builder."""
import json
import logging
from collections import Iterable
from .exc import QueryMissingNetworksError
from .seeding import Seeding
from ...manager.models import Node
from ...struct.pipeline import Pipeline
__all__ = [
'Query',
]
log = logging.getLogger(__name__)
class Query:
    """Represents a query over a network store.

    A query is made of three parts: the database identifiers of the networks
    whose union forms the "universe" graph, a :class:`Seeding` describing how
    a sub-graph is induced over the universe, and a :class:`Pipeline` of
    transformations applied to the seeded graph.
    """

    def __init__(self, network_ids=None, seeding=None, pipeline=None):
        """Build a query.

        :param network_ids: Database network identifier(s)
        :type network_ids: None or int or iter[int]
        :type seeding: Optional[Seeding]
        :type pipeline: Optional[Pipeline]
        :raises TypeError: if ``network_ids`` is neither None, an int, nor an
            iterable of ints, or if ``seeding``/``pipeline`` have wrong types
        """
        # Use collections.abc: the ``collections.Iterable`` alias imported at
        # the top of this module was removed in Python 3.10 (NOTE(review):
        # the module-level import should eventually be migrated too).
        from collections.abc import Iterable

        if not network_ids:
            self.network_ids = []
        elif isinstance(network_ids, int):
            self.network_ids = [network_ids]
        elif isinstance(network_ids, Iterable):
            network_ids = list(network_ids)

            # Validate every entry up front so a bad iterable fails fast.
            for network_id in network_ids:
                if not isinstance(network_id, int):
                    raise TypeError(network_ids)

            self.network_ids = network_ids
        else:
            raise TypeError(network_ids)

        if seeding is not None and not isinstance(seeding, Seeding):
            raise TypeError('Not a Seeding: {}'.format(seeding))
        self.seeding = seeding or Seeding()

        if pipeline is not None and not isinstance(pipeline, Pipeline):
            raise TypeError('Not a pipeline: {}'.format(pipeline))
        self.pipeline = pipeline or Pipeline()

    def append_network(self, network_id):
        """Add a network to this query.

        :param int network_id: The database identifier of the network
        :returns: self for fluid API
        :rtype: Query
        """
        self.network_ids.append(network_id)
        return self

    def append_seeding_induction(self, nodes):
        """Add a seed induction method.

        :param list[tuple or Node or BaseEntity] nodes: A list of PyBEL node tuples
        :returns: seeding container for fluid API
        :rtype: Seeding
        """
        return self.seeding.append_induction(nodes)

    def append_seeding_neighbors(self, nodes):
        """Add a seed by neighbors.

        :param nodes: A list of PyBEL node tuples
        :type nodes: BaseEntity or iter[BaseEntity]
        """
        return self.seeding.append_neighbors(nodes)

    def append_seeding_annotation(self, annotation, values):
        """Add a seed induction method for a single annotation's values.

        :param str annotation: The annotation to filter by
        :param set[str] values: The values of the annotation to keep
        """
        return self.seeding.append_annotation(annotation, values)

    def append_seeding_sample(self, **kwargs):
        """Add seed induction methods.

        Kwargs can have ``number_edges`` or ``number_seed_nodes``.
        """
        return self.seeding.append_sample(**kwargs)

    def append_pipeline(self, name, *args, **kwargs):
        """Add an entry to the pipeline. Defers to :meth:`pybel_tools.pipeline.Pipeline.append`.

        :param name: The name of the function
        :type name: str or types.FunctionType
        :return: This pipeline for fluid query building
        :rtype: Pipeline
        """
        return self.pipeline.append(name, *args, **kwargs)

    def __call__(self, manager):
        """Run this query and return the resulting BEL graph with :meth:`Query.run`.

        :param pybel.manager.Manager manager: A cache manager
        :rtype: Optional[pybel.BELGraph]
        """
        return self.run(manager)

    def run(self, manager):
        """Run this query and return the resulting BEL graph.

        :param manager: A cache manager
        :rtype: Optional[pybel.BELGraph]
        """
        universe = self._get_universe(manager)
        graph = self.seeding.run(universe)
        return self.pipeline.run(graph, universe=universe)

    def _get_universe(self, manager):
        """Build the union graph of all networks referenced by this query."""
        if not self.network_ids:
            raise QueryMissingNetworksError('can not run query without network identifiers')

        log.debug('query universe consists of networks: %s', self.network_ids)

        universe = manager.get_graph_by_ids(self.network_ids)
        log.debug('query universe has %d nodes/%d edges',
                  universe.number_of_nodes(), universe.number_of_edges())

        return universe

    def to_json(self):
        """Return this query as a JSON object.

        :rtype: dict
        """
        rv = {
            'network_ids': self.network_ids,
        }

        # Only serialize the parts that actually carry content.
        if self.seeding:
            rv['seeding'] = self.seeding.to_json()

        if self.pipeline:
            rv['pipeline'] = self.pipeline.to_json()

        return rv

    def dump(self, file, **kwargs):
        """Dump this query to a file as JSON."""
        json.dump(self.to_json(), file, **kwargs)

    def dumps(self, **kwargs):
        """Dump this query to a string as JSON.

        :rtype: str
        """
        return json.dumps(self.to_json(), **kwargs)

    @staticmethod
    def from_json(data):
        """Load a query from a JSON dictionary.

        :param dict data: A JSON dictionary
        :rtype: Query
        :raises QueryMissingNetworksError: if the "network_ids" key is missing
        """
        network_ids = data.get('network_ids')
        if network_ids is None:
            raise QueryMissingNetworksError('query JSON did not have key "network_ids"')

        seeding_data = data.get('seeding')
        seeding = (
            Seeding(seeding_data)
            if seeding_data is not None else
            None
        )

        pipeline_data = data.get('pipeline')
        pipeline = (
            Pipeline(pipeline_data)
            if pipeline_data is not None else
            None
        )

        return Query(
            network_ids=network_ids,
            seeding=seeding,
            pipeline=pipeline,
        )

    @staticmethod
    def load(file):
        """Load a query from a JSON file.

        :param file: A file or file-like
        :rtype: Query
        :raises QueryMissingNetworksError: if the JSON lacks "network_ids"
        """
        return Query.from_json(json.load(file))

    @staticmethod
    def loads(s):
        """Load a query from a JSON string.

        :param str s: A stringified JSON query
        :rtype: Query
        :raises QueryMissingNetworksError: if the JSON lacks "network_ids"
        """
        return Query.from_json(json.loads(s))
|
"""Converts PubMed abstracts to a file with one abstract per line."""
import argparse
import datetime
import gzip
import logging
import multiprocessing
import os
import process
import time
import xml.etree.cElementTree as ET
from functools import partial
from typing import Tuple
logging.basicConfig(format="%(asctime)s : %(levelname)s : %(message)s",
level=logging.INFO)
# Approximate total number of abstracts so we can estimate processing time
# without having to read all files beforehand.
TOTAL_DOCS = 32000000
def abstracts_to_lines_fn(
        args: Tuple[str, str],
        text_processor: 'process.MaterialsTextProcessor') -> int:
    """Write abstracts in a PubMed XML file to a line-separated text file.

    Args:
        args: A tuple of (input .xml.gz path, output .txt path).
        text_processor: an instance of process.MaterialsTextProcessor; its
            ``process`` method must return (tokens, other).

    Returns:
        doc_count: number of abstracts written to the file.
    """
    input_file, output_file = args
    doc_count = 0
    # The output file is opened exactly once here; the original code also
    # leaked an extra, never-closed handle from a redundant open() call.
    with open(output_file, 'w') as fout:
        with gzip.open(input_file, 'rt') as f:
            text = f.read()
        tree = ET.ElementTree(ET.fromstring(text))
        root = tree.getroot()
        for abstract_text in root.iterfind('.//AbstractText'):
            doc_text = abstract_text.text
            # Skip empty AbstractText elements (e.g. markup-only nodes).
            if doc_text is None:
                continue
            doc_tokens, _ = text_processor.process(doc_text)
            doc_text = ' '.join(doc_tokens)
            fout.write('{}\n'.format(doc_text))
            doc_count += 1
    return doc_count
def abstracts_to_lines(input_folder: str, output_folder: str,
                       n_processes: int) -> None:
    """Write abstracts in PubMed XML files to line-separated text files.

    Args:
        input_folder: Path to the folder containing the PubMed XML files.
        output_folder: Path to the folder to write the raw text files.
        n_processes: Number of processes to read and write the files in
            parallel (None lets multiprocessing use os.cpu_count()).
    """
    start_time = time.time()

    # Pair every *.gz input with a *.txt output of the same base name.
    input_files = []
    output_files = []
    for file_name in os.listdir(input_folder):
        if file_name.endswith('.gz'):
            input_files.append(os.path.join(input_folder, file_name))
            output_files.append(os.path.join(
                output_folder, file_name.replace('.xml.gz', '.txt')))
    logging.info('{} files found'.format(len(input_files)))

    text_processor = process.MaterialsTextProcessor()
    abstracts_to_lines_fn_partial = partial(
        abstracts_to_lines_fn,
        text_processor=text_processor)

    # Warning: more than 8 processes causes OOM on a 64GB machine.
    # The pool is created once, inside the context manager (the original
    # also built a second, leaked Pool outside it).
    doc_count = 0
    with multiprocessing.Pool(n_processes) as pool:
        for partial_doc_count in pool.imap_unordered(
                abstracts_to_lines_fn_partial, zip(input_files, output_files)):
            doc_count += partial_doc_count
            time_passed = time.time() - start_time
            # Rough ETA from the hard-coded corpus-size estimate TOTAL_DOCS.
            time_remaining = TOTAL_DOCS * time_passed / doc_count - time_passed
            logging.info('Processed {}/{} docs in {} (Remaining: {})'.format(
                doc_count, TOTAL_DOCS,
                datetime.timedelta(seconds=time_passed),
                datetime.timedelta(seconds=time_remaining)))

    # Single summary line (the original emitted two near-identical messages).
    logging.info('Converted {} docs in {}'.format(
        doc_count,
        datetime.timedelta(seconds=time.time() - start_time)))
if __name__ == '__main__':
    # Command-line entry point: convert a folder of PubMed *.xml.gz dumps
    # into one-abstract-per-line text files.
    parser = argparse.ArgumentParser(
        description='Converts PubMed abstracts to a file with one abstract '
                    'per line.')
    parser.add_argument('--input_folder', required=True,
                        help='Folder containing the XML.gz files.')
    parser.add_argument('--output_folder', required=True,
                        help='output folder.')
    parser.add_argument(
        '--n_processes', default=None, type=int,
        help='Number of processes to read and write files in parallel. '
             'Warning: more than 8 processes causes OOM on a 64GB machine.')
    args = parser.parse_args()
    logging.info(args)

    # Create the output folder on first use.
    if not os.path.exists(args.output_folder):
        os.makedirs(args.output_folder)

    abstracts_to_lines(input_folder=args.input_folder,
                       output_folder=args.output_folder,
                       n_processes=args.n_processes)
    logging.info('Done!')
|
# --------------
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Code starts here
# NOTE(review): `path` is supplied by the hosting platform (not defined in
# this file) and points to the insurance-claims CSV — confirm upstream.
df = pd.read_csv(path)

# Strip '$' and ',' from currency-formatted columns so they can be cast
# to float in a later step.
cols = ['INCOME','HOME_VAL','BLUEBOOK','OLDCLAIM','CLM_AMT']
df[cols] = df[cols].replace({'\$': '', ',': ''}, regex=True)
print(df.head())

# Feature / target split; CLAIM_FLAG is the binary label.
X = df.drop('CLAIM_FLAG', axis=1)
y = df['CLAIM_FLAG']
count = y.value_counts()
print(count)

X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3, random_state=6)
# Code ends here
# --------------
# Code starts here
# Cast the de-formatted currency columns (now free of '$' and ',') to float.
X_train[cols] = X_train[cols].astype(float)
X_test[cols] = X_test[cols].astype(float)

# Inspect missing values per column before imputation.
print(X_train.isnull().sum())
print(X_test.isnull().sum())
# Code ends here
# --------------
# Code starts here
# Drop rows with missing YOJ or OCCUPATION and keep the labels aligned
# with the surviving rows.
X_train.dropna(subset=['YOJ','OCCUPATION'],inplace=True)
X_test.dropna(subset=['YOJ','OCCUPATION'],inplace=True)
y_train = y_train[X_train.index]
y_test = y_test[X_test.index]

# Impute the remaining numeric NaNs with the TRAINING means (also on the
# test set, to avoid leakage).  Bug fix: the original used
# ``X_train[col].replace(np.nan, ..., inplace=True)``, which operates on a
# temporary copy and therefore never updated the frames.
col = ['AGE','CAR_AGE','INCOME','HOME_VAL']
X_train[col] = X_train[col].fillna(X_train[col].mean())
X_test[col] = X_test[col].fillna(X_train[col].mean())
# Code ends here
# --------------
from sklearn.preprocessing import LabelEncoder
columns = ["PARENT1","MSTATUS","GENDER","EDUCATION","OCCUPATION","CAR_USE","CAR_TYPE","RED_CAR","REVOKED"]

# Code starts here
# Label-encode each categorical column: fit on train, re-use on test.
# NOTE(review): ``transform`` raises on categories never seen in training —
# confirm the split cannot produce unseen test categories for this dataset.
le = LabelEncoder()
for i in columns:
    X_train[i] = le.fit_transform(X_train[i])
    X_test[i] = le.transform(X_test[i])
print(X_train.head())
# Code ends here
# --------------
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression

# code starts here
# Quick sanity checks on the prepared training data before fitting.
print(X_train.head())
print(y_train.head())
print(X_train.info())
print(X_train.isna())

# Baseline logistic regression on the (still imbalanced) training data.
model=LogisticRegression(random_state = 6)
model.fit(X_train,y_train)
y_pred=model.predict(X_test)
score=accuracy_score(y_test,y_pred)
print(score)
precision=precision_score(y_test,y_pred)
print(precision)
# Code ends here
# --------------
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE

# code starts here
# Oversample the minority class on the TRAINING data only, then standardize
# (the scaler is fit on the resampled training set and re-used on test).
smote=SMOTE(random_state=6)
# ``fit_sample`` was removed in imbalanced-learn 0.8; ``fit_resample`` is
# the supported name (available since 0.4) with identical behaviour.
X_train,y_train=smote.fit_resample(X_train,y_train)
scaler=StandardScaler()
X_train=scaler.fit_transform(X_train)
X_test=scaler.transform(X_test)
# Code ends here
# --------------
# Code Starts here
# Refit logistic regression on the SMOTE-balanced, standardized data and
# report test accuracy.
model=LogisticRegression()
model.fit(X_train,y_train)
y_pred=model.predict(X_test)
score=accuracy_score(y_test,y_pred)
print(score)
# Code ends here
|
class Solution:
    def numTeams(self, rating: 'List[int]') -> int:
        """Count teams of 3 soldiers (indices i < j < k) whose ratings are
        strictly increasing or strictly decreasing.

        For every soldier ``mid`` treated as the middle member, the number
        of teams with that middle is::

            (#smaller on the left) * (#greater on the right)   # increasing
          + (#greater on the left) * (#smaller on the right)   # decreasing

        Summing over ``mid`` counts every team exactly once.

        Improvements over the original memoized recursion:
        * O(n^2) time, O(1) extra space (the recursion enumerated O(n)
          transitions per state and needed an O(n) cache).
        * no reliance on ``lru_cache`` / ``List``, which were never imported
          in this file (the unquoted annotation raised NameError outside
          LeetCode's harness).

        :param rating: soldier ratings.
        :return: number of valid teams.
        """
        n = len(rating)
        count = 0
        for mid in range(n):
            pivot = rating[mid]
            # Strict comparisons mirror the original: equal ratings never
            # form a team.
            smaller_left = sum(1 for i in range(mid) if rating[i] < pivot)
            greater_left = sum(1 for i in range(mid) if rating[i] > pivot)
            smaller_right = sum(1 for k in range(mid + 1, n) if rating[k] < pivot)
            greater_right = sum(1 for k in range(mid + 1, n) if rating[k] > pivot)
            count += smaller_left * greater_right + greater_left * smaller_right
        return count
|
from typing import List, Sequence
import itertools
import numpy as np
__all__ = ['voltage_to_uint16']
def voltage_to_uint16(voltage: np.ndarray, output_amplitude: float, output_offset: float, resolution: int) -> np.ndarray:
    """Convert voltages to the device's unsigned integer representation.

    The window ``[output_offset - output_amplitude,
    output_offset + output_amplitude]`` is mapped linearly onto
    ``[0, 2**resolution - 1]`` and rounded to the nearest integer.

    :param voltage: array of voltages to convert
    :param output_amplitude: half-width of the representable voltage window
    :param output_offset: center (DC offset) of the voltage window
    :param resolution: number of bits of the target representation (> 0)
    :return: array of dtype ``numpy.uint16``
    :raises ValueError: if ``resolution`` is not a positive integer, or any
        voltage lies outside the representable window
    """
    if resolution < 1 or not isinstance(resolution, int):
        raise ValueError('The resolution must be an integer > 0')
    non_dc_voltage = voltage - output_offset
    if np.any(np.abs(non_dc_voltage) > output_amplitude):
        # Message typo fixed: the original read "Voltage of range".
        raise ValueError('Voltage out of range', dict(voltage=voltage,
                                                      output_offset=output_offset,
                                                      output_amplitude=output_amplitude))
    # Shift into [0, 2*amplitude], scale to the integer range, round in place.
    non_dc_voltage += output_amplitude
    non_dc_voltage *= (2**resolution - 1) / (2*output_amplitude)
    np.rint(non_dc_voltage, out=non_dc_voltage)
    return non_dc_voltage.astype(np.uint16)
def make_combined_wave(segments: List['TaborSegment'], destination_array=None, fill_value=None) -> np.ndarray:
    """Interleave Tabor segments into a single combined waveform buffer.

    The buffer is organized in 16-sample "quanta": channel B quanta are
    interleaved with channel A quanta (B in the even rows, A in the odd
    rows), and between consecutive segments one extra quantum per channel
    repeats the first data point of the following segment.

    :param segments: segments to combine; every segment length must be a
        multiple of 16
    :param destination_array: optional preallocated buffer of exactly
        2 * total-quanta * 16 entries; reused in place when given
    :param fill_value: if truthy, a freshly created buffer is pre-filled
        with this value
    :return: flat uint16 array containing the combined waveform
    :raises ValueError: for a mis-sized segment or destination array
    """
    quantum = 16

    if len(segments) == 0:
        return np.zeros(0, dtype=np.uint16)

    segment_lengths = np.fromiter((segment.num_points for segment in segments), count=len(segments), dtype=int)
    if np.any(segment_lengths % quantum != 0):
        raise ValueError('Segment is not a multiple of 16')

    # One extra quantum pair is inserted between each adjacent segment pair.
    n_quanta = np.sum(segment_lengths) // quantum + len(segments) - 1

    if destination_array is not None:
        if len(destination_array) != 2*n_quanta*quantum:
            raise ValueError('Destination array has an invalid length')
        destination_array = destination_array.reshape((2*n_quanta, quantum))
    else:
        destination_array = np.empty((2*n_quanta, quantum), dtype=np.uint16)
        if fill_value:
            destination_array[:] = fill_value

    # extract data that already includes the markers
    data, next_data = itertools.tee(((segment.data_a, segment.data_b) for segment in segments), 2)
    next(next_data, None)  # second iterator runs one segment ahead

    current_quantum = 0
    for (data_a, data_b), next_segment, segment_length in itertools.zip_longest(data, next_data, segment_lengths):
        segment_quanta = 2 * (segment_length // quantum)
        segment_destination = destination_array[current_quantum:current_quantum+segment_quanta, :]

        # Channel B fills the even quanta rows, channel A the odd ones.
        if data_b is not None:
            segment_destination[::2, :].flat = data_b
        if data_a is not None:
            segment_destination[1::2, :].flat = data_a
        current_quantum += segment_quanta

        if next_segment:
            # fill one quantum with first data point from next segment
            next_data_a, next_data_b = next_segment
            if next_data_b is not None:
                destination_array[current_quantum, :] = next_data_b[0]
            if next_data_a is not None:
                destination_array[current_quantum+1, :] = next_data_a[0]
            current_quantum += 2

    return destination_array.ravel()
def find_positions(data: Sequence, to_find: Sequence) -> np.ndarray:
    """Find indices of the first occurrence of the elements of to_find in data.

    Elements of ``to_find`` that do not occur anywhere in ``data`` map to -1.
    """
    order = np.argsort(data)
    # Bracket each query value in the (virtually) sorted data: a value is
    # present exactly when its left and right insertion points differ.
    lo = np.searchsorted(data, to_find, side='left', sorter=order)
    hi = np.searchsorted(data, to_find, side='right', sorter=order)
    hit = lo < hi

    result = np.full_like(to_find, fill_value=-1, dtype=np.int64)
    result[hit] = order[lo[hit]]
    return result
|
############################################
# Semi-Adversarial Network #
# (convolutional autoencoder) #
# iPRoBe lab #
# #
############################################
import torch
import torch.nn as nn
class AutoEncoder(nn.Module):
    """Convolutional autoencoder of the semi-adversarial network (iPRoBe lab).

    The input image is concatenated with a "same" prototype (4 channels
    total), passed through an encoder/decoder stack, and the reconstruction
    is fused with either prototype by a 1x1 convolution + sigmoid.
    """

    def __init__(self):
        super(AutoEncoder, self).__init__()
        # Encoder: two (conv -> leaky ReLU -> 2x avg-pool) stages;
        # Decoder: two (conv -> leaky ReLU -> 2x nearest upsample) stages.
        stages = [
            nn.Conv2d(4, 8, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.AvgPool2d(kernel_size=2, stride=2),
            nn.Conv2d(8, 12, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.AvgPool2d(kernel_size=2, stride=2),
            nn.Conv2d(12, 256, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode='nearest'),
            nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode='nearest'),
        ]
        self.autoencoder = nn.Sequential(*stages)
        # 128 decoder channels + 3 prototype channels -> single output map.
        self.protocombiner = nn.Sequential(
            nn.Conv2d(131, 1, kernel_size=1, stride=1, padding=0),
            nn.Sigmoid()
        )

    def forward(self, imgs, same_proto, oppo_proto):
        """Return reconstructions fused with the same / opposite prototype."""
        encoded_decoded = self.autoencoder(torch.cat([imgs, same_proto], dim=1))
        with_same = torch.cat([encoded_decoded, same_proto], dim=1)
        with_oppo = torch.cat([encoded_decoded, oppo_proto], dim=1)
        return self.protocombiner(with_same), self.protocombiner(with_oppo)
|
#!/usr/bin/python
import usb.core
import usb.util
import sys, struct
class WCHISP:
def __init__(self):
# find our device
dev = usb.core.find(idVendor=0x4348, idProduct=0x55e0)
if dev is None:
raise ValueError('Device not found')
dev.set_configuration()
cfg = dev.get_active_configuration()
intf = cfg[(0,0)]
self.epout = usb.util.find_descriptor(intf, custom_match = lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_OUT)
self.epin = usb.util.find_descriptor(intf, custom_match = lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_IN)
def cmd(self, msg):
self.writeb(msg)
b = self.readb(64)
if len(b) == 2:
return struct.unpack('<H', b)[0]
return b
def xcmd(self, msg, exp):
#xmsg = map(lambda x: hex(ord(x))[2:], msg)
#print ' '.join(xmsg)
#return 0
ret = self.cmd(msg)
if ret != exp:
xmsg = map(lambda x: hex(ord(x)), msg[0:4])
raise Exception('cmd[%s] return %d != %d' % (','.join(xmsg), ret, exp))
def info(self):
v = self.cmd('\xa2\x13USB DBG CH559 & ISP' + '\0'*42)
self.cmd('\xbb\x00')
return v
def readb(self, size):
return self.epin.read(size)
def writeb(self, b):
self.epout.write(b)
def program(self, hexfile):
def readhex():
lno = 0
mem = []
with open(hexfile, 'r') as f:
for line in f:
lno += 1
line = line.strip()
if len(line)<6 or line[0] != ':': continue
if line[7:9] == '01': break
if line[7:9] != '00':
raise ValueException('unknown data type @ %s:%d' % (hexfile, lno))
n = int(line[1:3], 16)
addr = int(line[3:7], 16)
if n + addr > len(mem):
mem.extend([255] * (n+addr-len(mem)))
i = 9
while n > 0:
mem[addr] = int(line[i:i+2], 16)
i += 2
addr += 1
n -= 1
return mem
def wv(mode):
if mode == '\xa7':
print 'Verifying ',
else:
print 'Programming ',
sys.stdout.flush()
addr = 0
while addr < len(mem):
b = mode
sz = len(mem) - addr
if sz > 0x3c: sz = 0x3c
b += struct.pack('<BH', sz, addr)
for i in range(sz):
b += chr(mem[addr+i]^rand[i%4])
self.xcmd(b, 0)
addr += sz
sys.stdout.write('#')
sys.stdout.flush()
print ''
rand = (0x29, 0x52, 0x8C, 0x70)
mem = readhex()
if len(mem) < 256 or len(mem) > 16384:
raise "hexfile codesize %d not in (256, 16384)" % len(mem)
b = '\xa6\x04' + struct.pack('BBBB', *rand)
self.xcmd(b, 0)
for page in range(0, 0x40, 4):
b = '\xa9\x02\x00' + chr(page)
self.xcmd(b, 0)
wv('\xa8')
self.cmd('\xb8\x02\xff\x4e') # Code_Protect, Boot_Load, No_Long_Reset, No_RST
self.cmd('\xb9\x00')
wv('\xa7')
self.writeb('\xa5\x02\x00\x00')
if len(sys.argv) != 2:
print 'wchprog hexfile'
sys.exit(1)
isp = WCHISP()
if isp.info() != 0x52:
raise IOException("not a CH552T device")
isp.program(sys.argv[1])
|
import os
import re
import logging
from consts import TIMEOUT_RC
from structs import TimedRunParams
from structs import ToolRunParams
from utils import get_tmp_file_name, execute_shell, readfile
from config import RUN_SOLVER_EXEC
class RunStats:
    """Resource usage of one timed run, as reported by runsolver."""

    def __init__(self, cpu_time_sec:float=None,
                 wall_time_sec:float=None,
                 virt_mem_mb:int=None):
        # CPU seconds consumed by the monitored process tree.
        self.cpu_time_sec = cpu_time_sec
        # Elapsed wall-clock seconds.
        self.wall_time_sec = wall_time_sec
        # Peak virtual memory, in megabytes.
        self.virt_mem_mb = virt_mem_mb
def get_float(param_name, text):
    """Extract the unique ``<param_name>=<float>`` value from runsolver output.

    :param param_name: literal parameter name, e.g. ``'CPUTIME'``
    :param text: stats text produced by runsolver
    :return: the parsed value as a float
    :raises AssertionError: if the parameter does not occur exactly once
    """
    # re.escape keeps any metacharacter in the name literal; the raw string
    # avoids the invalid-escape warning for '\.' on Python 3.12+.
    numbers = re.findall(re.escape(param_name) + r'=([0-9]+\.?[0-9]*)',
                         text)
    assert len(numbers) == 1, numbers
    return float(numbers[0])
def parse_stats(stats_str) -> RunStats:
    """Build a RunStats from runsolver's verbose stats output."""
    cpu = get_float('CPUTIME', stats_str)
    wall = get_float('WCTIME', stats_str)
    # MAXVM is divided by 1000 to convert to megabytes (original convention).
    mem_mb = int(get_float('MAXVM', stats_str) / 1000)
    return RunStats(cpu_time_sec=cpu,
                    wall_time_sec=wall,
                    virt_mem_mb=mem_mb)
def get_tool_rc(exec_log_str) -> int:
    """Extract the tool's exit code from runsolver's execution log.

    :param exec_log_str: contents of the runsolver watcher log
    :return: ``TIMEOUT_RC`` when the wall-clock limit was exceeded,
        otherwise the child process's exit status
    """
    if 'Maximum wall clock time exceeded' in exec_log_str:
        return TIMEOUT_RC

    # Raw string: '\d' is an invalid escape in a plain string on Python 3.12+.
    status = re.findall(r'Child status: (\d+)', exec_log_str)
    assert len(status) == 1, status  # TODO: replace ALL asserts in the async part by logging into the DB
    return int(status[0])
def main(timed_run_params:TimedRunParams, tool_run:ToolRunParams) -> (RunStats, int):
    """Run the tool under `runsolver` with a wall-clock limit, collect stats.

    :param timed_run_params: carries the wall-clock time limit for the run
    :param tool_run: describes the tool command line and its log file
    :return: (resource statistics of the run, tool's exit code)
    """
    # TODO: add memory limit
    logging.info('timed_run.main')

    # Temporary files receive runsolver's stats and its watcher log.
    stats_file_name = get_tmp_file_name()
    exec_log_file = get_tmp_file_name()
    rc, out, err = \
        execute_shell('{runsolver} -o {tool_log} -v {stats_file} -w {exec_log} -W {time_limit} '
                      '{tool_cmd}'
                      .format(runsolver=RUN_SOLVER_EXEC,
                              tool_log=tool_run.log_file,
                              stats_file=stats_file_name,
                              exec_log=exec_log_file,
                              time_limit=str(timed_run_params.time_limit_sec),
                              tool_cmd=tool_run.to_cmd_str()))
    logging.info(readfile(exec_log_file))

    # runsolver itself must succeed; the tool's own exit code is read below.
    # TODO: this should also be logged in the DB
    assert rc == 0, 'timed run failed: rc={rc}, \nout={out}, \nerr={err}'\
                    .format(rc=str(rc), out=out, err=err)

    tool_rc = get_tool_rc(readfile(exec_log_file))
    stats = parse_stats(readfile(stats_file_name))

    # Clean up the temporary files once parsed.
    os.remove(stats_file_name)
    os.remove(exec_log_file)

    return stats, tool_rc
|
import unittest2
import sys
import os
sys.path.append(os.getcwd() + '/..')
import video_file_saver
class AuthTokenTests(unittest2.TestCase):
    """Integration tests for video_file_saver against a public test HLS stream.

    NOTE(review): these tests hit the network and produce real video files
    in the current working directory; they are slow and environment-dependent.
    """

    @classmethod
    def tearDown(cls):
        # Remove any recorded clips / playlist files left in the CWD.
        [os.remove(f) for f in os.listdir() if f.endswith('.mp4') or f.endswith('.txt')]

    def test_record(self):
        """record() should produce both the numbered clip and the .txt list file."""
        stream = 'https://cph-p2p-msl.akamaized.net/hls/live/2000341/test/master.m3u8'
        fps = "25"
        interval = 5
        filepath = "test"
        i = 0
        video_file_saver.record(stream,fps,interval,filepath,i)
        res = f'{filepath}{i}.mp4' in os.listdir() and f'{filepath}.txt' in os.listdir()
        self.assertTrue(res)

    def test_create_final_video(self):
        """create_final_video() should concatenate clips and delete intermediates."""
        stream = 'https://cph-p2p-msl.akamaized.net/hls/live/2000341/test/master.m3u8'
        fps = "25"
        interval = 5
        filepath = "test"
        i = 0
        v_id = 0
        path = 'output'
        # Record two consecutive clips, then combine them.
        video_file_saver.record(stream,fps,interval,filepath,i)
        video_file_saver.record(stream,fps,interval,filepath,i+1)
        video_file_saver.create_final_video(filepath,path,v_id)
        res = not f'{filepath}{i}.mp4' in os.listdir() and not f'{filepath}{i+1}.mp4' in os.listdir() and not f'{filepath}.txt' in os.listdir() and f'{path}{i}.mp4' in os.listdir()
        self.assertTrue(res)
# Allow running this file directly: discovers and runs the tests above.
if __name__ == "__main__":
    unittest2.main()
#------------------------------------#
# Author: Yueh-Lin Tsou #
# Update: 7/20/2019 #
# E-mail: hank630280888@gmail.com #
#------------------------------------#
"""-----------------------------------------------------------------
Implement Template Matching by using Normalised Cross Correlation
-------------------------------------------------------------------"""
import argparse
import numpy as np
import cv2
import pylab as pl
# ------------------ Normalised Cross Correlation ------------------ #
def Normalised_Cross_Correlation(roi, target):
    """Return the normalised cross-correlation (NCC) of two equal-shaped patches.

    NCC = sum(roi * target) / (||roi|| * ||target||).

    :param roi: image patch (numeric ndarray)
    :param target: template patch of the same shape
    :return: NCC score; 0.0 when either patch is all zeros (the original
        produced NaN from a 0/0 division in that case)
    """
    # Normalised Cross Correlation Equation
    cor = np.sum(roi * target)
    nor = np.sqrt((np.sum(roi ** 2))) * np.sqrt(np.sum(target ** 2))
    if nor == 0:
        # An all-zero patch has no correlation with anything.
        return 0.0
    return cor / nor
# ----------------------- template matching ----------------------- #
def template_matching(img, target):
    """Slide ``target`` over ``img`` and return the best-matching position.

    :param img: grayscale image, shape (H, W)
    :param target: grayscale template, shape (h, w) with h <= H and w <= W
    :return: (x, y) of the top-left corner of the highest-NCC match
    """
    # initial parameter
    height, width = img.shape
    tar_height, tar_width = target.shape
    (max_Y, max_X) = (0, 0)
    MaxValue = 0

    # Set image, target and result value matrix
    img = np.array(img, dtype="int")
    target = np.array(target, dtype="int")
    # Off-by-one fix: there are (height - tar_height + 1) valid vertical
    # offsets (and likewise horizontally); the original ranges stopped one
    # short, so a template at the bottom/right edge could never be found.
    NccValue = np.zeros((height-tar_height+1, width-tar_width+1))

    # calculate value using filter-kind operation from top-left to bottom-right
    for y in range(0, height-tar_height+1):
        for x in range(0, width-tar_width+1):
            # image roi
            roi = img[y : y+tar_height, x : x+tar_width]
            # calculate ncc value
            NccValue[y, x] = Normalised_Cross_Correlation(roi, target)
            # find the most match area
            if NccValue[y, x] > MaxValue:
                MaxValue = NccValue[y, x]
                (max_Y, max_X) = (y, x)

    return (max_X, max_Y)
# -------------------------- main -------------------------- #
if __name__ == '__main__':
    # read one input from terminal
    # command line >> python Template_Matching.py -i brain.jpg -t target.jpg
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required = True, help = "Path to input image")
    ap.add_argument("-t", "--target", required = True, help = "Path to target")
    args = vars(ap.parse_args())

    # Load both images as single-channel grayscale (flag 0).
    image = cv2.imread(args["image"], 0)
    target = cv2.imread(args["target"], 0)
    height, width = target.shape

    # function
    top_left = template_matching(image, target)

    # draw rectangle on the result region
    cv2.rectangle(image, top_left, (top_left[0] + width, top_left[1] + height), 0, 3)

    # show result
    pl.subplot(111)
    pl.imshow(image)
    pl.title('result')
    pl.show()
|
"""
jsnek_saved_games_dataset that returns flat (vectorized) data
"""
from .jsnek_base_dataset import JSnekBaseDataset
from .. import utils
class JSnekDataset(JSnekBaseDataset):
    """Represents a board state in the following way:

    board_state: `torch.Tensor`
        Board state in torch.Tensor format. Board state can either be
        C x H x W
        or
        (C*H*W) if board_state_as_vector = True
    direction: `torch.Tensor`
        Direction taken in one-hot format
    """

    def __init__(
        self, board_state_as_vector=False, direction_as_index=False, max_frames=-1
    ):
        super().__init__(max_frames=max_frames)
        # Output-format switches, applied on every __getitem__ call.
        self.board_state_as_vector = board_state_as_vector
        self.direction_as_index = direction_as_index

    def __getitem__(self, index):
        """
        Parameters
        ----------
        index : int
            Index of datum

        Returns
        -------
        board_state: `torch.Tensor`
            Board state in torch.Tensor format. Board state can either be
            C x H x W
            or
            (C*H*W) if board_state_as_vector = True
        direction: `torch.Tensor`
            Direction taken in one-hot format
            or
            Index if direction_as_index = True
        """
        frame, winner_id, direction = super().__getitem__(index)

        board_state = utils.frame_to_image(frame, winner_id)
        if self.board_state_as_vector:
            # Flatten C x H x W into a single vector.
            board_state = board_state.view([board_state.numel()])

        encode = (
            utils.direction_to_index
            if self.direction_as_index
            else utils.direction_to_onehot
        )
        return board_state, encode(direction)
|
from io import StringIO
from textwrap import dedent
from aiida.cmdline.utils.common import get_calcjob_report
from aiida.orm import FolderData
from aiida.plugins import ParserFactory
import pytest
def get_log():
    """Minimal LAMMPS log fixture containing the keys the parsers read."""
    return (
        "units metal\n"
        "final_energy: 2.0\n"
        "final_cell: 0 1 0 0 1 0 0 1 0\n"
        "final_stress: 0 0 0 0 0 0\n"
    )
def get_traj_force():
    """LAMMPS dump-file fixture: one timestep of per-atom forces, 6 atoms."""
    lines = [
        "ITEM: TIMESTEP",
        "0",
        "ITEM: NUMBER OF ATOMS",
        "6",
        "ITEM: BOX BOUNDS pp pp pp",
        "0 4.44",
        "0 5.39",
        "0 3.37",
        "ITEM: ATOMS element fx fy fz",
        "Fe 0.0000000000 0.0000000000 -0.0000000000",
        "Fe 0.0000000000 -0.0000000000 0.0000000000",
        "S -25.5468278966 20.6615772179 -0.0000000000",
        "S -25.5468278966 -20.6615772179 -0.0000000000",
        "S 25.5468278966 20.6615772179 -0.0000000000",
        "S 25.5468278966 -20.6615772179 0.0000000000",
    ]
    return "\n".join(lines) + "\n"
@pytest.mark.parametrize(
    "plugin_name", ["lammps.force", "lammps.optimize", "lammps.md", "lammps.md.multi"]
)
def test_missing_log(db_test_app, plugin_name):
    """Parsing an empty retrieved folder must exit with ERROR_LOG_FILE_MISSING."""
    # Deliberately empty retrieved folder: no log.lammps at all.
    retrieved = FolderData()

    calc_node = db_test_app.generate_calcjob_node(plugin_name, retrieved)
    parser = ParserFactory(plugin_name)
    with db_test_app.sandbox_folder() as temp_path:
        results, calcfunction = parser.parse_from_node(
            calc_node, retrieved_temporary_folder=temp_path.abspath
        )

    assert calcfunction.is_finished, calcfunction.exception
    assert calcfunction.is_failed, calcfunction.exit_status
    assert (
        calcfunction.exit_status
        == calc_node.process_class.exit_codes.ERROR_LOG_FILE_MISSING.status
    )
@pytest.mark.parametrize(
    "plugin_name", ["lammps.force", "lammps.optimize", "lammps.md", "lammps.md.multi"]
)
def test_missing_traj(db_test_app, plugin_name):
    """A valid log but no trajectory file must exit with ERROR_TRAJ_FILE_MISSING."""
    # Provide the log and scheduler outputs, but omit the trajectory file.
    retrieved = FolderData()
    retrieved.put_object_from_filelike(StringIO(get_log()), "log.lammps")
    retrieved.put_object_from_filelike(StringIO(""), "_scheduler-stdout.txt")
    retrieved.put_object_from_filelike(StringIO(""), "_scheduler-stderr.txt")

    calc_node = db_test_app.generate_calcjob_node(plugin_name, retrieved)
    parser = ParserFactory(plugin_name)
    with db_test_app.sandbox_folder() as temp_path:
        results, calcfunction = parser.parse_from_node(
            calc_node, retrieved_temporary_folder=temp_path.abspath
        )

    assert calcfunction.is_finished, calcfunction.exception
    assert calcfunction.is_failed, calcfunction.exit_status
    assert (
        calcfunction.exit_status
        == calc_node.process_class.exit_codes.ERROR_TRAJ_FILE_MISSING.status
    )
@pytest.mark.parametrize(
    "plugin_name", ["lammps.force", "lammps.optimize", "lammps.md", "lammps.md.multi"]
)
def test_empty_log(db_test_app, plugin_name):
    """An empty log.lammps must make the parser exit with ERROR_LOG_PARSING."""
    # All expected files exist, but every one of them is empty.
    retrieved = FolderData()
    for filename in [
        "log.lammps",
        "trajectory.lammpstrj",
        "_scheduler-stdout.txt",
        "_scheduler-stderr.txt",
    ]:
        retrieved.put_object_from_filelike(StringIO(""), filename)

    calc_node = db_test_app.generate_calcjob_node(plugin_name, retrieved)
    parser = ParserFactory(plugin_name)
    with db_test_app.sandbox_folder() as temp_path:
        # The parser also expects a temporary trajectory file on disk.
        with temp_path.open("x-trajectory.lammpstrj", "w"):
            pass
        results, calcfunction = parser.parse_from_node(
            calc_node, retrieved_temporary_folder=temp_path.abspath
        )

    assert calcfunction.is_finished, calcfunction.exception
    assert calcfunction.is_failed, calcfunction.exit_status
    assert (
        calcfunction.exit_status
        == calc_node.process_class.exit_codes.ERROR_LOG_PARSING.status
    )
@pytest.mark.parametrize(
    "plugin_name", ["lammps.force", "lammps.optimize", "lammps.md", "lammps.md.multi"]
)
def test_empty_traj(db_test_app, plugin_name):
    """A valid log but empty trajectory must exit with ERROR_TRAJ_PARSING."""
    # Valid log; trajectory and scheduler files present but empty.
    retrieved = FolderData()
    retrieved.put_object_from_filelike(StringIO(get_log()), "log.lammps")
    for filename in [
        "trajectory.lammpstrj",
        "_scheduler-stdout.txt",
        "_scheduler-stderr.txt",
    ]:
        retrieved.put_object_from_filelike(StringIO(""), filename)

    calc_node = db_test_app.generate_calcjob_node(plugin_name, retrieved)
    parser = ParserFactory(plugin_name)
    with db_test_app.sandbox_folder() as temp_path:
        # Empty temporary trajectory file triggers the parsing failure.
        with temp_path.open("x-trajectory.lammpstrj", "w"):
            pass
        results, calcfunction = parser.parse_from_node(
            calc_node, retrieved_temporary_folder=temp_path.abspath
        )

    assert calcfunction.is_finished, calcfunction.exception
    assert calcfunction.is_failed, calcfunction.exit_status
    assert (
        calcfunction.exit_status
        == calc_node.process_class.exit_codes.ERROR_TRAJ_PARSING.status
    )
@pytest.mark.parametrize(
    "plugin_name", ["lammps.force", "lammps.optimize", "lammps.md", "lammps.md.multi"]
)
def test_run_error(db_test_app, plugin_name):
    """An 'ERROR' in the scheduler stdout must exit with ERROR_LAMMPS_RUN."""
    # Valid log and trajectory, but the scheduler stdout reports an error.
    retrieved = FolderData()
    retrieved.put_object_from_filelike(StringIO(get_log()), "log.lammps")
    retrieved.put_object_from_filelike(
        StringIO(get_traj_force()), "x-trajectory.lammpstrj"
    )
    retrieved.put_object_from_filelike(
        StringIO("ERROR description"), "_scheduler-stdout.txt"
    )
    retrieved.put_object_from_filelike(StringIO(""), "_scheduler-stderr.txt")

    calc_node = db_test_app.generate_calcjob_node(plugin_name, retrieved)
    parser = ParserFactory(plugin_name)
    with db_test_app.sandbox_folder() as temp_path:
        # Provide a real temporary trajectory so only the run error fails.
        with temp_path.open("x-trajectory.lammpstrj", "w") as handle:
            handle.write(get_traj_force())
        results, calcfunction = parser.parse_from_node(
            calc_node, retrieved_temporary_folder=temp_path.abspath
        )

    print(get_calcjob_report(calc_node))

    assert calcfunction.is_finished, calcfunction.exception
    assert calcfunction.is_failed, calcfunction.exit_status
    assert (
        calcfunction.exit_status
        == calc_node.process_class.exit_codes.ERROR_LAMMPS_RUN.status
    )
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import skbio
import numpy as np
import pandas as pd
from q2_diversity import procrustes_analysis
class PCoATests(unittest.TestCase):
    """Tests for ``procrustes_analysis`` on pairs of PCoA ordinations.

    ``setUp`` builds a reference ordination, an "other" ordination derived
    from it, and the expected Procrustes-transformed results (hand-fixed
    numeric fixtures), which the individual tests compare against.
    """

    def setUp(self):
        # Six principal-coordinate axes shared by all fixture ordinations.
        axes = ['PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6']
        eigvals = pd.Series(np.array([1.5, 0.75, 0.3, 0.15, 0.15, 0.15]),
                            index=axes)
        samples = np.array([[0, 3, 4, 4, 0, 0],
                            [1, 2, 1, 4, 3, 3],
                            [2, 3, 1, 0, 0, 1],
                            [0, 3, 2, 4, 3, 0]])
        proportion_explained = pd.Series([0.50, 0.25, 0.10, 0.05, 0.05, 0.05],
                                         index=axes)
        samples_df = pd.DataFrame(samples,
                                  index=['A', 'B', 'C', 'D'],
                                  columns=axes)
        # Reference ordination that "other" will be superimposed onto.
        self.reference = skbio.OrdinationResults(
            'PCoA',
            'Principal Coordinate Analysis',
            eigvals,
            samples_df,
            proportion_explained=proportion_explained)
        # "Other" ordination: the reference shifted by 0.7 everywhere except
        # sample D's first coordinate (30), so the two matrices genuinely
        # differ beyond a rigid transformation.
        samples = np.array([[0.7, 3.7, 4.7, 4.7, 0.7, 0.7],
                            [1.7, 2.7, 1.7, 4.7, 3.7, 3.7],
                            [2.7, 3.7, 1.7, 0.7, 0.7, 1.7],
                            [30, 3.7, 2.7, 4.7, 3.7, 0.7]])
        samples_df = pd.DataFrame(samples,
                                  index=['A', 'B', 'C', 'D'],
                                  columns=axes)
        self.other = skbio.OrdinationResults(
            'PCoA',
            'Principal Coordinate Analysis',
            eigvals.copy(),
            samples_df.copy(),
            proportion_explained=proportion_explained.copy())
        # Expected transformed reference after Procrustes superimposition.
        # NOTE(review): fixtures use only the first 5 axes — presumably the
        # number of dimensions procrustes_analysis fits by default; confirm
        # against its signature.
        S = [[-0.1358036, 0.0452679, 0.3621430, 0.1810715, -0.2716072],
             [0.0452679, -0.1358036, -0.1810715, 0.1810715, 0.2716072],
             [0.2263394, 0.0452679, -0.1810715, -0.5432145, -0.2716072],
             [-0.1358036, 0.0452679, 0.0000000, 0.1810715, 0.2716072]]
        samples_df = pd.DataFrame(np.array(S),
                                  index=['A', 'B', 'C', 'D'],
                                  columns=axes[:5])
        self.expected_ref = skbio.OrdinationResults(
            'PCoA',
            'Principal Coordinate Analysis',
            eigvals[:5].copy(),
            samples_df.copy(),
            proportion_explained=proportion_explained[:5].copy())
        # Expected transformed "other" matrix for the noise-free comparison.
        S = [[0.0482731, -0.0324317, 0.0494312, -0.0316828, -0.1584374],
             [0.0803620, -0.0718115, -0.0112234, -0.0171011, -0.1101209],
             [0.0527554, -0.0042753, -0.0126739, -0.0969602, -0.0964822],
             [-0.1813905, 0.1085184, -0.0255339, 0.1457440, 0.3650405]]
        samples_df = pd.DataFrame(np.array(S),
                                  index=['A', 'B', 'C', 'D'],
                                  columns=axes[:5])
        self.expected_other = skbio.OrdinationResults(
            'PCoA',
            'Principal Coordinate Analysis',
            eigvals[:5].copy(),
            samples_df.copy(),
            proportion_explained=proportion_explained[:5].copy())
        # Expected transformed "other" matrix once Gaussian noise has been
        # added to it (used by test_non_zero_p).
        noise = [
            [0.04988341, -0.03234447, 0.03177641, -0.03507789, -0.13564394],
            [0.09117347, -0.08318546, -0.02249053, -0.01597601, -0.10901541],
            [0.05077765, -0.003994, -0.00984688, -0.09356729, -0.09648388],
            [-0.19183453, 0.11952393, 0.000561, 0.14462118, 0.34114323]]
        samples_df = pd.DataFrame(np.array(noise),
                                  index=['A', 'B', 'C', 'D'],
                                  columns=axes[:5])
        self.expected_noise = skbio.OrdinationResults(
            'PCoA',
            'Principal Coordinate Analysis',
            eigvals[:5].copy(),
            samples_df.copy(),
            proportion_explained=proportion_explained[:5].copy())
        # Expected true M^2 disparity, and the p-value the stochastic tests
        # assert the observed value is *not* close to.
        self.expected_m2 = 0.72240956
        self.expected_p = 0.5

    def test_procrustes(self):
        """Noise-free analysis reproduces the fixed matrices and M^2."""
        ref, other, m2_results = procrustes_analysis(self.reference,
                                                     self.other)
        true_m2 = m2_results['true M^2 value'][0]
        true_p_value = m2_results['p-value for true M^2 value'][0]
        skbio.util.assert_ordination_results_equal(ref, self.expected_ref)
        skbio.util.assert_ordination_results_equal(other, self.expected_other)
        self.assertAlmostEqual(true_m2, self.expected_m2)
        # The p-value comes from random permutations, so only assert it is
        # away from 0.5 rather than pinning an exact value.
        self.assertNotAlmostEqual(true_p_value, self.expected_p)

    def test_non_zero_p(self):
        """With noise added, M^2 grows and the p-value stays non-trivial."""
        # generated with np.random.seed(3); np.random.randn(4, 6)
        noise = np.array(
            [[1.78862847, 0.43650985, 0.09649747, -1.8634927, -0.2773882,
              -0.35475898],
             [-0.08274148, -0.62700068, -0.04381817, -0.47721803, -1.31386475,
              0.88462238],
             [0.88131804, 1.70957306, 0.05003364, -0.40467741, -0.54535995,
              -1.54647732],
             [0.98236743, -1.10106763, -1.18504653, -0.2056499, 1.48614836,
              0.23671627]])
        self.other.samples += noise
        ref, other, m2_results = procrustes_analysis(self.reference,
                                                     self.other)
        true_m2 = m2_results['true M^2 value'][0]
        true_p_value = m2_results['p-value for true M^2 value'][0]
        skbio.util.assert_ordination_results_equal(ref, self.expected_ref)
        skbio.util.assert_ordination_results_equal(other, self.expected_noise)
        # the p value shouldn't be zero even in the presence of noise
        self.assertAlmostEqual(true_m2, 0.7388121)
        self.assertNotAlmostEqual(true_p_value, 0.001)

    def test_zero_permutations_nan_pvalue(self):
        """Disabling permutations yields a NaN p-value but a valid M^2."""
        ref, other, m2_results = procrustes_analysis(self.reference,
                                                     self.other,
                                                     permutations='disable')
        true_m2 = m2_results['true M^2 value'][0]
        true_p_value = m2_results['p-value for true M^2 value'][0]
        skbio.util.assert_ordination_results_equal(ref, self.expected_ref)
        skbio.util.assert_ordination_results_equal(other, self.expected_other)
        self.assertAlmostEqual(true_m2, self.expected_m2)
        self.assertTrue(np.isnan(true_p_value))

    def test_procrustes_bad_dimensions(self):
        """Ordinations with different axis counts are rejected."""
        # Truncate "other" to 4 axes so the shapes no longer match.
        self.other.samples = self.other.samples.iloc[:, :4]
        self.other.eigvals = self.other.eigvals[:4]
        self.other.proportion_explained = self.other.proportion_explained[:4]
        with self.assertRaisesRegex(ValueError, 'The matrices cannot be '):
            procrustes_analysis(self.reference, self.other)

    def test_procrustes_over_dimensions(self):
        """Requesting more dimensions than exist (11 > 6) is rejected."""
        with self.assertRaisesRegex(ValueError, 'Cannot fit fewer dimensions '
                                    'than available'):
            procrustes_analysis(self.reference, self.other, 11)

    def test_procrustes_id_mismatch(self):
        """Any disagreement in sample IDs — full, partial, or case-only —
        is rejected."""
        msg = 'The ordinations represent two different sets of samples'
        # Entirely different IDs.
        self.other.samples.index = pd.Index([':L', ':D', ':)', ':('])
        with self.assertRaisesRegex(ValueError, msg):
            procrustes_analysis(self.reference, self.other)
        # Only one ID differs.
        self.other.samples.index = pd.Index([':L', 'B', 'C', 'D'])
        with self.assertRaisesRegex(ValueError, msg):
            procrustes_analysis(self.reference, self.other)
        # Same letters but different case — apparently treated as distinct.
        self.other.samples.index = pd.Index(['a', 'b', 'c', 'd'])
        with self.assertRaisesRegex(ValueError, msg):
            procrustes_analysis(self.reference, self.other)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.