"""
Abstract class for generic scraping actions
"""
from abc import ABC, abstractmethod
import logging
from logging import Logger
from pathlib import Path
from bs4 import BeautifulSoup
import requests
from requests.exceptions import ConnectTimeout, HTTPError, RequestException
import config as cfg
class Page(ABC):
def __init__(self, log_name: str, log_path: Path):
self.logger = self._init_log(log_name, log_path)
@abstractmethod
def scrape(self):
pass
@abstractmethod
def _pages(self):
pass
@abstractmethod
def _page_elements(self, soup: BeautifulSoup):
pass
    def _get_soup(self, url: str) -> BeautifulSoup:
        """Fetch the page and return its parsed soup, re-raising request errors."""
        try:
            req = requests.get(url, headers=cfg.HTTP_HEADERS)
            req.raise_for_status()
        except (ConnectTimeout, HTTPError, RequestException):
            # Re-raise the original exception so its message and traceback are
            # preserved, instead of raising a bare exception class.
            raise
        soup = BeautifulSoup(req.text, "html.parser")
        return soup
def _init_log(self, log_name: str, log_path: Path) -> Logger:
logger = logging.getLogger(log_name)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_path, mode="a")
formatter = logging.Formatter("%(asctime)s-%(levelname)s-%(message)s")
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info("********************************")
return logger
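# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal concrete subclass, assuming config.HTTP_HEADERS is defined as used
# above; the URL, log name, and log path are hypothetical placeholders.
#
# class QuotesPage(Page):
#     def scrape(self):
#         for url in self._pages():
#             soup = self._get_soup(url)
#             self.logger.info("found %d elements", len(self._page_elements(soup)))
#
#     def _pages(self):
#         return ["https://example.com/page/1"]
#
#     def _page_elements(self, soup: BeautifulSoup):
#         return soup.find_all("div")
#
# QuotesPage("quotes", Path("quotes.log")).scrape()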
|
import logging
import threading
import os
import sys
from collections import Counter
from itertools import islice
from multiprocessing import cpu_count
from typing import IO, List, Iterable, Optional, cast
import subprocess
LOGGER = logging.getLogger(__name__)
DEFAULT_STOP_EPSILON_VALUE = '0.00001'
DEFAULT_STOP_WINDOW_SIZE = 20
DEFAULT_INVALID_CHARACTER_PLACEHOLDER = '?'
INVALID_CHARACTER_START_ORD = 0x6EE80
def format_feature_line(feature_line: List[str]) -> str:
return '\t'.join(feature_line)
def replace_invalid_characters(text: str, placeholder: str = DEFAULT_INVALID_CHARACTER_PLACEHOLDER):
return ''.join((
        c if ord(c) < INVALID_CHARACTER_START_ORD else placeholder
for c in text
))
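# Example (illustrative): code points at or above INVALID_CHARACTER_START_ORD are
# replaced, e.g. replace_invalid_characters('a\U0006EE85b') == 'a?b'.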
def lines_to_log(logger: logging.Logger, level: int, message: str, lines: Iterable[str]):
LOGGER.debug('lines: %s', lines)
for line in lines:
if isinstance(line, bytes):
line = line.decode('utf-8')
line = line.rstrip()
logger.log(level, message, line)
class WapitiModel:
def __init__(self, process: subprocess.Popen):
self.process = process
@property
def process_stdin(self) -> IO:
stdin = self.process.stdin
assert stdin
return stdin
@property
def process_stdout(self) -> IO:
stdout = self.process.stdout
assert stdout
return stdout
def iter_read_lines(self) -> Iterable[str]:
while self.process.poll() is None:
line = self.process_stdout.readline().decode('utf-8').rstrip()
LOGGER.debug('read line: %s', line)
yield line
def iter_label(self, data: str) -> Iterable[str]:
self.process_stdin.write((data + '\n\n\n').encode('utf-8'))
self.process_stdin.flush()
yield from self.iter_read_lines()
def label_lines(self, lines: List[str], clean_input: bool = False) -> List[str]:
LOGGER.debug('lines: %s', lines)
for line in lines + ['', '']:
if clean_input:
cleaned_line = replace_invalid_characters(line, placeholder='?')
else:
cleaned_line = line
try:
LOGGER.debug('writing line: %s', cleaned_line)
LOGGER.debug('line counts: %s', Counter(cleaned_line))
self.process_stdin.write(
(cleaned_line + '\n').encode('utf-8')
)
self.process_stdin.flush()
except BrokenPipeError:
LOGGER.error('failed to write line: %s', [(c, hex(ord(c))) for c in cleaned_line])
raise
self.process_stdin.flush()
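        # len(lines) + 1 output lines are read back: one labelled line per input
        # line plus a trailing blank sequence separator, which is dropped below.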
labelled_lines = list(islice(self.iter_read_lines(), len(lines) + 1))
LOGGER.debug('labelled_lines: %s', labelled_lines)
return labelled_lines[:-1]
def label_raw_text(self, data: str) -> str:
return '\n'.join(self.label_lines(data.splitlines()))
def label_features(self, features: List[List[str]]) -> List[List[str]]:
lines = [
format_feature_line(feature_line)
for feature_line in features
]
return [
[
token_features[0],
labelled_line.rsplit('\t', maxsplit=1)[-1]
]
for labelled_line, token_features in zip(self.label_lines(lines), features)
]
class WapitiWrapper:
    def __init__(self, wapiti_binary_path: Optional[str] = None):
self.wapiti_binary_path = wapiti_binary_path or 'wapiti'
def check_available(self):
self.run_wapiti(['--version'])
def load_model(
self,
model_path: str,
output_only_labels: bool = True,
stderr_to_log_enabled: bool = True) -> WapitiModel:
if not os.path.isfile(str(model_path)):
raise FileNotFoundError('wapiti model not found: %s' % model_path)
args = [
'label',
'--model',
str(model_path)
]
if output_only_labels:
args.append('--label')
command = [self.wapiti_binary_path] + args
LOGGER.debug('running wapiti: %s', command)
process = subprocess.Popen( # pylint: disable=consider-using-with
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=sys.stderr if not stderr_to_log_enabled else subprocess.PIPE
)
process.poll()
if stderr_to_log_enabled:
t = threading.Thread(target=lambda: lines_to_log(
LOGGER, logging.INFO, 'wapiti, stderr: %s',
cast(Iterable[str], process.stderr)
))
t.daemon = True
t.start()
return WapitiModel(process=process)
def run_wapiti(self, args: List[str]):
command = [self.wapiti_binary_path] + args
LOGGER.info('calling wapiti: %s', command)
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
) as process:
assert process.stdout
with process.stdout:
lines_to_log(
LOGGER,
logging.INFO,
'wapiti: %s',
cast(Iterable[str], process.stdout)
)
process.wait()
if process.returncode != 0:
raise subprocess.CalledProcessError(
process.returncode,
command
)
LOGGER.debug('wapiti call succeeded')
def label(
self,
model_path: str,
data_path: str,
output_data_path: str,
output_only_labels: bool = True):
if not os.path.isfile(str(model_path)):
raise FileNotFoundError('model file not found: %s' % model_path)
if not os.path.isfile(str(data_path)):
raise FileNotFoundError('data file not found: %s' % data_path)
args = [
'label',
'--model',
str(model_path)
]
if output_only_labels:
args.append('--label')
args.append(str(data_path))
args.append(str(output_data_path))
self.run_wapiti(args)
def train(
self,
data_path: str,
output_model_path: str,
template_path: Optional[str] = None,
max_iter: Optional[int] = None,
num_threads: Optional[int] = None,
stop_epsilon_value: Optional[str] = None,
stop_window_size: Optional[int] = None
):
if not os.path.isfile(str(data_path)):
raise FileNotFoundError('data file not found: %s' % data_path)
if not num_threads:
num_threads = cpu_count()
if not stop_epsilon_value:
stop_epsilon_value = DEFAULT_STOP_EPSILON_VALUE
if not stop_window_size:
stop_window_size = DEFAULT_STOP_WINDOW_SIZE
args = ['train']
if template_path:
if not os.path.isfile(str(template_path)):
raise FileNotFoundError('template file not found: %s' % template_path)
args.append('--pattern')
args.append(str(template_path))
if max_iter:
args.append('--maxiter')
args.append(str(max_iter))
args.append('--nthread')
args.append(str(num_threads))
args.append('--stopeps')
args.append(str(stop_epsilon_value))
args.append('--stopwin')
args.append(str(stop_window_size))
args.append(str(data_path))
args.append(str(output_model_path))
self.run_wapiti(args)
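# --- Usage sketch (illustrative, not part of the original module) ---
# Assumes a wapiti binary on PATH and a trained model at the hypothetical path
# 'model.wapiti'; feature rows use the tab-separated format handled above.
#
# wrapper = WapitiWrapper()
# wrapper.check_available()
# model = wrapper.load_model('model.wapiti')
# labelled = model.label_features([['token1', 'f1', 'f2'], ['token2', 'f1', 'f2']])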
|
import unittest
import os, sys, inspect, json
from src.solr_export import prep_url, exclude, match
dir_path = os.path.dirname(os.path.realpath(__file__))
resource_path = dir_path + os.sep + "resources"
class solr_export_test(unittest.TestCase):
def test_prep_url(self):
        prepped = prep_url("http://localhost:8983", {"q": "fish", "sort": "id asc"})
        self.assertEqual(prepped, "http://localhost:8983/select?q=fish&sort=id+asc")
def test_exclude(self):
with open(resource_path + "/doc_testexclude.json") as doc:
json_doc = json.load(doc)
# test basic exclude
excluded = exclude(json_doc, "*_sort")
self.assertEqual(
excluded,
{"id": "test", "sort_key": "value", "title_search": "title_value"},
)
# test comma separated exclude
excluded = exclude(json_doc, "*_sort,*_search")
self.assertEqual(excluded, {"id": "test", "sort_key": "value"})
# test multiple exclude
json_doc = {"id": "1", "fish_exclude": "ex1", "other_exclude": "ex2"}
excluded = exclude(json_doc, "*_exclude")
self.assertEqual(excluded, {"id": "1"})
def test_match(self):
self.assertTrue(match("title_search", "*_search"))
self.assertTrue(match("sort_key", "sort_*"))
self.assertTrue(match("title_search", "*_search,sort_*"))
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
# Distributed under the terms of MIT License (MIT)
import pywikibot
import re
from pywikibot import pagegenerators
from pywikibot import config
import MySQLdb as mysqldb
def numbertopersian(a):
a = str(a)
a = a.replace(u'0', u'۰')
a = a.replace(u'1', u'۱')
a = a.replace(u'2', u'۲')
a = a.replace(u'3', u'۳')
a = a.replace(u'4', u'۴')
a = a.replace(u'5', u'۵')
a = a.replace(u'6', u'۶')
a = a.replace(u'7', u'۷')
a = a.replace(u'8', u'۸')
a = a.replace(u'9', u'۹')
a = a.replace(u'.', u'٫')
return a
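# Example (illustrative): numbertopersian(1234.5) returns '۱۲۳۴٫۵'.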
savetext = u"{{#switch:{{{1|fa}}}"
# sql part
for lang in ["fa", "ar", "cs", "tr", "en", "fr", "de", "it", "az", "fi", "ko", "hu", "he"]:
site = pywikibot.Site(lang)
query = "select /* SLOW_OK */ count(rc_title),0 from recentchanges join page on rc_cur_id=page_id where rc_new=1 and rc_namespace=0 and page_is_redirect=0 and page.page_len>70 and rc_deleted=0 and DATE_SUB(CURRENT_TIMESTAMP, INTERVAL 1 DAY)<rc_timestamp;"
conn = mysqldb.connect(lang + "wiki.labsdb", db=site.dbName()+ '_p',
read_default_file="~/replica.my.cnf")
cursor = conn.cursor()
pywikibot.output(u'Executing query:\n%s' % query)
query = query.encode(site.encoding())
cursor.execute(query)
wikinum, numb = cursor.fetchone()
if wikinum:
savetext = savetext + u"|" + lang + u"=" + numbertopersian(wikinum)
else:
savetext = savetext + u"|" + lang + u"="
# pywikipedia part
savetext = savetext + "}}"
pywikibot.output(savetext)
site = pywikibot.Site()
page = pywikibot.Page(site, u"الگو:سردر تغییرات اخیر/سایر ویکیها")  # "Template:Recent changes header/other wikis"
page.put(savetext, u"ربات: بهروز رسانی آمار دیگر ویکیها")  # "Bot: updating other wikis' statistics"
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from ax.core.observation import ObservationData
from ax.modelbridge.transforms.ivw import IVW, ivw_metric_merge
from ax.utils.common.testutils import TestCase
class IVWTransformTest(TestCase):
def testNoRepeats(self):
obsd = ObservationData(
metric_names=["m1", "m2"],
means=np.array([1.0, 2.0]),
covariance=np.array([[1.0, 0.2], [0.2, 2.0]]),
)
obsd2 = ivw_metric_merge(obsd)
self.assertEqual(obsd2, obsd)
def testMerge(self):
obsd = ObservationData(
metric_names=["m1", "m2", "m2"],
means=np.array([1.0, 2.0, 1.0]),
covariance=np.array([[1.0, 0.2, 0.4], [0.2, 2.0, 0.8], [0.4, 0.8, 3.0]]),
)
obsd2 = ivw_metric_merge(obsd)
self.assertEqual(obsd2.metric_names, ["m1", "m2"])
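        # The merged m2 mean uses inverse-variance weights:
        # w1 = (1/2.0) / (1/2.0 + 1/3.0) = 0.6 and w2 = (1/3.0) / (1/2.0 + 1/3.0) = 0.4,
        # so mean(m2) = 0.6 * 2.0 + 0.4 * 1.0 = 1.6.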
self.assertTrue(np.array_equal(obsd2.means, np.array([1.0, 0.6 * 2 + 0.4])))
cov12 = 0.2 * 0.6 + 0.4 * 0.4
# var(w1*y1 + w2*y2) =
# w1 ** 2 * var(y1) + w2 ** 2 * var(y2) + 2 * w1 * w2 * cov(y1, y2)
cov22 = 0.6 ** 2 * 2.0 + 0.4 ** 2 * 3 + 2 * 0.6 * 0.4 * 0.8
cov_true = np.array([[1.0, cov12], [cov12, cov22]])
discrep = np.max(np.abs(obsd2.covariance - cov_true))
self.assertTrue(discrep < 1e-8)
def testNoiselessMerge(self):
# One noiseless
obsd = ObservationData(
metric_names=["m1", "m2", "m2"],
means=np.array([1.0, 2.0, 1.0]),
covariance=np.array([[1.0, 0.2, 0.4], [0.2, 2.0, 0.8], [0.4, 0.8, 0.0]]),
)
obsd2 = ivw_metric_merge(obsd)
        self.assertTrue(np.array_equal(obsd2.means, np.array([1.0, 1.0])))
cov_true = np.array([[1.0, 0.4], [0.4, 0.0]])
self.assertTrue(np.array_equal(obsd2.covariance, cov_true))
# Conflicting noiseless, default (warn)
obsd = ObservationData(
metric_names=["m1", "m2", "m2"],
means=np.array([1.0, 2.0, 1.0]),
covariance=np.array([[1.0, 0.2, 0.4], [0.2, 0.0, 0.8], [0.4, 0.8, 0.0]]),
)
with self.assertRaises(ValueError):
obsd2 = ivw_metric_merge(obsd, conflicting_noiseless="wrong")
obsd2 = ivw_metric_merge(obsd)
self.assertTrue(np.array_equal(obsd2.means, np.array([1.0, 2.0])))
cov_true = np.array([[1.0, 0.2], [0.2, 0.0]])
self.assertTrue(np.array_equal(obsd2.covariance, cov_true))
# Conflicting noiseless, raise
with self.assertRaises(ValueError):
obsd2 = ivw_metric_merge(obsd, conflicting_noiseless="raise")
def testTransform(self):
obsd1_0 = ObservationData(
metric_names=["m1", "m2", "m2"],
means=np.array([1.0, 2.0, 1.0]),
covariance=np.array([[1.0, 0.2, 0.4], [0.2, 2.0, 0.8], [0.4, 0.8, 3.0]]),
)
obsd1_1 = ObservationData(
metric_names=["m1", "m1", "m2", "m2"],
means=np.array([1.0, 1.0, 2.0, 1.0]),
covariance=np.array(
[
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.2, 0.4],
[0.0, 0.2, 2.0, 0.8],
[0.0, 0.4, 0.8, 3.0],
]
),
)
obsd2_0 = ObservationData(
metric_names=["m1", "m2"],
means=np.array([1.0, 1.6]),
covariance=np.array([[1.0, 0.28], [0.28, 1.584]]),
)
obsd2_1 = ObservationData(
metric_names=["m1", "m2"],
means=np.array([1.0, 1.6]),
covariance=np.array([[0.5, 0.14], [0.14, 1.584]]),
)
observation_data = [obsd1_0, obsd1_1]
t = IVW(None, None, None)
observation_data2 = t.transform_observation_data(observation_data, [])
observation_data2_true = [obsd2_0, obsd2_1]
for i, obsd in enumerate(observation_data2_true):
self.assertEqual(observation_data2[i].metric_names, obsd.metric_names)
self.assertTrue(np.array_equal(observation_data2[i].means, obsd.means))
discrep = np.max(np.abs(observation_data2[i].covariance - obsd.covariance))
self.assertTrue(discrep < 1e-8)
|
from setuptools import setup
setup(
name="ytdlmusic",
version="2.0.0",
description="ytdlmusic is a command-line program to search and download music files from YouTube without use browser.",
long_description="The complete description/installation/use/FAQ is available at : https://github.com/thib1984/ytdlmusic#readme",
long_description_content_type="text/markdown",
url="https://github.com/thib1984/ytdlmusic",
author="thib1984",
author_email="thibault.garcon@gmail.com",
license="MIT",
packages=["ytdlmusic"],
install_requires=[
"youtube-search-python",
"yt_dlp",
"tinytag",
"unidecode",
"termcolor",
"colorama",
],
zip_safe=False,
entry_points={
"console_scripts": ["ytdlmusic=ytdlmusic.__init__:ytdlmusic"],
},
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Add a table to hold blacklisted projects
Revision ID: b6a20b9c888d
Revises: 5b3f9e687d94
Create Date: 2017-09-15 16:24:03.201478
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = "b6a20b9c888d"
down_revision = "5b3f9e687d94"
def upgrade():
op.create_table(
"blacklist",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column(
"created", sa.DateTime(), server_default=sa.text("now()"), nullable=False
),
sa.Column("name", sa.Text(), nullable=False),
sa.Column("blacklisted_by", postgresql.UUID(), nullable=True),
sa.Column("comment", sa.Text(), server_default="", nullable=False),
sa.CheckConstraint(
"name ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'::text",
name="blacklist_valid_name",
),
sa.ForeignKeyConstraint(["blacklisted_by"], ["accounts_user.id"]),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
    # Set up a trigger to ensure that we never commit a blacklist name that
    # hasn't been normalized.
op.execute(
""" CREATE OR REPLACE FUNCTION ensure_normalized_blacklist()
RETURNS TRIGGER AS $$
BEGIN
NEW.name = normalize_pep426_name(NEW.name);
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
"""
)
    op.execute(
        """ CREATE TRIGGER normalize_blacklist
            BEFORE INSERT OR UPDATE ON blacklist
            FOR EACH ROW EXECUTE PROCEDURE ensure_normalized_blacklist();
        """
    )
def downgrade():
raise RuntimeError("Order No. 227 - Ни шагу назад!")
|
"""
The ``interface`` taxon is the most fundamental and generic applet taxon. It groups applets
implementing interfaces that are used for purposes that do not fit into any single other taxon.
Because the ``interface`` taxon is so important, the applet names in this taxon are not prefixed
with the taxon name.
Examples: SPI, I²C.
Counterexamples: MIDI (use taxon ``audio``).
"""
|
from .gelu import gelu
from .transformer import *
__version__ = '0.39.0'
|
from twilio.rest import Client
account_sid = 'AC988415bd476b4abc248b4afaa8bc6717'
auth_token = '3b62cb9653d077b61f0d1f50bc06e718'
client = Client(account_sid, auth_token)
message = client.messages.create(
from_='+13343423628',
body='asdf',
to='+8617742566640'
)
print(message.sid)
|
from marshmallow import fields
from marshmallow import Schema
class SmsRequestParameters(Schema):
national_number = fields.String(required=True, description='National Number')
country_code = fields.String(
required=True, description='Country Code, like 86 for China'
)
class ValidateSmsCodeParameters(Schema):
code = fields.String(required=True, description='SMS Code')
phone_num = fields.String(
        required=True, description='Concatenated Phone Number, like 8613333333333'
)
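# --- Usage sketch (illustrative, not part of the original module) ---
# These schemas validate request payloads, e.g. with marshmallow 3:
# SmsRequestParameters().load({"national_number": "13333333333", "country_code": "86"})
# returns the validated dict and raises marshmallow.ValidationError when a
# required field is missing.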
|
# -*- coding: utf-8 -*-
from flask import current_app
from .util import render_email_template, send_or_handle_error, escape_markdown
import rollbar
import pendulum
def send_brief_response_received_email(supplier, brief, brief_response, supplier_user=None, is_update=False):
to_address = brief_response.data['respondToEmailAddress']
if brief.lot.slug in ['rfx', 'atm', 'training2']:
brief_url = current_app.config['FRONTEND_ADDRESS'] + '/2/' + brief.framework.slug + '/opportunities/' \
+ str(brief.id)
else:
brief_url = current_app.config['FRONTEND_ADDRESS'] + '/' + brief.framework.slug + '/opportunities/' \
+ str(brief.id)
ask_question_url = '{}/login?next=%2Fsellers%2Fopportunities%2F{}%2Fask-a-question'.format(
current_app.config['FRONTEND_ADDRESS'], brief.id
)
brief_response_url = '{}/2/brief/{}/{}/respond/{}'.format(
current_app.config['FRONTEND_ADDRESS'],
brief.id,
brief.lot.slug,
brief_response.id
)
if is_update:
subject = "You've updated your response for {}"
else:
subject = "You've applied for {} successfully!"
brief_title = brief.data['title']
if len(brief_title) > 30:
brief_title = '{}...'.format(brief_title[:30])
subject = subject.format(brief_title)
template_file_name = 'brief_response_updated.md' if is_update else 'brief_response_submitted.md'
# prepare copy
email_body = render_email_template(
template_file_name,
brief_id=brief.id,
brief_url=brief_url,
brief_response_url=brief_response_url,
ask_question_url=ask_question_url,
closing_at=brief.closed_at.format('DD MMMM YYYY'),
brief_title=brief_title,
supplier_name=supplier.name,
frontend_url=current_app.config['FRONTEND_ADDRESS'],
organisation=brief.data['organisation'],
supplier_user=supplier_user
)
send_or_handle_error(
to_address,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
        event_description_for_errors='brief response received'
)
def send_specialist_brief_response_received_email(supplier, brief, brief_response, supplier_user=None,
is_update=False):
from app.api.services import audit_service, audit_types # to circumvent circular dependency
if brief.lot.slug not in ['specialist']:
return
to_address = brief_response.data['respondToEmailAddress']
specialist_name = '{} {}'.format(
brief_response.data.get('specialistGivenNames', ''),
brief_response.data.get('specialistSurname', '')
)
brief_url = '{}/2/{}/opportunities/{}'.format(
current_app.config['FRONTEND_ADDRESS'],
brief.framework.slug,
brief.id
)
brief_response_url = '{}/2/brief/{}/specialist2/respond/{}'.format(
current_app.config['FRONTEND_ADDRESS'],
brief.id,
brief_response.id
)
attachment_url = '{}/api/2/brief/{}/respond/documents/{}/'.format(
current_app.config['FRONTEND_ADDRESS'],
brief.id,
supplier.code
)
ess = ""
if brief_response.data.get('essentialRequirements', None):
i = 0
for req in brief.data['essentialRequirements']:
ess += "**{}. {}**\n\n{}\n\n".format(
i + 1,
req['criteria'],
escape_markdown(brief_response.data['essentialRequirements'][req['criteria']])
)
i += 1
nth = ""
if brief_response.data.get('niceToHaveRequirements', None):
i = 0
for req in brief.data['niceToHaveRequirements']:
nth_reqs = brief_response.data.get('niceToHaveRequirements', [])
if req['criteria'] in nth_reqs:
nth += "**{}. {}**\n\n{}\n\n".format(
i + 1,
req['criteria'],
escape_markdown(nth_reqs[req['criteria']])
)
i += 1
if nth:
nth = '####Desirable criteria: \n\n ' + nth
criteriaResponses = ""
evaluationCriteriaResponses = brief_response.data.get('criteria', {})
if evaluationCriteriaResponses:
for evaluationCriteria in brief.data['evaluationCriteria']:
if (
'criteria' in evaluationCriteria and
evaluationCriteria['criteria'] in evaluationCriteriaResponses.keys()
):
criteriaResponses += "####• {}\n\n{}\n\n".format(
evaluationCriteria['criteria'],
escape_markdown(evaluationCriteriaResponses[evaluationCriteria['criteria']])
)
attachments = ''
resume = ''
for attach in brief_response.data.get('resume', []):
if not resume:
resume = '[{}]({}{}) '.format(attach, attachment_url, attach)
else:
attachments += "* [{}]({}{})\n\n".format(attach, attachment_url, attach)
if attachments:
attachments = '**Other documents:** \n\n ' + attachments
if is_update:
subject = "{}'s response for '{}' ({}) was updated".format(
specialist_name,
brief.data['title'],
brief.id
)
else:
subject = 'You submitted {} for {} ({}) successfully'.format(
specialist_name,
brief.data['title'],
brief.id
)
response_security_clearance = ''
if brief.data.get('securityClearance') == 'mustHave':
must_have_clearance = ''
if brief.data.get('securityClearanceCurrent') == 'baseline':
must_have_clearance = 'baseline'
elif brief.data.get('securityClearanceCurrent') == 'nv1':
must_have_clearance = 'negative vetting level 1'
elif brief.data.get('securityClearanceCurrent') == 'nv2':
must_have_clearance = 'negative vetting level 2'
elif brief.data.get('securityClearanceCurrent') == 'pv':
must_have_clearance = 'positive vetting'
response_security_clearance = '\n**Holds a {} security clearance:** {} '.format(
must_have_clearance,
escape_markdown(brief_response.data.get('securityClearance'))
)
response_rates = ''
response_rates_excluding_gst = ''
if brief.data.get('preferredFormatForRates') == 'hourlyRate':
response_rates = '**Hourly rate, including GST:** ${}'.format(
escape_markdown(brief_response.data.get('hourRate'))
)
response_rates_excluding_gst = '**Hourly rate, excluding GST:** ${}'.format(
escape_markdown(brief_response.data.get('hourRateExcludingGST'))
)
elif brief.data.get('preferredFormatForRates') == 'dailyRate':
response_rates = '**Daily rate, including GST:** ${}'.format(
escape_markdown(brief_response.data.get('dayRate'))
)
response_rates_excluding_gst = '**Daily rate, excluding GST:** ${}'.format(
escape_markdown(brief_response.data.get('dayRateExcludingGST'))
)
response_visa_status = ''
if brief_response.data.get('visaStatus') == 'AustralianCitizen':
response_visa_status = 'Australian citizen'
elif brief_response.data.get('visaStatus') == 'PermanentResident':
response_visa_status = 'Permanent resident'
elif brief_response.data.get('visaStatus') == 'ForeignNationalWithAValidVisa':
response_visa_status = 'Foreign national with a valid visa'
template_file_name = (
'specialist_brief_response_updated.md' if is_update else 'specialist_brief_response_submitted.md'
)
# prepare copy
email_body = render_email_template(
template_file_name,
frontend_url=current_app.config['FRONTEND_ADDRESS'],
brief_url=brief_url,
brief_id=brief.id,
brief_name=brief.data['title'],
brief_organisation=brief.data['organisation'],
supplier_user=supplier_user,
essential_requirements=ess,
nice_to_have_requirements=nth,
criteria_responses=criteriaResponses,
resume=resume,
attachments=attachments,
closing_at=brief.closed_at.format('DD MMMM YYYY'),
specialist_name=escape_markdown(specialist_name),
brief_response_url=brief_response_url,
response_rates=response_rates,
response_rates_excluding_gst=response_rates_excluding_gst,
response_previously_worked=escape_markdown(brief_response.data.get('previouslyWorked')),
response_security_clearance=response_security_clearance,
response_start_date=escape_markdown(brief_response.data.get('availability')),
response_visa_status=response_visa_status
)
send_or_handle_error(
to_address,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
        event_description_for_errors='brief response received'
)
audit_service.log_audit_event(
audit_type=audit_types.specialist_brief_response_received_email,
user='',
data={
"to_address": to_address,
"email_body": email_body,
"subject": subject
},
db_object=brief)
def send_specialist_brief_response_withdrawn_email(supplier, brief, brief_response, supplier_user=None):
from app.api.services import audit_service, audit_types # to circumvent circular dependency
to_address = brief_response.data['respondToEmailAddress']
specialist_name = '{} {}'.format(
brief_response.data.get('specialistGivenNames', ''),
brief_response.data.get('specialistSurname', '')
)
subject = "{}'s response to '{}' ({}) has been withdrawn".format(
specialist_name,
brief.data['title'],
brief.id
)
brief_url = '{}/2/{}/opportunities/{}'.format(
current_app.config['FRONTEND_ADDRESS'],
brief.framework.slug,
brief.id
)
email_body = render_email_template(
'specialist_brief_response_withdrawn.md',
specialist_name=specialist_name,
brief_url=brief_url,
brief_name=brief.data['title'],
brief_id=brief.id,
frontend_url=current_app.config['FRONTEND_ADDRESS'],
brief_organisation=brief.data['organisation'],
supplier_user=supplier_user
)
send_or_handle_error(
to_address,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
event_description_for_errors='brief response withdrawn'
)
audit_service.log_audit_event(
audit_type=audit_types.specialist_brief_response_withdrawn_email,
user='',
data={
"to_address": to_address,
"email_body": email_body,
"subject": subject
},
db_object=brief_response)
def send_brief_response_withdrawn_email(supplier, brief, brief_response, supplier_user=None):
from app.api.services import audit_service, audit_types # to circumvent circular dependency
to_address = brief_response.data['respondToEmailAddress']
subject = "Your response for '{}' ({}) has been withdrawn".format(
brief.data['title'],
brief.id
)
brief_url = '{}/2/{}/opportunities/{}'.format(
current_app.config['FRONTEND_ADDRESS'],
brief.framework.slug,
brief.id
)
email_body = render_email_template(
'brief_response_withdrawn.md',
frontend_url=current_app.config['FRONTEND_ADDRESS'],
brief_url=brief_url,
brief_title=brief.data['title'],
brief_id=brief.id,
organisation=brief.data['organisation'],
supplier_user=supplier_user
)
send_or_handle_error(
to_address,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
event_description_for_errors='brief response withdrawn'
)
audit_service.log_audit_event(
audit_type=audit_types.brief_response_withdrawn_email,
user='',
data={
"to_address": to_address,
"email_body": email_body,
"subject": subject
},
db_object=brief_response)
def send_brief_closed_email(brief):
from app.api.services import audit_service, audit_types # to circumvent circular dependency
if brief.lot.slug in ['specialist']:
return
brief_email_sent_audit_event = audit_service.find(type=audit_types.sent_closed_brief_email.value,
object_type="Brief",
object_id=brief.id).count()
if (brief_email_sent_audit_event > 0):
return
to_addresses = get_brief_emails(brief)
# prepare copy
email_body = render_email_template(
'brief_closed.md',
frontend_url=current_app.config['FRONTEND_ADDRESS'],
brief_name=brief.data['title'],
brief_id=brief.id
)
subject = "Your opportunity has closed - please review all responses."
send_or_handle_error(
to_addresses,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
event_description_for_errors='brief closed'
)
audit_service.log_audit_event(
audit_type=audit_types.sent_closed_brief_email,
user='',
data={
"to_addresses": ', '.join(to_addresses),
"email_body": email_body,
"subject": subject
},
db_object=brief)
def send_seller_requested_feedback_from_buyer_email(brief):
from app.api.services import audit_service, audit_types # to circumvent circular dependency
to_addresses = get_brief_emails(brief)
# prepare copy
email_body = render_email_template(
'seller_requested_feedback_from_buyer_email.md',
frontend_url=current_app.config['FRONTEND_ADDRESS'],
brief_name=brief.data['title'],
brief_id=brief.id
)
subject = "Buyer notifications to unsuccessful sellers"
send_or_handle_error(
to_addresses,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
event_description_for_errors='seller_requested_feedback_from_buyer_email'
)
audit_service.log_audit_event(
audit_type=audit_types.seller_requested_feedback_from_buyer_email,
user='',
data={
"to_addresses": ', '.join(to_addresses),
"email_body": email_body,
"subject": subject
},
db_object=brief)
def send_seller_invited_to_rfx_email(brief, invited_supplier):
from app.api.services import audit_service, audit_types # to circumvent circular dependency
if brief.lot.slug != 'rfx':
return
to_addresses = []
if 'contact_email' in invited_supplier.data:
to_addresses = [invited_supplier.data['contact_email']]
elif 'email' in invited_supplier.data:
to_addresses = [invited_supplier.data['email']]
if len(to_addresses) > 0:
email_body = render_email_template(
'brief_rfx_invite_seller.md',
frontend_url=current_app.config['FRONTEND_ADDRESS'],
brief_name=brief.data['title'],
brief_id=brief.id
)
subject = "You have been invited to respond to an opportunity"
send_or_handle_error(
to_addresses,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
event_description_for_errors='seller_invited_to_rfx_opportunity'
)
audit_service.log_audit_event(
audit_type=audit_types.seller_invited_to_rfx_opportunity,
user='',
data={
"to_addresses": ', '.join(to_addresses),
"email_body": email_body,
"subject": subject
},
db_object=brief)
def send_seller_invited_to_training_email(brief, invited_supplier):
from app.api.services import audit_service, audit_types # to circumvent circular dependency
if brief.lot.slug != 'training2':
return
to_addresses = []
if 'contact_email' in invited_supplier.data:
to_addresses = [invited_supplier.data['contact_email']]
elif 'email' in invited_supplier.data:
to_addresses = [invited_supplier.data['email']]
if len(to_addresses) > 0:
email_body = render_email_template(
'brief_training_invite_seller.md',
frontend_url=current_app.config['FRONTEND_ADDRESS'],
brief_name=brief.data['title'],
brief_id=brief.id
)
subject = "You have been invited to respond to an opportunity"
send_or_handle_error(
to_addresses,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
event_description_for_errors='seller_invited_to_training_opportunity'
)
audit_service.log_audit_event(
audit_type=audit_types.seller_invited_to_training_opportunity,
user='',
data={
"to_addresses": ', '.join(to_addresses),
"email_body": email_body,
"subject": subject
},
db_object=brief)
def send_specialist_brief_published_email(brief):
from app.api.services import (
audit_service,
audit_types,
domain_service,
suppliers
) # to circumvent circular dependency
from app.models import Supplier
if brief.lot.slug != 'specialist':
return
brief_email_sent_audit_event = audit_service.find(type=audit_types.specialist_brief_published.value,
object_type="Brief",
object_id=brief.id).count()
if (brief_email_sent_audit_event > 0):
return
to_addresses = get_brief_emails(brief)
invited_sellers = ''
sellers_text = ''
if brief.data.get('sellerSelector', '') == 'someSellers':
sellers_text = ''
seller_codes = []
for key, value in brief.data.get('sellers', {}).items():
seller_codes.append(key)
sellers = suppliers.filter(Supplier.code.in_(seller_codes)).all()
for seller in sellers:
invited_sellers += '* {}\n'.format(seller.name)
else:
panel_category = domain_service.get(id=brief.data.get('sellerCategory'))
sellers_text = 'All sellers approved under {}'.format(panel_category.name)
# prepare copy
email_body = render_email_template(
'specialist_brief_published.md',
frontend_url=current_app.config['FRONTEND_ADDRESS'],
brief_name=brief.data['title'],
brief_id=brief.id,
brief_close_date=brief.closed_at.strftime('%d/%m/%Y'),
sellers_text=sellers_text,
invited_sellers=invited_sellers,
number_of_suppliers=brief.data.get('numberOfSuppliers', ''),
question_close_date=brief.questions_closed_at.strftime('%d/%m/%Y')
)
subject = "Your opportunity for {} has been published".format(brief.data['title'])
send_or_handle_error(
to_addresses,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
event_description_for_errors='brief published'
)
audit_service.log_audit_event(
audit_type=audit_types.specialist_brief_published,
user='',
data={
"to_addresses": ', '.join(to_addresses),
"email_body": email_body,
"subject": subject
},
db_object=brief)
def send_specialist_brief_seller_invited_email(brief, invited_supplier):
from app.api.services import audit_service, audit_types # to circumvent circular dependency
if brief.lot.slug != 'specialist':
return
to_addresses = []
if 'contact_email' in invited_supplier.data:
to_addresses = [invited_supplier.data['contact_email']]
elif 'email' in invited_supplier.data:
to_addresses = [invited_supplier.data['email']]
if len(to_addresses) > 0:
number_of_suppliers = int(brief.data['numberOfSuppliers'])
email_body = render_email_template(
'specialist_brief_invite_seller.md',
frontend_url=current_app.config['FRONTEND_ADDRESS'],
brief_name=brief.data['title'],
brief_id=brief.id,
brief_organisation=brief.data['organisation'],
brief_close_date=brief.closed_at.strftime('%d/%m/%Y'),
question_close_date=brief.questions_closed_at.strftime('%d/%m/%Y'),
number_of_suppliers=number_of_suppliers,
number_of_suppliers_plural='s' if number_of_suppliers > 1 else ''
)
subject = "You're invited to submit candidates for {}".format(brief.data['title'])
send_or_handle_error(
to_addresses,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
event_description_for_errors='seller_invited_to_specialist_opportunity'
)
audit_service.log_audit_event(
audit_type=audit_types.seller_invited_to_specialist_opportunity,
user='',
data={
"to_addresses": ', '.join(to_addresses),
"email_body": email_body,
"subject": subject
},
db_object=brief)
def send_specialist_brief_closed_email(brief):
from app.api.services import (
audit_service,
audit_types,
brief_responses_service
) # to circumvent circular dependency
if brief.lot.slug != 'specialist':
return
audit_event = audit_service.find(type=audit_types.specialist_brief_closed_email.value,
object_type="Brief",
object_id=brief.id).count()
if (audit_event > 0):
return
responses = brief_responses_service.get_brief_responses(brief.id, None, submitted_only=True)
to_addresses = get_brief_emails(brief)
# prepare copy
email_body = render_email_template(
'specialist_brief_closed.md',
frontend_url=current_app.config['FRONTEND_ADDRESS'],
brief_name=brief.data['title'],
brief_id=brief.id,
number_of_responses='{}'.format(len(responses)),
number_of_responses_plural='s' if len(responses) > 1 else ''
)
subject = 'Your "{}" opportunity has closed.'.format(brief.data['title'])
send_or_handle_error(
to_addresses,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
event_description_for_errors='brief closed'
)
audit_service.log_audit_event(
audit_type=audit_types.specialist_brief_closed_email,
user='',
data={
"to_addresses": ', '.join(to_addresses),
"email_body": email_body,
"subject": subject
},
db_object=brief)
def send_brief_clarification_to_buyer(brief, brief_question, supplier):
from app.api.services import (
audit_service,
audit_types
) # to circumvent circular dependency
to_addresses = get_brief_emails(brief)
# prepare copy
email_body = render_email_template(
'brief_question_to_buyer.md',
frontend_url=current_app.config['FRONTEND_ADDRESS'],
brief_id=brief.id,
brief_name=escape_markdown(brief.data.get('title')),
publish_by_date=brief.closed_at.strftime('%d/%m/%Y'),
message=escape_markdown(brief_question.data.get('question')),
supplier_name=escape_markdown(supplier.name)
)
subject = "You received a new question for ‘{}’".format(brief.data.get('title'))
send_or_handle_error(
to_addresses,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
event_description_for_errors='brief question email sent to buyer'
)
audit_service.log_audit_event(
audit_type=audit_types.sent_brief_question_to_buyer,
user='',
data={
"to_addresses": ', '.join(to_addresses),
"email_body": email_body,
"subject": subject
},
db_object=brief)
def send_brief_clarification_to_seller(brief, brief_question, to_address):
from app.api.services import (
audit_service,
audit_types
) # to circumvent circular dependency
# prepare copy
email_body = render_email_template(
'brief_question_to_seller.md',
frontend_url=current_app.config['FRONTEND_ADDRESS'],
brief_id=brief.id,
brief_name=escape_markdown(brief.data.get('title')),
brief_organisation=brief.data.get('organisation'),
publish_by_date=brief.questions_closed_at.strftime('%d/%m/%Y'),
message=escape_markdown(brief_question.data.get('question'))
)
subject = "You submitted a question for {} ({}) successfully".format(brief.data.get('title'), brief.id)
send_or_handle_error(
to_address,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
event_description_for_errors='brief question email sent to seller'
)
audit_service.log_audit_event(
audit_type=audit_types.sent_brief_question_to_seller,
user='',
data={
"to_addresses": to_address,
"email_body": email_body,
"subject": subject
},
db_object=brief)
def send_opportunity_closed_early_email(brief, current_user):
# to circumvent circular dependencies
from app.api.services import audit_service, audit_types
to_addresses = get_brief_emails(brief)
supplier_code, seller = next(iter(brief.data.get('sellers', {}).items()))
email_body = render_email_template(
'opportunity_closed_early.md',
brief_id=brief.id,
framework=brief.framework.slug,
frontend_url=current_app.config['FRONTEND_ADDRESS'],
possessive="'" if seller['name'].lower().endswith('s') else "'s",
seller_name=escape_markdown(seller['name']),
title=escape_markdown(brief.data['title']),
user=escape_markdown(current_user.name)
)
subject = "'{}' has been closed early".format(brief.data['title'])
send_or_handle_error(
to_addresses,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
event_description_for_errors=audit_types.close_opportunity_early
)
audit_service.log_audit_event(
audit_type=audit_types.sent_opportunity_closed_early_email,
user='',
data={
"to_addresses": ', '.join(to_addresses),
"email_body": email_body,
"subject": subject
},
db_object=brief
)
def send_opportunity_edited_email_to_buyers(brief, current_user, edit):
# to circumvent circular dependencies
from app.api.business.brief import brief_edit_business
from app.api.services import audit_service, audit_types
to_addresses = get_brief_emails(brief)
summary = ''
seller_questions_message = ''
timezone = 'Australia/Canberra'
changes = brief_edit_business.get_changes_made_to_opportunity(brief, edit)
if 'closingDate' in changes:
seller_questions_message = (
'The last day sellers can ask questions is now {}. '.format(
brief.questions_closed_at.in_timezone(timezone).format('DD MMMM YYYY')
) +
'You must answer all relevant questions while the opportunity is live.'
)
summary = '* Closing date changed from {} to {}\n'.format(
pendulum.parse(edit.data['closed_at'], tz=timezone).format('DD MMMM YYYY'),
brief.closed_at.in_timezone(timezone).format('DD MMMM YYYY')
)
if 'title' in changes:
summary += "* Title changed from '{}' to '{}'\n".format(
escape_markdown(edit.data['title']), escape_markdown(brief.data['title'])
)
if 'sellers' in changes:
new_sellers = []
for key, value in changes['sellers']['newValue'].items():
if key not in changes['sellers']['oldValue']:
new_sellers.append(value['name'])
number_of_sellers_invited = len(new_sellers)
seller_or_sellers = 'seller' if number_of_sellers_invited == 1 else 'sellers'
summary += '* {} more {} invited to apply:\n'.format(number_of_sellers_invited, seller_or_sellers)
sorted_sellers = sorted(new_sellers, key=lambda s: s.lower())
for seller in sorted_sellers:
summary += ' * {}\n'.format(
escape_markdown(seller)
)
if 'summary' in changes:
summary += '* Summary was updated\n'
def generate_document_changes(old, new):
text = ''
removed = [x for x in old if x not in new]
added = [x for x in new if x not in old]
if len(removed) > 0:
for x in removed:
text += '* ' + escape_markdown(x) + ' removed\n'
if len(added) > 0:
for x in added:
text += '* ' + escape_markdown(x) + ' added\n'
return text
if 'attachments' in changes:
summary += generate_document_changes(changes['attachments']['oldValue'], changes['attachments']['newValue'])
if 'responseTemplate' in changes:
summary += generate_document_changes(
changes['responseTemplate']['oldValue'], changes['responseTemplate']['newValue']
)
if 'requirementsDocument' in changes:
summary += generate_document_changes(
changes['requirementsDocument']['oldValue'], changes['requirementsDocument']['newValue']
)
email_body = render_email_template(
'opportunity_edited_buyers.md',
brief_id=brief.id,
edit_summary=summary,
framework=brief.framework.slug,
frontend_url=current_app.config['FRONTEND_ADDRESS'],
seller_questions_message=seller_questions_message,
title=escape_markdown(brief.data['title']),
user=escape_markdown(current_user.name)
)
subject = "Updates made to '{}' opportunity".format(brief.data['title'])
send_or_handle_error(
to_addresses,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
event_description_for_errors=audit_types.opportunity_edited
)
audit_service.log_audit_event(
audit_type=audit_types.sent_opportunity_edited_email_to_buyers,
user='',
data={
"to_addresses": ', '.join(to_addresses),
"email_body": email_body,
"subject": subject
},
db_object=brief
)
def send_opportunity_edited_email_to_seller(brief, email_address, buyer):
# to circumvent circular dependencies
from app.api.services import audit_service, audit_types
candidate_message = ''
if brief.lot.slug == 'specialist':
candidate_message = "candidate's "
formatted_closing_date = (
brief.closed_at.in_timezone('Australia/Canberra').format('dddd DD MMMM YYYY at h:mmA (in Canberra)')
)
email_body = render_email_template(
'opportunity_edited_sellers.md',
brief_id=brief.id,
buyer=buyer,
candidate_message=candidate_message,
closing_date=formatted_closing_date,
framework=brief.framework.slug,
frontend_url=current_app.config['FRONTEND_ADDRESS'],
title=escape_markdown(brief.data['title'])
)
subject = "Changes made to '{}' opportunity".format(brief.data['title'])
send_or_handle_error(
email_address,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
event_description_for_errors=audit_types.opportunity_edited
)
audit_service.log_audit_event(
audit_type=audit_types.sent_opportunity_edited_email_to_seller,
user='',
data={
"to_addresses": email_address,
"email_body": email_body,
"subject": subject
},
db_object=brief
)
def send_opportunity_withdrawn_email_to_buyers(brief, current_user):
# to circumvent circular dependencies
from app.api.business.brief import brief_business
from app.api.services import audit_service, audit_types
to_addresses = get_brief_emails(brief)
seller_message = ''
invited_seller_codes = list(brief.data.get('sellers', {}).keys())
if brief_business.is_open_to_all(brief):
seller_message = 'We have notified sellers who have drafted or submitted responses to this opportunity'
elif len(invited_seller_codes) == 1:
invited_seller_code = invited_seller_codes.pop()
seller_name = brief.data['sellers'][invited_seller_code]['name']
seller_message = '{} has been notified'.format(seller_name)
else:
seller_message = 'All invited sellers have been notified'
email_body = render_email_template(
'opportunity_withdrawn_buyers.md',
brief_id=brief.id,
framework=brief.framework.slug,
frontend_url=current_app.config['FRONTEND_ADDRESS'],
seller_message=escape_markdown(seller_message),
title=escape_markdown(brief.data['title']),
user=escape_markdown(current_user.name),
withdrawal_reason=escape_markdown(brief.data['reasonToWithdraw'])
)
subject = "'{}' ({}) is withdrawn from the Digital Marketplace".format(
brief.data['title'],
brief.id
)
send_or_handle_error(
to_addresses,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
event_description_for_errors=audit_types.withdraw_opportunity
)
audit_service.log_audit_event(
audit_type=audit_types.sent_opportunity_withdrawn_email_to_buyers,
user='',
data={
"to_addresses": ', '.join(to_addresses),
"email_body": email_body,
"subject": subject
},
db_object=brief
)
def send_opportunity_withdrawn_email_to_seller(brief, email_address, buyer):
# to circumvent circular dependencies
from app.api.services import audit_service, audit_types
email_body = render_email_template(
'opportunity_withdrawn_sellers.md',
brief_id=brief.id,
buyer=buyer,
framework=brief.framework.slug,
frontend_url=current_app.config['FRONTEND_ADDRESS'],
title=escape_markdown(brief.data['title']),
withdrawal_reason=escape_markdown(brief.data['reasonToWithdraw'])
)
subject = "'{}' ({}) is withdrawn from the Digital Marketplace".format(
brief.data['title'],
brief.id
)
send_or_handle_error(
email_address,
email_body,
subject,
current_app.config['DM_GENERIC_NOREPLY_EMAIL'],
current_app.config['DM_GENERIC_SUPPORT_NAME'],
event_description_for_errors=audit_types.withdraw_opportunity
)
audit_service.log_audit_event(
audit_type=audit_types.sent_opportunity_withdrawn_email_to_seller,
user='',
data={
"to_addresses": email_address,
"email_body": email_body,
"subject": subject
},
db_object=brief
)
def get_brief_emails(brief):
to_addresses = [user.email_address for user in brief.users if user.active]
to_addresses = to_addresses + [
tb.user.email_address
for tb in brief.team_briefs
if tb.user.active and tb.team.status == 'completed']
to_addresses = to_addresses + [
tb.team.email_address
for tb in brief.team_briefs
if tb.team.status == 'completed' and tb.team.email_address
]
return to_addresses
|
from downloader import download
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
def main(file, out):
    with open(file, "r") as in_file, open(out, "w") as out_file:
        for l in in_file.readlines():
            l_split = l.split("\t")
            # keep the line if it's a known IDS attack OR normal traffic
            if (l_split[14] != "0" and l_split[17] == "-1") or (l_split[17] == "1"):
                out_file.write('\t'.join(map(str, l_split)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Pre process the Kyoto database. This script gets the unmodified data and outputs a csv file where "
"only IDS detection and normal traffic are present. Additionally, it's possible to normalize, "
"binarize and remove attributes. It's also possible to remove instances belonging to classes that "
"have less than certain amount of occurrences.")
parser.add_argument("file", help="input file")
parser.add_argument("out", help="output file")
args = parser.parse_args()
main(args.file, args.out)
pass
|
from __future__ import unicode_literals
import youtube_dl
ydl = youtube_dl.YoutubeDL({'format': 'mp4'})
def download(url):
ydl.download([url])
|
import pkg_resources
from timeseries.timeseries import *
from timeseries.lazy import *
try:
    __version__ = pkg_resources.get_distribution(__name__).version
except pkg_resources.DistributionNotFound:
    __version__ = 'unknown'
|
from time import time
import numpy as np
import cv2
import math
from pydarknet import Detector, Image
from constants import *
if CAPTURE_MODE == "ZED_SDK":
import pyzed.sl as sl
zed = sl.Camera()
init_params = sl.InitParameters()
init_params.camera_resolution = sl.RESOLUTION.HD720
init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE
init_params.coordinate_units = sl.UNIT.METER
err = zed.open(init_params)
if err != sl.ERROR_CODE.SUCCESS:
print("ERROR OPENING ZED CAMERA WITH SDK")
exit(1)
image = sl.Mat()
depth_map = sl.Mat()
point_cloud = sl.Mat()
runtime_parameters = sl.RuntimeParameters()
elif CAPTURE_MODE == "OPENCV":
cam = cv2.VideoCapture(CAMERA_INDEX)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, OPENCV_CAMERA_WIDTH)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, OPENCV_CAMERA_HEIGHT)
if DETECTION_MODE == "YOLO":
net = Detector(bytes(YOLO_PATH_CONFIG[YOLO_VERSION], encoding="utf-8"), bytes(YOLO_PATH_WEIGHTS[YOLO_VERSION], encoding="utf-8"), 0, bytes(YOLO_PATH_DATA[YOLO_VERSION],encoding="utf-8"))
def get_frame():
if CAPTURE_MODE == "ZED_SDK":
if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
# A new image is available if grab() returns SUCCESS
zed.retrieve_image(image, sl.VIEW.LEFT)
return cv2.cvtColor(image.get_data(), cv2.COLOR_RGBA2RGB)
else:
_, frame = cam.read()
if FLIP_IMG:
frame = cv2.flip(frame, 0)
frame = cv2.flip(frame, 1)
return frame[:,:OPENCV_FRAME_WIDTH_CUT,:] # cut half left of image
def detect(frame):
img_darknet = Image(frame)
results = net.detect(img_darknet, thresh=DETECTION_THRESHOLD)
return [[x,y,w,h,cat,score] for cat, score, (x, y, w, h) in results]
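# Each returned rect is [x, y, w, h, category, score]; category is a bytes label
# such as b'blue_cone' or b'yellow_cone', as used by get_mono_pos/get_stereo_pos.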
def get_mono_pos(rects):
cones_blue = [[],[]]
cones_yellow = [[],[]]
print_info = ""
for rect in rects:
y = (CONE_HEIGHT * FOCAL) / (rect[3]*PIXEL_SIZE)
u = (rect[0]-U_OFFSET)
x = u*PIXEL_SIZE*y/FOCAL
v = (rect[1]-V_OFFSET)
z = v*PIXEL_SIZE*y/FOCAL
print_info += f"Cone height:{rect[3]} px \n"
print_info += f"Cone x:{x/10} cm, y:{y/10} cm \n"
x /= 1000
x += X_OFFSET
y /= 1000
z /= 1000
ratio = rect[2]/rect[3]
if MIN_RATIO <= ratio <= MAX_RATIO:
if MIN_DISTANCE <= y <= MAX_DISTANCE:
                if not math.isinf(x) and not math.isinf(y) and not math.isnan(x) and not math.isnan(y):
if rect[4] == b'blue_cone':
cones_blue[0].append([x,y])
cones_blue[1].append(rect)
elif rect[4] == b'yellow_cone':
cones_yellow[0].append([x,y])
cones_yellow[1].append(rect)
else:
ignored_by_y_dist += 1
else:
ignored_by_ratio += 1
print_info += f"Input:\n Cones Detected:{len(rects):>3}\n"
print_info += f" Cones Returned:\n Blue:{len(cones_blue):>5}\n Yellow:{len(cones_yellow):>3}\n"
print_info += f" Ignored cones:\n Ratio:{ignored_by_ratio:>4}\n Y Dist:{ignored_by_y_dist:>3}\n"
return cones_blue, cones_yellow, print_info
def get_stereo_pos(rects):
cones_blue = [[],[]]
cones_yellow = [[],[]]
print_info = ""
for rect in rects:
zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)
#zed.retrieve_measure(depth_map, sl.MEASURE.DEPTH) # Retrieve depth
err, point3D = point_cloud.get_value(rect[0],rect[1])
if err == sl.ERROR_CODE.SUCCESS:
x = point3D[0]
z = point3D[1]
y = point3D[2]
if not math.isinf(x) and not math.isinf(y) and not math.isnan(x) and not math.isnan(y):
if rect[4] == b'blue_cone' or rect[4] == b'BLUE_CONE':
cones_blue[0].append([y,-x + X_OFFSET ])
cones_blue[1].append(rect)
elif rect[4] == b'yellow_cone' or rect[4] == b'YELLOW_CONE':
cones_yellow[0].append([y,-x + X_OFFSET])
cones_yellow[1].append(rect)
return cones_blue, cones_yellow, print_info
def get_pos(rects):
if MESURMENT_MODE == "MONO":
cones_blue, cones_yellow, print_info = get_mono_pos(rects)
elif MESURMENT_MODE == "STEREO":
cones_blue, cones_yellow, print_info = get_stereo_pos(rects)
return cones_blue, cones_yellow, print_info
def run():
frame_time_start = time()
frame = get_frame()
frame_time = (time() - frame_time_start) * 1000
detect_time_start = time()
rects = detect(frame)
detect_time = (time() - detect_time_start) * 1000
img = frame.copy()
frame_time_start = time()
cones_blue, cones_yellow, print_info = get_pos(rects)
get_pos_time = (time() - frame_time_start) * 1000
if SHOW_IMG:
if len(cones_blue[0]):
for i in range(len(cones_blue[0])):
x,y = cones_blue[0][i]
u,v,w,h,cat,_ = cones_blue[1][i]
cv2.rectangle(img, (int(u - w / 2), int(v - h / 2)), (int(u + w / 2), int(v + h / 2)), (255, 0, 0), thickness=2)
cv2.putText(img, str(f"x:{x:.2f} m"),(int(u),int(v)),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,0))
cv2.putText(img, str(f"y:{y:.2f} m"),(int(u),int(v+30)),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,0))
if len(cones_yellow[0]):
for i in range(len(cones_yellow[0])):
x,y = cones_yellow[0][i]
u,v,w,h,cat,_ = cones_yellow[1][i]
cv2.rectangle(img, (int(u - w / 2), int(v - h / 2)), (int(u + w / 2), int(v + h / 2)), (0, 255, 255), thickness=2)
cv2.putText(img, str(f"x:{x:.2f} m"),(int(u),int(v)),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,0))
cv2.putText(img, str(f"y:{y:.2f} m"),(int(u),int(v+30)),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,0))
print_info += f" Times:\n Frame:{frame_time:>7.0f} ms\n Detect:{detect_time:>6.0f} ms\n get_pos:{get_pos_time:>5.0f} ms\n\n"
return np.array(cones_blue[0]), np.array(cones_yellow[0]), print_info, img
if __name__ == "__main__":
print("Starting test")
blue, yellow , print_info, img = run()
print(f"blue cones: {blue}\nyellow cones: {yellow}\n")
|
import thingspeak
import schedule
from twitter_bot_module import Twitter_Bot
from gpiozero import CPUTemperature
from time import sleep, strftime
# Declare CPU, thingspeak and local variables
cpu = CPUTemperature()
channel_id = 1357661
write_key = '037QCFSLSVG4MFMZ'
# read_key = 'TZ9SADGMTGS7B3M0'
degree_sign = u"\N{DEGREE SIGN}"
twitter = Twitter_Bot()
warning_dict = {}
# Define write_temp() function to write temperature to thingspeak
def write_temp(temp, channel_write):
    try:
        channel_write.update({'field1': temp})
        print(temp, strftime("%Y-%m-%d %H:%M:%S"))
    except Exception:
        print("Connection Failed")
# Define log_temp() function to get the temperature and log temp to twitter and thingspeak
def log_temp(tweets):
temp = cpu.temperature
time = strftime("%Y-%m-%d %H:%M:%S")
write_temp(temp, channel_write)
if temp >= 60:
out_string = f"Warning! High CPU Temperature: {temp}{degree_sign}C recorded on {time}"
print(out_string)
tweets.tweet(out_string)
warning_dict.update({time: temp})
# Define daily_brief() function to make a daily brief string and send it to twitter
def daily_brief(warning, tweets):
min_temp = 10000
max_temp = -10000
out_string = f"{len(warning)} warnings in total today\n"
if len(warning) != 0:
for time_out, temp in warning.items():
if temp < min_temp:
min_temp = temp
if temp > max_temp:
max_temp = temp
out_string += f"Max Warning Temperature Today: {max_temp}{degree_sign}C\n"
out_string += f"Min Warning Temperature Today: {min_temp}{degree_sign}C\n"
tweets.tweet(out_string)
print(out_string)
# Define good_bye function to terminate the program
def good_bye(tweets):
# tweets.tweet("Farewell!")
print("Farewell!")
quit()
# Use schedule to log temperature every 10 mins, report daily brief at 23:50 and close the program at 23:51 everyday
schedule.every(10).minutes.do(log_temp, tweets=twitter)
schedule.every().day.at("23:50:00").do(daily_brief, warning=warning_dict, tweets=twitter)
schedule.every().day.at("23:51:00").do(good_bye, tweets=twitter)
# main program
if __name__ == "__main__":
print("Hello World!")
channel_write = thingspeak.Channel(id=channel_id, api_key=write_key)
# channel_read = thingspeak.Channel(id=channel_id, api_key=read_key)
while True:
schedule.run_pending()
sleep(1)
|
class AccountPermissions(object):
MUTATE_DATA = 1
permissions = {
MUTATE_DATA: {
"name": "MUTATE_DATA",
"description": "Can edit any site data. TODO: make this work.",
},
}
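# Example (illustrative): the flag values are intended as bitmask permissions, so
# metadata for a flag can be looked up with
# AccountPermissions.permissions[AccountPermissions.MUTATE_DATA]["name"].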
|
# GlitchyGames
# palette: Manages the custom color palette file format used by the engine
import configparser
import os.path
import sys
from pygame import Color
VGA = 'vga'
SYSTEM = 'system'
NES = 'nes'
class ColorPalette:
_BUILTIN_PALETTE_LOCATION = os.path.join(os.path.dirname(__file__), 'resources')
_DEFAULT_EXTENSION = 'palette'
    def __init__(self, colors=None, filename=None):
        # Start with an empty palette so self._colors is always defined, even
        # when a named palette file cannot be found in any search path.
        self._colors = []
        if colors:
            self._colors = colors
        elif filename:
            script_path = os.path.dirname(sys.argv[0])
            paths = [self._BUILTIN_PALETTE_LOCATION,
                     script_path,
                     os.path.join(script_path, 'resources')
                     ]
            for path in paths:
                file_path = os.path.join(path, f'{filename}.{self._DEFAULT_EXTENSION}')
                if os.path.exists(file_path):
                    self._colors = PaletteUtility.load_palette_from_file(file_path)
                    break
        self._size = len(self._colors) - 1
def get_color(self, palette_index):
"""Returns PyGame Color at index"""
return self._colors[palette_index] if palette_index <= self._size else None
    def set_color(self, palette_index, new_color):
        """Sets the indexed color to the new PyGame Color"""
        if palette_index <= self._size:
            self._colors[palette_index] = new_color
        else:
            self._colors.append(new_color)
            self._size += 1
class PaletteUtility:
@staticmethod
def load_palette_from_config(config):
"""Load a palette from a ConfigParser object. Returns a list of PyGame Colors"""
colors = []
for color_index in range(int(config['default']['colors'])):
color_index = str(color_index)
tmp_color = Color(
config[color_index].getint('red'),
config[color_index].getint('green'),
config[color_index].getint('blue'),
config[color_index].getint('alpha', 255)
)
colors.append(tmp_color)
return colors
@staticmethod
def load_palette_from_file(config_file_path):
"""Load a palette from a GlitchyGames palette file. Returns a list of PyGame Colors"""
config = configparser.ConfigParser()
# Read contents of file and close after
with open(config_file_path) as file_obj:
config.read_file(file_obj)
return PaletteUtility.load_palette_from_config(config)
@staticmethod
def write_palette_to_file(config_data, output_file):
""" Write a GlitchyGames palette to a file"""
with open(output_file, 'w') as file_obj:
config_data.write(file_obj)
@staticmethod
def parse_rgb_data_in_file(rgb_data_file):
"""Read RGB data from a file. Returns a list of PyGame Colors"""
# Read input RGBA Values from file. No duplicates
colors = []
with open(rgb_data_file) as file_obj:
for line in file_obj.readlines():
tmp = [int(x) for x in line.strip().split(',')]
color = Color(*tmp)
if color not in colors:
colors.append(color)
return colors
@staticmethod
def create_palette_data(colors):
"""Create a ConfigParser object containing palette data from a list of PyGame Colors. Returns a ConfigParser"""
palette_data = configparser.ConfigParser()
palette_data['default'] = {"colors": str(len(colors))}
for count, color in enumerate(colors):
palette_data[str(count)] = {
"red": color.r,
"green": color.g,
"blue": color.b,
"alpha": color.a
}
return palette_data
# A Custom Color palette with named colors
class Default(ColorPalette):
"""A default set of colors used for Glitchy Games Examples"""
def __init__(self):
super().__init__(filename='default')
self.YELLOW = self.get_color(0)
self.PURPLE = self.get_color(1)
self.BLUE = self.get_color(2)
self.GREEN = self.get_color(3)
self.WHITE = self.get_color(4)
self.BLACK = self.get_color(5)
self.BLACKLUCENT = self.get_color(6)
self.BLUELUCENT = self.get_color(7)
self.RED = self.get_color(8)
class System(ColorPalette):
"""A palette representing the 16 default system colors"""
def __init__(self):
super().__init__(filename=SYSTEM)
self.BLACK = self.get_color(0)
self.MAROON = self.get_color(1)
self.GREEN = self.get_color(2)
self.OLIVE = self.get_color(3)
self.NAVY = self.get_color(4)
self.PURPLE = self.get_color(5)
self.TEAL = self.get_color(6)
self.SILVER = self.get_color(7)
self.GREY = self.get_color(8)
self.RED = self.get_color(9)
self.LIME = self.get_color(10)
self.YELLOW = self.get_color(11)
self.BLUE = self.get_color(12)
self.MAGENTA = self.get_color(13)
self.CYAN = self.get_color(14)
self.WHITE = self.get_color(15)
class Vga(ColorPalette):
"""The 256 VGA color palette"""
def __init__(self):
super().__init__(filename=VGA)
# TODO: Set Color Names (See rich.color for list of names to poach)
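# Minimal round-trip sketch (the file names below are illustrative assumptions,
# not part of the engine): parse raw "r,g,b,a" lines, build a palette config,
# write it out, then load it back into a ColorPalette.
if __name__ == '__main__':
    parsed_colors = PaletteUtility.parse_rgb_data_in_file('my_colors.txt')
    palette_data = PaletteUtility.create_palette_data(parsed_colors)
    PaletteUtility.write_palette_to_file(palette_data, 'custom.palette')
    custom = ColorPalette(colors=PaletteUtility.load_palette_from_file('custom.palette'))
    print(custom.get_color(0))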
|
'''
datetime_utilities.py
========================
Basic functions to make dealing with Dates and Times easier
datetime_utilities - Module Contents
+++++++++++++++++++++++++++++++
'''
import pytz
class tzAlias(object):
'''
    Enum-like object to organize pytz time zones. pytz zones are looked up by string name, so this makes them easier to discover in the IDE.
'''
eastern=pytz.timezone('US/Eastern')
central=pytz.timezone('US/Central')
pacific=pytz.timezone('US/Pacific')
london=pytz.timezone('Europe/London')
paris=pytz.timezone('Europe/Paris')
utc=pytz.UTC
def isAware(dtObject):
'''
determines if a datetime.datetime or datetime.time object is aware or naive
'''
if hasattr(dtObject,'tzinfo') and not dtObject.tzinfo is None and not dtObject.tzinfo.utcoffset(dtObject) is None:
return(True)
return(False)
def modify_time_zone(dtObject,time_zone=pytz.UTC, old_time_zone=None):
'''
    Adjusts the time zone on a datetime object; accepts both aware and naive objects.
    For naive objects the zone is 'tacked on' via localize (when old_time_zone is
    given, the object is localized to that zone first and then converted to time_zone).
    For aware objects the time is translated from its current zone to time_zone.
'''
if time_zone is None:
return dtObject
if isAware(dtObject):
output=time_zone.normalize(dtObject)
else:
if old_time_zone is None:
output=time_zone.localize(dtObject)
else:
output=time_zone.normalize(old_time_zone.localize(dtObject))
return(output)
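# Minimal usage sketch (the date and zones are illustrative): attach a zone to
# a naive datetime, then translate the now-aware value to UTC.
if __name__ == '__main__':
    import datetime
    naive = datetime.datetime(2021, 6, 1, 12, 0)
    aware_eastern = modify_time_zone(naive, time_zone=tzAlias.eastern)
    as_utc = modify_time_zone(aware_eastern, time_zone=tzAlias.utc)
    print(isAware(naive), isAware(aware_eastern), as_utc)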
|
from django.contrib import admin
from example_app.models import Secret
admin.site.register(Secret)
|
from setuptools import setup
with open('README.md', 'r') as fh:
long_description = fh.read()
setup(
name='ingreedypy',
py_modules=['ingreedypy'],
version='1.3.5',
    description='ingreedy-py parses recipe ingredient lines into an object',
long_description=long_description,
long_description_content_type='text/markdown',
author='Scott Cooper',
author_email='scttcper@gmail.com',
url='https://github.com/openculinary/ingreedy-py',
keywords=['ingreedy', 'ingreedypy', 'recipe', 'parser'],
install_requires=[
'parsimonious'
],
extras_require={
'tests': [
'pytest',
'pytest-cov',
]
},
classifiers=[
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
# Autogenerated constants for Arcade Gamepad service
from enum import IntEnum
from jacdac.constants import *
from jacdac.system.constants import *
JD_SERVICE_CLASS_ARCADE_GAMEPAD = const(0x1deaa06e)
class ArcadeGamepadButton(IntEnum):
LEFT = const(0x1)
UP = const(0x2)
RIGHT = const(0x3)
DOWN = const(0x4)
A = const(0x5)
B = const(0x6)
MENU = const(0x7)
SELECT = const(0x8)
RESET = const(0x9)
EXIT = const(0xa)
JD_ARCADE_GAMEPAD_REG_BUTTONS = const(JD_REG_READING)
JD_ARCADE_GAMEPAD_REG_AVAILABLE_BUTTONS = const(0x180)
JD_ARCADE_GAMEPAD_EV_DOWN = const(JD_EV_ACTIVE)
JD_ARCADE_GAMEPAD_EV_UP = const(JD_EV_INACTIVE)
JD_ARCADE_GAMEPAD_PACK_FORMATS = {
JD_ARCADE_GAMEPAD_REG_BUTTONS: "r: u8 u0.8",
JD_ARCADE_GAMEPAD_REG_AVAILABLE_BUTTONS: "r: u8",
JD_ARCADE_GAMEPAD_EV_DOWN: "u8",
JD_ARCADE_GAMEPAD_EV_UP: "u8"
}
|
import os
import typing
import tornado.web
from tornado_swagger._builders import generate_doc_from_endpoints
from tornado_swagger._handlers import SwaggerSpecHandler, SwaggerUiHandler
STATIC_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "swagger_ui"))
def export_swagger(
routes: typing.List[tornado.web.URLSpec],
*,
api_base_url: str = "/",
description: str = "Swagger API definition",
api_version: str = "1.0.0",
title: str = "Swagger API",
contact: str = "",
schemes: list = None,
security_definitions: dict = None,
security: list = None,
):
"""Export swagger schema as dict"""
return generate_doc_from_endpoints(
routes,
api_base_url=api_base_url,
description=description,
api_version=api_version,
title=title,
contact=contact,
schemes=schemes,
security_definitions=security_definitions,
security=security,
)
def setup_swagger(
routes: typing.List[tornado.web.URLSpec],
*,
swagger_url: str = "/api/doc",
api_base_url: str = "/",
description: str = "Swagger API definition",
api_version: str = "1.0.0",
title: str = "Swagger API",
contact: str = "",
schemes: list = None,
security_definitions: dict = None,
security: list = None,
display_models: bool = True,
):
"""Inject swagger ui to application routes"""
swagger_schema = generate_doc_from_endpoints(
routes,
api_base_url=api_base_url,
description=description,
api_version=api_version,
title=title,
contact=contact,
schemes=schemes,
security_definitions=security_definitions,
security=security,
)
_swagger_ui_url = "/{}".format(swagger_url) if not swagger_url.startswith("/") else swagger_url
_base_swagger_ui_url = _swagger_ui_url.rstrip("/")
_swagger_spec_url = "{}/swagger.json".format(_swagger_ui_url)
routes[:0] = [
tornado.web.url(_swagger_ui_url, SwaggerUiHandler),
tornado.web.url("{}/".format(_base_swagger_ui_url), SwaggerUiHandler),
tornado.web.url(_swagger_spec_url, SwaggerSpecHandler),
]
SwaggerSpecHandler.SWAGGER_SPEC = swagger_schema
with open(os.path.join(STATIC_PATH, "ui.html"), "r") as f:
SwaggerUiHandler.SWAGGER_HOME_TEMPLATE = (
f.read().replace("{{ SWAGGER_URL }}", _swagger_spec_url).replace("{{ DISPLAY_MODELS }}", str(-1 if not display_models else 1))
)
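# Minimal usage sketch (handler, port and titles are illustrative assumptions):
# build the route list first, let setup_swagger() prepend the documentation
# handlers, then create the Application from the same list.
if __name__ == "__main__":
    import tornado.ioloop
    class _HealthHandler(tornado.web.RequestHandler):
        def get(self):
            self.write("ok")
    demo_routes = [tornado.web.url(r"/health", _HealthHandler)]
    setup_swagger(demo_routes, title="Demo API", description="Sketch only")
    demo_app = tornado.web.Application(demo_routes)
    demo_app.listen(8888)
    tornado.ioloop.IOLoop.current().start()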
|
from typing import List
class Solution:
def minCostClimbingStairs(self, cost: List[int]) -> int:
dp = [0 for _ in range(len(cost))]
dp[0] = cost[0]
dp[1] = cost[1]
for k in range(2, len(cost)):
dp[k] = min(dp[k-1], dp[k-2]) + cost[k]
return min(dp[-1], dp[-2])
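# Quick sanity check (illustrative): for cost = [10, 15, 20] the cheapest path
# pays only the middle step, so the expected answer is 15.
if __name__ == "__main__":
    print(Solution().minCostClimbingStairs([10, 15, 20]))  # expected: 15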
|
import json
import requests
import warnings
from .models import University
def _deprecated(msg):
warnings.simplefilter('always')
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
warnings.simplefilter('default')
class API(object):
"""API object for making requests to the university database."""
endpoint = r"http://universities.hipolabs.com/search"
def __init__(self, encoding='utf-8'):
"""
Initialize the API object, optionally specifying the encoding
to use for Python 2.
:param str encoding: encoding to use when using Python 2
"""
self.encoding = encoding
self.session = requests.Session()
def search(self, name="", domain="", country_code="", country=""):
"""
Search for a university in the database. Each available option
        can be used to narrow down search results.
:param str name: The name of the university.
:param str domain: The domain the university uses.
:param str country_code: DEPRECATED, DOES NOTHING
:param str country: The country of the university.
:rtype: generator of models.University objects
"""
parameters = dict()
if any([name, domain, country_code, country]):
if name:
parameters["name"] = name
if domain:
parameters["domain"] = domain
if country_code:
_deprecated("Country code filters have no function for now.")
parameters["alpha_two_code"] = country_code
if country:
parameters["country"] = country
university_data = self.session.get(
self.endpoint,
params=parameters
).json()
for data in university_data:
yield University(self.encoding, json=data)
def lucky(self, name="", domain="", country_code="", country=""):
"""
Search for a university in the database, and only return the
first result. This is simply a wrapper on search() that takes
the resulting generator and returns the first element if it
exists. Each available option can be used to narrow down search
results.
:param str name: The name of the university.
:param str domain: The domain the university uses.
:param str country_code: DEPRECATED, DOES NOTHING
:param str country: The country of the university.
:rtype: A models.University object
"""
attempt = self.search(name, domain, country_code, country)
try:
return next(attempt)
except StopIteration:
return None
def get_all(self):
"""
Return a generator containing all university data. This is
simply a wrapper on search() which does not do any filtering.
:rtype: generator of models.University objects
"""
return self.search()
def __del__(self):
self.session.close()
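# Minimal usage sketch (illustrative; assumes the package is importable as
# `universities` and that network access to the public endpoint is available):
#
#     from universities import API
#     api = API()
#     first_match = api.lucky(name="Middle East Technical University")
#     if first_match is not None:
#         print(first_match)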
|
"""
A federated learning training session using FEI.
"""
import logging
import fei_agent
import fei_client
import fei_server
import fei_trainer
def main():
""" A Plato federated learning training session using the FEI algorithm. """
logging.info("Starting RL Environment's process.")
trainer = fei_trainer.Trainer()
client = fei_client.Client(trainer=trainer)
agent = fei_agent.RLAgent()
server = fei_server.RLServer(agent=agent, trainer=trainer)
server.run(client)
if __name__ == "__main__":
main()
|
# Menu
# PROBABLY WONT COMPILE
from consolemenu import *
from consolemenu.items import *
from colorama import *
from termcolor import colored
from geo_scraper.site_scrapers import humdata_check_filter, humdata_scraper
# Print the ASCII-art banner (assumes an 'ascii-art.txt' file next to this script)
with open('ascii-art.txt') as banner_file:
    print(colored(banner_file.read(), 'green', 'on_red'))
menu = ConsoleMenu("chimera", "interface")
humdata_search_item = MenuItem("-h -s")
humdata_check_item = MenuItem("-h -c")
humdata_scraper_item = MenuItem("scrape -h")
diva_scraper_item = MenuItem("scrape -d")
scrape_sites_item = MenuItem("scrape -s")
function_humdata_search = FunctionItem("humdata_search_filter(humdata_search_item, section, driver)")
function_humdata_check = FunctionItem("humdata_check_filter(humdata_check_item, section, driver)")
function_humdata_scraper = FunctionItem("humdata_scraper(website, countrylist, organizationlist, filetypelist, taglist)")
function_diva_scraper = FunctionItem("diva_scraper_(website, countrylist, taglist)")
function_scrape_sites = FunctionItem("scrape_sites(countrylist=['Denmark', 'Albania'], filetypelist=['CSV','Shapefile'], organizationlist=['Facebook','WorldPop'], taglist=['Roads', 'Inland water', 'weather and climate'], querylist=[], sitelist=['https://data.humdata.org/search?ext_geodata=1&q=&ext_page_size=25', 'https://www.diva-gis.org/GData'])")
command_humdata_search = CommandItem("Command", "touch out.txt")
command_humdata_check = CommandItem("Command", "touch out.txt")
command_humdata_scraper = CommandItem("Command", "touch out.txt")
command_diva_scraper = CommandItem("Command", "touch out.txt")
command_scrape_sites = CommandItem("Command", "touch out.txt")
selection_menu = SelectionMenu(["-h", "-s", "object"])
selection_menu = SelectionMenu(["-h", "-c", "object"])
selection_menu = SelectionMenu(["scrape", "-h", "object"])
selection_menu = SelectionMenu(["scrape", "-s", "object"])
selection_menu = SelectionMenu(["scrape", "-d", "object"])
menu.append_item(humdata_search_item)
menu.append_item(humdata_check_item)
menu.append_item(humdata_scraper_item)
menu.append_item(diva_scraper_item)
menu.append_item(scrape_sites_item)
menu.append_item(function_humdata_search)
menu.append_item(function_humdata_check)
menu.append_item(function_humdata_scraper)
menu.append_item(function_diva_scraper)
menu.append_item(function_scrape_sites)
menu.show()
|
import urllib.parse
import async_timeout
import aiohttp
import asyncio
import logging
import voluptuous as vol
import homeassistant.util as util
from datetime import timedelta
from homeassistant.helpers.aiohttp_client import async_get_clientsession
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=3)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=1)
from homeassistant.helpers import config_validation as cv
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL,
SUPPORT_TURN_ON,
SUPPORT_TURN_OFF,
SUPPORT_VOLUME_MUTE,
SUPPORT_SELECT_SOURCE,
SUPPORT_VOLUME_SET,
)
from homeassistant.components.media_player import (
MediaPlayerDevice
)
from homeassistant.const import (
CONF_NAME,
CONF_HOST,
STATE_IDLE,
STATE_ON,
STATE_OFF
)
MULTI_ROOM_SOURCE_TYPE = [
'hdmi1',
'hdmi2',
'optical',
'bt',
'wifi'
]
BOOL_OFF = 'off'
BOOL_ON = 'on'
TIMEOUT = 10
SUPPORT_SAMSUNG_MULTI_ROOM = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | SUPPORT_SELECT_SOURCE
CONF_MAX_VOLUME = 'max_volume'
CONF_PORT = 'port'
CONF_POWER_OPTIONS = 'power_options'
PLATFORM_SCHEMA = vol.Schema({
vol.Optional('platform', default='samsung_multi_room'): cv.string,
vol.Optional(CONF_NAME, default='soundbar'): cv.string,
vol.Optional(CONF_HOST, default='127.0.0.1'): cv.string,
vol.Optional(CONF_PORT, default='55001'): cv.string,
  vol.Optional(CONF_MAX_VOLUME, default='100'): cv.string,
  vol.Optional(CONF_POWER_OPTIONS, default=True): cv.boolean,
})
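# Example configuration.yaml entry for this platform (host, port and volume
# values are illustrative assumptions):
#
#   media_player:
#     - platform: samsung_multi_room
#       name: soundbar
#       host: 192.168.1.50
#       port: '55001'
#       max_volume: '100'
#       power_options: true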
class MultiRoomApi():
def __init__(self, ip, port, session, hass):
self.session = session
self.hass = hass
self.ip = ip
self.port = port
self.endpoint = 'http://{0}:{1}'.format(ip, port)
async def _exec_cmd(self, cmd, key_to_extract):
import xmltodict
query = urllib.parse.urlencode({ "cmd": cmd }, quote_via=urllib.parse.quote)
url = '{0}/UIC?{1}'.format(self.endpoint, query)
with async_timeout.timeout(TIMEOUT, loop=self.hass.loop):
_LOGGER.debug("Executing: {} with cmd: {}".format(url, cmd))
response = await self.session.get(url)
data = await response.text()
_LOGGER.debug(data)
response = xmltodict.parse(data)
if key_to_extract in response['UIC']['response']:
return response['UIC']['response'][key_to_extract]
else:
return None
async def _exec_get(self, action, key_to_extract):
return await self._exec_cmd('<name>{0}</name>'.format(action), key_to_extract)
async def _exec_set(self, action, property_name, value):
if type(value) is str:
value_type = 'str'
else:
value_type = 'dec'
cmd = '<name>{0}</name><p type="{3}" name="{1}" val="{2}"/>'.format(action, property_name, value, value_type)
return await self._exec_cmd(cmd, property_name)
async def get_main_info(self):
return await self._exec_get('GetMainInfo')
async def get_volume(self):
return int(await self._exec_get('GetVolume', 'volume'))
async def set_volume(self, volume):
return await self._exec_set('SetVolume', 'volume', int(volume))
async def get_speaker_name(self):
return await self._exec_get('GetSpkName', 'spkname')
async def get_muted(self):
return await self._exec_get('GetMute', 'mute') == BOOL_ON
async def set_muted(self, mute):
if mute:
return await self._exec_set('SetMute', 'mute', BOOL_ON)
else:
return await self._exec_set('SetMute', 'mute', BOOL_OFF)
async def get_source(self):
return await self._exec_get('GetFunc', 'function')
async def set_source(self, source):
return await self._exec_set('SetFunc', 'function', source)
async def get_state(self):
        result = await self._exec_get('GetPowerStatus', 'powerStatus')
if result:
return result[0]
return 0
async def set_state(self, key):
return await self._exec_set('SetPowerStatus', 'powerStatus', int(key))
class MultiRoomDevice(MediaPlayerDevice):
def __init__(self, name, max_volume, power_options, api):
_LOGGER.info('Initializing MultiRoomDevice')
self._name = name
self.api = api
self._state = STATE_OFF
self._current_source = None
self._volume = 0
self._muted = False
self._max_volume = max_volume
self._power_options = power_options
@property
def supported_features(self):
return SUPPORT_SAMSUNG_MULTI_ROOM | SUPPORT_TURN_OFF | SUPPORT_TURN_ON
@property
def name(self):
return self._name
@property
def state(self):
return self._state
    async def async_turn_on(self):
        await self.api.set_state(1)
    async def async_turn_off(self):
        await self.api.set_state(0)
@property
def volume_level(self):
return self._volume
async def async_set_volume_level(self, volume):
await self.api.set_volume(volume * self._max_volume)
await self.async_update()
@property
def source(self):
return self._current_source
@property
def source_list(self):
return sorted(MULTI_ROOM_SOURCE_TYPE)
async def async_select_source(self, source):
await self.api.set_source(source)
await self.async_update()
@property
def is_volume_muted(self):
return self._muted
async def async_mute_volume(self, mute):
self._muted = mute
await self.api.set_muted(self._muted)
await self.async_update()
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
async def async_update(self):
_LOGGER.info('Refreshing state...')
state = await self.api.get_state()
if state and int(state) == 1:
"If Power is ON, update other values"
self._state = STATE_ON
self._current_source = await self.api.get_source()
self._volume = await self.api.get_volume() / self._max_volume
self._muted = await self.api.get_muted()
else:
self._state = STATE_OFF
def setup_platform(hass, config, add_devices, discovery_info=None):
    _LOGGER.info('Setting up the Samsung multi-room soundbar platform')
    ip = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    name = config.get(CONF_NAME)
    max_volume = int(config.get(CONF_MAX_VOLUME))
    power_options = config.get(CONF_POWER_OPTIONS)
    session = async_get_clientsession(hass)
    api = MultiRoomApi(ip, port, session, hass)
    add_devices([MultiRoomDevice(name, max_volume, power_options, api)], True)
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
import netaddr
from nova import context
from nova import db
from nova.objects import base
from nova.objects import instance
from nova.objects import security_group
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests.objects import test_instance_fault
from nova.tests.objects import test_objects
class _TestInstanceObject(object):
@property
def fake_instance(self):
fake_instance = fakes.stub_instance(id=2,
access_ipv4='1.2.3.4',
access_ipv6='::1')
fake_instance['scheduled_at'] = None
fake_instance['terminated_at'] = None
fake_instance['deleted_at'] = None
fake_instance['created_at'] = None
fake_instance['updated_at'] = None
fake_instance['launched_at'] = (
fake_instance['launched_at'].replace(
tzinfo=iso8601.iso8601.Utc(), microsecond=0))
fake_instance['deleted'] = False
fake_instance['info_cache']['instance_uuid'] = fake_instance['uuid']
fake_instance['security_groups'] = None
return fake_instance
def test_datetime_deserialization(self):
red_letter_date = timeutils.parse_isotime(
timeutils.isotime(datetime.datetime(1955, 11, 5)))
inst = instance.Instance()
inst.uuid = 'fake-uuid'
inst.launched_at = red_letter_date
primitive = inst.obj_to_primitive()
expected = {'nova_object.name': 'Instance',
'nova_object.namespace': 'nova',
'nova_object.version': '1.0',
'nova_object.data':
{'uuid': 'fake-uuid',
'launched_at': '1955-11-05T00:00:00Z'},
'nova_object.changes': ['uuid', 'launched_at']}
self.assertEqual(primitive, expected)
inst2 = instance.Instance.obj_from_primitive(primitive)
self.assertTrue(isinstance(inst2.launched_at,
datetime.datetime))
self.assertEqual(inst2.launched_at, red_letter_date)
def test_ip_deserialization(self):
inst = instance.Instance()
inst.uuid = 'fake-uuid'
inst.access_ip_v4 = '1.2.3.4'
inst.access_ip_v6 = '::1'
primitive = inst.obj_to_primitive()
expected = {'nova_object.name': 'Instance',
'nova_object.namespace': 'nova',
'nova_object.version': '1.0',
'nova_object.data':
{'uuid': 'fake-uuid',
'access_ip_v4': '1.2.3.4',
'access_ip_v6': '::1'},
'nova_object.changes': ['uuid', 'access_ip_v6',
'access_ip_v4']}
self.assertEqual(primitive, expected)
inst2 = instance.Instance.obj_from_primitive(primitive)
self.assertTrue(isinstance(inst2.access_ip_v4, netaddr.IPAddress))
self.assertTrue(isinstance(inst2.access_ip_v6, netaddr.IPAddress))
self.assertEqual(inst2.access_ip_v4, netaddr.IPAddress('1.2.3.4'))
self.assertEqual(inst2.access_ip_v6, netaddr.IPAddress('::1'))
def test_get_without_expected(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(ctxt, 'uuid', columns_to_join=[]
).AndReturn(self.fake_instance)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(ctxt, 'uuid')
# Make sure these weren't loaded
for attr in instance.INSTANCE_OPTIONAL_FIELDS:
attrname = base.get_attrname(attr)
self.assertFalse(hasattr(inst, attrname))
self.assertRemotes()
def test_get_with_expected(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(
ctxt, 'uuid',
columns_to_join=['metadata', 'system_metadata']
).AndReturn(self.fake_instance)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(
ctxt, 'uuid', expected_attrs=instance.INSTANCE_OPTIONAL_FIELDS)
for attr in instance.INSTANCE_OPTIONAL_FIELDS:
attrname = base.get_attrname(attr)
self.assertTrue(hasattr(inst, attrname))
self.assertRemotes()
def test_get_by_id(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'instance_get')
db.instance_get(ctxt, 'instid', columns_to_join=[]
).AndReturn(self.fake_instance)
self.mox.ReplayAll()
inst = instance.Instance.get_by_id(ctxt, 'instid')
self.assertEqual(inst.uuid, self.fake_instance['uuid'])
self.assertRemotes()
def test_load(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fake_uuid = self.fake_instance['uuid']
db.instance_get_by_uuid(ctxt, fake_uuid, columns_to_join=[]
).AndReturn(self.fake_instance)
fake_inst2 = dict(self.fake_instance,
system_metadata=[{'key': 'foo', 'value': 'bar'}])
db.instance_get_by_uuid(ctxt, fake_uuid,
columns_to_join=['system_metadata']
).AndReturn(fake_inst2)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(ctxt, fake_uuid)
self.assertFalse(hasattr(inst, '_system_metadata'))
sys_meta = inst.system_metadata
self.assertEqual(sys_meta, {'foo': 'bar'})
self.assertTrue(hasattr(inst, '_system_metadata'))
# Make sure we don't run load again
sys_meta2 = inst.system_metadata
self.assertEqual(sys_meta2, {'foo': 'bar'})
self.assertRemotes()
def test_get_remote(self):
# isotime doesn't have microseconds and is always UTC
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fake_instance = self.fake_instance
db.instance_get_by_uuid(ctxt, 'fake-uuid', columns_to_join=[]
).AndReturn(fake_instance)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(ctxt, 'fake-uuid')
self.assertEqual(inst.id, fake_instance['id'])
self.assertEqual(inst.launched_at, fake_instance['launched_at'])
self.assertEqual(str(inst.access_ip_v4),
fake_instance['access_ip_v4'])
self.assertEqual(str(inst.access_ip_v6),
fake_instance['access_ip_v6'])
self.assertRemotes()
def test_refresh(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fake_uuid = self.fake_instance['uuid']
db.instance_get_by_uuid(ctxt, fake_uuid, columns_to_join=[]
).AndReturn(dict(self.fake_instance,
host='orig-host'))
db.instance_get_by_uuid(ctxt, fake_uuid, columns_to_join=[]
).AndReturn(dict(self.fake_instance,
host='new-host'))
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(ctxt, fake_uuid)
self.assertEqual(inst.host, 'orig-host')
inst.refresh()
self.assertEqual(inst.host, 'new-host')
self.assertRemotes()
self.assertEqual(set([]), inst.obj_what_changed())
def test_save(self):
ctxt = context.get_admin_context()
fake_inst = dict(self.fake_instance, host='oldhost')
fake_uuid = fake_inst['uuid']
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.StubOutWithMock(db, 'instance_info_cache_update')
db.instance_get_by_uuid(ctxt, fake_uuid, columns_to_join=[]
).AndReturn(fake_inst)
db.instance_update_and_get_original(
ctxt, fake_uuid, {'user_data': 'foo'}).AndReturn(
(fake_inst, dict(fake_inst, host='newhost')))
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(ctxt, fake_uuid)
inst.user_data = 'foo'
inst.save()
self.assertEqual(inst.host, 'newhost')
def test_get_deleted(self):
ctxt = context.get_admin_context()
fake_inst = dict(self.fake_instance, id=123, deleted=123)
fake_uuid = fake_inst['uuid']
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(ctxt, fake_uuid, columns_to_join=[]
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(ctxt, fake_uuid)
# NOTE(danms): Make sure it's actually a bool
self.assertEqual(inst.deleted, True)
def test_with_info_cache(self):
ctxt = context.get_admin_context()
fake_inst = dict(self.fake_instance)
fake_uuid = fake_inst['uuid']
fake_inst['info_cache'] = {'network_info': 'foo',
'instance_uuid': fake_uuid}
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.StubOutWithMock(db, 'instance_info_cache_update')
db.instance_get_by_uuid(ctxt, fake_uuid, columns_to_join=[]
).AndReturn(fake_inst)
db.instance_info_cache_update(ctxt, fake_uuid,
{'network_info': 'bar'})
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(ctxt, fake_uuid)
self.assertEqual(inst.info_cache.network_info,
fake_inst['info_cache']['network_info'])
self.assertEqual(inst.info_cache.instance_uuid, fake_uuid)
inst.info_cache.network_info = 'bar'
inst.save()
def test_with_security_groups(self):
ctxt = context.get_admin_context()
fake_inst = dict(self.fake_instance)
fake_uuid = fake_inst['uuid']
fake_inst['security_groups'] = [
{'id': 1, 'name': 'secgroup1', 'description': 'fake-desc',
'user_id': 'fake-user', 'project_id': 'fake_project',
'created_at': None, 'updated_at': None, 'deleted_at': None,
'deleted': False},
{'id': 2, 'name': 'secgroup2', 'description': 'fake-desc',
'user_id': 'fake-user', 'project_id': 'fake_project',
'created_at': None, 'updated_at': None, 'deleted_at': None,
'deleted': False},
]
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.StubOutWithMock(db, 'security_group_update')
db.instance_get_by_uuid(ctxt, fake_uuid, columns_to_join=[]
).AndReturn(fake_inst)
db.security_group_update(ctxt, 1, {'description': 'changed'}
).AndReturn(fake_inst['security_groups'][0])
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(ctxt, fake_uuid)
self.assertEqual(len(inst.security_groups), 2)
for index, group in enumerate(fake_inst['security_groups']):
for key in group:
self.assertEqual(group[key],
inst.security_groups[index][key])
self.assertTrue(isinstance(inst.security_groups[index],
security_group.SecurityGroup))
self.assertEqual(inst.security_groups.obj_what_changed(), set())
inst.security_groups[0].description = 'changed'
inst.save()
self.assertEqual(inst.security_groups.obj_what_changed(), set())
def test_with_fault(self):
ctxt = context.get_admin_context()
fake_inst = dict(self.fake_instance)
fake_uuid = fake_inst['uuid']
fake_faults = [dict(x, instance_uuid=fake_uuid)
for x in test_instance_fault.fake_faults['fake-uuid']]
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
db.instance_get_by_uuid(ctxt, fake_uuid, columns_to_join=[]
).AndReturn(self.fake_instance)
db.instance_fault_get_by_instance_uuids(ctxt, [fake_uuid]).AndReturn(
{fake_uuid: fake_faults})
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(ctxt, fake_uuid,
expected_attrs=['fault'])
self.assertEqual(fake_faults[0], dict(inst.fault.items()))
self.assertRemotes()
def test_iteritems_with_extra_attrs(self):
self.stubs.Set(instance.Instance, 'name', 'foo')
inst = instance.Instance()
inst.uuid = 'fake-uuid'
self.assertEqual(inst.items(),
{'uuid': 'fake-uuid',
'name': 'foo',
}.items())
class TestInstanceObject(test_objects._LocalTest,
_TestInstanceObject):
pass
class TestRemoteInstanceObject(test_objects._RemoteTest,
_TestInstanceObject):
pass
class _TestInstanceListObject(object):
def fake_instance(self, id, updates=None):
fake_instance = fakes.stub_instance(id=2,
access_ipv4='1.2.3.4',
access_ipv6='::1')
fake_instance['scheduled_at'] = None
fake_instance['terminated_at'] = None
fake_instance['deleted_at'] = None
fake_instance['created_at'] = None
fake_instance['updated_at'] = None
fake_instance['launched_at'] = (
fake_instance['launched_at'].replace(
tzinfo=iso8601.iso8601.Utc(), microsecond=0))
fake_instance['info_cache'] = {'network_info': 'foo',
'instance_uuid': fake_instance['uuid']}
fake_instance['security_groups'] = []
fake_instance['deleted'] = 0
if updates:
fake_instance.update(updates)
return fake_instance
def test_get_all_by_filters(self):
fakes = [self.fake_instance(1), self.fake_instance(2)]
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(ctxt, {'foo': 'bar'}, 'uuid', 'asc',
limit=None, marker=None,
columns_to_join=['metadata']).AndReturn(
fakes)
self.mox.ReplayAll()
inst_list = instance.InstanceList.get_by_filters(
ctxt, {'foo': 'bar'}, 'uuid', 'asc', expected_attrs=['metadata'])
for i in range(0, len(fakes)):
self.assertTrue(isinstance(inst_list.objects[i],
instance.Instance))
self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
self.assertRemotes()
def test_get_by_host(self):
fakes = [self.fake_instance(1),
self.fake_instance(2)]
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
db.instance_get_all_by_host(ctxt, 'foo',
columns_to_join=None).AndReturn(fakes)
self.mox.ReplayAll()
inst_list = instance.InstanceList.get_by_host(ctxt, 'foo')
for i in range(0, len(fakes)):
self.assertTrue(isinstance(inst_list.objects[i],
instance.Instance))
self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
self.assertEqual(inst_list.objects[i]._context, ctxt)
self.assertEqual(inst_list.obj_what_changed(), set())
self.assertRemotes()
def test_get_by_host_and_node(self):
fakes = [self.fake_instance(1),
self.fake_instance(2)]
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host_and_node(ctxt, 'foo', 'bar').AndReturn(
fakes)
self.mox.ReplayAll()
inst_list = instance.InstanceList.get_by_host_and_node(ctxt, 'foo',
'bar')
for i in range(0, len(fakes)):
self.assertTrue(isinstance(inst_list.objects[i],
instance.Instance))
self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
self.assertRemotes()
def test_get_by_host_and_not_type(self):
fakes = [self.fake_instance(1),
self.fake_instance(2)]
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_not_type')
db.instance_get_all_by_host_and_not_type(ctxt, 'foo',
type_id='bar').AndReturn(
fakes)
self.mox.ReplayAll()
inst_list = instance.InstanceList.get_by_host_and_not_type(ctxt, 'foo',
'bar')
for i in range(0, len(fakes)):
self.assertTrue(isinstance(inst_list.objects[i],
instance.Instance))
self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
self.assertRemotes()
def test_get_hung_in_rebooting(self):
fakes = [self.fake_instance(1),
self.fake_instance(2)]
dt = timeutils.isotime()
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'instance_get_all_hung_in_rebooting')
db.instance_get_all_hung_in_rebooting(ctxt, dt).AndReturn(
fakes)
self.mox.ReplayAll()
inst_list = instance.InstanceList.get_hung_in_rebooting(ctxt, dt)
for i in range(0, len(fakes)):
self.assertTrue(isinstance(inst_list.objects[i],
instance.Instance))
self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
self.assertRemotes()
def test_with_fault(self):
ctxt = context.get_admin_context()
fake_insts = [
fake_instance.fake_db_instance(uuid='fake-uuid', host='host'),
fake_instance.fake_db_instance(uuid='fake-inst2', host='host'),
]
fake_faults = test_instance_fault.fake_faults
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
db.instance_get_all_by_host(ctxt, 'host', columns_to_join=[]
).AndReturn(fake_insts)
db.instance_fault_get_by_instance_uuids(
ctxt, [x['uuid'] for x in fake_insts]).AndReturn(fake_faults)
self.mox.ReplayAll()
instances = instance.InstanceList.get_by_host(ctxt, 'host',
expected_attrs=['fault'])
self.assertEqual(2, len(instances))
self.assertEqual(fake_faults['fake-uuid'][0],
dict(instances[0].fault.iteritems()))
self.assertEqual(None, instances[1].fault)
def test_fill_faults(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
inst1 = instance.Instance()
inst1.uuid = 'uuid1'
inst2 = instance.Instance()
inst2.uuid = 'uuid2'
insts = [inst1, inst2]
for inst in insts:
inst.obj_reset_changes()
db_faults = {
'uuid1': [{'id': 123,
'instance_uuid': 'uuid1',
'code': 456,
'message': 'Fake message',
'details': 'No details',
'host': 'foo',
'deleted': False,
'deleted_at': None,
'updated_at': None,
'created_at': None,
}
]}
db.instance_fault_get_by_instance_uuids(ctxt,
[x.uuid for x in insts],
).AndReturn(db_faults)
self.mox.ReplayAll()
inst_list = instance.InstanceList()
inst_list._context = ctxt
inst_list.objects = insts
faulty = inst_list.fill_faults()
self.assertEqual(faulty, ['uuid1'])
self.assertEqual(inst_list[0].fault.message,
db_faults['uuid1'][0]['message'])
self.assertEqual(inst_list[1].fault, None)
for inst in inst_list:
self.assertEqual(inst.obj_what_changed(), set())
class TestInstanceListObject(test_objects._LocalTest,
_TestInstanceListObject):
pass
class TestRemoteInstanceListObject(test_objects._RemoteTest,
_TestInstanceListObject):
pass
class TestInstanceObjectMisc(test.TestCase):
def test_expected_cols(self):
self.stubs.Set(instance, 'INSTANCE_OPTIONAL_NON_COLUMNS', ['bar'])
self.assertEqual(['foo'], instance.expected_cols(['foo', 'bar']))
self.assertEqual(None, instance.expected_cols(None))
|
from typing import Dict
class ConfigError(RuntimeError):
"""An error encountered during reading the config file
Args:
msg (str): The message displayed to the user on error
"""
def __init__(self, msg):
super(ConfigError, self).__init__("%s" % (msg,))
class MonzoInvalidStateError(RuntimeError):
def __init__(self):
super(MonzoInvalidStateError, self).__init__("Invalid state in the login callback")
class ClientError(Exception):
"""An error that contains data to send to the user's client.
Args:
message_content: The content of the m.room.message event to send the user.
"""
def __init__(self, message_content: Dict[str, str]):
self.message_content = message_content
class InvalidParamsError(ClientError):
pass
class ProcessingError(ClientError):
pass
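# Illustrative only (message wording is an assumption): callers are expected to
# raise these with an m.room.message-style content dict, e.g.
#   raise InvalidParamsError({"msgtype": "m.notice", "body": "Missing account id"})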
|
import os
import torch
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from functools import reduce
import abc
class imageDataset(Dataset):
def __init__(self, csv_file_path, cont_flag=True, standardize_dirty=False, dirty_csv_file_path=None):
self.df_dataset = pd.read_csv(csv_file_path)
self.dataset_type = 'image'
self.cont_flag = cont_flag
self.len_data = self.df_dataset.shape[0]
if cont_flag:
self.num_cols = self.df_dataset.columns.tolist()
self.cat_cols = []
else:
self.num_cols = []
self.cat_cols = self.df_dataset.columns.tolist()
if standardize_dirty:
self.df_dataset_dirty = pd.read_csv(dirty_csv_file_path)
## feature type cast -- all features are real
for col_name in self.num_cols:
self.df_dataset[col_name] = self.df_dataset[col_name].astype(float, copy=False)
if standardize_dirty:
self.df_dataset_dirty[col_name] = self.df_dataset_dirty[col_name].astype(float, copy=False)
        # standardization (re-scaling) statistics for continuous features
if cont_flag:
if standardize_dirty:
# standardize using dirty statistics -- e.g. useful running clean data on dirty models.
self.cont_means = self.df_dataset_dirty[self.num_cols].stack().mean()
self.cont_stds = self.df_dataset_dirty[self.num_cols].stack().std()
else:
self.cont_means = self.df_dataset[self.num_cols].stack().mean()
self.cont_stds = self.df_dataset[self.num_cols].stack().std()
## global defs
self.size_tensor = len(self.df_dataset.columns)
self.feat_info = []
if cont_flag:
for col_name in self.df_dataset.columns:
# numerical (real)
self.feat_info.append((col_name, "real", 1))
else:
for col_name in self.df_dataset.columns:
# categorical (categ)
self.feat_info.append((col_name, "categ", 2)) # black and white
def standardize_dataset(self, dataset):
""" NOTE: this method standardize images or not depending on
whether they are binarized or not."""
if self.cont_flag:
dataset = self.from_raw_to_tensor_cont(dataset)
else:
dataset = self.from_raw_to_tensor_categ(dataset)
return dataset
def from_raw_to_tensor_cont(self, dataset):
return (dataset-self.cont_means)/self.cont_stds
def from_raw_to_tensor_categ(self, dataset):
return dataset
@abc.abstractmethod
def __len__(self):
"""Not implemented"""
@abc.abstractmethod
def __getitem__(self, idx):
"""Not implemented"""
class imageDatasetInstance(imageDataset):
def __init__(self, csv_file_path_all, csv_file_path_instance,
csv_file_cell_outlier_mtx=[], get_indexes=False, cont_flag=True,
standardize_dirty=False, dirty_csv_file_path=None):
super().__init__(csv_file_path_all, cont_flag=cont_flag,
standardize_dirty=standardize_dirty,
dirty_csv_file_path=dirty_csv_file_path)
self.df_dataset_instance = pd.read_csv(csv_file_path_instance)
self.get_indexes = get_indexes
# get ground-truth cell error matrix, if provided
if csv_file_cell_outlier_mtx:
self.cell_outlier_mtx = pd.read_csv(csv_file_cell_outlier_mtx).values
else:
self.cell_outlier_mtx = np.array([])
# make sure of data types in the dataframe
for col_name in self.num_cols:
self.df_dataset_instance[col_name] = self.df_dataset_instance[col_name].astype(float, copy=False)
#Directly standardize the dataset here (instead of row by row)
self.df_dataset_instance_standardized = self.standardize_dataset(self.df_dataset_instance)
def __len__(self):
return self.df_dataset_instance.shape[0]
def __getitem__(self, idx):
if self.get_indexes:
index_ret = [idx]
else:
index_ret = []
if self.cell_outlier_mtx.size:
cell_outlier_ret = [self.cell_outlier_mtx[idx,:]]
else:
cell_outlier_ret = []
ret_list = [torch.tensor(self.df_dataset_instance_standardized.iloc[idx].values, dtype=torch.float)]
# ret_list = [torch.tensor(self.df_dataset_instance.iloc[idx].values, dtype=torch.float)]
ret_list += cell_outlier_ret
ret_list += index_ret
return ret_list
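# Minimal usage sketch (the CSV paths are illustrative assumptions): wrap the
# per-instance dataset in a DataLoader and iterate over standardized batches.
if __name__ == "__main__":
    demo_dataset = imageDatasetInstance("all_images.csv", "instance_images.csv",
                                        get_indexes=True, cont_flag=True)
    demo_loader = DataLoader(demo_dataset, batch_size=32, shuffle=False)
    for batch in demo_loader:
        features = batch[0]  # standardized feature tensor, shape (batch, n_cols)
        row_indexes = batch[-1]  # present because get_indexes=True
        print(features.shape, row_indexes[:5])
        break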
|
import copy
import datetime
import json
import unittest
from mock import MagicMock, patch
import config
from archiver.archiver import IngestArchiver, Manifest, ArchiveSubmission, Biomaterial
# TODO use mocks for integration tests
class TestIngestArchiver(unittest.TestCase):
def setUp(self):
self.ontology_api = MagicMock()
self.ingest_api = MagicMock()
self.ingest_api.url = 'ingest_url'
self.dsp_api = MagicMock()
self.dsp_api.url = 'dsp_url'
self.dsp_api.get_current_version = MagicMock(return_value=None)
with open(config.JSON_DIR + 'hca/biomaterials.json', encoding=config.ENCODING) as data_file:
biomaterials = json.loads(data_file.read())
with open(config.JSON_DIR + 'hca/project.json', encoding=config.ENCODING) as data_file:
project = json.loads(data_file.read())
project['uuid']['uuid'] = self._generate_fake_id(prefix='project_')
with open(config.JSON_DIR + 'hca/process.json', encoding=config.ENCODING) as data_file:
assay = json.loads(data_file.read())
assay['uuid']['uuid'] = self._generate_fake_id(prefix='assay_')
with open(config.JSON_DIR + 'hca/library_preparation_protocol.json', encoding=config.ENCODING) as data_file:
library_preparation_protocol = json.loads(data_file.read())
library_preparation_protocol['uuid']['uuid'] = self._generate_fake_id(
prefix='library_preparation_protocol_')
with open(config.JSON_DIR + 'hca/sequencing_protocol.json', encoding=config.ENCODING) as data_file:
sequencing_protocol = json.loads(data_file.read())
sequencing_protocol['uuid']['uuid'] = self._generate_fake_id(prefix='sequencing_protocol_')
with open(config.JSON_DIR + 'hca/sequencing_file.json', encoding=config.ENCODING) as data_file:
sequencing_file = json.loads(data_file.read())
sequencing_file['uuid']['uuid'] = self._generate_fake_id(prefix='sequencing_file_')
biomaterial_objects = []
for biomaterial in biomaterials:
# TODO decide what to use for alias, assign random no for now
biomaterial['uuid']['uuid'] = self._generate_fake_id(prefix='biomaterial_')
biomaterial_objects.append(Biomaterial(biomaterial))
with open(config.JSON_DIR + 'hca/library_preparation_protocol_10x.json', encoding=config.ENCODING) as data_file:
library_preparation_protocol_10x = json.loads(data_file.read())
library_preparation_protocol_10x['uuid']['uuid'] = self._generate_fake_id(
prefix='library_preparation_protocol_10x_')
self.base_manifest = {
'biomaterials': biomaterial_objects,
'project': project,
'files': [sequencing_file],
'assay': assay,
'library_preparation_protocol': library_preparation_protocol,
'library_preparation_protocol_10x': library_preparation_protocol_10x,
'sequencing_protocol': sequencing_protocol,
'input_biomaterial': biomaterials[0],
'manifest_id': 'dummy_manifest_id'
}
def _generate_fake_id(self, prefix):
now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H%M%S")
return prefix + '_' + now
@staticmethod
def _mock_manifest(manifest):
assay_manifest = MagicMock(Manifest)
assay_manifest.get_biomaterials = MagicMock(
return_value=manifest.get('biomaterials'))
assay_manifest.get_project = MagicMock(
return_value=manifest.get('project'))
assay_manifest.get_assay_process = MagicMock(
return_value=manifest.get('assay'))
assay_manifest.get_library_preparation_protocol = MagicMock(
return_value=manifest.get('library_preparation_protocol'))
assay_manifest.get_sequencing_protocol = MagicMock(
return_value=manifest.get('sequencing_protocol'))
assay_manifest.get_input_biomaterial = MagicMock(
return_value=manifest.get('input_biomaterial'))
assay_manifest.get_files = MagicMock(
return_value=manifest.get('files'))
assay_manifest.manifest_id = manifest.get('manifest_id')
return assay_manifest
def test_archive_skip_metadata_with_accessions(self):
with open(config.JSON_DIR + 'hca/biomaterial_with_accessions.json', encoding=config.ENCODING) as data_file:
biomaterials = json.loads(data_file.read())
biomaterial_manifest = {'biomaterials': biomaterials}
mock_manifest = self._mock_manifest(biomaterial_manifest)
archiver = IngestArchiver(
ontology_api=self.ontology_api,
ingest_api=self.ingest_api,
dsp_api=self.dsp_api,
exclude_types=['sequencingRun'])
archiver.get_manifest = MagicMock(return_value=mock_manifest)
entity_map = archiver.convert('')
archive_submission = archiver.archive(entity_map)
self.assertTrue(archive_submission.is_completed)
self.assertTrue(archive_submission.errors)
self.assertFalse(archive_submission.processing_result)
|
from pybamm import Parameter
def lico2_volume_change_Ai2020(sto):
"""
    lico2 particle volume change as a function of stoichiometry [1, 2].
References
----------
.. [1] Ai, W., Kraft, L., Sturm, J., Jossen, A., & Wu, B. (2020).
Electrochemical Thermal-Mechanical Modelling of Stress Inhomogeneity in
Lithium-Ion Pouch Cells. Journal of The Electrochemical Society, 167(1), 013512
DOI: 10.1149/2.0122001JES.
.. [2] Rieger, B., Erhard, S. V., Rumpf, K., & Jossen, A. (2016).
A new method to model the thickness change of a commercial pouch cell
during discharge. Journal of The Electrochemical Society, 163(8), A1566-A1575.
Parameters
----------
sto: :class:`pybamm.Symbol`
        Electrode stoichiometry, dimensionless
should be R-averaged particle concentration
Returns
-------
    t_change : :class:`pybamm.Symbol`
volume change, dimensionless, normalised by particle volume
"""
omega = Parameter("Positive electrode partial molar volume [m3.mol-1]")
c_p_max = Parameter("Maximum concentration in positive electrode [mol.m-3]")
t_change = omega * c_p_max * sto
return t_change
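# Minimal evaluation sketch (the parameter values below are illustrative
# assumptions, not the Ai 2020 values): resolve the two Parameters and
# evaluate the volume change at a stoichiometry of 0.5.
if __name__ == "__main__":
    import pybamm
    parameter_values = pybamm.ParameterValues({
        "Positive electrode partial molar volume [m3.mol-1]": -7.28e-7,
        "Maximum concentration in positive electrode [mol.m-3]": 49943,
    })
    print(parameter_values.evaluate(lico2_volume_change_Ai2020(pybamm.Scalar(0.5))))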
|
import sys
import os
__all__=['mc_wordcloud.py']
sys.path.append("..")
def Get(name):
    return os.path.join(os.path.dirname(os.path.realpath(__file__)), name)
|
"""Loads configuration yaml and runs an experiment."""
from argparse import ArgumentParser
import os
import glob
from datetime import datetime
import shutil
import yaml
from tqdm import tqdm
import torch
import numpy as np
from collections import defaultdict
import data
import model
import probe
import regimen
import reporter
import task
import loss
import sys
from run_experiment import choose_dataset_class, choose_probe_class, choose_model_class
def load_projected_representations(probe, model, dataset):
"""
Loads projected representations under `probe` from `dataset`.
"""
projections_by_batch = []
for batch in tqdm(dataset, desc='[predicting]'):
observation_batch, label_batch, length_batch, _ = batch
word_representations = model(observation_batch)
transformed_representations = torch.matmul(word_representations, probe.proj)
projections_by_batch.append(transformed_representations.detach().cpu().numpy())
return projections_by_batch
def evaluate_vectors(args, probe, dataset, model, results_dir, output_name):
probe_params_path = os.path.join(results_dir, args['probe']['params_path'])
probe.load_state_dict(torch.load(probe_params_path))
probe.eval()
print(probe.proj)
dataloader = dataset.get_dev_dataloader()
projections = load_projected_representations(probe, model, dataloader)
relations_to_projections = defaultdict(list)
relations_to_sentences = defaultdict(list)
relations_to_idxs = defaultdict(list)
relations_to_words = defaultdict(list)
for projection_batch, (data_batch, label_batch, length_batch, observation_batch) in zip(projections, dataloader):
for projection, label, length, (observation, _) in zip(projection_batch, label_batch, length_batch, observation_batch):
for idx, word in enumerate(observation.sentence):
if observation.head_indices[idx] == '0':
pass # head word
else:
head_index = int(observation.head_indices[idx])
proj_diff = projection[idx] - projection[head_index-1]
relation = observation.governance_relations[idx]
relations_to_projections[relation].append(proj_diff)
relations_to_sentences[relation].append(" ".join(observation.sentence))
relations_to_idxs[relation].append(idx)
relations_to_words[relation].append(word)
relations_to_diffs = {}
all_relations = []
all_sentences = []
all_idxs = []
all_words = []
y_list = []
for relation in relations_to_projections:
diffs = torch.FloatTensor(relations_to_projections[relation])
# compute the SVD
u, s, v = diffs.svd()
average_diff = torch.mean(diffs, 0)
relations_to_diffs[relation] = average_diff
all_relations += relations_to_projections[relation]
all_sentences += relations_to_sentences[relation]
all_idxs += relations_to_idxs[relation]
all_words += relations_to_words[relation]
y_list += [relation] * len(relations_to_projections[relation])
allDiff = torch.FloatTensor(all_relations)
# print(y_list)
sentences_idxs_words = np.array([all_sentences, all_idxs, all_words])
if len(sys.argv) > 2:
np.save('/sailhome/ethanchi/structural-probes/relationOutputs/{}.npy'.format(output_name), allDiff.numpy())
np.save('/sailhome/ethanchi/structural-probes/relationOutputs/{}Y.npy'.format(output_name), np.array(y_list))
np.save('/sailhome/ethanchi/structural-probes/relationOutputs/{}-data.npy'.format(output_name), sentences_idxs_words)
allDiff = torch.mean(allDiff, 0)
cos = torch.nn.CosineSimilarity(dim=0, eps=1e-10)
# for relation in relations_to_diffs:
# print(relation, torch.norm(relations_to_diffs[relation]))
# for relation2 in relations_to_diffs:
# print(relation, relation2, cos(relations_to_diffs[relation], relations_to_diffs[relation2]))
# print("AllDiff", torch.norm(allDiff))
# print("Projection is: {}".format(projection[int(observation.head_indices[idx])-1]))
def execute_experiment(args, results_dir, output_name):
"""
Execute an experiment as determined by the configuration
in args.
Args:
train_probe: Boolean whether to train the probe
report_results: Boolean whether to report results
"""
dataset_class = choose_dataset_class(args)
# task_class, reporter_class, loss_class = choose_task_classes(args)
probe_class = choose_probe_class(args)
model_class = choose_model_class(args)
# regimen_class = regimen.ProbeRegimen
expt_dataset = dataset_class(args, task.DummyTask)
# expt_reporter = reporter_class(args)
expt_probe = probe_class(args)
expt_model = model_class(args)
# expt_regimen = regimen_class(args)
# expt_loss = loss_class(args)
evaluate_vectors(args, expt_probe, expt_dataset, expt_model, results_dir, output_name)
if __name__ == '__main__':
argp = ArgumentParser()
argp.add_argument('dir')
argp.add_argument('output_name')
argp.add_argument('--seed', default=0, type=int,
help='sets all random seeds for (within-machine) reproducibility')
cli_args = argp.parse_args()
if cli_args.seed:
np.random.seed(cli_args.seed)
torch.manual_seed(cli_args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
    os.chdir(cli_args.dir)
    with open(glob.glob("*.yaml")[0]) as yaml_file:
        yaml_args = yaml.load(yaml_file, Loader=yaml.FullLoader)
# setup_new_experiment_dir(cli_args, yaml_args, cli_args.results_dir)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
yaml_args['device'] = device
execute_experiment(yaml_args, cli_args.dir, cli_args.output_name)
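# Typical invocation (script and directory names are illustrative): the target
# directory must contain the experiment's .yaml config plus the trained probe
# parameters referenced by args['probe']['params_path'], e.g.
#   python evaluate_relation_vectors.py /path/to/experiment_dir en-bert-layer7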
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 5 01:30:10 2021
@author: kfp
"""
try:
import __devel__
_release_ = False
except ImportError:
    _release_ = True
from functools import reduce
from pygnuplot import gnuplot
if not _release_:
from storage import STORAGE
else:
from numaplot.storage import STORAGE
keys2d = ['styles', 'samples', 'key', 'title']
keys3d = keys2d + ['dummy', 'urange', 'vrange', 'xyplane', 'hidden3d',\
'isosamples', 'parametric', 'output']
def check_minreq(data):
""" Minimal requirements: if data is admissible then it shall
be forwarded to the gnuplot handler, otherwise an error message
should be returned. Return type is <boolean>.
"""
    minkeys = ['terminal', 'method']
    return all(key in data for key in minkeys)
def check_key(data, key):
""" Check if key in data. If so, then return the value, otherwise
return the empty string.
"""
    if key in data:
        return data[key]
    else:
        return ''
def make_kw(keys,data):
""" Create the **kw for gnuplot.set() -- unpack operator ** """
d = dict()
for x in keys:
d.update({x : check_key(data,x)})
return d
def perform(uid):
""" With the argument='uid' get the the data from STORAGE and
perform the method requested.
"""
try:
data = STORAGE[uid]['post']
except Exception:
STORAGE.update({uid: {'error' : KeyError("xyz") }})
return False
if not check_minreq(data):
STORAGE.update({uid: {'error' : "check_minreq failed" }})
return False
g = gnuplot.Gnuplot(terminal = data['terminal'])
if data['method'] == 'plot':
g.set(**make_kw(keys2d, data))
g.plot(data['plot'])
elif data['method'] == 'splot':
g.set(**make_kw(keys3d, data))
g.splot(data['splot'])
else:
pass
print(data['terminal']) #debug
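# Illustrative sketch of the 'post' payload that perform() expects to find in
# STORAGE[uid] (the uid and values are assumptions, not a documented schema;
# only 'terminal' and 'method' are required by check_minreq):
#
#   STORAGE['demo-uid'] = {'post': {
#       'terminal': 'pngcairo',
#       'method': 'plot',
#       'plot': 'sin(x)',
#       'title': '"demo plot"',
#   }}
#   perform('demo-uid')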
|
from rdflib import Graph
from rdflib.resource import Resource
from rdflib.namespace import RDFS
def test_properties(rdfs_graph: Graph) -> None:
"""
The properties of a `rdflib.resource.Resource` work as expected.
"""
cres = Resource(rdfs_graph, RDFS.Container)
assert cres.graph is rdfs_graph
assert cres.identifier == RDFS.Container
|
import pyglet
from pyglet.gl import *
from robocute.base import *
QUAD_SW = 0
QUAD_SE = 1
QUAD_NE = 2
QUAD_NW = 3
'''
Quad
'''
class Quad(Base):
def __init__(self, texture):
super().__init__()
self.texture = texture
self.width = texture.width
self.height = texture.height
self.vertices = None
#self.indices = [None] * 4
self.north = None
self.south = None
self.east = None
self.west = None
#
self.validate()
def get_tex_coord(self, ndx):
tex_coords = self.texture.tex_coords
u = tex_coords[ndx * 3]
v = tex_coords[ndx * 3 + 1]
r = tex_coords[ndx * 3 + 2]
return (u, v, r)
def validate(self):
super().validate()
'''
Compose
'''
def compose(self, g, mesh):
x1 = g.x
y1 = g.y
x2 = x1 + self.width
y2 = y1 + self.height
vertices = [ (x1, y1), (x2, y1), (x2, y2), (x1, y2) ]
self.compose_sw(vertices, mesh)
self.compose_se(vertices, mesh)
self.compose_ne(vertices, mesh)
self.compose_nw(vertices, mesh)
def compose_sw(self, vertices, mesh):
indice = mesh.add_vertex( vertices[QUAD_SW] )
mesh.set_tex_coord(indice, self.get_tex_coord(QUAD_SW))
#self.indices[QUAD_SW] = indice
def compose_se(self, vertices, mesh):
#if not self.east:
indice = mesh.add_vertex(vertices[QUAD_SE])
mesh.set_tex_coord(indice, self.get_tex_coord(QUAD_SE))
#self.indices[QUAD_SE] = indice
def compose_ne(self, vertices, mesh):
#if not self.north and not self.east:
indice = mesh.add_vertex(vertices[QUAD_NE])
mesh.set_tex_coord(indice, self.get_tex_coord(QUAD_NE))
#self.indices[QUAD_NE] = indice
def compose_nw(self, vertices, mesh):
#if not self.north:
indice = mesh.add_vertex(vertices[QUAD_NW])
mesh.set_tex_coord(indice, self.get_tex_coord(QUAD_NW))
#self.indices[QUAD_NW] = indice
'''
PostCompose
'''
def postcompose(self, mesh):
self.postcompose_sw()
self.postcompose_se()
self.postcompose_ne()
self.postcompose_nw()
#mesh.indices.extend(self.indices)
def postcompose_sw(self):
pass
def postcompose_se(self):
if self.east:
self.indices[QUAD_SE] = self.east.indices[QUAD_SW]
def postcompose_ne(self):
if self.north:
self.indices[QUAD_NE] = self.north.indices[QUAD_SE]
elif self.east:
self.indices[QUAD_NE] = self.east.indices[QUAD_NW]
def postcompose_nw(self):
if self.north:
self.indices[QUAD_NW] = self.north.indices[QUAD_SW]
def draw(self, graphics):
if self.texture:
self.texture.blit(graphics.x, graphics.y, graphics.z)
if graphics.query:
self.query(graphics)
'''
QuadContainer
'''
class QuadContainer(Base):
def __init__(self):
super().__init__()
self.quads = []
def validate(self):
super().validate()
def add_quad(self, quad):
self.invalidate()
self.quads.append(quad)
def repeat(self, quad, count):
self.invalidate()
        for _ in range(count):
            self.add_quad(quad.copy())
'''
QuadRow
'''
class QuadRow(QuadContainer):
def __init__(self):
super().__init__()
self.north = None
self.south = None
self.height = None
def validate(self):
super().validate()
lastQuad = None
for quad in self.quads:
if lastQuad:
lastQuad.east = quad
quad.west = lastQuad
lastQuad = quad
self.height = lastQuad.height
def compose(self, graphics, mesh):
if self.invalid:
self.validate()
for quad in self.quads:
quad.compose(graphics, mesh)
graphics.x += quad.width
'''
#fixme:reinstate this when shared vertices get figured out.
for quad in self.quads:
quad.postcompose(mesh)
'''
'''
QuadColumn
'''
class QuadColumn(QuadContainer):
def __init__(self):
super().__init__()
def validate(self):
super().validate()
lastQuad = None
for quad in self.quads:
if lastQuad:
lastQuad.north = quad
quad.south = lastQuad
lastQuad = quad
def compose(self, graphics, mesh):
if self.invalid:
self.validate()
for quad in self.quads:
quad.compose(graphics, mesh)
graphics.y += quad.height
'''
        #fixme: reinstate this when shared vertices get figured out.
for quad in self.quads:
quad.postcompose(mesh)
'''
return
'''
QuadGrid
'''
class QuadGrid(QuadContainer):
def __init__(self):
super().__init__()
def add_row(self, row):
self.add_quad(row)
def validate(self):
super().validate()
lastRow = None
for row in self.quads:
if lastRow:
lastRow.north = row #fixme:going to use this?
                quadNdx = 0
                for quad in row.quads:
                    lastRow.quads[quadNdx].north = quad
                    quadNdx += 1
row.south = lastRow
lastRow = row
def compose(self, graphics, mesh):
if self.invalid:
self.validate()
gX = graphics.x
for row in self.quads:
row.compose(graphics, mesh)
graphics.x = gX
graphics.y += row.height
'''
        #fixme: reinstate this when shared vertices get figured out.
        for row in self.quads:
row.postcompose(mesh)
'''
return
'''
QuadMesh
'''
class QuadMesh(Base):
def __init__(self, texture):
super().__init__()
self.texture = texture
#self.indices = []
self.vertices = []
self.tex_coords = []
#self.domain = pyglet.graphics.vertexdomain.create_indexed_domain('v2i/dynamic', 't3f')
self.domain = pyglet.graphics.vertexdomain.create_domain('v2i/dynamic', 't3f')
def add_vertex(self, vertex):
self.vertices.append(vertex[0])
self.vertices.append(vertex[1])
self.tex_coords.extend([0.]*3)
return int(len(self.vertices) / 2 - 1)
def set_tex_coord(self, indice, tex_coord):
self.tex_coords[indice * 3] = tex_coord[0]
self.tex_coords[indice * 3 + 1] = tex_coord[1]
self.tex_coords[indice * 3 + 2] = tex_coord[2]
def create_vertex_list(self):
#vertexList = self.domain.create(len(self.vertices)/2, len(self.indices))
vertexList = self.domain.create(int(len(self.vertices)/2))
#vertexList.indices = self.indices
vertexList.vertices = self.vertices
vertexList.tex_coords = self.tex_coords
return vertexList
def draw(self):
vertexList = self.create_vertex_list()
glEnable(self.texture.target)
glBindTexture(self.texture.target, self.texture.id)
vertexList.draw(GL_QUADS)
glDisable(self.texture.target)
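# Hedged usage sketch (assumes a pyglet texture, e.g. from
# pyglet.image.load('tile.png').get_texture(), and uses a stand-in namespace
# with x/y attributes in place of the real robocute graphics object):
# from types import SimpleNamespace
# texture = pyglet.image.load('tile.png').get_texture()
# row = QuadRow()
# row.add_quad(Quad(texture))
# row.add_quad(Quad(texture))
# mesh = QuadMesh(texture)
# row.compose(SimpleNamespace(x=0, y=0), mesh)
# mesh.draw()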
|
import py
from pypy.rpython.extfunc import BaseLazyRegistering, extdef, registering
from pypy.rlib import rarithmetic
from pypy.rpython.lltypesystem import lltype, rffi
from pypy.tool.autopath import pypydir
from pypy.rpython.ootypesystem import ootype
from pypy.rlib import rposix
from pypy.translator.tool.cbuild import ExternalCompilationInfo
from pypy.tool.autopath import pypydir
class CConfig:
_compilation_info_ = ExternalCompilationInfo(
includes = ['src/ll_strtod.h'],
include_dirs = [str(py.path.local(pypydir).join('translator', 'c'))],
separate_module_sources = ['#include <src/ll_strtod.h>'],
export_symbols = ['LL_strtod_formatd', 'LL_strtod_parts_to_float'],
)
class RegisterStrtod(BaseLazyRegistering):
def __init__(self):
self.configure(CConfig)
@registering(rarithmetic.formatd)
def register_formatd(self):
ll_strtod = self.llexternal('LL_strtod_formatd',
[rffi.CCHARP, rffi.DOUBLE], rffi.CCHARP,
sandboxsafe=True, threadsafe=False)
def llimpl(fmt, x):
res = ll_strtod(fmt, x)
return rffi.charp2str(res)
def oofakeimpl(fmt, x):
return ootype.oostring(rarithmetic.formatd(fmt._str, x), -1)
return extdef([str, float], str, 'll_strtod.ll_strtod_formatd',
llimpl=llimpl, oofakeimpl=oofakeimpl,
sandboxsafe=True)
@registering(rarithmetic.parts_to_float)
def register_parts_to_float(self):
ll_parts_to_float = self.llexternal('LL_strtod_parts_to_float',
[rffi.CCHARP] * 4, rffi.DOUBLE,
sandboxsafe=True,
threadsafe=False)
def llimpl(sign, beforept, afterpt, exponent):
res = ll_parts_to_float(sign, beforept, afterpt, exponent)
if res == -1 and rposix.get_errno() == 42:
raise ValueError("Wrong literal for float")
return res
def oofakeimpl(sign, beforept, afterpt, exponent):
return rarithmetic.parts_to_float(sign._str, beforept._str,
afterpt._str, exponent._str)
return extdef([str, str, str, str], float,
'll_strtod.ll_strtod_parts_to_float', llimpl=llimpl,
oofakeimpl=oofakeimpl, sandboxsafe=True)
|
import logging
import threading
import time
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',
)
lock = threading.Lock()
acquired = lock.acquire()
print(acquired)
# acquired = lock.acquire() # this blocks the main thread
# print(acquired)
acquired = lock.acquire(0)  # non-blocking: returns False immediately instead of blocking the main thread
print(acquired)
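# A minimal follow-up sketch: release the lock, then demonstrate the timeout
# form of acquire() and the context-manager form (which releases automatically).
lock.release()
acquired = lock.acquire(timeout=1)  # blocks for at most 1 second
print(acquired)
lock.release()
with lock:  # acquired here, released automatically when the block exits
    print('lock held inside the with block')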
|
#!/usr/bin/python3
# update_produce.py - Corrects costs in produce sales spreadsheet.
import openpyxl
wb = openpyxl.load_workbook('produceSales.xlsx')
sheet = wb.get_sheet_by_name('Sheet')
# The produce types and their updated prices
PRICE_UPDATES = {'Garlic': 99.07,
'Celery': 101.19,
'Lemon': 333.27}
# Loop through the rows and update the prices
for row_num in range(2, sheet.max_row + 1):  # start at 2 to skip the header row
produce_name = sheet.cell(row=row_num, column=1).value
if produce_name in PRICE_UPDATES:
sheet.cell(row=row_num, column=2).value = PRICE_UPDATES[produce_name]
wb.save('updated_produce_sales.xlsx')
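# Hedged side note: newer openpyxl releases deprecate get_sheet_by_name() in
# favour of dictionary-style access; the equivalent lookup would be:
# sheet = wb['Sheet']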
|
# Exercise_3_tests.py
from Tests import *
# test format
# TESTS = [ {"arg":arg0, "hint": hint0}, {"arg":arg1, "hint": hint1}, ... ]
TESTS = [
# 0
{
"arg": [[[(1, 2), (2, 4)],
[(0, 2), (3, 11), (4, 3)],
[(0, 4), (3, 13)],
[(1, 11), (2, 13), (5, 17), (6, 1)],
[(1, 3), (5, 5)],
[(3, 17), (4, 5), (7, 7)],
[(3, 1), (7, 3)],
[(5, 7), (6, 3), ]], 0, 7],
"hint": 7
},
# 1
{
"arg": [[[(1, 2), (2, 4)],
[(0, 2), (3, 11), (4, 3)],
[(0, 4), (3, 13)],
[(1, 11), (2, 13), (5, 17), (6, 1)],
[(1, 3), (5, 5)],
[(3, 17), (4, 5), (7, 7)],
[(3, 1), (7, 3)],
[(5, 7), (6, 3), ]], 4, 6],
"hint": 6
},
# 2
{
"arg": [[[(1, 2), (2, 4)],
[(0, 2), (3, 11), (4, 3)],
[(0, 4), (3, 13)],
[(1, 11), (2, 13), (5, 17), (6, 1)],
[(1, 3), (5, 5)],
[(3, 17), (4, 5), (7, 7)],
[(3, 1), (7, 3)],
[(5, 7), (6, 3), ]], 2, 5],
"hint": 4
},
# 3
{
"arg": [[[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9)],
[(0, 1), (10, 9)],
[(0, 2), (10, 8)],
[(0, 3), (10, 7)],
[(0, 4), (10, 6)],
[(0, 5), (10, 5)],
[(0, 6), (10, 4)],
[(0, 7), (10, 3)],
[(0, 8), (10, 2)],
[(0, 9), (10, 1)],
[(1, 9), (2, 8), (3, 7), (4, 6), (5, 5), (6, 4), (7, 3), (8, 2), (9, 1)]], 0, 10],
"hint": 18
},
# 4
{
"arg": [[[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9)],
[(0, 1), (10, 9)],
[(0, 2), (10, 8)],
[(0, 3), (10, 7)],
[(0, 4), (10, 6)],
[(0, 5), (10, 5)],
[(0, 6), (10, 4)],
[(0, 7), (10, 3)],
[(0, 8), (10, 2)],
[(0, 9), (10, 1)],
[(1, 9), (2, 8), (3, 7), (4, 6), (5, 5), (6, 4), (7, 3), (8, 2), (9, 1)]], 1, 9],
"hint": 4
},
# 5
{
"arg": [[[(5, 8), (6, 1), (20, 1), (21, 9), (24, 8)],
[(2, 2), (5, 4), (14, 1), (25, 5)],
[(1, 2), (11, 1), (18, 4), (21, 3), (26, 9)],
[(6, 4), (7, 1), (8, 6), (9, 6), (12, 8), (14, 6), (17, 8), (18, 6), (19, 2), (23, 3), (24, 5)],
[(6, 4), (10, 4), (25, 4), (25, 7), (29, 5)],
[(0, 8), (1, 4), (7, 6), (14, 7), (18, 7), (22, 8), (26, 3), (27, 1), (28, 1)],
[(0, 1), (3, 4), (4, 4), (16, 7), (19, 6)],
[(3, 1), (5, 6), (10, 7), (20, 2), (29, 8)],
[(3, 6), (27, 3)],
[(3, 6), (14, 1), (15, 7), (16, 8), (20, 1), (21, 9), (22, 5), (23, 4), (26, 4), (28, 7)],
[(4, 4), (7, 7), (13, 4), (16, 7), (19, 6)],
[(2, 1), (13, 9), (19, 5), (21, 1), (28, 8)],
[(3, 8), (14, 6), (16, 8)],
[(10, 4), (11, 9), (15, 5), (18, 2), (29, 2)],
[(1, 1), (3, 6), (5, 7), (9, 1), (12, 6), (17, 6), (22, 1)],
[(9, 7), (13, 5), (23, 4), (27, 1)],
[(6, 7), (9, 8), (10, 7), (12, 8), (18, 6), (27, 9)],
[(3, 8), (14, 6), (23, 2)],
[(2, 4), (3, 6), (5, 7), (13, 2), (16, 6), (23, 5), (24, 6)],
[(3, 2), (6, 6), (10, 6), (11, 5), (27, 8)],
[(0, 1), (7, 2), (9, 1), (21, 6), (23, 4), (25, 3), (28, 4), (29, 8)],
[(0, 9), (2, 3), (9, 9), (11, 1), (20, 6), (24, 8), (27, 7), (28, 1)],
[(5, 8), (9, 5), (14, 1), (24, 1), (27, 8)],
[(3, 3), (9, 4), (15, 4), (17, 2), (18, 5), (20, 4), (24, 8)],
[(0, 8), (3, 5), (18, 6), (21, 8), (22, 1), (23, 8), (28, 3), (29, 9)],
[(1, 5), (4, 4), (20, 3), (26, 4)],
[(2, 9), (5, 3), (9, 4), (25, 4)],
[(5, 1), (8, 3), (15, 1), (16, 9), (19, 8), (21, 7), (22, 8), (29, 3)],
[(5, 1), (9, 7), (11, 8), (20, 4), (21, 1), (24, 3)],
[(4, 5), (7, 8), (13, 2), (20, 8), (24, 9), (27, 3)]], 0, 18],
"hint": 11
},
# 6
{
"arg": [[[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9)],
[(0, 1), (10, 9)],
[(0, 2), (10, 8)],
[(0, 3), (10, 7)],
[(0, 4), (10, 6)],
[(0, 5), (10, 5)],
[(0, 6), (10, 4)],
[(0, 7), (10, 3)],
[(0, 8), (10, 2)],
[(0, 9), (10, 1)],
[(1, 9), (2, 8), (3, 7), (4, 6), (5, 5), (6, 4), (7, 3), (8, 2), (9, 1)],
[(12, 3)],
[(11, 3)]], 0, 11],
"hint": 0
},
]
def printarg(G, s, k):
print("Graf: ", limit(G, 120))
print("Start: ", s)
print("Cel : ", k)
def printhint(hint):
print("Przykladowy wynik:", limit(hint, 120))
def printsol(sol):
print("Uzyskany wynik :", limit(sol, 120))
def check(G, s, k, hint, sol):
if hint == sol:
print("Test zaliczony")
return True
else:
print("NIEZALICZONY!")
return False
def runtests(f):
internal_runtests(printarg, printhint, printsol, check, TESTS, f)
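# Hedged usage note: a solution module would import this file and pass its own
# function to runtests, e.g. `runtests(solve)`, where `solve(G, s, k)` is the
# (hypothetical) function exercised by the TESTS above.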
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetUsersResult',
'AwaitableGetUsersResult',
'get_users',
]
@pulumi.output_type
class GetUsersResult:
"""
A collection of values returned by getUsers.
"""
def __init__(__self__, comments=None, emails=None, enableds=None, expiration_dates=None, first_names=None, groups=None, id=None, keys=None, last_names=None, user_ids=None):
if comments and not isinstance(comments, list):
raise TypeError("Expected argument 'comments' to be a list")
pulumi.set(__self__, "comments", comments)
if emails and not isinstance(emails, list):
raise TypeError("Expected argument 'emails' to be a list")
pulumi.set(__self__, "emails", emails)
if enableds and not isinstance(enableds, list):
raise TypeError("Expected argument 'enableds' to be a list")
pulumi.set(__self__, "enableds", enableds)
if expiration_dates and not isinstance(expiration_dates, list):
raise TypeError("Expected argument 'expiration_dates' to be a list")
pulumi.set(__self__, "expiration_dates", expiration_dates)
if first_names and not isinstance(first_names, list):
raise TypeError("Expected argument 'first_names' to be a list")
pulumi.set(__self__, "first_names", first_names)
if groups and not isinstance(groups, list):
raise TypeError("Expected argument 'groups' to be a list")
pulumi.set(__self__, "groups", groups)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if keys and not isinstance(keys, list):
raise TypeError("Expected argument 'keys' to be a list")
pulumi.set(__self__, "keys", keys)
if last_names and not isinstance(last_names, list):
raise TypeError("Expected argument 'last_names' to be a list")
pulumi.set(__self__, "last_names", last_names)
if user_ids and not isinstance(user_ids, list):
raise TypeError("Expected argument 'user_ids' to be a list")
pulumi.set(__self__, "user_ids", user_ids)
@property
@pulumi.getter
def comments(self) -> Sequence[str]:
return pulumi.get(self, "comments")
@property
@pulumi.getter
def emails(self) -> Sequence[str]:
return pulumi.get(self, "emails")
@property
@pulumi.getter
def enableds(self) -> Sequence[bool]:
return pulumi.get(self, "enableds")
@property
@pulumi.getter(name="expirationDates")
def expiration_dates(self) -> Sequence[str]:
return pulumi.get(self, "expiration_dates")
@property
@pulumi.getter(name="firstNames")
def first_names(self) -> Sequence[str]:
return pulumi.get(self, "first_names")
@property
@pulumi.getter
def groups(self) -> Sequence[Sequence[str]]:
return pulumi.get(self, "groups")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def keys(self) -> Sequence[str]:
return pulumi.get(self, "keys")
@property
@pulumi.getter(name="lastNames")
def last_names(self) -> Sequence[str]:
return pulumi.get(self, "last_names")
@property
@pulumi.getter(name="userIds")
def user_ids(self) -> Sequence[str]:
return pulumi.get(self, "user_ids")
class AwaitableGetUsersResult(GetUsersResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetUsersResult(
comments=self.comments,
emails=self.emails,
enableds=self.enableds,
expiration_dates=self.expiration_dates,
first_names=self.first_names,
groups=self.groups,
id=self.id,
keys=self.keys,
last_names=self.last_names,
user_ids=self.user_ids)
def get_users(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetUsersResult:
"""
Use this data source to access information about an existing resource.
"""
__args__ = dict()
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
if opts.plugin_download_url is None:
opts.plugin_download_url = _utilities.get_plugin_download_url()
__ret__ = pulumi.runtime.invoke('proxmoxve:Permission/getUsers:getUsers', __args__, opts=opts, typ=GetUsersResult).value
return AwaitableGetUsersResult(
comments=__ret__.comments,
emails=__ret__.emails,
enableds=__ret__.enableds,
expiration_dates=__ret__.expiration_dates,
first_names=__ret__.first_names,
groups=__ret__.groups,
id=__ret__.id,
keys=__ret__.keys,
last_names=__ret__.last_names,
user_ids=__ret__.user_ids)
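# Hedged usage sketch: inside a Pulumi program that imports this module, the
# data source could be consumed like this (the export name is illustrative):
# users = get_users()
# pulumi.export('user_ids', users.user_ids)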
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##----------------------------------------------------------
##file :get_maya_win.py
##author:xiyuhao
##email:695888835@qq.com
##----------------------------------------------------------
##date:2017.9.15
##-----------------------------------------------------------
import sys
import os
import PySide.QtCore as QtCore
import PySide.QtGui as QtGui
import shiboken as PyQtSip
import maya.OpenMayaUI as omui
def getMayaWin():
"""
@get the maya window
"""
ptr = omui.MQtUtil.mainWindow()
inptr = PyQtSip.wrapInstance(long(ptr),QtGui.QWidget)
return inptr
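# Hedged usage sketch (assumes this runs inside Maya's Python interpreter,
# where PySide and maya.OpenMayaUI are importable): parent a dialog to the
# window returned by getMayaWin().
# dlg = QtGui.QDialog(parent=getMayaWin())
# dlg.setWindowTitle('example')
# dlg.show()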
|
"""
Logging setup
By default: Log with lastResort logger, usually STDERR.
Logging can be overridden either programmatically in code using the library or by creating one or more of
- /etc/ocrd_logging.conf
- $HOME/ocrd_logging.conf
- $PWD/ocrd_logging.conf
These are standard `logging.config.fileConfig` files; they are searched in the order $PWD, $HOME, /etc, and the first one found is used.
"""
# pylint: disable=no-member
from __future__ import absolute_import
from traceback import format_stack
import logging
import logging.config
import os
from .constants import LOG_FORMAT, LOG_TIMEFMT
__all__ = [
'disableLogging',
'getLevelName',
'getLogger',
'initLogging',
'logging',
'setOverrideLogLevel',
]
_initialized_flag = False
_overrideLogLevel = None
_ocrdLevel2pythonLevel = {
'TRACE': 'DEBUG',
'OFF': 'CRITICAL',
'FATAL': 'ERROR',
}
class PropagationShyLogger(logging.Logger):
def addHandler(self, hdlr):
super().addHandler(hdlr)
self.propagate = not self.handlers
def removeHandler(self, hdlr):
super().removeHandler(hdlr)
self.propagate = not self.handlers
logging.setLoggerClass(PropagationShyLogger)
logging.getLogger().propagate = False
def getLevelName(lvl):
"""
Get (string) python logging level for (string) spec-defined log level name.
"""
lvl = _ocrdLevel2pythonLevel.get(lvl, lvl)
return logging.getLevelName(lvl)
def setOverrideLogLevel(lvl, silent=False):
"""
Override all logger filter levels to include lvl and above.
- Set root logger level
- iterates all existing loggers and sets their log level to ``NOTSET``.
Args:
lvl (string): Log level name.
silent (boolean): Whether to log the override call
"""
if lvl is None:
return
root_logger = logging.getLogger('')
if not silent:
root_logger.info('Overriding log level globally to %s', lvl)
lvl = getLevelName(lvl)
global _overrideLogLevel # pylint: disable=global-statement
_overrideLogLevel = lvl
for loggerName in logging.Logger.manager.loggerDict:
logger = logging.Logger.manager.loggerDict[loggerName]
if isinstance(logger, logging.PlaceHolder):
continue
logger.setLevel(logging.NOTSET)
root_logger.setLevel(lvl)
def getLogger(*args, **kwargs):
"""
Wrapper around ``logging.getLogger`` that respects `overrideLogLevel <#setOverrideLogLevel>`_.
"""
if not _initialized_flag:
initLogging()
logging.getLogger('').critical('getLogger was called before initLogging. Source of the call:')
for line in [x for x in format_stack(limit=2)[0].split('\n') if x]:
logging.getLogger('').critical(line)
name = args[0]
logger = logging.getLogger(*args, **kwargs)
if _overrideLogLevel and name:
logger.setLevel(logging.NOTSET)
return logger
def initLogging():
"""
Reset root logger, read logging configuration if exists, otherwise use basicConfig
"""
global _initialized_flag # pylint: disable=global-statement
if _initialized_flag:
logging.getLogger('').critical('initLogging was called multiple times. Source of latest call:')
for line in [x for x in format_stack(limit=2)[0].split('\n') if x]:
logging.getLogger('').critical(line)
logging.disable(logging.NOTSET)
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
CONFIG_PATHS = [
os.path.curdir,
os.path.join(os.path.expanduser('~')),
'/etc',
]
config_file = next((f for f \
in [os.path.join(p, 'ocrd_logging.conf') for p in CONFIG_PATHS] \
if os.path.exists(f)),
None)
if config_file:
logging.config.fileConfig(config_file)
logging.getLogger('ocrd.logging').debug("Picked up logging config at %s" % config_file)
else:
# Default logging config
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT, datefmt=LOG_TIMEFMT)
logging.getLogger('').setLevel(logging.INFO)
# logging.getLogger('ocrd.resolver').setLevel(logging.INFO)
# logging.getLogger('ocrd.resolver.download_to_directory').setLevel(logging.INFO)
# logging.getLogger('ocrd.resolver.add_files_to_mets').setLevel(logging.INFO)
logging.getLogger('PIL').setLevel(logging.INFO)
# To cut back on the `Self-intersection at or near point` INFO messages
logging.getLogger('shapely.geos').setLevel(logging.ERROR)
logging.getLogger('tensorflow').setLevel(logging.ERROR)
if _overrideLogLevel:
logging.getLogger('').setLevel(_overrideLogLevel)
_initialized_flag = True
def disableLogging():
global _initialized_flag # pylint: disable=global-statement
_initialized_flag = False
global _overrideLogLevel # pylint: disable=global-statement
_overrideLogLevel = None
logging.basicConfig(level=logging.CRITICAL)
logging.disable(logging.ERROR)
# Initializing stream handlers at module level
# would cause message output in all runtime contexts,
# including those which are already run for std output
# (--dump-json, --version, ocrd-tool, bashlib etc).
# So this needs to be an opt-in from the CLIs/decorators:
#initLogging()
# Also, we even have to block log output for libraries
# (like matplotlib/tensorflow) which set up logging
# themselves already:
disableLogging()
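# Hedged usage sketch: how a CLI entry point would opt in, using only the
# functions defined above (the logger name is illustrative):
# def _example_cli():
#     initLogging()
#     log = getLogger('ocrd.example')
#     log.info('processing started')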
|
import os
from pathlib import Path
from tempfile import NamedTemporaryFile
import numpy as np
from napari.utils import io
from napari.plugins.io import read_data_with_plugins
def test_builtin_reader_plugin(make_test_viewer):
"""Test the builtin reader plugin reads a temporary file."""
with NamedTemporaryFile(suffix='.tif', delete=False) as tmp:
data = np.random.rand(20, 20)
io.imsave(tmp.name, data)
tmp.seek(0)
layer_data = read_data_with_plugins(tmp.name)
assert isinstance(layer_data, list)
assert len(layer_data) == 1
assert isinstance(layer_data[0], tuple)
assert np.allclose(data, layer_data[0][0])
viewer = make_test_viewer()
viewer.open(tmp.name, plugin='builtins')
assert np.allclose(viewer.layers[0].data, data)
def test_builtin_reader_plugin_csv(make_test_viewer, tmpdir):
"""Test the builtin reader plugin reads a temporary file."""
tmp = os.path.join(tmpdir, 'test.csv')
column_names = ['index', 'axis-0', 'axis-1']
table = np.random.random((5, 3))
data = table[:, 1:]
# Write csv file
io.write_csv(tmp, table, column_names=column_names)
layer_data = read_data_with_plugins(tmp)
assert isinstance(layer_data, list)
assert len(layer_data) == 1
assert isinstance(layer_data[0], tuple)
assert layer_data[0][2] == 'points'
assert np.allclose(data, layer_data[0][0])
viewer = make_test_viewer()
viewer.open(tmp, plugin='builtins')
assert np.allclose(viewer.layers[0].data, data)
def test_builtin_reader_plugin_stacks(make_test_viewer):
"""Test the builtin reader plugin reads multiple files as a stack."""
data = np.random.rand(5, 20, 20)
tmps = []
for plane in data:
tmp = NamedTemporaryFile(suffix='.tif', delete=False)
io.imsave(tmp.name, plane)
tmp.seek(0)
tmps.append(tmp)
viewer = make_test_viewer()
# open should take both strings and Path object, so we make one of the
# pathnames a Path object
names = [tmp.name for tmp in tmps]
names[0] = Path(names[0])
viewer.open(names, stack=True, plugin='builtins')
assert np.allclose(viewer.layers[0].data, data)
for tmp in tmps:
tmp.close()
os.unlink(tmp.name)
|
import unittest
from hamcrest import *
from src.students import Students
from src.exampleData import Data
class StudentsAssertPyTest(unittest.TestCase):
def setUp(self):
self.temp = Students(Data().example)
def test_delete_lack_student(self):
assert_that(calling(self.temp.deleteStudent)
.with_args(3, "Kasia", "Polak"),
raises(Exception))
def test_delete_student(self):
assert_that(self.temp.deleteStudent(1, "Kasia", "Polak"),
not_(contains_exactly(('1', 'Kasia', 'Polak'))))
def tearDown(self):
self.temp = None
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import annotations
from collections import defaultdict
from typing import TYPE_CHECKING
from typing import List, Tuple, Union
if TYPE_CHECKING:
from .account import Account
from qlib.backtest.position import BasePosition, Position
import random
import numpy as np
import pandas as pd
from ..data.data import D
from ..config import C
from ..constant import REG_CN
from ..log import get_module_logger
from .decision import Order, OrderDir, OrderHelper
from .high_performance_ds import BaseQuote, NumpyQuote
class Exchange:
def __init__(
self,
freq="day",
start_time=None,
end_time=None,
codes="all",
deal_price: Union[str, Tuple[str], List[str]] = None,
subscribe_fields=[],
limit_threshold: Union[Tuple[str, str], float, None] = None,
volume_threshold=None,
open_cost=0.0015,
close_cost=0.0025,
min_cost=5,
impact_cost=0.0,
extra_quote=None,
quote_cls=NumpyQuote,
**kwargs,
):
"""__init__
:param freq: frequency of data
:param start_time: closed start time for backtest
:param end_time: closed end time for backtest
:param codes: list stock_id list or a string of instruments(i.e. all, csi500, sse50)
:param deal_price: Union[str, Tuple[str, str], List[str]]
The `deal_price` supports following two types of input
- <deal_price> : str
- (<buy_price>, <sell_price>): Tuple[str] or List[str]
<deal_price>, <buy_price> or <sell_price> := <price>
<price> := str
- for example '$close', '$open', '$vwap' ("close" is OK. `Exchange` will help to prepend
"$" to the expression)
        :param subscribe_fields: list, subscribe fields. These expressions will be added to the query and to `self.quote`.
It is useful when users want more fields to be queried
:param limit_threshold: Union[Tuple[str, str], float, None]
1) `None`: no limitation
2) float, 0.1 for example, default None
3) Tuple[str, str]: (<the expression for buying stock limitation>,
<the expression for sell stock limitation>)
`False` value indicates the stock is tradable
`True` value indicates the stock is limited and not tradable
:param volume_threshold: Union[
Dict[
"all": ("cum" or "current", limit_str),
"buy": ("cum" or "current", limit_str),
"sell":("cum" or "current", limit_str),
],
("cum" or "current", limit_str),
]
1) ("cum" or "current", limit_str) denotes a single volume limit.
                - limit_str is a qlib data expression, which is allowed to use your own custom operators.
                  Please refer to qlib/contrib/ops/high_freq.py, which contains custom operators for high frequency,
                  such as DayCumsum. !!!NOTE: if you want to use a custom operator, you need to
                  register it in qlib_init.
- "cum" means that this is a cumulative value over time, such as cumulative market volume.
So when it is used as a volume limit, it is necessary to subtract the dealt amount.
- "current" means that this is a real-time value and will not accumulate over time,
so it can be directly used as a capacity limit.
e.g. ("cum", "0.2 * DayCumsum($volume, '9:45', '14:45')"), ("current", "$bidV1")
2) "all" means the volume limits are both buying and selling.
"buy" means the volume limits of buying. "sell" means the volume limits of selling.
Different volume limits will be aggregated with min(). If volume_threshold is only
("cum" or "current", limit_str) instead of a dict, the volume limits are for
                both sides by default. In other words, it is the same as {"all": ("cum" or "current", limit_str)}.
3) e.g. "volume_threshold": {
"all": ("cum", "0.2 * DayCumsum($volume, '9:45', '14:45')"),
"buy": ("current", "$askV1"),
"sell": ("current", "$bidV1"),
}
:param open_cost: cost rate for open, default 0.0015
:param close_cost: cost rate for close, default 0.0025
:param trade_unit: trade unit, 100 for China A market.
None for disable trade unit.
**NOTE**: `trade_unit` is included in the `kwargs`. It is necessary because we must
distinguish `not set` and `disable trade_unit`
:param min_cost: min cost, default 5
:param impact_cost: market impact cost rate (a.k.a. slippage). A recommended value is 0.1.
:param extra_quote: pandas, dataframe consists of
columns: like ['$vwap', '$close', '$volume', '$factor', 'limit_sell', 'limit_buy'].
The limit indicates that the etf is tradable on a specific day.
Necessary fields:
$close is for calculating the total value at end of each day.
Optional fields:
$volume is only necessary when we limit the trade amount or calculate PA(vwap) indicator
$vwap is only necessary when we use the $vwap price as the deal price
$factor is for rounding to the trading unit
limit_sell will be set to False by default(False indicates we can sell this
target on this day).
limit_buy will be set to False by default(False indicates we can buy this
target on this day).
index: MultipleIndex(instrument, pd.Datetime)
"""
self.freq = freq
self.start_time = start_time
self.end_time = end_time
self.trade_unit = kwargs.pop("trade_unit", C.trade_unit)
if len(kwargs) > 0:
raise ValueError(f"Get Unexpected arguments {kwargs}")
if limit_threshold is None:
limit_threshold = C.limit_threshold
if deal_price is None:
deal_price = C.deal_price
# we have some verbose information here. So logging is enable
self.logger = get_module_logger("online operator")
# TODO: the quote, trade_dates, codes are not necessary.
# It is just for performance consideration.
self.limit_type = self._get_limit_type(limit_threshold)
if limit_threshold is None:
if C.region == REG_CN:
self.logger.warning(f"limit_threshold not set. The stocks hit the limit may be bought/sold")
elif self.limit_type == self.LT_FLT and abs(limit_threshold) > 0.1:
if C.region == REG_CN:
self.logger.warning(f"limit_threshold may not be set to a reasonable value")
if isinstance(deal_price, str):
if deal_price[0] != "$":
deal_price = "$" + deal_price
self.buy_price = self.sell_price = deal_price
elif isinstance(deal_price, (tuple, list)):
self.buy_price, self.sell_price = deal_price
else:
raise NotImplementedError(f"This type of input is not supported")
if isinstance(codes, str):
codes = D.instruments(codes)
self.codes = codes
# Necessary fields
# $close is for calculating the total value at end of each day.
# $factor is for rounding to the trading unit
# $change is for calculating the limit of the stock
# get volume limit from kwargs
self.buy_vol_limit, self.sell_vol_limit, vol_lt_fields = self._get_vol_limit(volume_threshold)
necessary_fields = {self.buy_price, self.sell_price, "$close", "$change", "$factor", "$volume"}
if self.limit_type == self.LT_TP_EXP:
for exp in limit_threshold:
necessary_fields.add(exp)
all_fields = necessary_fields | vol_lt_fields
all_fields = list(all_fields | set(subscribe_fields))
self.all_fields = all_fields
self.open_cost = open_cost
self.close_cost = close_cost
self.min_cost = min_cost
self.impact_cost = impact_cost
self.limit_threshold: Union[Tuple[str, str], float, None] = limit_threshold
self.volume_threshold = volume_threshold
self.extra_quote = extra_quote
self.get_quote_from_qlib()
# init quote by quote_df
self.quote_cls = quote_cls
self.quote: BaseQuote = self.quote_cls(self.quote_df, freq)
def get_quote_from_qlib(self):
# get stock data from qlib
if len(self.codes) == 0:
self.codes = D.instruments()
self.quote_df = D.features(
self.codes, self.all_fields, self.start_time, self.end_time, freq=self.freq, disk_cache=True
).dropna(subset=["$close"])
self.quote_df.columns = self.all_fields
# check buy_price data and sell_price data
for attr in "buy_price", "sell_price":
pstr = getattr(self, attr) # price string
if self.quote_df[pstr].isna().any():
self.logger.warning("{} field data contains nan.".format(pstr))
# update trade_w_adj_price
if self.quote_df["$factor"].isna().any():
# The 'factor.day.bin' file not exists, and `factor` field contains `nan`
# Use adjusted price
self.trade_w_adj_price = True
self.logger.warning("factor.day.bin file not exists or factor contains `nan`. Order using adjusted_price.")
if self.trade_unit is not None:
self.logger.warning(f"trade unit {self.trade_unit} is not supported in adjusted_price mode.")
else:
# The `factor.day.bin` file exists and all data `close` and `factor` are not `nan`
# Use normal price
self.trade_w_adj_price = False
# update limit
self._update_limit(self.limit_threshold)
# concat extra_quote
if self.extra_quote is not None:
# process extra_quote
if "$close" not in self.extra_quote:
raise ValueError("$close is necessray in extra_quote")
for attr in "buy_price", "sell_price":
pstr = getattr(self, attr) # price string
if pstr not in self.extra_quote.columns:
self.extra_quote[pstr] = self.extra_quote["$close"]
self.logger.warning(f"No {pstr} set for extra_quote. Use $close as {pstr}.")
if "$factor" not in self.extra_quote.columns:
self.extra_quote["$factor"] = 1.0
self.logger.warning("No $factor set for extra_quote. Use 1.0 as $factor.")
if "limit_sell" not in self.extra_quote.columns:
self.extra_quote["limit_sell"] = False
self.logger.warning("No limit_sell set for extra_quote. All stock will be able to be sold.")
if "limit_buy" not in self.extra_quote.columns:
self.extra_quote["limit_buy"] = False
self.logger.warning("No limit_buy set for extra_quote. All stock will be able to be bought.")
assert set(self.extra_quote.columns) == set(self.quote_df.columns) - {"$change"}
self.quote_df = pd.concat([self.quote_df, self.extra_quote], sort=False, axis=0)
LT_TP_EXP = "(exp)" # Tuple[str, str]
LT_FLT = "float" # float
LT_NONE = "none" # none
def _get_limit_type(self, limit_threshold):
"""get limit type"""
if isinstance(limit_threshold, Tuple):
return self.LT_TP_EXP
elif isinstance(limit_threshold, float):
return self.LT_FLT
elif limit_threshold is None:
return self.LT_NONE
else:
raise NotImplementedError(f"This type of `limit_threshold` is not supported")
def _update_limit(self, limit_threshold):
# check limit_threshold
limit_type = self._get_limit_type(limit_threshold)
if limit_type == self.LT_NONE:
self.quote_df["limit_buy"] = False
self.quote_df["limit_sell"] = False
elif limit_type == self.LT_TP_EXP:
# set limit
self.quote_df["limit_buy"] = self.quote_df[limit_threshold[0]]
self.quote_df["limit_sell"] = self.quote_df[limit_threshold[1]]
elif limit_type == self.LT_FLT:
self.quote_df["limit_buy"] = self.quote_df["$change"].ge(limit_threshold)
self.quote_df["limit_sell"] = self.quote_df["$change"].le(-limit_threshold) # pylint: disable=E1130
def _get_vol_limit(self, volume_threshold):
"""
        Preprocess the volume limit.
        Get the fields that need to be fetched from qlib.
        Get the volume limit lists for buying and selling, which are composed of all limits.
Parameters
----------
volume_threshold :
please refer to the doc of exchange.
Returns
-------
fields: set
            the fields that need to be fetched from qlib.
buy_vol_limit: List[Tuple[str]]
all volume limits of buying.
sell_vol_limit: List[Tuple[str]]
all volume limits of selling.
Raises
------
ValueError
the format of volume_threshold is not supported.
"""
if volume_threshold is None:
return None, None, set()
fields = set()
buy_vol_limit = []
sell_vol_limit = []
if isinstance(volume_threshold, tuple):
volume_threshold = {"all": volume_threshold}
assert isinstance(volume_threshold, dict)
for key in volume_threshold:
vol_limit = volume_threshold[key]
assert isinstance(vol_limit, tuple)
fields.add(vol_limit[1])
if key in ("buy", "all"):
buy_vol_limit.append(vol_limit)
if key in ("sell", "all"):
sell_vol_limit.append(vol_limit)
return buy_vol_limit, sell_vol_limit, fields
def check_stock_limit(self, stock_id, start_time, end_time, direction=None):
"""
Parameters
----------
direction : int, optional
trade direction, by default None
- if direction is None, check if tradable for buying and selling.
            - if direction == Order.BUY, check if it is tradable for buying
- if direction == Order.SELL, check the sell limit for selling.
"""
if direction is None:
buy_limit = self.quote.get_data(stock_id, start_time, end_time, field="limit_buy", method="all")
sell_limit = self.quote.get_data(stock_id, start_time, end_time, field="limit_sell", method="all")
return buy_limit or sell_limit
elif direction == Order.BUY:
return self.quote.get_data(stock_id, start_time, end_time, field="limit_buy", method="all")
elif direction == Order.SELL:
return self.quote.get_data(stock_id, start_time, end_time, field="limit_sell", method="all")
else:
raise ValueError(f"direction {direction} is not supported!")
def check_stock_suspended(self, stock_id, start_time, end_time):
# is suspended
if stock_id in self.quote.get_all_stock():
return self.quote.get_data(stock_id, start_time, end_time, "$close") is None
else:
return True
def is_stock_tradable(self, stock_id, start_time, end_time, direction=None):
# check if stock can be traded
# same as check in check_order
if self.check_stock_suspended(stock_id, start_time, end_time) or self.check_stock_limit(
stock_id, start_time, end_time, direction
):
return False
else:
return True
def check_order(self, order):
# check limit and suspended
if self.check_stock_suspended(order.stock_id, order.start_time, order.end_time) or self.check_stock_limit(
order.stock_id, order.start_time, order.end_time, order.direction
):
return False
else:
return True
def deal_order(
self,
order,
trade_account: Account = None,
position: BasePosition = None,
dealt_order_amount: defaultdict = defaultdict(float),
):
"""
Deal order when the actual transaction
the results section in `Order` will be changed.
:param order: Deal the order.
:param trade_account: Trade account to be updated after dealing the order.
:param position: position to be updated after dealing the order.
:param dealt_order_amount: the dealt order amount dict with the format of {stock_id: float}
:return: trade_val, trade_cost, trade_price
"""
# check order first.
if self.check_order(order) is False:
order.deal_amount = 0.0
            # using np.nan instead of None to make it more convenient to show the value in a format string
self.logger.debug(f"Order failed due to trading limitation: {order}")
return 0.0, 0.0, np.nan
if trade_account is not None and position is not None:
raise ValueError("trade_account and position can only choose one")
# NOTE: order will be changed in this function
trade_price, trade_val, trade_cost = self._calc_trade_info_by_order(
order, trade_account.current_position if trade_account else position, dealt_order_amount
)
if trade_val > 1e-5:
            # If the order can only deal 0 value, nothing needs to be updated.
# Otherwise, it will result in
# 1) some stock with 0 value in the position
# 2) `trade_unit` of trade_cost will be lost in user account
if trade_account:
trade_account.update_order(order=order, trade_val=trade_val, cost=trade_cost, trade_price=trade_price)
elif position:
position.update_order(order=order, trade_val=trade_val, cost=trade_cost, trade_price=trade_price)
return trade_val, trade_cost, trade_price
def get_quote_info(self, stock_id, start_time, end_time, method="ts_data_last"):
return self.quote.get_data(stock_id, start_time, end_time, method=method)
def get_close(self, stock_id, start_time, end_time, method="ts_data_last"):
return self.quote.get_data(stock_id, start_time, end_time, field="$close", method=method)
def get_volume(self, stock_id, start_time, end_time, method="sum"):
"""get the total deal volume of stock with `stock_id` between the time interval [start_time, end_time)"""
return self.quote.get_data(stock_id, start_time, end_time, field="$volume", method=method)
def get_deal_price(self, stock_id, start_time, end_time, direction: OrderDir, method="ts_data_last"):
if direction == OrderDir.SELL:
pstr = self.sell_price
elif direction == OrderDir.BUY:
pstr = self.buy_price
else:
raise NotImplementedError(f"This type of input is not supported")
deal_price = self.quote.get_data(stock_id, start_time, end_time, field=pstr, method=method)
if method is not None and (deal_price is None or np.isnan(deal_price) or deal_price <= 1e-08):
self.logger.warning(f"(stock_id:{stock_id}, trade_time:{(start_time, end_time)}, {pstr}): {deal_price}!!!")
self.logger.warning(f"setting deal_price to close price")
deal_price = self.get_close(stock_id, start_time, end_time, method)
return deal_price
def get_factor(self, stock_id, start_time, end_time) -> Union[float, None]:
"""
Returns
-------
Union[float, None]:
`None`: if the stock is suspended `None` may be returned
`float`: return factor if the factor exists
"""
assert start_time is not None and end_time is not None, "the time range must be given"
if stock_id not in self.quote.get_all_stock():
return None
return self.quote.get_data(stock_id, start_time, end_time, field="$factor", method="ts_data_last")
def generate_amount_position_from_weight_position(
self, weight_position, cash, start_time, end_time, direction=OrderDir.BUY
):
"""
        Generate the target position according to the weights and the cash.
        NOTE: All the cash will be assigned to the tradable stocks.
Parameter:
weight_position : dict {stock_id : weight}; allocate cash by weight_position
among then, weight must be in this range: 0 < weight < 1
cash : cash
start_time : the start time point of the step
end_time : the end time point of the step
direction : the direction of the deal price for estimating the amount
# NOTE: this function is used for calculating target position. So the default direction is buy
"""
# calculate the total weight of tradable value
tradable_weight = 0.0
for stock_id in weight_position:
if self.is_stock_tradable(stock_id=stock_id, start_time=start_time, end_time=end_time):
# weight_position must be greater than 0 and less than 1
if weight_position[stock_id] < 0 or weight_position[stock_id] > 1:
raise ValueError(
"weight_position is {}, "
"weight_position is not in the range of (0, 1).".format(weight_position[stock_id])
)
tradable_weight += weight_position[stock_id]
if tradable_weight - 1.0 >= 1e-5:
raise ValueError("tradable_weight is {}, can not greater than 1.".format(tradable_weight))
amount_dict = {}
for stock_id in weight_position:
if weight_position[stock_id] > 0.0 and self.is_stock_tradable(
stock_id=stock_id, start_time=start_time, end_time=end_time
):
amount_dict[stock_id] = (
cash
* weight_position[stock_id]
/ tradable_weight
// self.get_deal_price(
stock_id=stock_id, start_time=start_time, end_time=end_time, direction=direction
)
)
return amount_dict
def get_real_deal_amount(self, current_amount, target_amount, factor):
"""
        Calculate the real adjusted deal amount when considering the trading unit
:param current_amount:
:param target_amount:
:param factor:
:return real_deal_amount; Positive deal_amount indicates buying more stock.
"""
if current_amount == target_amount:
return 0
elif current_amount < target_amount:
deal_amount = target_amount - current_amount
deal_amount = self.round_amount_by_trade_unit(deal_amount, factor)
return deal_amount
else:
if target_amount == 0:
return -current_amount
else:
deal_amount = current_amount - target_amount
deal_amount = self.round_amount_by_trade_unit(deal_amount, factor)
return -deal_amount
def generate_order_for_target_amount_position(self, target_position, current_position, start_time, end_time):
"""
Note: some future information is used in this function
Parameter:
target_position : dict { stock_id : amount }
current_position : dict { stock_id : amount}
trade_unit : trade_unit
down sample : for amount 321 and trade_unit 100, deal_amount is 300
deal order on trade_date
"""
# split buy and sell for further use
buy_order_list = []
sell_order_list = []
# three parts: kept stock_id, dropped stock_id, new stock_id
# handle kept stock_id
        # Because iteration order over a set is not fixed, the stocks would be traded in a
        # different order on every run, making backtests with the same parameters non-reproducible.
        # So we sort the stock ids first and then shuffle them with a fixed random seed,
        # which keeps the final order deterministic.
sorted_ids = sorted(set(list(current_position.keys()) + list(target_position.keys())))
random.seed(0)
random.shuffle(sorted_ids)
for stock_id in sorted_ids:
# Do not generate order for the nontradable stocks
if not self.is_stock_tradable(stock_id=stock_id, start_time=start_time, end_time=end_time):
continue
target_amount = target_position.get(stock_id, 0)
current_amount = current_position.get(stock_id, 0)
factor = self.get_factor(stock_id, start_time=start_time, end_time=end_time)
deal_amount = self.get_real_deal_amount(current_amount, target_amount, factor)
if deal_amount == 0:
continue
if deal_amount > 0:
# buy stock
buy_order_list.append(
Order(
stock_id=stock_id,
amount=deal_amount,
direction=Order.BUY,
start_time=start_time,
end_time=end_time,
factor=factor,
)
)
else:
# sell stock
sell_order_list.append(
Order(
stock_id=stock_id,
amount=abs(deal_amount),
direction=Order.SELL,
start_time=start_time,
end_time=end_time,
factor=factor,
)
)
# return order_list : buy + sell
return sell_order_list + buy_order_list
def calculate_amount_position_value(
self, amount_dict, start_time, end_time, only_tradable=False, direction=OrderDir.SELL
):
"""Parameter
position : Position()
amount_dict : {stock_id : amount}
direction : the direction of the deal price for estimating the amount
# NOTE:
This function is used for calculating current position value.
So the default direction is sell.
"""
value = 0
for stock_id in amount_dict:
if (
only_tradable is True
and self.check_stock_suspended(stock_id=stock_id, start_time=start_time, end_time=end_time) is False
and self.check_stock_limit(stock_id=stock_id, start_time=start_time, end_time=end_time) is False
or only_tradable is False
):
value += (
self.get_deal_price(
stock_id=stock_id, start_time=start_time, end_time=end_time, direction=direction
)
* amount_dict[stock_id]
)
return value
def _get_factor_or_raise_error(self, factor: float = None, stock_id: str = None, start_time=None, end_time=None):
"""Please refer to the docs of get_amount_of_trade_unit"""
if factor is None:
if stock_id is not None and start_time is not None and end_time is not None:
factor = self.get_factor(stock_id=stock_id, start_time=start_time, end_time=end_time)
else:
raise ValueError(f"`factor` and (`stock_id`, `start_time`, `end_time`) can't both be None")
return factor
def get_amount_of_trade_unit(self, factor: float = None, stock_id: str = None, start_time=None, end_time=None):
"""
get the trade unit of amount based on **factor**
the factor can be given directly or calculated in given time range and stock id.
`factor` has higher priority than `stock_id`, `start_time` and `end_time`
Parameters
----------
factor : float
the adjusted factor
stock_id : str
the id of the stock
start_time :
the start time of trading range
end_time :
the end time of trading range
"""
if not self.trade_w_adj_price and self.trade_unit is not None:
factor = self._get_factor_or_raise_error(
factor=factor, stock_id=stock_id, start_time=start_time, end_time=end_time
)
return self.trade_unit / factor
else:
return None
def round_amount_by_trade_unit(
self, deal_amount, factor: float = None, stock_id: str = None, start_time=None, end_time=None
):
"""Parameter
Please refer to the docs of get_amount_of_trade_unit
deal_amount : float, adjusted amount
factor : float, adjusted factor
return : float, real amount
"""
if not self.trade_w_adj_price and self.trade_unit is not None:
# the minimal amount is 1. Add 0.1 for solving precision problem.
factor = self._get_factor_or_raise_error(
factor=factor, stock_id=stock_id, start_time=start_time, end_time=end_time
)
return (deal_amount * factor + 0.1) // self.trade_unit * self.trade_unit / factor
return deal_amount
def _clip_amount_by_volume(self, order: Order, dealt_order_amount: dict) -> int:
"""parse the capacity limit string and return the actual amount of orders that can be executed.
NOTE:
this function will change the order.deal_amount **inplace**
- This will make the order info more accurate
Parameters
----------
order : Order
the order to be executed.
dealt_order_amount : dict
:param dealt_order_amount: the dealt order amount dict with the format of {stock_id: float}
"""
if order.direction == Order.BUY:
vol_limit = self.buy_vol_limit
elif order.direction == Order.SELL:
vol_limit = self.sell_vol_limit
if vol_limit is None:
return order.deal_amount
vol_limit_num = []
for limit in vol_limit:
assert isinstance(limit, tuple)
if limit[0] == "current":
limit_value = self.quote.get_data(
order.stock_id,
order.start_time,
order.end_time,
field=limit[1],
method="sum",
)
vol_limit_num.append(limit_value)
elif limit[0] == "cum":
limit_value = self.quote.get_data(
order.stock_id,
order.start_time,
order.end_time,
field=limit[1],
method="ts_data_last",
)
vol_limit_num.append(limit_value - dealt_order_amount[order.stock_id])
else:
raise ValueError(f"{limit[0]} is not supported")
vol_limit_min = min(vol_limit_num)
orig_deal_amount = order.deal_amount
order.deal_amount = max(min(vol_limit_min, orig_deal_amount), 0)
if vol_limit_min < orig_deal_amount:
self.logger.debug(f"Order clipped due to volume limitation: {order}, {list(zip(vol_limit_num, vol_limit))}")
def _get_buy_amount_by_cash_limit(self, trade_price, cash, cost_ratio):
"""return the real order amount after cash limit for buying.
Parameters
----------
trade_price : float
position : cash
cost_ratio : float
Return
----------
float
the real order amount after cash limit for buying.
"""
max_trade_amount = 0
if cash >= self.min_cost:
# critical_price means the stock transaction price when the service fee is equal to min_cost.
critical_price = self.min_cost / cost_ratio + self.min_cost
if cash >= critical_price:
# the service fee is equal to cost_ratio * trade_amount
max_trade_amount = cash / (1 + cost_ratio) / trade_price
else:
# the service fee is equal to min_cost
max_trade_amount = (cash - self.min_cost) / trade_price
return max_trade_amount
def _calc_trade_info_by_order(self, order, position: Position, dealt_order_amount):
"""
Calculation of trade info
**NOTE**: Order will be changed in this function
:param order:
:param position: Position
:param dealt_order_amount: the dealt order amount dict with the format of {stock_id: float}
:return: trade_price, trade_val, trade_cost
"""
trade_price = self.get_deal_price(order.stock_id, order.start_time, order.end_time, direction=order.direction)
total_trade_val = self.get_volume(order.stock_id, order.start_time, order.end_time) * trade_price
order.factor = self.get_factor(order.stock_id, order.start_time, order.end_time)
order.deal_amount = order.amount # set to full amount and clip it step by step
# Clipping amount first
# - It simulates that the order is rejected directly by the exchange due to large order
# Another choice is placing it after rounding the order
# - It simulates that the large order is submitted, but partial is dealt regardless of rounding by trading unit.
self._clip_amount_by_volume(order, dealt_order_amount)
# TODO: the adjusted cost ratio can be overestimated as deal_amount will be clipped in the next steps
trade_val = order.deal_amount * trade_price
if not total_trade_val or np.isnan(total_trade_val):
# TODO: assert trade_val == 0, f"trade_val != 0, total_trade_val: {total_trade_val}; order info: {order}"
adj_cost_ratio = self.impact_cost
else:
adj_cost_ratio = self.impact_cost * (trade_val / total_trade_val) ** 2
if order.direction == Order.SELL:
cost_ratio = self.close_cost + adj_cost_ratio
# sell
# if we don't know current position, we choose to sell all
# Otherwise, we clip the amount based on current position
if position is not None:
current_amount = (
position.get_stock_amount(order.stock_id) if position.check_stock(order.stock_id) else 0
)
if not np.isclose(order.deal_amount, current_amount):
# when not selling last stock. rounding is necessary
order.deal_amount = self.round_amount_by_trade_unit(
min(current_amount, order.deal_amount), order.factor
)
# in case of negative value of cash
if position.get_cash() + order.deal_amount * trade_price < max(
order.deal_amount * trade_price * cost_ratio,
self.min_cost,
):
order.deal_amount = 0
self.logger.debug(f"Order clipped due to cash limitation: {order}")
elif order.direction == Order.BUY:
cost_ratio = self.open_cost + adj_cost_ratio
# buy
if position is not None:
cash = position.get_cash()
trade_val = order.deal_amount * trade_price
if cash < max(trade_val * cost_ratio, self.min_cost):
# cash cannot cover cost
order.deal_amount = 0
self.logger.debug(f"Order clipped due to cost higher than cash: {order}")
elif cash < trade_val + max(trade_val * cost_ratio, self.min_cost):
# The money is not enough
max_buy_amount = self._get_buy_amount_by_cash_limit(trade_price, cash, cost_ratio)
order.deal_amount = self.round_amount_by_trade_unit(
min(max_buy_amount, order.deal_amount), order.factor
)
self.logger.debug(f"Order clipped due to cash limitation: {order}")
else:
# The money is enough
order.deal_amount = self.round_amount_by_trade_unit(order.deal_amount, order.factor)
else:
# Unknown amount of money. Just round the amount
order.deal_amount = self.round_amount_by_trade_unit(order.deal_amount, order.factor)
else:
raise NotImplementedError("order type {} error".format(order.type))
trade_val = order.deal_amount * trade_price
trade_cost = max(trade_val * cost_ratio, self.min_cost)
if trade_val <= 1e-5:
# if dealing is not successful, the trade_cost should be zero.
trade_cost = 0
return trade_price, trade_val, trade_cost
def get_order_helper(self) -> OrderHelper:
if not hasattr(self, "_order_helper"):
# cache to avoid recreate the same instance
self._order_helper = OrderHelper(self)
return self._order_helper
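# Hedged construction sketch (assumes qlib has already been initialised with
# daily data via `qlib.init(...)`); it only illustrates the documented
# `deal_price`, `limit_threshold` and `volume_threshold` formats:
# exchange = Exchange(
#     freq="day",
#     codes="csi300",
#     deal_price="close",  # "$" is prepended automatically
#     limit_threshold=0.095,  # float form; a (buy_expr, sell_expr) tuple is also accepted
#     volume_threshold={"all": ("current", "0.05 * $volume")},
#     open_cost=0.0015,
#     close_cost=0.0025,
#     min_cost=5,
# )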
|
from urllib.parse import urlencode
import pytest
from django.core.exceptions import ImproperlyConfigured
from saleor.core.emails import get_email_context, prepare_url
def test_get_email_context(site_settings):
site = site_settings.site
expected_send_kwargs = {"from_email": site_settings.default_from_email}
proper_context = {
"domain": site.domain,
"site_name": site.name,
}
send_kwargs, received_context = get_email_context()
assert send_kwargs == expected_send_kwargs
assert proper_context == received_context
def test_email_having_display_name_in_settings(customer_user, site_settings, settings):
expected_from_email = "Info <hello@mirumee.com>"
site_settings.default_mail_sender_name = None
site_settings.default_mail_sender_address = None
settings.DEFAULT_FROM_EMAIL = expected_from_email
assert site_settings.default_from_email == expected_from_email
def test_email_with_email_not_configured_raises_error(settings, site_settings):
"""Ensure an exception is thrown when not default sender is set;
both missing in the settings.py and in the site settings table.
"""
site_settings.default_mail_sender_address = None
settings.DEFAULT_FROM_EMAIL = None
with pytest.raises(ImproperlyConfigured) as exc:
_ = site_settings.default_from_email
assert exc.value.args == ("No sender email address has been set-up",)
def test_prepare_url():
redirect_url = "https://www.example.com"
params = urlencode({"param1": "abc", "param2": "xyz"})
result = prepare_url(params, redirect_url)
assert result == "https://www.example.com?param1=abc¶m2=xyz"
|
from pokermon.poker.board import Board, mkflop
from pokermon.poker.cards import mkcard
from pokermon.poker.deal import FullDeal
from pokermon.poker.evaluation import EvaluationResult
from pokermon.poker.game_runner import GameRunner
from pokermon.poker.hands import HandType, mkhand
from pokermon.poker.result import Result, get_result
def test_game_result():
deal = FullDeal(
hole_cards=[mkhand("AcAh"), mkhand("KdKs"), mkhand("JhJd")],
board=Board(flop=mkflop("6dQc2s"), turn=mkcard("6s"), river=mkcard("3c")),
)
game = GameRunner(starting_stacks=[100, 200, 300])
game.start_game()
# Preflop
game.bet_raise(to=10)
game.call()
game.call()
# Flop
game.bet_raise(to=20)
game.call()
game.call()
# Turn
game.bet_raise(to=30)
game.call()
game.call()
# River
game.check()
game.check()
game.check()
result = get_result(deal, game.game_view())
assert result == Result(
won_hand=[True, False, False],
hand_results=[
EvaluationResult(
hand_type=HandType.TWO_PAIR,
kicker=33686528,
),
EvaluationResult(hand_type=HandType.TWO_PAIR, kicker=16909312),
EvaluationResult(hand_type=HandType.TWO_PAIR, kicker=4326400),
],
went_to_showdown=[True, True, True],
remained_in_hand=[True, True, True],
earned_from_pot=[180, 0, 0],
profits=[120, -60, -60],
)
def test_game_with_tie():
deal = FullDeal(
hole_cards=[mkhand("Ac3h"), mkhand("Ad4s"), mkhand("5s5d")],
board=Board(flop=mkflop("AsAhKc"), turn=mkcard("Kd"), river=mkcard("7h")),
)
game = GameRunner(starting_stacks=[100, 200, 300])
game.start_game()
# Preflop
game.bet_raise(to=30)
game.call()
game.call()
# Flop
game.bet_raise(to=50)
game.call()
game.call()
# Turn
game.bet_raise(to=20)
game.call()
game.call()
# River
game.bet_raise(to=100)
game.call()
result = get_result(deal, game.game_view())
assert result == Result(
won_hand=[True, True, False],
hand_results=[
EvaluationResult(hand_type=HandType.FULL_HOUSE, kicker=33556480),
EvaluationResult(hand_type=HandType.FULL_HOUSE, kicker=33556480),
EvaluationResult(hand_type=HandType.TWO_PAIR, kicker=50331680),
],
remained_in_hand=[True, True, True],
went_to_showdown=[True, True, True],
earned_from_pot=[150, 350, 0],
profits=[50, 150, -200],
)
|
import requests
from jikanpy import Jikan
from animeoffline import *
from animeoffline import config
from animeoffline import utils
# if anime_offline/config.py is unedited, this assumes a local instance
# of jikan-rest is running on your system
# https://github.com/jikan-me/jikan-rest
jikan = Jikan(selected_base=config.jikan_url)
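# Hedged usage sketch: jikanpy's Jikan client exposes lookups such as
# jikan.anime(<mal_id>); the id below is illustrative.
# details = jikan.anime(1)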
|
#!/usr/bin/env python3
import argparse, sys
import time, os
from rclpy.qos import qos_profile_system_default
from rclpy.qos import QoSProfile
from rclpy.qos import QoSReliabilityPolicy
from rclpy.utilities import remove_ros_args
from rclpy.node import Node
import rclpy
import nudged
# from rmf_task_msgs.msg import Loop
from rmf_fleet_msgs.msg import Location as rmf_loc
from rmf_fleet_msgs.msg import FleetState, RobotState, RobotMode
from rmf_fleet_msgs.msg import PathRequest, ModeRequest, DestinationRequest, ModeParameter
from rna_task_msgs.msg import RnaTask, RnaTaskstatus, Location, RnaEmergency, RnaPredefinepos, RnaVsm
# the robot
ROBOT_UNIQUE_ID = os.environ.get('ROBOT_ID', 'RNAxx')
# topic define:
RMF_TASK = '/rna_task'
RMF_TASK_STATUS = '/rna_task_status'
RMF_VSM_RECORD = '/rna_vsm_record'
RMF_PARSE_REQUESTS = '/parse_requests'
RMF_FLEET_STATES = '/fleet_states'
RMF_MODE_REQUESTS = '/mode_requests'
RMF_PATH_REQUESTS = '/path_requests'
RMF_DESTINATION_REQUESTS = '/destination_requests'
# some navigation points hardcode below, to be changed accordingly
# x y heading
NURSE_STATION = [0.581, 19.511, -1.511]
# x y heading
HOME_POSITION = [-2.912, 18.559, 1.561]
# x y heading bed_heading
Neutral_point = [-2.324, 21.091, 1.557, 1.557]
# x y heading bed_heading
Bedside_left = [-3.170, 21.261, 0.9025, 1.557]
# x y heading bed_heading
Bedside_right = [-1.485, 21.229, 2.143, 1.557]
def parse_argv(argv=sys.argv[1:]):
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-task_id", "--task_id", type=str, help="task id", default='T001')
    parser.add_argument("-task_name", "--task_name", type=str, help="task name", default='VSM')
    parser.add_argument("-name", "--patient_name", type=str, help="patient name", default='John')
    parser.add_argument("-qr", "--qr_code", type=str, help="qr code value", default='1321351')
    parser.add_argument("-item", "--item_name", type=str, help="item name", default='water')
    parser.add_argument("-load", "--load_operation", type=str, help="open, or close compartment", default='open')
    parser.add_argument("-patient_id", "--patient_id", type=int, help="patient_id", default=2)
    parser.add_argument("-robot", "--robot_id", type=str, help="robot id", default=ROBOT_UNIQUE_ID)
    parser.add_argument("-timer", "--enable_timer", type=int, help="enable_timer for debugging", default=1)
    parser.add_argument("-status", "--show_fleet_state", type=int, help="show_fleet_state msg", default=0)
    parser.add_argument("-bed", "--bed", type=int, help="bed number: i.e 1, 2 or...", default=1)
    parser.add_argument("-escape", "--escape_on", type=int, help="1 is on , 0 is off", default=1)
    parser.add_argument("-topic", "--rmf_topic", type=int, help="1=RnaTask, 2=ModeRequest, 3=PathRequest, 4=DestinationRequest", default=1)
    parser.add_argument("-mode", "--mode", type=int, help="robot mode", default=1)
    parser.add_argument("-schedule_type", "--schedule_type", type=str, help="schedule_type", default='NONE_SCHEDULE')
    parser.add_argument("-schedule_time", "--schedule_time", type=str, help="in format of yyyy-mm-dd hh:mm:ss", default='yyyy-mm-dd hh:mm:ss')
    parser.add_argument(
        'argv', nargs=argparse.REMAINDER,
        help='Pass arbitrary arguments to the executable')
    argv = remove_ros_args(args=argv)
    args = parser.parse_args(argv)
    return args
class Simple_Pub_Sub(Node):
    def __init__(self, args):  # args comes from parse_argv(); carries enable_timer, show_fleet_state, etc.
super().__init__('simple_pub')
qos_reliable = QoSProfile(
depth=10,
reliability=QoSReliabilityPolicy.RMW_QOS_POLICY_RELIABILITY_RELIABLE)
# Requires four x,y points
# x , y
self.ref_coordinates_rna = [[NURSE_STATION[0], NURSE_STATION[1]],
[HOME_POSITION[0], HOME_POSITION[1]],
[Neutral_point[0], Neutral_point[1]],
[Bedside_right[0], Bedside_right[1]]]
self.ref_coordinates_rmf = [[75.787, -23.029],
[73.013, -23.350],
[73.537, -20.965],
[74.57, -20.78]]
self.rmf2rna_transform = nudged.estimate(
self.ref_coordinates_rmf,
self.ref_coordinates_rna
)
self.rna2rmf_transform = nudged.estimate(
self.ref_coordinates_rna,
self.ref_coordinates_rmf
)
        self.args = args
        self.rna_task = RnaTask()
        self.enable_timer = bool(args.enable_timer)
        self.show_fleet_state = bool(args.show_fleet_state)
self.fleet_name='rna'
self.path_requests = PathRequest()
# Publishers and subscribers
self.pub_rna_task = self.create_publisher(RnaTask, RMF_TASK, qos_profile=qos_reliable) # send the task to robot
# receive vsm record from Robot
self.create_subscription(
RnaVsm, RMF_VSM_RECORD, self.vsm_record_callback, qos_profile=qos_reliable)
# receive task status from Robot
self.create_subscription(
RnaTaskstatus, RMF_TASK_STATUS, self.rmf_task_status_callback, qos_profile=qos_reliable)
if self.enable_timer:
timer_period = 1.0
self.tmr = self.create_timer(timer_period, self.rna_task_callback)
self.create_subscription(
PathRequest, RMF_PATH_REQUESTS, self.path_req_callback, qos_profile=qos_reliable)
def vsm_record_callback(self, vsm_record):
print(vsm_record.robot_name, vsm_record.patient_id, vsm_record.record_time, vsm_record.heart_rate)
print(vsm_record.blood_pressure, vsm_record.temperature, vsm_record.respiration_rate, vsm_record.spo2, vsm_record.pain_score)
def rmf_task_status_callback(self, rmf_status):
print(rmf_status.task_id, rmf_status.status, rmf_status.started_time, rmf_status.ended_time, rmf_status.description)
    def rna_task_callback(self):
        # rclpy timer callbacks receive no arguments; read the parsed CLI args from the node
        args = self.args
home = HOME_POSITION
PRDEF_POS = (Bedside_left, Neutral_point, Bedside_right)
task = RnaTask()
# common parameters for all the tasks
task.task_id = args.task_id #'T001'
task.task_name = args.task_name
task.robot_name = args.robot_id
# home position
loc = Location()
loc.x = home[0]
loc.y = home[1]
loc.heading = home[2]
task.home_position = loc
if task.task_name in ('VSM', 'MEDICINE_DELIVERY', 'ITEM_DELIVERY'):
task.bed_id = args.bed
task.patient_name = args.patient_name
task.patient_id = args.patient_id
task.barcode = args.qr_code
task.item_name = args.item_name
# schedule task checking
task.schedule_type = args.schedule_type
if task.schedule_type != 'NONE_SCHEDULE': # if it's not NONE_SCHEDULE, the field of task.schedule_time need to be specified
task.schedule_time = args.schedule_time # in format of "yyyy-mm-dd hh:mm:ss"
# predefined patient engage points
for pos in PRDEF_POS:
prepos = RnaPredefinepos()
loc = Location()
loc.x = pos[0]
loc.y = pos[1]
loc.heading = pos[2]
prepos.point=loc
prepos.bed_heading= pos[3]
task.pre_def_pos.append(prepos)
elif task.task_name == 'GO_NURSE_STATION':
loc = Location()
loc.x = NURSE_STATION[0]
loc.y = NURSE_STATION[1]
loc.heading = NURSE_STATION[2]
task.nurse_station = loc
elif task.task_name == 'CODE_RED_BLUE':
escape = RnaEmergency()
loc = Location()
loc.x = home[0] # hardcode here for temp testing, to be defined the emergency holding point
loc.y = home[1]
loc.heading = home[2]
escape.point = loc
escape.emergency_on = bool(args.escape_on) # True/False to switch it on/off
task.escape = escape
elif task.task_name == 'LOAD_ITEM':
task.item_name = args.item_name
task.load_operation = args.load_operation
#elif task.task_name == 'CANCEL_TASK' or task.task_name == 'GO_HOME':
#pass
print("debug: timer_callbak, topic issued only once")
self.tmr.cancel()
# On reciept of a path_request. Transform Locations to RNA plane
# Set the path request destination to be a task name
self.pub_rna_task.publish(self.rna_task)
    def path_req_callback(self, msg: PathRequest):
        self.path_requests = msg
        pprint(self.path_requests)
        # Keep only the last point of the path for now.
        holder = self.path_requests.path[-1]
        print('debug: holder has value of', holder)
        self.path_requests.path.clear()
        self.path_requests.path.append(holder)
        # The original intent was to only issue an RNA task once a path request
        # is received; the task itself is currently built in rna_task_callback.
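    # Hypothetical helper (not part of the original script): sketches how the
    # nudged transforms estimated in __init__ could convert an RMF location
    # (rmf_fleet_msgs Location with x/y/yaw) into the RNA map frame.
    def rmf_to_rna(self, rmf_location):
        x, y = self.rmf2rna_transform.transform([rmf_location.x, rmf_location.y])
        loc = Location()
        loc.x = float(x)
        loc.y = float(y)
        # rotate the heading by the estimated rotation between the two frames
        loc.heading = float(rmf_location.yaw + self.rmf2rna_transform.get_rotation())
        return loc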
def main(argv=sys.argv[1:]):
    args = parse_argv(argv)
    rclpy.init(args=argv)
    node = Simple_Pub_Sub(args)
try:
rclpy.spin(node)
finally:
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import dace.sdfg.nodes as nodes
from dace.fpga_testing import xilinx_test
import importlib.util
import numpy as np
from pathlib import Path
@xilinx_test()
def test_map_unroll_processing_elements():
    # Grab the systolic GEMM implementation from the samples directory
spec = importlib.util.spec_from_file_location(
"gemm",
Path(__file__).parent.parent.parent / "samples" / "fpga" / "gemm_systolic_vectorized.py")
gemm = importlib.util.module_from_spec(spec)
spec.loader.exec_module(gemm)
N = 128
K = 256
M = 512
P = 8
W = 4
TN = 32
TM = 128
# Create an SDFG with multiple processing elements
sdfg = gemm.make_sdfg("map_unroll_processing_elements", dace.vector(dace.float32, W))
sdfg.specialize({"P": P, "W": W, "TN": TN, "TM": TM})
for state in sdfg.states():
for node in state.nodes():
if isinstance(node, nodes.MapEntry) and node.params == ["p"]:
node.unroll = False
node.schedule = dace.ScheduleType.Unrolled
# Initialize arrays: Randomize A and B, zero C
A = np.ndarray([N, K], dtype=dace.float32.type)
B = np.ndarray([K, M], dtype=dace.float32.type)
C = np.ndarray([N, M], dtype=dace.float32.type)
A[:] = np.random.rand(N, K).astype(dace.float32.type)
B[:] = np.random.rand(K, M).astype(dace.float32.type)
C[:] = np.random.rand(N, M).astype(dace.float32.type)
C_regression = A @ B + C
sdfg(A=A, B=B, C=C, N=N, M=M, K=K)
    diff = np.linalg.norm(C_regression - C) / float(N * M)
    if not np.allclose(C_regression, C):
        raise ValueError("Verification failed (difference: {})".format(diff))
return sdfg
if __name__ == "__main__":
test_map_unroll_processing_elements(None)
|
class Component:
def __init__(self, start_node, end_node, val):
self.symbol = ''
self.type = 'generic'
self.start_node = start_node
self.end_node = end_node
self.val = val
self.id = -1
def is_node(self, node):
if (self.start_node == node) or (self.end_node == node):
return True
else:
return False
def set_id(self, id_num):
self.id = id_num
def get_dir(self, node):
if self.start_node == node:
return 1
elif self.end_node == node:
return -1
else:
return 'error'
def get_other_node(self, node):
if self.start_node != node and self.end_node == node:
return self.start_node
elif self.end_node != node and self.start_node == node:
return self.end_node
else:
return -1
class Resistor(Component):
def __init__(self, start_node, end_node, val):
super().__init__(start_node, end_node, val)
self.symbol = 'R'
self.type = 'passive'
class Capacitor(Component):
def __init__(self, start_node, end_node, val):
super().__init__(start_node, end_node, val)
self.symbol = 'C'
self.type = 'passive'
class VSource(Component):
def __init__(self, start_node, end_node, val):
super().__init__(start_node, end_node, val)
self.symbol = 'V'
self.type = 'active'
class ISource(Component):
def __init__(self, start_node, end_node, val):
super().__init__(start_node, end_node, val)
self.symbol = 'I'
self.type = 'active'
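# Minimal usage sketch (not part of the original module): a voltage divider
# built from the classes above, exercising the node helpers.
if __name__ == '__main__':
    v1 = VSource(1, 0, 5.0)    # 5 V source between node 1 and ground (node 0)
    r1 = Resistor(1, 2, 1e3)   # 1 kOhm between nodes 1 and 2
    r2 = Resistor(2, 0, 2e3)   # 2 kOhm between node 2 and ground
    for idx, comp in enumerate((v1, r1, r2)):
        comp.set_id(idx)
    assert r1.is_node(2) and r1.get_other_node(2) == 1
    assert r2.get_dir(2) == 1 and r2.get_dir(0) == -1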
|
# Generated by Django 3.0.6 on 2020-05-25 22:47
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('register', '0013_auto_20200525_2239'),
]
operations = [
migrations.AlterField(
model_name='patient',
name='birth_year',
field=models.PositiveSmallIntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1900), django.core.validators.MaxValueValidator(2020)], verbose_name='Year of birth'),
),
migrations.AlterField(
model_name='patient',
name='sex',
field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Female'), (2, 'Male'), (3, 'Other')], null=True),
),
]
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Thomas Amland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
from threading import Lock
class _Status(object):
OPEN = object()
CLOSED = object()
SUSPENDED = object()
class _ScheduleState(object):
IDLE = 1
SCHEDULED = 2
class InternalMailbox(object):
def __init__(self, dispatcher, actor, throughput=5):
# Should only be set in message handler
self._primary_status = _Status.OPEN
        self._idle = True  # idle (True) vs. scheduled (False); see _ScheduleState
self._idle_lock = Lock()
self._message_queue = deque()
self._system_message_queue = deque()
self._actor = actor
self._dispatcher = dispatcher
self._throughput = throughput
def enqueue(self, item):
self._message_queue.append(item)
def enqueue_system(self, item):
self._system_message_queue.append(item)
def close(self):
""" Stop processing of all messages."""
self._primary_status = _Status.CLOSED
def suspend(self):
""" Stop processing of user message. """
self._primary_status = _Status.SUSPENDED
def resume(self):
""" Continue processing of user message. """
self._primary_status = _Status.OPEN
def is_closed(self):
return self._primary_status is _Status.CLOSED
def is_suspended(self):
return self._primary_status is _Status.SUSPENDED
def has_messages(self):
return len(self._message_queue) > 0 or len(self._system_message_queue) > 0
def is_idle(self):
return self._idle
def is_scheduled(self):
return not self._idle
def set_scheduled(self):
"""
Returns True if state was successfully changed from idle to scheduled.
"""
with self._idle_lock:
if self._idle:
self._idle = False
return True
return False
def set_idle(self):
with self._idle_lock:
self._idle = True
def process_messages(self):
if self._primary_status is _Status.CLOSED or not self.has_messages():
return
self._process_system_message()
if self._primary_status is _Status.OPEN:
for _ in range(min(self._throughput, len(self._message_queue))):
self._actor.handle_message(self._message_queue.popleft())
self._process_system_message()
if self._primary_status is not _Status.OPEN:
break
self.set_idle()
self._dispatcher.schedule_execution(self)
def _process_system_message(self):
if len(self._system_message_queue) > 0:
self._actor.handle_system_message(self._system_message_queue.popleft())
def flush_messages(self):
if not self.is_closed():
            raise Exception("mailbox must be closed before flushing messages")
messages = []
while len(self._message_queue) > 0:
messages.append(self._message_queue.popleft())
return messages
|
# class Tree:
# def __init__(self, val, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def solve(self, root):
# Write your code here
siblings = []
count = 0
if root:
siblings.append(root)
while siblings:
new = []
for node in siblings:
if node.left is None and node.right:
count += 1
if node.right is None and node.left:
count += 1
if node.left:
new.append(node.left)
if node.right:
new.append(node.right)
siblings = new
return count
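# Quick sanity check (not part of the original solution), using a Tree class
# matching the commented-out definition above: nodes 2 and 3 each have exactly
# one child, so the expected answer is 2.
class Tree:
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
if __name__ == '__main__':
    root = Tree(1, Tree(2, Tree(4)), Tree(3, right=Tree(5)))
    assert Solution().solve(root) == 2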
|
from tests import TestCase
from unittest import skip
class TestMine(TestCase):
def test_my_account_as_anonymous(self):
self.assertLogin('/user')
def test_my_account_as_gateway(self):
self.login('main-gateway1@example.com', 'admin')
html = self.assertOk('/user')
response = self.client.post(html.find('//form').get('action'), data={'email': 'another@example.com'}, follow_redirects=True)
assert 'Update successful' in str(response.get_data())
@skip('getting a 400 status, suspect file issues, can\'t duplicate irl')
def test_my_gateway_as_gateway(self):
self.login('main-gateway1@example.com', 'admin')
html = self.assertOk('/gateway')
form = html.find('//form')
response = self.client.post(form.get('action'),
content_type=form.get('enctype'),
data={'id': 'main-gateway1', 'title': 'Another Title', 'logo': ''},
follow_redirects=True)
output = str(response.get_data())
assert 'Update Another Title successful' in output
def test_my_network_as_gateway(self):
self.login('main-gateway1@example.com', 'admin')
self.assertForbidden('/network')
def test_my_account_as_network(self):
self.login('main-network@example.com', 'admin')
html = self.assertOk('/user')
response = self.client.post(html.find('//form').get('action'), data={'email': 'another@example.com'}, follow_redirects=True)
assert 'Update successful' in str(response.get_data())
def test_my_gateway_as_network(self):
self.login('main-network@example.com', 'admin')
self.assertForbidden('/gateway')
def test_my_network_as_network(self):
self.login('main-network@example.com', 'admin')
html = self.assertOk('/network')
response = self.client.post(html.find('//form').get('action'), data={'id': 'main-network', 'title': 'Another Title'}, follow_redirects=True)
assert 'Update successful' in str(response.get_data())
|
from django.shortcuts import render, HttpResponseRedirect
from django.urls import reverse
def index(request):
if request.user.is_authenticated:
return HttpResponseRedirect(reverse('qa:index'))
else:
return render(request, 'web/index.html')
|
"""
Package for vectorized test particle simulation
Author : Tien Vo
Date : 12-29-2021
"""
from .plasma_parameters import *
from .hdf5_helpers import *
from .distribution import *
from .conversions import *
from .mpi_helpers import *
from .constants import *
from .whistler import *
from .pusher import *
from .chaos import *
from .misc import *
|
# Problem Name: Tandem Bicycle
# Problem Description:
# A tandem bicycle is a bicycle that's operated by two people: person A and person B. Both people pedal the bicycle, but the person who pedals faster dictates the speed of the bicycle. So if person A pedals at a speed of 5 and person B pedals at a speed of 4, the tandem bicycle moves at a speed of 5 (i.e. tandem speed = max(speedA, speedB)).
# You're given two lists of positive integers: one that contains the speeds of riders wearing red shirts and one that contains the speeds of riders wearing blue shirts. Each rider is represented by a single positive integer, which is the speed that they pedal a tandem bicycle at. Both lists have the same length, meaning that there are as many red-shirt riders as there are blue-shirt riders. Your goal is to pair every rider wearing a red shirt with a rider wearing a blue shirt to operate a tandem bicycle.
# Write a function that returns the maximum possible total speed or the minimum possible total speed of all the tandem bicycles being ridden, based on an input parameter, fastest. If fastest = True, your function should return the maximum possible total speed; otherwise it should return the minimum possible total speed.
# "Total speed" is defined as the sum of the speeds of all the tandem bicycles being ridden. For example, if there are 4 riders (2 red-shirt riders and 2 blue-shirt riders) who have speeds of 1, 3, 4, 5, and if they're paired on tandem bicycles as follows: [1, 4], [5, 3], then the total speed of these tandem bicycles is 4 + 5 = 9.
####################################
# Sample Input:
# redShirtSpeeds = [5, 5, 3, 9, 2]
# blueShirtSpeeds = [3, 6, 7, 2, 1]
# fastest = true
# Sample Output: 32
####################################
"""
Explain the solution:
- The brute-force approach to solve this problem is to generate every possible set of pairs of riders and to determine the total speed that each of these sets generates. This solution does work, but it isn't optimal. Can you think of a better way to solve this problem?
- Try looking at the input arrays in sorted order. How might this help you solve the problem?
- When generating the maximum total speed, you want to pair the slowest red-shirt riders with the fastest blue-shirt riders and vice versa, so as to always take advantage of the largest speeds. When generating the minimum total speed, you want to pair the fastest red-shirt riders with the fastest blue-shirt riders, so as to "eliminate" a large speed by pairing it with another large (or larger) speed.
- Sort the input arrays in place, and follow the strategy discussed in Hint #3. With the inputs sorted, you can find the slowest and largest speeds from each shirt color in constant time.
- O(n log(n)) time | O(1) space - where n is the number of tandem bicycles
##################
Detailed explanation of the Solution:
create a function of tandemBicycle(redShirtSpeeds, blueShirtSpeeds, fastest):
sort the redShirtSpeeds and blueShirtSpeeds arrays in place
if not fastest:
call the function reverseArrayInPlace(redShirtSpeeds)
totalSpeed is initialized to 0
for idx in range(len(redShirtSpeeds)):
rider1 = redShirtSpeeds[idx] # array in sorted ascending order
rider2 = blueShirtSpeeds[len(blueShirtSpeeds) - 1 - idx] # Reverse the blueShirtSpeeds array in descending order
totalSpeed += max(rider1, rider2)
return totalSpeed
create a function of reverseArrayInPlace(array):
start = 0
end = len(array) - 1
while start < end:
array[start], array[end] = array[end], array[start]
start += 1
end -= 1
"""
####################################
def tandemBicycle(redShirtSpeeds, blueShirtSpeeds, fastest):
redShirtSpeeds.sort()
blueShirtSpeeds.sort()
if not fastest:
reverseArrayInPlace(redShirtSpeeds)
totalSpeed = 0
for idx in range(len(redShirtSpeeds)):
rider1 = redShirtSpeeds[idx] # array in sorted ascending order
rider2 = blueShirtSpeeds[len(blueShirtSpeeds) - 1 - idx] # Reverse the blueShirtSpeeds array in descending order
totalSpeed += max(rider1, rider2)
return totalSpeed
def reverseArrayInPlace(array):
start = 0
end = len(array) - 1
while start < end:
array[start], array[end] = array[end], array[start]
start += 1
end -= 1
print(tandemBicycle([5, 5, 3, 9, 2], [3, 6, 7, 2, 1], True))
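# Illustrative extra check (not in the original): with fastest=False the same
# inputs pair the fastest riders together, giving the minimum total speed of 25.
print(tandemBicycle([5, 5, 3, 9, 2], [3, 6, 7, 2, 1], False))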
|
"""
Convenience functions and sample script for converting
Freesurfer annot files to a set of VTKs (and manifest file)
for use with the roygbiv web tool.
"""
import glob
import os
import json
import numpy as np
import nibabel as nib
HTML_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'web'))
DATA_DIR = os.environ.get('ROYGBIV_PATH', os.path.join(HTML_DIR, 'data'))
def downsample_vtk(vtk_file, sample_rate):
"""Sample rate: number between 0 and 1."""
from mindboggle.mio.vtks import read_vtk, write_vtk
from mindboggle.guts.mesh import decimate_file
if (sample_rate < 0 or sample_rate > 1):
raise ValueError('0 <= sample_rate <= 1; you input %f' % sample_rate)
# Downsample
decimate_file(vtk_file, reduction=1 - sample_rate, output_vtk=vtk_file,
save_vtk=True, smooth_steps=0)
    # Hack to re-save the decimated file in standard VTK format
vtk_data = read_vtk(vtk_file)
write_vtk(vtk_file, *vtk_data[:-2])
def add_metadata(metadata, json_file='files_to_load.json',
output_dir=DATA_DIR):
"""Additional metadata to insert into the manifest file."""
json_filepath = os.path.join(output_dir, json_file)
    with open(json_filepath, 'r') as fp:
        old_metadata = json.load(fp)
    old_metadata.update(metadata)
    # text mode so json.load/json.dump work on both Python 2 and 3
    with open(json_filepath, 'w') as fp:
        json.dump(old_metadata, fp)
def freesurfer_annot_to_vtks(surface_file, label_file, output_stem='',
json_file=None,
sample_rate=1,
force=False, verbose=True, output_dir=DATA_DIR):
""" Splits a surface file into vtk files based on regions in the label file.
"""
def print_verbose(*args):
"""Print only if verbose True"""
if verbose:
print(args)
if json_file is None:
json_file = output_stem + 'files_to_download.json'
#
vtk_dir = os.path.join(output_dir, os.path.dirname(output_stem))
# Make the output directory
    if not os.path.exists(vtk_dir):
os.makedirs(vtk_dir)
# Convert the surface file to vtk
if os.path.splitext(surface_file)[1] == '.vtk':
surface_vtk = surface_file
else:
surface_vtk = os.path.join(vtk_dir,
os.path.basename(surface_file) + '.vtk')
if force or not os.path.exists(surface_vtk):
print_verbose('Converting surface to vtk: %s' % surface_file)
from mindboggle.mio.vtks import freesurfer_surface_to_vtk
freesurfer_surface_to_vtk(surface_file, surface_vtk)
# Convert the data file to vtk
if os.path.splitext(label_file)[1] == '.vtk':
label_vtk = label_file
labels, names = None, None
else:
label_vtk = os.path.join(vtk_dir,
os.path.basename(label_file) + '.vtk')
if force or not os.path.exists(label_vtk):
print_verbose('Converting data to vtk: %s' % label_file)
from mindboggle.mio.vtks import freesurfer_annot_to_vtk
freesurfer_annot_to_vtk(label_file, surface_vtk, label_vtk)
labels, _, names = nib.freesurfer.read_annot(label_file)
used_labels = np.unique(labels[labels >= 1])
used_names = np.asarray(names)[used_labels]
print_verbose("Unused areas: %s" % (set(names) - set(used_names)))
names = used_names
labels = used_labels
# Expand the data file to multiple vtks
print_verbose('Expanding vtk data to multiple files.')
from mindboggle.mio.vtks import explode_scalars
explode_output_stem = os.path.join(output_dir, output_stem)
explode_scalars(label_vtk, output_stem=explode_output_stem)
    output_vtks = list(filter(lambda p: p not in [surface_vtk, label_vtk],
                              glob.glob(explode_output_stem + '*.vtk')))
print_verbose('Downsampling vtk files.')
for vtk_file in output_vtks:
downsample_vtk(vtk_file, sample_rate=sample_rate)
print_verbose('Creating download manifest file.')
if labels is None:
names = labels = [os.path.splitext(vtk_file)[0]
for vtk_file in output_vtks]
vtk_dict = dict([(name, output_stem + '%s.vtk' % lbl)
for lbl, name in zip(labels, names)])
json_file = os.path.join(output_dir, json_file)
    with open(json_file, 'w') as fp:
json.dump(dict(filename=vtk_dict), fp)
return json_file
def atlas2aparc(atlas_name, hemi=None):
""" Find freesurfer atlas aparc from atlas key.
Valid keys: desikan, destrieux, dkt
    If `hemi` is specified, a valid filename will be returned;
otherwise a format string will be returned."""
if atlas_name == 'desikan':
annot_file_template = '%s.aparc.annot'
elif atlas_name == 'destrieux':
annot_file_template = '%s.aparc.a2009s.annot'
elif atlas_name == 'dkt':
annot_file_template = '%s.aparc.DKTatlas40.annot'
else:
raise ValueError('Unknown atlas: %s' % atlas_name)
return annot_file_template % (hemi if hemi else '%s')
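# For example (illustrative): atlas2aparc('dkt', hemi='lh') returns
# 'lh.aparc.DKTatlas40.annot', while atlas2aparc('dkt') returns the format
# string '%s.aparc.DKTatlas40.annot' for later hemisphere substitution.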
def dump_vtks(subject_path, atlas_name, sample_rate=1, surface='pial',
force=False, output_dir=DATA_DIR):
""" Convenience function to dump vtk parcels for each hemisphere."""
all_data = dict(filename=dict())
for hemi in ['lh', 'rh']:
surface_file = os.path.join(subject_path, 'surf', '%s.%s' % (
hemi, surface))
label_file = os.path.join(subject_path, 'label',
atlas2aparc(atlas_name, hemi=hemi))
json_file = freesurfer_annot_to_vtks(
surface_file, label_file, output_stem='%s_' % hemi,
json_file='%s_files_to_load.json' % hemi,
sample_rate=sample_rate, force=force, output_dir=output_dir)
with open(json_file, 'rb') as fp:
hemi_files = json.load(fp)['filename']
for key, val in hemi_files.items():
hemi_key = '%s_%s' % (hemi, key)
all_data['filename'][hemi_key] = val
# Create a unified json file for lh/rh
    with open('files_to_load.json', 'w') as fp:
json.dump(all_data, fp)
|
#!/usr/bin/env python
import os
import scipy.stats as stats
import numpy as np
import pandas as pd
import ndjson
from pandas.io.json import json_normalize
import statsmodels.stats.multitest as mt
# The schema of the cpg genotype file is the following and the header is included
# snp_id
# chr
# asm_region_inf
# asm_region_sup
# ref_reads
# alt_reads
# effect
# ref: array of 'methyl', which is the methyl % of each REF read
# alt: array of 'methyl', which is the methyl % of each ALT read
# nb_cpg
# nb_sig_cpg
# cpg: array of pos, effect, fisher_pvalue, ref_cov, and alt_cov
INPUT_FILE = os.environ['ASM_REGION']
OUTPUT_FILE = os.environ['ASM_REGION_PVALUE']
P_VALUE = float(os.environ['P_VALUE'])
BH_THRESHOLD = float(os.environ['BH_THRESHOLD'])
# load from file-like objects
with open(INPUT_FILE) as f:
data = ndjson.load(f)
# Convert the JSON file into a dataframe
df = json_normalize(data)
################################## Calculate p-value of asm_region
# Function to extract Wilcoxon p-value (5-digit rounding)
def wilcoxon_pvalue(row):
try:
_, pvalue = stats.mannwhitneyu(json_normalize(row['ref']), json_normalize(row['alt']), alternative = "two-sided")
return round(pvalue,5)
# If the ref and alt datasets are equal or one is included in the other one:
except ValueError:
return 1
# Create a column with the p-value
df['wilcoxon_pvalue'] = df.apply(wilcoxon_pvalue, axis = 1)
################################## Calculate p-value corrected for multiple testing using Benjamini–Hochberg
df['wilcoxon_corr_pvalue'] = mt.multipletests(df['wilcoxon_pvalue'], alpha = BH_THRESHOLD, method = 'fdr_bh')[1]
df['wilcoxon_corr_pvalue'] = df['wilcoxon_corr_pvalue'].round(5)
################################## Calculate number of significant consecutive CpGs in the same direction.
# Find consecutive significant ASM CpGs that are negative
def consecutive_neg_cpg(row):
if int(row['nb_sig_cpg']) > 1 :
flat_cpg = json_normalize(row['cpg'])
max_nb_consec = 0
current_nb_consec = 0
for index, row in flat_cpg.iterrows():
if (index > 0):
if (flat_cpg.iloc[index-1].fisher_pvalue < P_VALUE and
row.fisher_pvalue < P_VALUE and
np.sign(flat_cpg.iloc[index-1].effect) == -1 and
np.sign(row.effect) == -1):
if (current_nb_consec == 0):
current_nb_consec = 2
else:
current_nb_consec = current_nb_consec + 1
max_nb_consec = max(max_nb_consec, current_nb_consec)
else:
current_nb_consec = 0
return max_nb_consec
else:
return 0
# Find consecutive significant ASM CpGs that are positive
def consecutive_pos_cpg(row):
if int(row['nb_sig_cpg']) > 1 :
flat_cpg = json_normalize(row['cpg'])
max_nb_consec = 0
current_nb_consec = 0
for index, row in flat_cpg.iterrows():
if (index > 0):
if (flat_cpg.iloc[index-1].fisher_pvalue < P_VALUE and
row.fisher_pvalue < P_VALUE and
np.sign(flat_cpg.iloc[index-1].effect) == 1 and
np.sign(row.effect) == 1):
if (current_nb_consec == 0):
current_nb_consec = 2
else:
current_nb_consec = current_nb_consec + 1
max_nb_consec = max(max_nb_consec, current_nb_consec)
else:
current_nb_consec = 0
return max_nb_consec
else:
return 0
# Create a column with the number of consecutive CpGs that have significant ASM in the same direction
df['nb_consec_pos_sig_asm'] = df.apply(consecutive_pos_cpg, axis = 1)
df['nb_consec_neg_sig_asm'] = df.apply(consecutive_neg_cpg, axis = 1)
# Sort the dataframe by the chromosome column (pushing Y chromosomes first) to avoid BigQuery treating the chr column as integers
df_sorted = df.sort_values(by=['chr'], ascending = False)
################################## Save file in JSON format
# Save the sorted dataframe to JSON
df_sorted.to_json(OUTPUT_FILE, orient = "records", lines = True)
|
import numpy as np
import torch
from pytorch3d.transforms import quaternion_multiply, quaternion_apply
class Skeleton:
def __init__(self, offsets, parents, device, joints_left=None, joints_right=None):
assert len(offsets) == len(parents)
self._offsets = torch.tensor(offsets, dtype=torch.float32, device=device)
self._parents = torch.tensor(parents, dtype=torch.int8, device=device)
self._joints_left = joints_left
self._joints_right = joints_right
self._compute_metadata()
def cuda(self):
self._offsets = self._offsets.cuda()
return self
def num_joints(self):
return self._offsets.shape[0]
def offsets(self):
return self._offsets
def parents(self):
return self._parents
def has_children(self):
return self._has_children
def children(self):
return self._children
def forward_kinematics(self, rotations, root_positions):
"""
Perform forward kinematics using the given trajectory and local rotations.
Arguments (where N = batch size, L = sequence length, J = number of joints):
-- rotations: (N, L, J, 4) tensor of unit quaternions describing the local rotations of each joint.
-- root_positions: (N, L, 3) tensor describing the root joint positions.
"""
assert len(rotations.shape) == 4
assert rotations.shape[-1] == 4
positions_world = []
rotations_world = []
expanded_offsets = self._offsets.expand(rotations.shape[0], rotations.shape[1],
self._offsets.shape[0], self._offsets.shape[1])
# Parallelize along the batch and time dimensions
for i in range(self._offsets.shape[0]):
if self._parents[i] == -1:
positions_world.append(root_positions)
rotations_world.append(rotations[:, :, 0])
else:
positions_world.append(quaternion_apply(rotations_world[self._parents[i]], expanded_offsets[:, :, i]) \
+ positions_world[self._parents[i]])
if self._has_children[i]:
rotations_world.append(quaternion_multiply(rotations_world[self._parents[i]], rotations[:, :, i]))
else:
# This joint is a terminal node -> it would be useless to compute the transformation
rotations_world.append(None)
return torch.stack(positions_world, dim=3).permute(0, 1, 3, 2)
def joints_left(self):
return self._joints_left
def joints_right(self):
return self._joints_right
def _compute_metadata(self):
self._has_children = np.zeros(len(self._parents)).astype(bool)
for i, parent in enumerate(self._parents):
if parent != -1:
self._has_children[parent] = True
self._children = []
for i, parent in enumerate(self._parents):
self._children.append([])
for i, parent in enumerate(self._parents):
if parent != -1:
self._children[parent].append(i)
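# Minimal usage sketch (not part of the original module): a two-joint chain with
# identity rotations, so each joint lands at the root position plus its offset.
if __name__ == '__main__':
    offsets = [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
    parents = [-1, 0]
    skel = Skeleton(offsets, parents, device='cpu')
    N, L, J = 2, 3, skel.num_joints()
    rotations = torch.zeros(N, L, J, 4)
    rotations[..., 0] = 1.0  # identity quaternions in (w, x, y, z) convention
    root_positions = torch.zeros(N, L, 3)
    positions = skel.forward_kinematics(rotations, root_positions)
    print(positions.shape)  # torch.Size([2, 3, 2, 3]) -> (N, L, J, 3)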
|
from collections import OrderedDict
import param
from ..io.resources import bundled_files
from ..reactive import ReactiveHTML
from ..util import classproperty
from .grid import GridSpec
class GridStack(ReactiveHTML, GridSpec):
"""
The GridStack layout builds on the GridSpec component and
gridstack.js to allow resizing and dragging items in the grid.
"""
allow_resize = param.Boolean(default=True, doc="""
Allow resizing the grid cells.""")
allow_drag = param.Boolean(default=True, doc="""
Allow dragging the grid cells.""")
state = param.List(doc="""
Current state of the grid (updated as items are resized and
dragged).""")
width = param.Integer(default=None)
height = param.Integer(default=None)
_template = """
<div id="grid" class="grid-stack">
{% for key, obj in objects.items() %}
<div data-id="{{ id(obj) }}" class="grid-stack-item" gs-h="{{ (key[2] or nrows)-(key[0] or 0) }}" gs-w="{{ (key[3] or ncols)-(key[1] or 0) }}" gs-y="{{ (key[0] or 0) }}" gs-x="{{ (key[1] or 0) }}">
<div id="content" class="grid-stack-item-content">${obj}</div>
</div>
{% endfor %}
</div>
""" # noqa
_scripts = {
'render': ["""
const options = {
column: data.ncols,
disableResize: !data.allow_resize,
disableDrag: !data.allow_drag,
margin: 0
}
if (data.nrows)
options.row = data.nrows
if (model.height)
options.cellHeight = Math.floor(model.height/data.nrows)
const gridstack = GridStack.init(options, grid);
function sync_state() {
const items = []
for (const node of gridstack.engine.nodes) {
items.push({id: node.el.getAttribute('data-id'), x0: node.x, y0: node.y, x1: node.x+node.w, y1: node.y+node.h})
}
data.state = items
}
gridstack.on('resizestop', (event, el) => {
window.dispatchEvent(new Event("resize"));
sync_state()
})
gridstack.on('dragstop', (event, el) => {
sync_state()
})
sync_state()
state.gridstack = gridstack
"""],
'allow_drag': ["state.gridstack.enableMove(data.allow_drag)"],
'allow_resize': ["state.gridstack.enableResize(data.allow_resize)"],
'ncols': ["state.gridstack.column(data.ncols)"],
'nrows': ["""
            state.gridstack.opts.row = data.nrows
if (data.nrows && model.height)
state.gridstack.cellHeight(Math.floor(model.height/data.nrows))
else
state.gridstack.cellHeight('auto')
"""]
}
__css_raw__ = [
'https://cdn.jsdelivr.net/npm/gridstack@4.2.5/dist/gridstack.min.css',
'https://cdn.jsdelivr.net/npm/gridstack@4.2.5/dist/gridstack-extra.min.css'
]
__javascript_raw__ = [
'https://cdn.jsdelivr.net/npm/gridstack@4.2.5/dist/gridstack-h5.js'
]
__js_require__ = {
'paths': {
'gridstack': 'https://cdn.jsdelivr.net/npm/gridstack@4.2.5/dist/gridstack-h5'
},
'exports': {
'gridstack': 'GridStack'
},
'shim': {
'gridstack': {
'exports': 'GridStack'
}
}
}
@classproperty
def __js_skip__(cls):
return {
'GridStack': cls.__javascript__[0:1],
}
_rename = {}
@classproperty
def __javascript__(cls):
return bundled_files(cls)
@classproperty
def __css__(cls):
return bundled_files(cls, 'css')
@param.depends('state', watch=True)
def _update_objects(self):
objects = OrderedDict()
object_ids = {str(id(obj)): obj for obj in self}
for p in self.state:
objects[(p['y0'], p['x0'], p['y1'], p['x1'])] = object_ids[p['id']]
self.objects.clear()
self.objects.update(objects)
self._update_sizing()
@param.depends('objects', watch=True)
def _update_sizing(self):
        if self.ncols and self.width:
            width = int(float(self.width)/self.ncols)
        else:
            width = 0
        if self.nrows and self.height:
            height = int(float(self.height)/self.nrows)
        else:
            height = 0
for i, ((y0, x0, y1, x1), obj) in enumerate(self.objects.items()):
x0 = 0 if x0 is None else x0
x1 = (self.ncols) if x1 is None else x1
y0 = 0 if y0 is None else y0
y1 = (self.nrows) if y1 is None else y1
h, w = y1-y0, x1-x0
if self.sizing_mode in ['fixed', None]:
properties = {'width': w*width, 'height': h*height}
else:
properties = {'sizing_mode': self.sizing_mode}
if 'width' in self.sizing_mode:
properties['height'] = h*height
elif 'height' in self.sizing_mode:
properties['width'] = w*width
obj.param.set_param(**{k: v for k, v in properties.items()
if not obj.param[k].readonly})
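# Minimal usage sketch (illustrative, assumes an installed ``panel`` distribution):
#
#   import panel as pn
#   from panel.layout.gridstack import GridStack
#   pn.extension('gridstack')
#   gstack = GridStack(sizing_mode='stretch_both')
#   gstack[0, :] = pn.Spacer()      # header row spanning all columns
#   gstack[1:3, 0:2] = pn.Spacer()  # draggable / resizable cell
#   gstack.servable()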
|
#!/usr/bin/env python
#
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the OpenTimelineIO project
"""Test file for the track algorithms library."""
import unittest
import opentimelineio as otio
import opentimelineio.test_utils as otio_test_utils
class TimelineTrimmingTests(unittest.TestCase, otio_test_utils.OTIOAssertions):
""" test harness for timeline trimming function """
def make_sample_timeline(self):
result = otio.adapters.read_from_string(
"""
{
"OTIO_SCHEMA": "Timeline.1",
"metadata": {},
"name": null,
"tracks": {
"OTIO_SCHEMA": "Stack.1",
"children": [
{
"OTIO_SCHEMA": "Track.1",
"children": [
{
"OTIO_SCHEMA": "Clip.1",
"effects": [],
"markers": [],
"media_reference": null,
"metadata": {},
"name": "A",
"source_range": {
"OTIO_SCHEMA": "TimeRange.1",
"duration": {
"OTIO_SCHEMA": "RationalTime.1",
"rate": 24,
"value": 50
},
"start_time": {
"OTIO_SCHEMA": "RationalTime.1",
"rate": 24,
"value": 0.0
}
}
},
{
"OTIO_SCHEMA": "Clip.1",
"effects": [],
"markers": [],
"media_reference": null,
"metadata": {},
"name": "B",
"source_range": {
"OTIO_SCHEMA": "TimeRange.1",
"duration": {
"OTIO_SCHEMA": "RationalTime.1",
"rate": 24,
"value": 50
},
"start_time": {
"OTIO_SCHEMA": "RationalTime.1",
"rate": 24,
"value": 0.0
}
}
},
{
"OTIO_SCHEMA": "Clip.1",
"effects": [],
"markers": [],
"media_reference": null,
"metadata": {},
"name": "C",
"source_range": {
"OTIO_SCHEMA": "TimeRange.1",
"duration": {
"OTIO_SCHEMA": "RationalTime.1",
"rate": 24,
"value": 50
},
"start_time": {
"OTIO_SCHEMA": "RationalTime.1",
"rate": 24,
"value": 0.0
}
}
}
],
"effects": [],
"kind": "Video",
"markers": [],
"metadata": {},
"name": "Sequence1",
"source_range": null
}
],
"effects": [],
"markers": [],
"metadata": {},
"name": "tracks",
"source_range": null
}
}""",
"otio_json"
)
return result, result.tracks[0]
def test_trim_to_existing_range(self):
original_timeline, original_track = self.make_sample_timeline()
self.assertEqual(
original_track.trimmed_range(),
otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(0, 24),
duration=otio.opentime.RationalTime(150, 24)
)
)
# trim to the exact range it already has
trimmed = otio.algorithms.timeline_trimmed_to_range(
original_timeline,
otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(0, 24),
duration=otio.opentime.RationalTime(150, 24)
)
)
# it shouldn't have changed at all
self.assertIsOTIOEquivalentTo(original_timeline, trimmed)
def test_trim_to_longer_range(self):
original_timeline, original_track = self.make_sample_timeline()
# trim to a larger range
trimmed = otio.algorithms.timeline_trimmed_to_range(
original_timeline,
otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(-10, 24),
duration=otio.opentime.RationalTime(160, 24)
)
)
# it shouldn't have changed at all
self.assertJsonEqual(original_timeline, trimmed)
def test_trim_front(self):
original_timeline, original_track = self.make_sample_timeline()
# trim off the front (clip A and part of B)
trimmed = otio.algorithms.timeline_trimmed_to_range(
original_timeline,
otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(60, 24),
duration=otio.opentime.RationalTime(90, 24)
)
)
self.assertNotEqual(original_timeline, trimmed)
trimmed = trimmed.tracks[0]
self.assertEqual(len(trimmed), 2)
self.assertEqual(
trimmed.trimmed_range(),
otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(0, 24),
duration=otio.opentime.RationalTime(90, 24)
)
)
# did clip B get trimmed?
self.assertEqual(trimmed[0].name, "B")
self.assertEqual(
trimmed[0].trimmed_range(),
otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(10, 24),
duration=otio.opentime.RationalTime(40, 24)
)
)
# clip C should have been left alone
self.assertIsOTIOEquivalentTo(trimmed[1], original_track[2])
def test_trim_end(self):
original_timeline, original_track = self.make_sample_timeline()
# trim off the end (clip C and part of B)
trimmed_timeline = otio.algorithms.timeline_trimmed_to_range(
original_timeline,
otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(0, 24),
duration=otio.opentime.RationalTime(90, 24)
)
)
# rest of the tests are on the track
trimmed = trimmed_timeline.tracks[0]
self.assertNotEqual(original_timeline, trimmed)
self.assertEqual(len(trimmed), 2)
self.assertEqual(
trimmed.trimmed_range(),
otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(0, 24),
duration=otio.opentime.RationalTime(90, 24)
)
)
# clip A should have been left alone
self.assertIsOTIOEquivalentTo(trimmed[0], original_track[0])
# did clip B get trimmed?
self.assertEqual(trimmed[1].name, "B")
self.assertEqual(
trimmed[1].trimmed_range(),
otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(0, 24),
duration=otio.opentime.RationalTime(40, 24)
)
)
def test_trim_with_transitions(self):
original_timeline, original_track = self.make_sample_timeline()
self.assertEqual(
otio.opentime.RationalTime(150, 24),
original_timeline.duration()
)
self.assertEqual(len(original_track), 3)
# add a transition
tr = otio.schema.Transition(
in_offset=otio.opentime.RationalTime(12, 24),
out_offset=otio.opentime.RationalTime(20, 24)
)
original_track.insert(1, tr)
self.assertEqual(len(original_track), 4)
self.assertEqual(
otio.opentime.RationalTime(150, 24),
original_timeline.duration()
)
# if you try to sever a Transition in the middle it should fail
with self.assertRaises(otio.exceptions.CannotTrimTransitionsError):
trimmed = otio.algorithms.timeline_trimmed_to_range(
original_timeline,
otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(5, 24),
duration=otio.opentime.RationalTime(50, 24)
)
)
with self.assertRaises(otio.exceptions.CannotTrimTransitionsError):
trimmed = otio.algorithms.timeline_trimmed_to_range(
original_timeline,
otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(45, 24),
duration=otio.opentime.RationalTime(50, 24)
)
)
trimmed = otio.algorithms.timeline_trimmed_to_range(
original_timeline,
otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(25, 24),
duration=otio.opentime.RationalTime(50, 24)
)
)
self.assertNotEqual(original_timeline, trimmed)
expected = otio.adapters.read_from_string(
"""
{
"OTIO_SCHEMA": "Timeline.1",
"metadata": {},
"name": null,
"tracks": {
"OTIO_SCHEMA": "Stack.1",
"children": [
{
"OTIO_SCHEMA": "Track.1",
"children": [
{
"OTIO_SCHEMA": "Clip.1",
"effects": [],
"markers": [],
"media_reference": null,
"metadata": {},
"name": "A",
"source_range": {
"OTIO_SCHEMA": "TimeRange.1",
"duration": {
"OTIO_SCHEMA": "RationalTime.1",
"rate": 24,
"value": 25
},
"start_time": {
"OTIO_SCHEMA": "RationalTime.1",
"rate": 24,
"value": 25.0
}
}
},
{
"OTIO_SCHEMA": "Transition.1",
"in_offset": {
"OTIO_SCHEMA": "RationalTime.1",
"rate": 24,
"value": 12
},
"metadata": {},
"name": null,
"out_offset": {
"OTIO_SCHEMA": "RationalTime.1",
"rate": 24,
"value": 20
},
"transition_type": null
},
{
"OTIO_SCHEMA": "Clip.1",
"effects": [],
"markers": [],
"media_reference": null,
"metadata": {},
"name": "B",
"source_range": {
"OTIO_SCHEMA": "TimeRange.1",
"duration": {
"OTIO_SCHEMA": "RationalTime.1",
"rate": 24,
"value": 25
},
"start_time": {
"OTIO_SCHEMA": "RationalTime.1",
"rate": 24,
"value": 0.0
}
}
}
],
"effects": [],
"kind": "Video",
"markers": [],
"metadata": {},
"name": "Sequence1",
"source_range": null
}
],
"effects": [],
"markers": [],
"metadata": {},
"name": "tracks",
"source_range": null
}
}
""",
"otio_json"
)
self.assertJsonEqual(expected, trimmed)
if __name__ == '__main__':
unittest.main()
|
import logging
_LOGGER = logging.getLogger(__name__)
from homeassistant.components.device_tracker import SOURCE_TYPE_GPS
from homeassistant.util import slugify
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN, SIGNAL_STATE_UPDATED
async def async_setup_scanner(hass, config, async_see, discovery_info=None):
data = hass.data[DOMAIN]
tracker = AIKADeviceTracker(hass, data, async_see)
await tracker.async_update()
_LOGGER.info("Setup of AIKA device tracker")
async_dispatcher_connect(hass, SIGNAL_STATE_UPDATED, tracker.async_update)
return True
class AIKADeviceTracker(TrackerEntity):
"""AIKA Connected Drive device tracker."""
def __init__(self, hass, data, async_see):
"""Initialize the Tracker."""
self.hass = hass
self.data = data
self.async_see = async_see
self.status = None
self._latitude = None
self._longitude = None
self._attributes = {
"trackr_id": None,
"heading": None,
"speed": None
}
async def async_update(self):
"""Update the device info.
Only update the state in home assistant if tracking in
the car is enabled.
"""
self.status = self.data.status
self._latitude = float(self.status["maika.lat"])
self._longitude = float(self.status["maika.lng"])
self._attributes = {
"trackr_id": self.status["maika.sn"],
"heading": self.get_heading(int(self.status["maika.course"])),
"speed": self.status["maika.speed"]
}
await self.async_see(
dev_id = "maika_{}".format(self.status["maika.iccid"]),
mac = self.status["maika.iccid"],
host_name = self.status["maika.devicename"],
gps = (self._latitude, self._longitude),
attributes = self._attributes,
icon="mdi:car"
)
def get_heading(self, course: int):
if course > 349 or course < 10: return "N"
if course > 9 and course < 80: return "NE"
if course > 79 and course < 100: return "E"
if course > 99 and course < 170: return "SE"
if course > 169 and course < 190: return "S"
if course > 189 and course < 260: return "SW"
if course > 259 and course < 280: return "W"
if course > 279 and course < 350: return "NW"
@property
def dev_id(self):
return "maika_{}".format(slugify(self.status["maika.iccid"]))
@property
def source_type(self):
return SOURCE_TYPE_GPS
@property
def location_accuracy(self):
return 4 # default for GPS
@property
def latitude(self) -> float:
"""Return latitude value of the device."""
return self._latitude
@property
def longitude(self) -> float:
"""Return longitude value of the device."""
return self._longitude
def force_update(self):
return True
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Alexey Pechnikov. All rights reserved.
# https://orcid.org/0000-0001-9626-8615 (ORCID)
# pechnikov@mobigroup.ru (email)
# License: http://opensource.org/licenses/MIT
# process [multi]geometry
def _NCubeGeometryToPolyData(geometry, dem=None):
#from shapely.geometry.base import BaseGeometry, BaseMultipartGeometry
from vtk import vtkPolyData, vtkAppendPolyData, vtkPoints, vtkCellArray, vtkStringArray, vtkIntArray, vtkFloatArray, vtkBitArray
import xarray as xr
import numpy as np
if geometry is None or geometry.is_empty:
return
vtk_points = vtkPoints()
vtk_cells = vtkCellArray()
# get part(s) of (multi)geometry
#if isinstance(geometry, (BaseMultipartGeometry)):
if geometry.type.startswith('Multi') or geometry.type == 'GeometryCollection':
geometries = [geom for geom in geometry]
else:
geometries = [geometry]
for geom in geometries:
# polygon
#print ("geom.type", geom.type)
if geom.type == 'Polygon':
coords = np.asarray(geom.exterior.coords)
else:
coords = np.asarray(geom.coords)
#print ("coords", coords)
xs = coords[:,0]
ys = coords[:,1]
if coords.shape[1] > 2:
zs = np.array(coords[:,2])
else:
zs = np.zeros(len(xs))
#print (xs)
# rasterize geometries (lines only, not points)
# alas, modern scipy or matplotlib don't work in ParaView 5.7 on MacOS
if dem is not None:
# print (dem)
if dem.res and len(xs)>1:
res = min(dem.res)
_xs = [xs[:1]]
_ys = [ys[:1]]
_zs = [zs[:1]]
for (x0,y0,z0,x,y,z) in zip(xs[:-1],ys[:-1],zs[:-1],xs[1:],ys[1:],zs[1:]):
length = max(abs(x-x0),abs(y-y0))
num = round(length/res+0.5)
# print ("num",num)
if num > 1:
_x = np.linspace(x0,x,num)
_y = np.linspace(y0,y,num)
_z = np.linspace(z0,z,num)
_xs.append(_x[1:])
_ys.append(_y[1:])
_zs.append(_z[1:])
else:
_xs.append([x])
_ys.append([y])
_zs.append([z])
xs = np.concatenate(_xs)
ys = np.concatenate(_ys)
zs = np.concatenate(_zs)
zs += dem.sel(x=xr.DataArray(xs), y=xr.DataArray(ys), method='nearest').values
#print ("xs", xs)
mask = np.where(~np.isnan(zs))[0]
mask2 = np.where(np.diff(mask)!=1)[0]+1
xs = np.split(xs[mask], mask2)
ys = np.split(ys[mask], mask2)
zs = np.split(zs[mask], mask2)
for (_xs,_ys,_zs) in zip(xs,ys,zs):
# need to have 2 point or more
#if len(_xs) <= 1:
# continue
vtk_cells.InsertNextCell(len(_xs))
for (x,y,z) in zip(_xs,_ys,_zs):
pointId = vtk_points.InsertNextPoint(x, y, z)
vtk_cells.InsertCellPoint(pointId)
    # not enough valid points
if vtk_points.GetNumberOfPoints() < 1:
return
#print ("GetNumberOfPoints", vtk_points.GetNumberOfPoints())
vtk_polyData = vtkPolyData()
vtk_polyData.SetPoints(vtk_points)
#if geometry.type in ['Point','MultiPoint']:
if geometry.type.endswith('Point'):
vtk_polyData.SetVerts(vtk_cells)
else:
vtk_polyData.SetLines(vtk_cells)
return vtk_polyData
# process geodataframe and xarray raster
def _NCubeGeometryOnTopography(df, dem):
from vtk import vtkPolyData, vtkAppendPolyData, vtkPoints, vtkCellArray, vtkStringArray, vtkIntArray, vtkFloatArray, vtkBitArray
from shapely.geometry.base import BaseGeometry, BaseMultipartGeometry
from shapely.geometry import box
#import xarray as xr
import numpy as np
#print ("_NCUBEGeometryOnTopography start")
dem_extent = dem_crs = None
if dem is not None:
        # TODO: it would be better to use the NODATA values directly
if dem.values.dtype not in [np.dtype('float16'),np.dtype('float32'),np.dtype('float64'),np.dtype('float128')]:
dem.values = dem.values.astype("float32")
# dask array can't be processed by this way
dem.values[dem.values == dem.nodatavals[0]] = np.nan
# NaN border to easy lookup
dem.values[0,:] = np.nan
dem.values[-1,:] = np.nan
dem.values[:,0] = np.nan
dem.values[:,-1] = np.nan
dem_extent = box(dem.x.min(),dem.y.min(),dem.x.max(),dem.y.max())
dem_crs = dem.crs if 'crs' in dem.attrs.keys() else None
#print (dem.values)
df = _NCubeGeoDataFrameToTopography(df, dem_extent, dem_crs)
groups = df.index.unique() ;#[11454:11455]
#print ("groups",groups)
# TEST
#groups = groups[:1]
# iterate blocks
vtk_blocks = []
for group in groups:
#print ("group",group)
# Python 2 string issue wrapped
if hasattr(group, 'encode'):
# select only equals
_df = df[df.index.str.startswith(group)&df.index.str.endswith(group)&(df.index.str.len()==len(group))].reset_index()
else:
_df = df[df.index == group].reset_index()
#print (_df.geometry)
vtk_appendPolyData = vtkAppendPolyData()
# iterate rows with the same attributes and maybe multiple geometries
for rowidx,row in _df.iterrows():
#print ("row", row)
vtk_polyData = _NCubeGeometryToPolyData(row.geometry, dem)
if vtk_polyData is None:
#print ("vtk_polyData is None")
continue
vtk_arrays = _NCubeGeoDataFrameRowToVTKArrays(row.to_dict())
for (vtk_arr, val) in vtk_arrays:
if val is None:
continue
# for _ in range(vtk_polyData.GetNumberOfCells()):
# vtk_arr.InsertNextValue(val)
if isinstance(val, (tuple)):
# if np.any(np.isnan(val)):
# continue
# add vector
for _ in range(vtk_polyData.GetNumberOfCells()):
vtk_arr.InsertNextTuple(val)
vtk_polyData.GetCellData().AddArray(vtk_arr)
else:
# add scalar
for _ in range(vtk_polyData.GetNumberOfCells()):
vtk_arr.InsertNextValue(val)
vtk_polyData.GetCellData().AddArray(vtk_arr)
# compose vtkPolyData
vtk_appendPolyData.AddInputData(vtk_polyData)
# nothing to process
if vtk_appendPolyData.GetNumberOfInputConnections(0) == 0:
continue
vtk_appendPolyData.Update()
vtk_block = vtk_appendPolyData.GetOutput()
vtk_blocks.append((str(group),vtk_block))
#print ("_NCUBEGeometryOnTopography end")
return vtk_blocks
def _NCubeGeoDataFrameToTopography(df, dem_extent, dem_crs=None):
import geopandas as gpd
# extract the geometry coordinate system
if df.crs is not None and df.crs != {}:
df_crs = df.crs
else:
df_crs = None
print ("df_crs",df_crs,"dem_crs",dem_crs)
# reproject when the both coordinate systems are defined and these are different
if df_crs and dem_crs:
# load error fix for paraView 5.8.1rc1 Python3
try:
# ParaView 5.7 Python 2.7
df_extent = gpd.GeoDataFrame([], crs={'init' : dem_crs}, geometry=[dem_extent])
except:
# ParaView 5.8 RC2 Python 3.7
df_extent = gpd.GeoDataFrame([], crs=dem_crs, geometry=[dem_extent])
print ("df_extent", df_extent.crs, df_extent.geometry)
extent_reproj = df_extent.to_crs(df_crs)['geometry'][0]
# if original or reprojected raster extent is valid, use it to crop geometry
print ("crop geometry", extent_reproj.is_valid,extent_reproj.wkt)
if extent_reproj.is_valid:
# geometry intersection to raster extent in geometry coordinate system
df = df[df.geometry.intersects(extent_reproj)].copy()
# dangerous operation, see https://github.com/Toblerity/Shapely/issues/553
df['geometry'] = df.geometry.intersection(extent_reproj)
try:
# ParaView 5.7 Python 2.7
# reproject [cropped] geometry to original raster coordinates if needed
return df.to_crs({'init' : dem_crs})
except:
# ParaView 5.8 RC2 Python 3.7
return df.to_crs(dem_crs)
# let's assume the coordinate systems are the same
if dem_extent is not None:
df = df[df.geometry.intersects(dem_extent)]
# wrap issue with 3D geometry intersection by 2D extent
# if df.geometry[0].has_z:
# print ("df.geometry[0].has_z")
# else:
# df['geometry'] = df.geometry.intersection(dem_extent)
return df
# Load shapefile or geojson
def _NCubeGeoDataFrameLoad(shapename, shapecol=None, shapeencoding=None):
import geopandas as gpd
df = gpd.read_file(shapename, encoding=shapeencoding)
# very important check
df = df[df.geometry.notnull()]
if shapecol is not None:
df = df.sort_values(shapecol).set_index(shapecol)
else:
# to merge all geometries in output
df.index = len(df)*['None']
return df
def _NcubeDataFrameToVTKArrays(df):
from vtk import vtkStringArray, vtkIntArray, vtkFloatArray, vtkBitArray
arrays = []
# Create columns
for colname in df.columns:
dtype = df[colname].dtype
#print (colname, dtype)
if dtype in ['O','str','datetime64']:
vtk_arr = vtkStringArray()
elif dtype in ['int64']:
vtk_arr = vtkIntArray()
elif dtype in ['float64']:
vtk_arr = vtkFloatArray()
elif dtype in ['bool']:
vtk_arr = vtkBitArray()
else:
print ('Unknown Pandas column type', dtype)
vtk_arr = vtkStringArray()
vtk_arr.SetNumberOfComponents(1)
vtk_arr.SetName(colname)
for val in df[colname]:
# some different datatypes could be saved as strings
if isinstance(vtk_arr, vtkStringArray):
val = str(val)
vtk_arr.InsertNextValue(val)
arrays.append(vtk_arr)
return arrays
# list of list of VtkArray's
# we ignore case of scientific notation for numbers
# https://re-thought.com/how-to-suppress-scientific-notation-in-pandas/
def _NCubeGeoDataFrameRowToVTKArrays(items):
#vtkPolyData, vtkAppendPolyData, vtkPoints, vtkCellArray,
from vtk import vtkStringArray, vtkIntArray, vtkFloatArray, vtkBitArray
from shapely.geometry.base import BaseGeometry, BaseMultipartGeometry
vtk_row = []
for (key,value) in items.items():
#print (key,value)
components = 1
# define attribute as array
if isinstance(value, (BaseMultipartGeometry)):
#print ('BaseMultipartGeometry')
continue
elif isinstance(value, (BaseGeometry)):
#print ('BaseGeometry')
continue
elif isinstance(value, (tuple)):
#print ('vtkFloatArray')
vtk_arr = vtkFloatArray()
components = len(value)
# elif isinstance(value, (int)) or (type(value)==str and value.replace('-','',1).isdigit()):
elif isinstance(value, (int)) \
or (type(value)==str and value[0] in ['-','+'] and value[1:].isdigit()) \
or (type(value)==str and value.isdigit()):
# ParaView category editor converts strings to numeric when it's possible
#print('vtkIntArray')
value = int(value)
vtk_arr = vtkIntArray()
# elif isinstance(value, (float)) or (type(value)==str and value.replace('-','',1).replace('.','',1).isdigit()):
elif isinstance(value, (float)) \
or (type(value)==str and value[0] in ['-','+'] and value[1:].replace('.','',1).isdigit()) \
or (type(value)==str and value.replace('.','',1).isdigit()):
# ParaView category editor converts strings to numeric when it's possible
#print ('vtkFloatArray')
value = float(value)
vtk_arr = vtkFloatArray()
elif isinstance(value, (bool)):
#print ('vtkBitArray')
vtk_arr = vtkBitArray()
else:
# some different datatypes could be saved as strings
value = str(value)
vtk_arr = vtkStringArray()
vtk_arr.SetNumberOfComponents(components)
vtk_arr.SetName(key)
vtk_row.append((vtk_arr, value))
return vtk_row
|
#!/usr/bin/env python3
from strips import *
from golog_program import *
from domains.elevator import *
up = lambda: Exec(ElevatorState.up())
down = lambda: Exec(ElevatorState.down())
turn_off = lambda: Exec(ElevatorState.turn_off())
def next_floor_to_serve(fl):
return Test(lambda s: s.light[fl])
def go_floor(fl):
return While(lambda s: s.at != fl,
If(lambda s: s.at < fl,
up(),
down()
)
)
def serve_a_floor():
return Pick(Floor, lambda x:
Sequence(
next_floor_to_serve(x.num),
go_floor(x.num),
turn_off()
)
)
def control():
return Sequence(
While(lambda s: any(s.light.values()),
serve_a_floor()
),
go_floor(1)
)
p = control()
print('initial state: %s' % s)
print('program: %s' % p)
numSolutions = 0
for pn, sn, an in trans_star(p, s, []):
print('solution: %s' % an)
print('resulting state: %s' % sn)
numSolutions += 1
print('%d solutions found.' % numSolutions)
|
from pydantic import BaseModel, root_validator
from app.resources.utils import str_to_isoformat
class MeasurementRequest(BaseModel):
# API expects a json object like:
start_time: str # YYYY-MM-DDThh:mm:ss+00:00
stop_time: str # YYYY-MM-DDThh:mm:ss+00:00
@root_validator(pre=True)
def check_required_items(cls, values):
start_time = values.get("start_time")
stop_time = values.get("stop_time")
if not start_time:
raise ValueError("Field 'start_time' required")
if not stop_time:
raise ValueError("Field 'end_time' required")
if not isinstance(start_time, str):
raise ValueError("Field 'start_time' must be a str")
str_to_isoformat(start_time)
if not isinstance(stop_time, str):
raise ValueError("Field 'stop_time' must be a str")
str_to_isoformat(stop_time)
return values
class TextRequest(BaseModel):
# API expects a json object like:
channel_id: str # welo channel to search in
start_time: str # YYYY-MM-DDThh:mm:ss+00:00.
stop_time: str # YYYY-MM-DDThh:mm:ss+00:00.
text: str # to search
@root_validator(pre=True)
def check_required_items(cls, values):
channel_id = values.get("channel")
start_time = values.get("start_time")
stop_time = values.get("stop_time")
text = values.get("text")
if not channel_id:
raise ValueError("Field 'channel_id' required")
if not isinstance(channel_id, str):
raise ValueError("Field 'channel_id' must be a str")
if not start_time:
raise ValueError("Field 'start_time' required")
if not isinstance(start_time, str):
raise ValueError("Field 'start_time' must be a str")
str_to_isoformat(start_time)
if not stop_time:
raise ValueError("Field 'end_time' required")
if not isinstance(stop_time, str):
raise ValueError("Field 'stop_time' must be a str")
str_to_isoformat(stop_time)
if not text:
raise ValueError("Field 'text' required")
if not isinstance(text, str):
raise ValueError("Field 'text' must be a str")
return values
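# Illustrative sketch (not part of the original module): constructing a
# MeasurementRequest from a payload in the shape the API expects. The
# timestamps are made-up values; `str_to_isoformat` is assumed to accept the
# "YYYY-MM-DDThh:mm:ss+00:00" form and to raise on anything else.
if __name__ == "__main__":
    payload = {
        "start_time": "2021-01-01T00:00:00+00:00",
        "stop_time": "2021-01-01T01:00:00+00:00",
    }
    request = MeasurementRequest(**payload)
    print(request.start_time, request.stop_time)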
|
"""
Django settings for sana_pchr project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
import dj_database_url
from sana_pchr.settings_base import *
from sana_pchr.settings_sms import *
from sana_pchr.settings_update import *
ADMINS = [('', '')]
EMAIL_HOST = ""
EMAIL_HOST_USER = ""
EMAIL_HOST_PASSWORD = ""
EMAIL_USE_TLS = True
EMAIL_PORT = 587
DEBUG = False
SERVER_EMAIL = ''
DATABASES = {'default': dj_database_url.config()}
|
import json
import pickle
import zlib
# from sqlalchemy.orm.interfaces import SessionExtension
from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.types import (
TypeDecorator,
VARCHAR,
PickleType
)
# py3 compatibility
try:
unicode
except NameError:
unicode = str
from six import iteritems
class ASCII(TypeDecorator):
'''
A database string type that only allows ASCII characters.
This is a data type check since all strings in the database could
be unicode.
'''
impl = VARCHAR
def process_bind_param(self, value, dialect):
'''
Run encode on a unicode string to make sure the string only
contains ASCII characterset characters. To avoid another conversion
pass, the original unicode value is passed to the underlying db.
'''
# run an encode on the value with ascii to see if it contains
# non ascii. Ignore the return value because we only want to throw
# an exception if the encode fails. The data can stay unicode for
# insertion into the db.
if value is not None:
value.encode('ascii')
return value
def process_result_value(self, value, dialect):
'''
Run encode on a unicode string coming out of the database to turn the
unicode string back into an encoded python bytestring.
'''
if isinstance(value, unicode):
value = value.encode('ascii')
return value
class JSONEncodedDict(TypeDecorator):
'''
Represents a dictionary as a json-encoded string in the database.
'''
impl = VARCHAR
def process_bind_param(self, value, dialect):
'''
Turn a dictionary into a JSON encoded string on the way into
the database.
'''
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
'''
Turn a JSON encoded string into a dictionary on the way out
of the database.
'''
if value is not None:
value = json.loads(value)
return value
class ZipPickler(object):
'''Simple wrapper for pickle that auto compresses/decompresses values'''
def loads(self, string):
return pickle.loads(zlib.decompress(string))
def dumps(self, obj, protocol):
return zlib.compress(pickle.dumps(obj, protocol))
class ZipPickleType(PickleType):
def __init__(self, *pargs, **kargs):
super(ZipPickleType, self).__init__(pickler=ZipPickler(), *pargs, **kargs)
class MutationDict(Mutable, dict):
'''
A dictionary that automatically emits change events for SQA
change tracking.
Lifted almost verbatim from the SQA docs.
'''
@classmethod
def coerce(cls, key, value):
"Convert plain dictionaries to MutationDict."
if not isinstance(value, MutationDict):
if isinstance(value, dict):
return MutationDict(value)
# this call will raise ValueError
return Mutable.coerce(key, value)
else:
return value
def update(self, *args, **kwargs):
'''
Updates the current dictionary with kargs or a passed in dict.
Calls the internal setitem for the update method to maintain
mutation tracking.
'''
for k, v in iteritems(dict(*args, **kwargs)):
self[k] = v
def __setitem__(self, key, value):
'''Detect dictionary set events and emit change events.'''
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
'''Detect dictionary del events and emit change events.'''
dict.__delitem__(self, key)
self.changed()
def __getstate__(self):
'''Get state returns a plain dictionary for pickling purposes.'''
return dict(self)
def __setstate__(self, state):
'''
Set state assumes a plain dictionary and then re-constitutes a
Mutable dict.
'''
self.update(state)
def pop(self, *pargs, **kargs):
"""
Wrap standard pop() to trigger self.changed()
"""
result = super(MutationDict, self).pop(*pargs, **kargs)
self.changed()
return result
def popitem(self, *pargs, **kargs):
"""
Wrap standard popitem() to trigger self.changed()
"""
result = super(MutationDict, self).popitem(*pargs, **kargs)
self.changed()
return result
__all__ = ['ASCII', 'JSONEncodedDict', 'ZipPickler', 'MutationDict', 'ZipPickleType']
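# Illustrative usage sketch (not part of the original module): wiring
# MutationDict and JSONEncodedDict together so that in-place changes to a
# JSON column are tracked by the SQLAlchemy session. The table and column
# names below are made up for the example.
if __name__ == '__main__':
    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class ExampleJob(Base):
        __tablename__ = 'example_job'
        id = Column(Integer, primary_key=True)
        # as_mutable() wraps the column type so that
        # job.payload['key'] = value marks the row as dirty
        payload = Column(MutationDict.as_mutable(JSONEncodedDict(255)))

    print(ExampleJob.__table__)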
|
from django import template
register = template.Library()
@register.inclusion_tag('project_components/tables/headers.html')
def header(*headers, expand_last_by=0):
context = {}
    context.update({'headers': list(headers)})
return context
|
import numpy as np
def identity():
return np.eye(3, dtype=np.float32)
def scale(s):
matrix = identity()
np.fill_diagonal(matrix[:2, :2], s)
return matrix
def translate(t):
matrix = identity()
matrix[:2, 2] = t
return matrix
def rotate(radian): # clockwise
cos = np.cos(radian)
sin = np.sin(radian)
return np.array(
[
[cos, -sin, 0],
[sin, cos, 0],
[0, 0, 1]
], dtype=np.float32)
def center_rotate_scale_cw(center, angle, s):
center = np.array(center)
return translate(center) @ rotate(angle * np.pi / 180) @ scale(s) @ translate(-center)
def hflip(width):
return np.array([
[-1, 0, width],
[0, 1, 0],
[0, 0, 1]
], dtype=np.float32)
def vflip(height):
return np.array([
[1, 0, 0],
[0, -1, height],
[0, 0, 1]
], dtype=np.float32)
def shear(shr_x, shr_y):
m = identity()
m[0, 1] = shr_x
m[1, 0] = shr_y
return m
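# Illustrative sketch (not part of the original module): the 3x3 matrices
# above act on homogeneous 2D points and can be chained with `@`. The point
# and parameters below are arbitrary example values.
if __name__ == '__main__':
    point = np.array([10.0, 5.0, 1.0], dtype=np.float32)  # (x, y, 1)
    # rotate 90 degrees about (8, 8), then scale by 2 about the same center
    m = center_rotate_scale_cw(center=(8, 8), angle=90, s=2)
    x, y, _ = m @ point
    print(x, y)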
|
# Application condition
beep.id == max_used_id and not cur_node_is_processed
# Reaction
beep_code = "ecrobot_sound_tone(1000, 100, " + str(beep.Volume)+ ");\n"
code.append([beep_code])
id_to_pos_in_code[beep.id] = len(code) - 1
cur_node_is_processed = True
|
# Copyright 2021 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mrack.outputs.utils"""
from unittest.mock import patch
from mrack.outputs.utils import get_external_id
@patch("mrack.outputs.utils.resolve_hostname")
def test_get_external_id(mock_resolve, provisioning_config, host1_aws, metahost1):
"""
Test that resolve_hostname is not called when it is not supposed to be.
"""
dns = "my.dns.name"
mock_resolve.return_value = dns
# By default, it resolves DNS
ext_id = get_external_id(host1_aws, metahost1, provisioning_config)
assert ext_id == dns
# Disable in host metadata
metahost1["resolve_host"] = False
ext_id = get_external_id(host1_aws, metahost1, provisioning_config)
assert ext_id == host1_aws.ip_addr
# Disable in provider
del metahost1["resolve_host"]
provisioning_config["aws"]["resolve_host"] = False
ext_id = get_external_id(host1_aws, metahost1, provisioning_config)
assert ext_id == host1_aws.ip_addr
# Explicitly enabled in provider
provisioning_config["aws"]["resolve_host"] = True
ext_id = get_external_id(host1_aws, metahost1, provisioning_config)
assert ext_id == dns
# Resolution enabled, but nothing is resolved
mock_resolve.return_value = None
ext_id = get_external_id(host1_aws, metahost1, provisioning_config)
assert ext_id == host1_aws.ip_addr
|
# Serializers define the API representation.
from django.contrib.auth import get_user_model
from rest_framework import serializers
User = get_user_model()
class ProfileSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = [
"id",
"email",
"name",
]
|
import pytest
from sympy.abc import a, b, c, d, e
from devito.tools import toposort
from devito import configuration
pytestmark = pytest.mark.skipif(configuration['backend'] == 'yask' or
configuration['backend'] == 'ops',
reason="testing is currently restricted")
@pytest.mark.parametrize('elements, expected', [
([[a, b, c], [c, d, e]], [a, b, c, d, e]),
([[e, d, c], [c, b, a]], [e, d, c, b, a]),
([[a, b, c], [b, d, e]], [a, b, d, c, e]),
([[a, b, c], [d, b, c]], [a, d, b, c]),
([[a, b, c], [c, d, b]], None),
])
def test_toposort(elements, expected):
try:
ordering = toposort(elements)
assert ordering == expected
except ValueError:
assert expected is None
|
from .WeiXinCrawler import WeiXinCrawler
|
import os, sys
from logs import logDecorator as lD
import json
from scipy.interpolate import interp1d
from scipy.integrate import odeint
import numpy as np
import tensorflow as tf
import time
from tensorflow.python.client import timeline
config = json.load(open('../config/config.json'))
logBase = config['logging']['logBase'] + '.lib.multipleODE.multipleODE_tf'
class multipleODE:
    '''Solve a system of coupled ODEs for multiple users with TensorFlow.
    The fast (neurotransmitter) components follow analytic rate equations,
    while the slow components are driven by a small neural network whose
    weights are re-assigned before each `solveY` call.
    '''
@lD.log(logBase + '.__init__')
def __init__(logger, self, Npat, Nnt, Nl, tspan, Atimesj, Btimesj, fj, rj, mj,
stress_t, stress_v, layers, activations, gpu_device='0'):
'''[summary]
[description]
Parameters
----------
logger : {[type]}
[description]
self : {[type]}
[description]
'''
try:
self.Npat = Npat # --> 1 number
self.Nnt = Nnt # --> 1 number
self.Nl = Nl # --> 1 number
self.NperUser = Nnt + Nl # --> 1 number
self.tspan = tspan # --> 1D array
self.fj = fj # --> Npat arrays
self.rj = rj # --> Npat arrays
self.mj = mj # --> Npat arrays
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_device
self.device = ['/device:GPU:{}'.format(g) for g in gpu_device.split(',')]
self.stressInterp = [interp1d(t_vec, s_vec) for t_vec, s_vec in zip(stress_t, stress_v)]
self.AjInterp = [interp1d(tspan, a_vec) for a_vec in Atimesj]
self.BjInterp = [interp1d(tspan, b_vec) for b_vec in Btimesj]
activation_map = { 'tanh' : tf.nn.tanh,
'sigmoid' : tf.nn.sigmoid,
'relu' : tf.nn.relu,
'linear' : tf.identity }
activations = [ activation_map[a] for a in activations ]
start = time.time()
for d in self.device:
with tf.device(d):
self.tf_opsFlow(layers=layers, activations=activations)
timespent = time.time() - start
print('graphTime', timespent)
# self.options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
# self.run_metadata = tf.RunMetadata()
except Exception as e:
logger.error('Unable to initialize multipleODE \n{}'.format(str(e)))
@lD.log(logBase + '.tf_opsFlow')
def tf_opsFlow(logger, self, layers, activations):
try:
with tf.variable_scope('weights'):
self.fj_tf = [ tf.Variable(fj_vec, dtype=tf.float32, name='fj_{}'.format(index))
for index, fj_vec in enumerate(self.fj)]
self.rj_tf = [ tf.Variable(rj_vec, dtype=tf.float32, name='rj_{}'.format(index))
for index, rj_vec in enumerate(self.rj)]
self.mj_tf = [ tf.Variable(mj_vec, dtype=tf.float32, name='mj_{}'.format(index))
for index, mj_vec in enumerate(self.mj)]
self.NNwts_tf = []
self.NNb_tf = []
self.NNact_tf = []
self.Taus_tf = []
prev_l = self.Nnt + 1
for i, l in enumerate(layers):
wts = tf.Variable(np.random.random(size=(l, prev_l)), dtype=tf.float32, name='wts_{}'.format(i))
prev_l = l
bias = tf.Variable(np.random.rand(), dtype=tf.float32, name='bias_{}'.format(i))
act = activations[i]
tau = tf.Variable(np.random.rand(), dtype=tf.float32, name='tau_{}'.format(i))
self.NNwts_tf.append(wts)
self.NNb_tf.append(bias)
self.NNact_tf.append(act)
self.Taus_tf.append(tau)
with tf.variable_scope('rhs_operation'):
self.y_tf = tf.placeholder(dtype=tf.float32, name='y_tf')
self.t = tf.placeholder(dtype=tf.float32, name='dt')
self.stress_val = tf.placeholder(dtype=tf.float32, name='stress_val')
self.Aj = tf.placeholder(dtype=tf.float32, name='Aj')
self.Bj = tf.placeholder(dtype=tf.float32, name='Bj')
def get_slowVaryingComponents(j, Nnt_list, stressVal, slowComponents):
with tf.variable_scope('cpnt{}'.format(j)):
# concatenate to [ n1, n2, n3, s ]
res = tf.concat([Nnt_list, [stressVal]], axis=0, name='concat{}'.format(j))
res = tf.reshape(res, [-1, 1])
for index, (w, b, a) in enumerate(zip(self.NNwts_tf, self.NNb_tf, self.NNact_tf)):
res = tf.matmul(w, res) + b
res = a(res)
res = res[0][0] - slowComponents[j] / self.Taus_tf[j]
return res
def rhs_operation(user):
with tf.variable_scope('user_{:05}'.format(user)):
# Extract [n1, n2, n3]
Nnt_list = self.y_tf[ (user*self.NperUser) : (user*self.NperUser + self.Nnt)]
stressVal = self.stress_val[user]
slowComponents = self.y_tf[ (user*self.NperUser) : (user*self.NperUser + 3)]
with tf.variable_scope('Nnt_parts'):
# Calculate the neurotransmitters
Nnt_result = self.fj_tf[user] - self.rj_tf[user] * Nnt_list / ( 1 + self.Aj[user] ) \
- self.mj_tf[user] * Nnt_list / ( 1 + self.Bj[user] )
with tf.variable_scope('slow_Components'):
# Calculate long-term dependencies
# This is the NN([ n1, n2, n3, s ])
res_ls = [get_slowVaryingComponents(j, Nnt_list, stressVal, slowComponents) for j in range(self.Nl)]
results = tf.concat([Nnt_result, res_ls], axis=0)
return results
self.rhs_results = [rhs_operation(user) for user in range(self.Npat)]
self.rhs_results = tf.concat(self.rhs_results, axis=0)
self.init = tf.global_variables_initializer()
config = tf.ConfigProto(gpu_options={'allow_growth':True})
self.sess = tf.Session(config=config)
tfwriter = tf.summary.FileWriter('./tensorlog/', self.sess.graph)
tfwriter.close()
self.sess.run( self.init )
except Exception as e:
logger.error('Unable to create tensorflow ops flow \n{}'.format(str(e)))
@lD.log(logBase + '.interpolate')
def interpolate(logger, self, dx_T, dy_T, x, name='interpolate' ):
try:
with tf.variable_scope(name):
with tf.variable_scope('neighbors'):
delVals = dx_T - x
ind_1 = tf.argmax(tf.sign( delVals ))
ind_0 = ind_1 - 1
with tf.variable_scope('calculation'):
value = tf.cond( x[0] <= dx_T[0],
lambda : dy_T[:1],
lambda : tf.cond(
x[0] >= dx_T[-1],
lambda : dy_T[-1:],
lambda : (dy_T[ind_0] + \
(dy_T[ind_1] - dy_T[ind_0]) \
*(x-dx_T[ind_0])/ \
(dx_T[ind_1]-dx_T[ind_0]))
))
result = tf.multiply(value[0], 1, name='y')
return result
except Exception as e:
logger.error('Unable to interpolate \n{}'.format(str(e)))
# @lD.log(logBase + '.dy')
def dy(self, y, t):
'''[summary]
[description]
Arguments:
y {[type]} -- [description]
t {[type]} -- [description]
'''
try:
rhs_results = self.sess.run( self.rhs_results,
# options=self.options, run_metadata=self.run_metadata,
feed_dict={
self.y_tf : y,
self.t : [t],
self.stress_val : [interp(t) for interp in self.stressInterp],
self.Aj : [interp(t) for interp in self.AjInterp],
self.Bj : [interp(t) for interp in self.BjInterp]
})
return rhs_results
except Exception as e:
# logger.error('Unable to get dy result \n{}'.format(str(e)))
print('Unable to get dy result \n{}'.format(str(e)))
@lD.log(logBase + '.solveY')
def solveY(logger, self, y0, t, args, useJac=False, full_output=False):
try:
NNwts, NNb, NNact, Taus = args
for i, (weights, bias, tau) in enumerate(zip(NNwts, NNb, Taus)):
self.sess.run( self.NNwts_tf[i].assign(weights) )
self.sess.run( self.NNb_tf[i].assign(bias) )
self.sess.run( self.Taus_tf[i].assign(tau) )
jac = None
if useJac:
jac = self.jac
start = time.time()
result_dict = {}
if full_output:
y_t, result_dict = odeint(self.dy, y0, t, Dfun=jac, full_output=True, mxstep=50000)
else:
y_t = odeint(self.dy, y0, t, Dfun=jac, full_output=False)
timespent = time.time() - start
print('odeTime', timespent)
# fetched_timeline = timeline.Timeline(self.run_metadata.step_stats)
# chrome_trace = fetched_timeline.generate_chrome_trace_format()
# with open('timeline_step.json', 'w') as f:
# f.write(chrome_trace)
# if useJac:
# print('')
return y_t, result_dict
except Exception as e:
logger.error('Unable to solve Y \n{}'.format(str(e)))
class multipleODE_new:
    '''Variant of `multipleODE` that evaluates the analytic right-hand side
    for all users in a single vectorised TensorFlow operation.
    '''
@lD.log(logBase + '.__init__')
def __init__(logger, self, Npat, Nnt, Nl, tspan, Atimesj, Btimesj, fj, rj, mj,
stress_t, stress_v, layers, activations, gpu_device='0'):
'''[summary]
[description]
Parameters
----------
logger : {[type]}
[description]
self : {[type]}
[description]
'''
try:
self.Npat = Npat # --> 1 number
self.Nnt = Nnt # --> 1 number
self.Nl = Nl # --> 1 number
self.NperUser = Nnt + Nl # --> 1 number
self.tspan = tspan # --> 1D array
self.fj = fj # --> Npat arrays
self.rj = rj # --> Npat arrays
self.mj = mj # --> Npat arrays
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_device
self.device = ['/device:GPU:{}'.format(g) for g in gpu_device.split(',')]
self.stressInterp = [interp1d(t_vec, s_vec) for t_vec, s_vec in zip(stress_t, stress_v)]
self.AjInterp = [interp1d(tspan, a_vec) for a_vec in Atimesj]
self.BjInterp = [interp1d(tspan, b_vec) for b_vec in Btimesj]
activation_map = { 'tanh' : tf.nn.tanh,
'sigmoid' : tf.nn.sigmoid,
'relu' : tf.nn.relu,
'linear' : tf.identity }
activations = [ activation_map[a] for a in activations ]
start = time.time()
for d in self.device:
with tf.device(d):
self.tf_opsFlow(layers=layers, activations=activations)
timespent = time.time() - start
print('graphTime', timespent)
# self.options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
# self.run_metadata = tf.RunMetadata()
except Exception as e:
logger.error('Unable to initialize multipleODE \n{}'.format(str(e)))
@lD.log(logBase + '.tf_opsFlow')
def tf_opsFlow(logger, self, layers, activations):
try:
with tf.variable_scope('weights'):
self.fj_tf = tf.Variable(self.fj, dtype=tf.float32, name='fj')
self.rj_tf = tf.Variable(self.rj, dtype=tf.float32, name='rj')
self.mj_tf = tf.Variable(self.mj, dtype=tf.float32, name='mj')
self.NNwts_tf = []
self.NNb_tf = []
self.NNact_tf = []
self.Taus_tf = []
prev_l = self.Nnt + 1
for i, l in enumerate(layers):
wts = tf.Variable(np.random.random(size=(l, prev_l)), dtype=tf.float32, name='wts_{}'.format(i))
prev_l = l
bias = tf.Variable(np.random.rand(), dtype=tf.float32, name='bias_{}'.format(i))
act = activations[i]
tau = tf.Variable(np.random.rand(), dtype=tf.float32, name='tau_{}'.format(i))
self.NNwts_tf.append(wts)
self.NNb_tf.append(bias)
self.NNact_tf.append(act)
self.Taus_tf.append(tau)
with tf.variable_scope('rhs_operation'):
self.y_tf = tf.placeholder(dtype=tf.float32, name='y_tf')
self.stress_val = tf.placeholder(dtype=tf.float32, name='stress_val')
self.Aj = tf.placeholder(dtype=tf.float32, name='Aj')
self.Bj = tf.placeholder(dtype=tf.float32, name='Bj')
def get_slowVaryingComponents(j, Nnt_list, stressVal, slowComponents):
with tf.variable_scope('cpnt{}'.format(j)):
# concatenate to [ n1, n2, n3, s ]
res = tf.concat([Nnt_list, [stressVal]], axis=0, name='concat{}'.format(j))
res = tf.reshape(res, [-1, 1])
for index, (w, b, a) in enumerate(zip(self.NNwts_tf, self.NNb_tf, self.NNact_tf)):
res = tf.matmul(w, res) + b
res = a(res)
res = res[0][0] - slowComponents[j] / self.Taus_tf[j]
return res
def rhs_operation(user):
with tf.variable_scope('user_{:05}'.format(user)):
# Extract [n1, n2, n3]
Nnt_list = self.y_tf[ (user*self.NperUser) : (user*self.NperUser + self.Nnt)]
stressVal = self.stress_val[user]
slowComponents = self.y_tf[ (user*self.NperUser) : (user*self.NperUser + 3)]
with tf.variable_scope('Nnt_parts'):
# Calculate the neurotransmitters
Nnt_result = self.fj_tf[user] - self.rj_tf[user] * Nnt_list / ( 1 + self.Aj[user] ) \
- self.mj_tf[user] * Nnt_list / ( 1 + self.Bj[user] )
with tf.variable_scope('slow_Components'):
# Calculate long-term dependencies
# This is the NN([ n1, n2, n3, s ])
res_ls = [get_slowVaryingComponents(j, Nnt_list, stressVal, slowComponents) for j in range(self.Nl)]
results = tf.concat([Nnt_result, res_ls], axis=0)
return results
# self.rhs_results = [rhs_operation(user) for user in range(self.Npat)]
# self.rhs_results = tf.concat(self.rhs_results, axis=0)
self.rhs_results = self.fj_tf - self.rj_tf * self.y_tf / ( 1 + self.Aj ) \
- self.mj_tf * self.y_tf / ( 1 + self.Bj )
self.rhs_results = tf.reshape(self.rhs_results, shape=[-1], name='flattenOps')
self.init = tf.global_variables_initializer()
config = tf.ConfigProto(gpu_options={'allow_growth':True})
self.sess = tf.Session(config=config)
tfwriter = tf.summary.FileWriter('./tensorlog/', self.sess.graph)
tfwriter.close()
self.sess.run( self.init )
except Exception as e:
logger.error('Unable to create tensorflow ops flow \n{}'.format(str(e)))
@lD.log(logBase + '.interpolate')
def interpolate(logger, self, dx_T, dy_T, x, name='interpolate' ):
try:
with tf.variable_scope(name):
with tf.variable_scope('neighbors'):
delVals = dx_T - x
ind_1 = tf.argmax(tf.sign( delVals ))
ind_0 = ind_1 - 1
with tf.variable_scope('calculation'):
value = tf.cond( x[0] <= dx_T[0],
lambda : dy_T[:1],
lambda : tf.cond(
x[0] >= dx_T[-1],
lambda : dy_T[-1:],
lambda : (dy_T[ind_0] + \
(dy_T[ind_1] - dy_T[ind_0]) \
*(x-dx_T[ind_0])/ \
(dx_T[ind_1]-dx_T[ind_0]))
))
result = tf.multiply(value[0], 1, name='y')
return result
except Exception as e:
logger.error('Unable to interpolate \n{}'.format(str(e)))
# @lD.log(logBase + '.dy')
def dy(self, y, t):
'''[summary]
[description]
Arguments:
y {[type]} -- [description]
t {[type]} -- [description]
'''
try:
rhs_results = self.sess.run( self.rhs_results,
# options=self.options, run_metadata=self.run_metadata,
feed_dict={
self.y_tf : np.array(y).reshape(self.Npat, -1),
self.stress_val : np.array([interp(t) for interp in self.stressInterp]).reshape(self.Npat, -1),
self.Aj : np.array([interp(t) for interp in self.AjInterp]).reshape(self.Npat, -1),
self.Bj : np.array([interp(t) for interp in self.BjInterp]).reshape(self.Npat, -1)
})
return rhs_results
except Exception as e:
# logger.error('Unable to get dy result \n{}'.format(str(e)))
print('Unable to get dy result \n{}'.format(str(e)))
@lD.log(logBase + '.solveY')
def solveY(logger, self, y0, t, args, useJac=False, full_output=False):
try:
NNwts, NNb, NNact, Taus = args
for i, (weights, bias, tau) in enumerate(zip(NNwts, NNb, Taus)):
self.sess.run( self.NNwts_tf[i].assign(weights) )
self.sess.run( self.NNb_tf[i].assign(bias) )
self.sess.run( self.Taus_tf[i].assign(tau) )
jac = None
if useJac:
jac = self.jac
start = time.time()
result_dict = {}
if full_output:
y_t, result_dict = odeint(self.dy, y0, t, Dfun=jac, full_output=True, mxstep=50000)
else:
y_t = odeint(self.dy, y0, t, Dfun=jac, full_output=False)
timespent = time.time() - start
print('odeTime', timespent)
# fetched_timeline = timeline.Timeline(self.run_metadata.step_stats)
# chrome_trace = fetched_timeline.generate_chrome_trace_format()
# with open('timeline_step.json', 'w') as f:
# f.write(chrome_trace)
# if useJac:
# print('')
return y_t, result_dict
except Exception as e:
logger.error('Unable to solve Y \n{}'.format(str(e)))
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
### Create data for y = 3x^2 - 2 with added noise
x = np.random.rand(100, 1)  # generate 100 random numbers between 0 and 1
x = x * 4 - 2  # change the value range to -2..2
y = 3 * x**2 - 2  # y = 3x^2 - 2
y += np.random.randn(100, 1)  # add standard normal noise (mean 0, standard deviation 1)
### Fit the model
from sklearn import linear_model
model = linear_model.LinearRegression()
model.fit(x**2, y)  # pass x squared as the feature
### Print the coefficient, intercept and coefficient of determination
print('coefficient', model.coef_)
print('intercept', model.intercept_)
print('coefficient of determination', model.score(x**2, y))
### Plot the data and the fit
plt.scatter(x, y, marker='+')
plt.scatter(x, model.predict(x**2), marker='o')  # pass x squared to predict
plt.show()
|
from collections import defaultdict
class trienode():
def __init__(self, isleaf=False):
self.isLeaf = isleaf
self.mapping = defaultdict(trienode)
def get_mapping(self):
return self.mapping
def is_leaf(self):
return self.isLeaf
class trie:
def __init__(self, head=None):
self.root = None
self.trie_size = 0
def insert(self, value):
curr = self.root
        if self.root is None:
self.root = trienode()
curr = self.root
for token in value:
if token in curr.get_mapping():
curr = curr.get_mapping()[token]
else:
curr.get_mapping()[token] = trienode()
curr = curr.get_mapping()[token]
curr.isLeaf = True
self.trie_size = self.trie_size + 1
def search(self, value):
        if self.root is None:
return False
curr = self.root
for token in value:
if token not in curr.get_mapping():
return False
else:
curr = curr.get_mapping()[token]
return curr.isLeaf
def __dfs(self, start_char, next_link, helper, sugg):
helper = helper + start_char
if next_link.is_leaf():
sugg.append(helper)
for k, v in next_link.get_mapping().items():
self.__dfs(k, v, helper, sugg)
def suggestions(self, prefix):
sugg = []
        if self.root is None:
return sugg
curr = self.root
for token in prefix:
if token not in curr.get_mapping():
return sugg
else:
curr = curr.get_mapping()[token]
helper = ''
for key, val in curr.get_mapping().items():
self.__dfs(key, val, helper, sugg)
for i in range(len(sugg)):
sugg[i] = prefix + sugg[i]
return sugg
def size(self):
return self.trie_size
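# Illustrative sketch (not part of the original module): basic insert/search
# and prefix suggestions with the trie above; the words are arbitrary.
if __name__ == '__main__':
    t = trie()
    for word in ['car', 'card', 'care', 'dog']:
        t.insert(word)
    print(t.search('car'))       # True
    print(t.search('ca'))        # False, 'ca' is only a prefix
    print(t.suggestions('car'))  # ['card', 'care'] (order may vary)
    print(t.size())              # 4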
|
import sys
PATH = [
"src/comms/mqtt"
]
for lib in PATH:
sys.path.append(lib)
import mqtt_net as mqtt
# defining for test script
if __name__ == '__main__':
# Note: make sure you are reading values from a subscriber on the same board to see the result
# Start a client
mqtt_tx = mqtt.MQTTLink("ece180d/MEAT/general", user_id = 'Jack')
message = {
'sender' : 'Jack',
'color' : (255, 0, 0),
'data' : 'This is a test message.',
'time' : {
'hour': 11,
'minute': 52,
'second': 0
},
'emoji' : [1, 5]
}
mqtt_tx.send(message)
|
# coding: utf-8
# import the packages
import numpy as np
import matplotlib.pyplot as plt
from numpy import matlib
import math
from scipy import stats
import imageio
from skimage.transform import resize
import skimage
import zlib, sys
import gzip
import matplotlib
import scipy
import copy
# define a function to covert the image to a gray scale image
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
# define a function to get the proper Haar matrix and permutation matrix
def GetHaarMatrices(N):
Q = np.matrix("[1,1;1,-1]")
M = int(N/2)
T = np.kron(matlib.eye(M),Q)/np.sqrt(2)
P = np.vstack((matlib.eye(N)[::2,:],matlib.eye(N)[1::2,:]))
return T,P
# reads in a jpeg image
A = imageio.imread('image.jpg')
# show the original image just read in
plt.imshow(A, cmap = plt.get_cmap('gray'))
plt.title("original image")
plt.show()
# resize the image (before applying the gray-scale function) to a 256 by 256 matrix
A = skimage.transform.resize(A, [256, 256], mode='constant')
# show the jpeg image in a figure
plt.imshow(A, cmap = plt.get_cmap('gray'))
plt.title("original image after resize")
plt.show()
# Apply the rgb2gray function to the image
A = rgb2gray(A)
# show the jpeg image in a figure
plt.imshow(A, cmap = plt.get_cmap('gray'))
plt.title("Gray-scale after resize")
plt.show()
# make a deep copy of resize&gray-scale image
B = copy.deepcopy(A)
# set size to 256
N = 256
# Doing full-level Encoding (Forward Haar Transform)
for i in range(int(np.log2(N))):
T,P = GetHaarMatrices(N)
#print(T.shape)
B[0:N, 0:N] = P*T*B[0:N, 0:N]*T.T*P.T
N = int(N/2)
# show the result of full-level encoding
plt.figure()
plt.imshow(B[127:256,127:256], cmap = plt.get_cmap('gray'))
plt.title("Full-levelForward Haar Transform")
plt.show()
# print the info of B
print(B)
# make 2 deep copy of B
X = copy.deepcopy(B)
Y = copy.deepcopy(B)
# flatten Y (a copy of B, a 2D numpy array) into a 1D numpy array
Y = Y.ravel()
# print the shape of the flattened array
print(Y.shape)
# create a codebook to store the sign of the numpy array elements
sign = np.ones(len(Y),)
# set the entry to -1 wherever the corresponding element in Y is negative
sign[Y < 0] = -1
# print the sign codebook
print(sign)
# make a deep copy of Y to compute the threshold without affecting Y
Z = copy.deepcopy(Y)
# sort the numpy array by its absolute value
Z = np.sort(abs(Z))
# prompt the user for the percentage of smallest elements to set to zero
percent = input('What percentage of the smallest elements do you want to set to zero? ')
# define thresholding function to find the threshold
def thresholding(source, percentage):
index = 0
index = math.floor(len(source) * percentage / 100)
threshold = source[index]
return threshold
# apply the thresholding function to find the threshold th
th = thresholding(Z, int(percent))
print(th)
# create an empty list to store the pixel which set to zeros
data = []
# implementation of the threshold process to numpy array X
for i in range(X.shape[0]):
for j in range(X.shape[1]):
if X[i][j] > th:
continue
else:
data.append(X[i][j])
X[i][j] = 0
#print(len(data))
# show the image after apply to threshold
plt.imshow(X[127:256,127:256], cmap = plt.get_cmap('gray'))
plt.title("After Thresholding")
plt.show()
# print the matrix out the make sure A apply to the threshold function correctly
print(X)
# make a copy of image after thresholding as M
M = copy.deepcopy(X)
# read M element by element, skip zero entries,
# and replace each nonzero (positive) element with its base-2 logarithm
# (this is the log-quantization step)
def log_quantiz(inp):
for i in range(inp.shape[0]):
for j in range(inp.shape[1]):
if inp[i][j] == 0:
continue
else:
inp[i][j] = math.log2(inp[i][j])
# Apply log_quantiz function to M
log_quantiz(M)
# show the image after apply to log quantization
plt.imshow(M[127:256,127:256], cmap = plt.get_cmap('gray'))
plt.title("After Log-quantization")
plt.show()
#print(M)
# make a copy of the image after log-quantization as N
N = copy.deepcopy(M)
# start of the lossless compression by using package
# compress the image
compressed_data = zlib.compress(N, 9)
compress_ratio = float(sys.getsizeof(compressed_data))/sys.getsizeof(N)
# print out the percent of lossless compression
#print("Size before compress:", sys.getsizeof(N))
#print("Size after compress:", sys.getsizeof(compressed_data))
print("compress_ratio:", compress_ratio * 100, "%")
# ----------------------------------------------------------------
# start of decompressed image
# ----------------------------------------------------------------
# convert the lossless compressed image by using zlib
decompressed_data = zlib.decompress(compressed_data)
print(sys.getsizeof(decompressed_data))
# convert the bytes type to numpy array
decompressed_data = np.frombuffer(decompressed_data)
# check that no information was lost through compression and decompression
print(decompressed_data == M.ravel())
# convert the 1D decompressed data into a 2D numpy array E
E = np.reshape(decompressed_data, (256, 256))
# show the image before reverse log quantization
plt.imshow(E[127:256,127:256], cmap = plt.get_cmap('gray'))
plt.title("decompress the compressed data")
plt.show()
# reverse log quantization to E
# make a deep copy of E as F
F = copy.deepcopy(E)
# read F element by element, skip zero entries,
# and replace each nonzero element with 2 raised to that power
# (the inverse of the log-quantization step)
def reverse_log_quantiz(inp):
for i in range(inp.shape[0]):
for j in range(inp.shape[1]):
if inp[i][j] == 0:
continue
else:
inp[i][j] = math.pow(2, inp[i][j])
# Apply reverse_log_quantiz function to M
reverse_log_quantiz(F)
# show the image after apply to reverse log quantization
plt.imshow(F[127:256,127:256], cmap = plt.get_cmap('gray'))
plt.title("reverse of log-quantization")
plt.show()
print(F)
# reverse threshold to F
# make a deep copy of F as G
G = copy.deepcopy(F)
# walk the array and refill every zero entry with the value saved in the
# `data` codebook before thresholding, restoring the thresholded pixels
def reverse_thresholding(source, replacement):
    index = 0
    for i in range(source.shape[0]):
        for j in range(source.shape[1]):
            if source[i][j] == 0:
                source[i][j] = replacement[index]
                index += 1
            else:
                continue
# Apply reverse thresholding function to M
reverse_thresholding(G, data)
# show the image after apply to reverse threshold
plt.imshow(G[127:256,127:256], cmap = plt.get_cmap('gray'))
plt.title("Reverse of Thresholding")
plt.show()
print(G)
# make a deep copy of G
J = copy.deepcopy(G)
# get number of times of decoding and the starting point
N = len(J)
times = int(np.log2(N))
start = 2
# Doing full-level decoding (Backward Haar Transform)
for i in range(times):
T,P = GetHaarMatrices(start)
J[0:start, 0:start] = T.T*P.T*J[0:start, 0:start]*P*T
start = 2 * start
# show the result of full-level decoding
plt.figure()
plt.imshow(J, cmap = plt.get_cmap('gray'))
plt.show()
# print the info of J
print(J)
|
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from sequgen.dimension import Dimension
from sequgen.parameter_space import ParameterSpace
from sequgen.samplers.sample_uniform_random import sample_uniform_random
class TestParameterSpaceWithSinglePlainDimension:
@pytest.fixture
def space(self, random_seeded):
return ParameterSpace(
[Dimension('somename', -10, 10)],
sample_uniform_random
)
def test_repr(self, space: ParameterSpace):
expected = "1-D parameter space with dimensions: 'somename'"
assert repr(space) == expected
def test_format_str(self, space: ParameterSpace):
expected = 'somename={somename:.2f}'
assert space.format_str() == expected
def test_sample(self, space: ParameterSpace):
result = space.sample()
assert result.keys() == {'somename'}
assert_almost_equal(result['somename'], np.array([-2.5091976]))
def test_default_sampler(random_seeded):
space = ParameterSpace([Dimension('somename', -10, 10)])
assert space.sampler == sample_uniform_random
|
"""
Classes in this file are standalone because we don't want to impose a false hierarchy
between two classes. That is, inheritance may imply a hierarchy that isn't real.
"""
class Settings(object):
kExactTestBias = 1.0339757656912846e-25
kSmallEpsilon = 5.684341886080802e-14
kLargeEpsilon = 1e-07
SMALL_EPSILON = 5.684341886080802e-14
local_scratch = '/app/scratch'
python = 'python'
plink = "/srv/gsfs0/software/plink/1.90/plink"
redis_uri = 'redis://hydra_redis:6379'
class Commands(object):
HELP = "HELP"
INIT = "INIT"
INIT_STATS = 'INIT_STATS'
QC = "QC"
PCA = "PCA"
ECHO = "ECHO"
ASSO = "ASSO"
EXIT = "EXIT"
all_commands = [HELP, INIT, QC, PCA, ASSO, EXIT] # used by v.1 interface
commands_with_parms = [QC, PCA, ASSO]
class Thresholds(object):
# ECHO options
ECHO_COUNTS = 20
# QC Options
QC_hwe = 1e-10
QC_maf = 0.01
# PCA Options
PCA_maf = 0.1
PCA_ld_window = 50
PCA_ld_threshold = 0.2
PCA_pcs = 10
# Association Options
ASSO_pcs = 10
class Options(object):
# HELP = Commands.HELP
INIT = Commands.INIT
QC = Commands.QC
PCA = Commands.PCA
ASSO = Commands.ASSO
EXIT = Commands.EXIT
HWE = "HWE"
MAF = "MAF"
MPS = "MPS"
MPI = "MPI"
SNP = "snp"
LD = "LD"
NONE = "NONE"
class QCOptions(object):
HWE = Options.HWE
MAF = Options.MAF
MPS = Options.MPS
MPI = Options.MPI
SNP = Options.SNP
all_options = [HWE, MAF, MPS, MPI, SNP]
class PCAOptions(object):
HWE = Options.HWE
MAF = Options.MAF
MPS = Options.MPS
MPI = Options.MPI
SNP = Options.SNP
LD = Options.LD
NONE = Options.NONE
all_options = [HWE, MAF, MPS, MPI, SNP, LD, NONE]
class QCFilterNames(object):
QC_HWE = Options.HWE
QC_MAF = Options.MAF
QC_MPS = Options.MPS
QC_MPI = Options.MPI
QC_snp = Options.SNP
class PCAFilterNames(object):
PCA_HWE = Options.HWE
PCA_MAF = Options.MAF
PCA_MPS = Options.MPS
PCA_MPI = Options.MPI
PCA_snp = Options.SNP
PCA_LD = Options.LD
PCA_NONE = Options.NONE
external_host = "hydratest23.azurewebsites.net"
class ServerHTTP(object):
listen_host = '0.0.0.0'
external_host = external_host#"localhost"#external_host#'hydraapp.azurewebsites.net'#"localhost"#
port = '9001'
max_content_length = 1024 * 1024 * 1024 # 1 GB
wait_time = 0.5 # for the time.sleep() hacks
class ClientHTTP(object):
default_max_content_length = 1024 * 1024 * 1024 # 1 GB
default_listen_host = '0.0.0.0'
    default_external_host = external_host
clients = [{
'name': 'Center1',
'listen_host': default_listen_host,
'external_host': default_external_host,
'port': 9002,
'max_content_length': default_max_content_length
},
{
'name': 'Center2',
'listen_host': default_listen_host,
'external_host': default_external_host,
'port': 9003,
'max_content_length': default_max_content_length
},
{
'name': 'Center3',
'listen_host': default_listen_host,
'external_host': default_external_host,
'port': 9004,
'max_content_length': default_max_content_length
}
]
|
"""
Common methods and constants
"""
UUID_PATTERN = r'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
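# Illustrative sketch (not part of the original module): the pattern can be
# embedded in larger regular expressions (e.g. URL routes) or used directly;
# a standalone full-match check could look like this.
if __name__ == '__main__':
    import re
    print(bool(re.fullmatch(UUID_PATTERN, '123e4567-e89b-12d3-a456-426614174000')))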
|
"""
File: 405.py
Title: Convert a Number to Hexadecimal
Difficulty: Easy
URL: https://leetcode.com/problems/convert-a-number-to-hexadecimal/
"""
import unittest
class Solution:
def toHex(self, num: int) -> str:
if num == 0:
return "0"
if num < 0:
num = 0xffffffff + num + 1
hex_str = ""
while True:
d = num % 16
if d >= 10:
hex_str = chr(ord('a') + (d - 10)) + hex_str
else:
hex_str = chr(ord('0') + d) + hex_str
num //= 16
if num == 0:
break
return hex_str
class SolutionTestCase(unittest.TestCase):
def test_example1(self):
# Input
num = 26
# Output
output = "1a"
solution = Solution()
self.assertEqual(solution.toHex(num), output)
def test_example2(self):
# Input
num = -1
# Output
output = "ffffffff"
solution = Solution()
self.assertEqual(solution.toHex(num), output)
if __name__ == "__main__":
unittest.main()
|
import numpy as np
import sys
import skimage.io as sio
import os
import shutil
libpath = os.path.dirname(os.path.abspath(__file__))
sys.path.append(libpath + '/../lib')
import render
import objloader
input_obj = sys.argv[1]
V, F, VT, FT, VN, FN, face_mat, kdmap = objloader.LoadTextureOBJ(input_obj)
# set up camera information
info = {'Height':480, 'Width':640, 'fx':575, 'fy':575, 'cx':319.5, 'cy':239.5}
render.setup(info)
# set up mesh buffers in cuda
context = render.SetMesh(V, F)
cam2world = np.array([[ 0.85408425, 0.31617427, -0.375678 , 0.56351697 * 2],
[ 0. , -0.72227067, -0.60786998, 0.91180497 * 2],
[-0.52013469, 0.51917219, -0.61688 , 0.92532003 * 2],
[ 0. , 0. , 0. , 1. ]], dtype=np.float32)
world2cam = np.linalg.inv(cam2world).astype('float32')
# the actual rendering process
render.render(context, world2cam)
# get depth information
depth = render.getDepth(info)
# get information of mesh rendering
# vindices represents 3 vertices related to pixels
# vweights represents barycentric weights of the 3 vertices
# findices represents the triangle index related to pixels
vindices, vweights, findices = render.getVMap(context, info)
#Example: render texture based on the info, loop in python is inefficient, consider move to cpp
uv = np.zeros((findices.shape[0], findices.shape[1], 3), dtype='float32')
for j in range(2):
uv[:,:,j] = 0
for k in range(3):
vind = FT[findices][:,:,k]
uv_ind = VT[vind][:,:,j]
uv[:,:,j] += vweights[:,:,k] * uv_ind
mask = np.sum(vweights, axis=2) > 0
mat = face_mat[findices]
diffuse = np.zeros((findices.shape[0],findices.shape[1],3), dtype='uint8')
for i in range(findices.shape[0]):
for j in range(findices.shape[1]):
if mask[i,j] == 0:
diffuse[i,j,:] = 0
continue
elif kdmap[mat[i,j]].shape == (1,1,3):
diffuse[i,j,:] = kdmap[mat[i,j]][0,0]
else:
img = kdmap[mat[i,j]]
x = uv[i,j,0]
y = 1 - uv[i,j,1]
x = x - int(x)
y = y - int(y)
while x >= 1:
x -= 1
while x < 0:
x += 1
while y >= 1:
y -= 1
while y < 0:
y += 1
#uv[i,j,0] = x
#uv[i,j,1] = y
x = x * (img.shape[1] - 1)
y = y * (img.shape[0] - 1)
px = int(x)
py = int(y)
rx = px + 1
ry = py + 1
if ry == img.shape[0]:
ry -= 1
if rx == img.shape[1]:
rx -= 1
wx = (x - px)
wy = y - py
albedo = (img[py,px,:] * (1 - wx) + img[py,rx,:] * wx) * (1 - wy) + (img[ry,px,:] * (1 - wx) + img[ry,rx,:]*wx) * wy
diffuse[i,j,:] = albedo
render.Clear()
sio.imsave('../resources/depth.png', depth / np.max(depth))
sio.imsave('../resources/vweights.png', vweights)
sio.imsave('../resources/diffuse.png', diffuse)
|
"""
Module for working with modulemd YAML definitions with the least abstractions as
possible. Within this module, modulemd YAMLs are represented simply as a string,
and all transformation functions are `str` -> `str`.
"""
import os
import gi
import yaml
# python3-packaging is not available in RHEL 8.x
try:
from packaging.version import Version
except ModuleNotFoundError:
from distutils.version import StrictVersion as Version
gi.require_version("Modulemd", "2.0")
from gi.repository import Modulemd # noqa: E402
def is_valid(mod_yaml):
"""
Determine whether the `mod_yaml` string is a valid modulemd YAML definition
"""
idx = Modulemd.ModuleIndex.new()
try:
ret, _ = idx.update_from_string(mod_yaml, strict=True)
return ret
except gi.repository.GLib.GError:
return False
def validate(mod_yaml):
"""
Same as `is_valid` but raises exception if the YAML is not valid.
"""
idx = Modulemd.ModuleIndex.new()
try:
ret, failures = idx.update_from_string(mod_yaml, strict=True)
except gi.repository.GLib.GError as ex:
raise RuntimeError(ex)
if not ret:
raise RuntimeError(failures[0].get_gerror().message)
return ret
def create(name, stream):
"""
Create a minimal modulemd YAML definition containing only a module name and
module stream. To set any additional attributes, use `update` function.
"""
mod_stream = Modulemd.ModuleStreamV2.new(name, stream)
mod_stream.set_summary("")
mod_stream.set_description("")
mod_stream.add_module_license("")
index = Modulemd.ModuleIndex.new()
index.add_module_stream(mod_stream)
return index.dump_to_string()
def update(mod_yaml, name=None, stream=None, version=None, context=None,
arch=None, summary=None, description=None, module_licenses=None,
content_licenses=None, rpms_nevras=None, requires=None,
buildrequires=None, api=None, filters=None, profiles=None,
components=None):
"""
Transform a given modulemd YAML string into another, updated one. The input
string remains unchanged.
    This function allows modifying selected modulemd attributes while leaving
    the rest of them as is. For structured attributes, such as `module_licenses`,
    whose value is a list, new values are not appended to the existing list;
    the new value replaces it instead.
    For the official documentation of the modulemd YAML format and its values,
please see
https://github.com/fedora-modularity/libmodulemd/blob/main/yaml_specs/modulemd_stream_v2.yaml
It will allow you to better understand the parameters of this function.
Args:
        mod_yaml (str): An input modulemd YAML
name (str): The name of the module
stream (str): Module update stream name
version (int): Module version, integer, cannot be negative
context (str): Module context flag
arch (str): Module artifact architecture
summary (str): A short summary describing the module
description (str): A verbose description of the module
module_licenses (list): A list of module licenses
content_licenses (list): A list of licenses used by the packages in
the module.
rpms_nevras (list): RPM artifacts shipped with this module
requires (dict): Module runtime dependencies represented as a `dict` of
module names as keys and list of streams as their values.
buildrequires (dict): Module buildtime dependencies represented as a
`dict` of module names as keys and list of streams as their values.
api (list): The module's public RPM-level API represented as a list of
package names.
        filters (list): Module component filters represented as a list of package
names.
profiles (dict): A `dict` of profile names as keys and lists of package
names as their values.
components (list): Functional components of the module represented as a
`dict` with package names as keys and `dict`s representing the
            particular components as values. The component `dict` should contain
keys like `name`, `rationale`, `repository`, etc.
Returns:
An updated modulemd YAML represented as string
"""
mod_stream = _yaml2stream(mod_yaml)
name = name or mod_stream.get_module_name()
stream = stream or mod_stream.get_stream_name()
mod_stream = _modulemd_read_packager_string(mod_yaml, name, stream)
if version:
mod_stream.set_version(version)
if context:
mod_stream.set_context(context)
if arch:
mod_stream.set_arch(arch)
if summary:
mod_stream.set_summary(summary)
if description:
mod_stream.set_description(description)
if module_licenses:
mod_stream.clear_module_licenses()
for module_license in module_licenses:
mod_stream.add_module_license(module_license)
if content_licenses:
mod_stream.clear_content_licenses()
for content_license in content_licenses:
mod_stream.add_content_license(content_license)
if rpms_nevras:
mod_stream.clear_rpm_artifacts()
for nevra in rpms_nevras:
mod_stream.add_rpm_artifact(nevra)
if api:
mod_stream.clear_rpm_api()
for rpm in api:
mod_stream.add_rpm_api(rpm)
if filters:
mod_stream.clear_rpm_filters()
for rpm in filters:
mod_stream.add_rpm_filter(rpm)
if profiles:
mod_stream.clear_profiles()
for profile_name, rpms in profiles.items():
profile = Modulemd.Profile.new(profile_name)
for rpm in rpms:
profile.add_rpm(rpm)
mod_stream.add_profile(profile)
if components:
mod_stream.clear_rpm_components()
for component in components:
component_rpm = Modulemd.ComponentRpm.new(component.pop("name"))
for key, value in component.items():
component_rpm.set_property(key, value)
mod_stream.add_component(component_rpm)
    # Updating dependencies is quite messy because AFAIK the only operations
    # that `libmodulemd` allows us to do are adding runtime/buildtime
    # dependencies one by one and dropping all of them at once.
    # We need to help ourselves a little and drop all runtime dependencies and
    # re-populate them with the old ones if new ones weren't set. Similarly
    # for buildrequires.
old_deps = Modulemd.Dependencies()
# Module can contain multiple pairs of dependencies. If we want to update
# both `requires` and `buildrequires` at the same time, we can drop all
# current dependencies and just set a new one. If we want to update only
# one of them, we are getting to an ambiguous situation, not knowing what
# pair of dependencies we should update. Let's just raise an exception.
if (len(mod_stream.get_dependencies()) > 1
and (requires or buildrequires)
and not (requires and buildrequires)):
raise AttributeError("Provided YAML contains multiple pairs of "
"dependencies. It is ambiguous which one to "
"update.")
if mod_stream.get_dependencies():
old_deps = mod_stream.get_dependencies()[0]
new_deps = Modulemd.Dependencies()
if requires:
for depname, depstreams in requires.items():
for depstream in depstreams:
new_deps.add_runtime_stream(depname, depstream)
else:
for depname in old_deps.get_runtime_modules():
for depstream in old_deps.get_runtime_streams(depname):
new_deps.add_runtime_stream(depname, depstream)
if buildrequires:
for depname, depstreams in buildrequires.items():
for depstream in depstreams:
new_deps.add_buildtime_stream(depname, depstream)
else:
for depname in old_deps.get_buildtime_modules():
for depstream in old_deps.get_buildtime_streams(depname):
new_deps.add_buildtime_stream(depname, depstream)
if requires or buildrequires:
mod_stream.clear_dependencies()
mod_stream.add_dependencies(new_deps)
idx2 = Modulemd.ModuleIndex.new()
idx2.add_module_stream(mod_stream)
return idx2.dump_to_string()
def upgrade(mod_yaml, version):
"""
Upgrade the input module YAML from an older version to a newer one.
Downgrades aren't supported even in case where it would be possible.
"""
parsed = yaml.safe_load(mod_yaml or "")
if not parsed or "version" not in parsed:
raise ValueError("Missing modulemd version")
supported = [1, 2]
if parsed["version"] not in supported or version not in supported:
raise ValueError("Unexpected modulemd version")
if parsed["version"] > version:
raise ValueError("Cannot downgrade modulemd version")
mod_stream = _modulemd_read_packager_string(
mod_yaml,
parsed["data"].get("name", ""),
parsed["data"].get("stream", ""))
mod_stream_upgraded = _modulestream_upgrade_ext(mod_stream, version)
return _stream2yaml(mod_stream_upgraded)
def load(path):
"""
Load modulemd YAML definition from a file
"""
with open(path, "r") as yaml_file:
mod_yaml = yaml_file.read()
validate(mod_yaml)
return mod_yaml
def dump(mod_yaml, dest=None):
"""
Dump modulemd YAML definition into a file
If `dest` is not specified, the file will be created in the current working
    directory and its name is going to be generated from the module attributes
in the `N:S:V:C:A.modulemd.yaml` format.
If `dest` is a directory, then the file is going to be generated in that
directory.
If `dest` is pointing to a (nonexisting) file, it is going to be dumped in
its place.
"""
filename = _generate_filename(mod_yaml)
path = dest
if not path:
path = os.path.join(os.getcwd(), filename)
elif os.path.isdir(path):
path = os.path.join(dest, filename)
with open(path, "w") as moduleyaml:
moduleyaml.write(mod_yaml)
def _generate_filename(mod_yaml):
"""
Generate filename for a module yaml
"""
mod_stream = _yaml2stream(mod_yaml)
return "{N}:{S}:{V}:{C}:{A}.modulemd.yaml".format(
N=mod_stream.get_module_name(),
S=mod_stream.get_stream_name(),
V=mod_stream.get_version(),
C=mod_stream.get_context(),
A=mod_stream.get_arch() or "noarch")
def _yaml2stream(mod_yaml):
try:
return _modulemd_read_packager_string(mod_yaml)
except gi.repository.GLib.GError as ex:
raise ValueError(ex.message)
def _stream2yaml(mod_stream):
idx = Modulemd.ModuleIndex.new()
idx.add_module_stream(mod_stream)
try:
return idx.dump_to_string()
except gi.repository.GLib.GError as ex:
raise RuntimeError(ex.message)
def _modulemd_read_packager_string(mod_yaml, name=None, stream=None):
"""
For the time being we happen to be in a transition state when
`Modulemd.ModuleStream.read_string` is deprecated and throws warnings on
Fedora but we still use old libmodulemd (2.9.4) on RHEL8, which doesn't
provide its replacement in the form of `Modulemd.read_packager_string`.
"""
if Version(Modulemd.get_version()) < Version("2.11"):
mod_stream = Modulemd.ModuleStreamV2.new(name, stream)
mod_stream = mod_stream.read_string(mod_yaml, True, name, stream)
return mod_stream
return Modulemd.read_packager_string(mod_yaml, name, stream)
def _modulestream_upgrade_ext(mod_stream, version):
"""
For the time being we happen to be in a transition state when
`Modulemd.ModuleStream.upgrade` is deprecated and throws warnings on
Fedora but we still use old libmodulemd (2.9.4) on RHEL8, which doesn't
provide its replacement in the form of `Modulemd.ModuleStream.upgrade_ext`.
"""
if Version(Modulemd.get_version()) < Version("2.10"):
return mod_stream.upgrade(version)
mod_upgraded = mod_stream.upgrade_ext(version)
return mod_upgraded.get_stream_by_NSVCA(
mod_stream.get_stream_name(),
mod_stream.get_version(),
mod_stream.get_context(),
mod_stream.get_arch())
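# Illustrative usage sketch (not part of the original module): building a
# minimal modulemd YAML with `create` and filling in a few attributes with
# `update`. The module name, stream, context and license are arbitrary
# example values.
if __name__ == "__main__":
    example_yaml = create("examplemodule", "rolling")
    example_yaml = update(
        example_yaml,
        version=1,
        context="abcd1234",
        summary="An example module",
        description="Generated by this sketch, not a real module.",
        module_licenses=["MIT"],
    )
    print(is_valid(example_yaml))
    print(example_yaml)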
|
from unittest.mock import Mock
from prices import Money, TaxedMoney
from ....plugins.manager import PluginsManager
from ....product.models import ProductVariant
from ....product.utils.availability import get_variant_availability
from ...tests.utils import get_graphql_content
QUERY_GET_VARIANT_PRICING = """
query ($channel: String, $address: AddressInput) {
products(first: 1, channel: $channel) {
edges {
node {
variants {
pricing(address: $address) {
onSale
discount {
currency
net {
amount
}
}
priceUndiscounted {
currency
net {
amount
}
}
price {
currency
net {
amount
}
}
}
}
}
}
}
}
"""
def test_get_variant_pricing_on_sale(api_client, sale, product, channel_USD):
price = product.variants.first().channel_listings.get().price
sale_discounted_value = sale.channel_listings.get().discount_value
discounted_price = price.amount - sale_discounted_value
variables = {"channel": channel_USD.slug, "address": {"country": "US"}}
response = api_client.post_graphql(QUERY_GET_VARIANT_PRICING, variables)
content = get_graphql_content(response)
pricing = content["data"]["products"]["edges"][0]["node"]["variants"][0]["pricing"]
# ensure the availability was correctly retrieved and sent
assert pricing
# check availability
assert pricing["onSale"] is True
# check the discount
assert pricing["discount"]["currency"] == price.currency
assert pricing["discount"]["net"]["amount"] == discounted_price
# check the undiscounted price
assert pricing["priceUndiscounted"]["currency"] == price.currency
assert pricing["priceUndiscounted"]["net"]["amount"] == price.amount
# check the discounted price
assert pricing["price"]["currency"] == price.currency
assert pricing["price"]["net"]["amount"] == discounted_price
def test_get_variant_pricing_not_on_sale(api_client, product, channel_USD):
price = product.variants.first().channel_listings.get().price
variables = {"channel": channel_USD.slug, "address": {"country": "US"}}
response = api_client.post_graphql(QUERY_GET_VARIANT_PRICING, variables)
content = get_graphql_content(response)
pricing = content["data"]["products"]["edges"][0]["node"]["variants"][0]["pricing"]
# ensure the availability was correctly retrieved and sent
assert pricing
# check availability
assert pricing["onSale"] is False
# check the discount
assert pricing["discount"] is None
# check the undiscounted price
assert pricing["priceUndiscounted"]["currency"] == price.currency
assert pricing["priceUndiscounted"]["net"]["amount"] == price.amount
# check the discounted price
assert pricing["price"]["currency"] == price.currency
assert pricing["price"]["net"]["amount"] == price.amount
def test_variant_pricing(
variant: ProductVariant, monkeypatch, settings, stock, channel_USD
):
taxed_price = TaxedMoney(Money("10.0", "USD"), Money("12.30", "USD"))
monkeypatch.setattr(
PluginsManager, "apply_taxes_to_product", Mock(return_value=taxed_price)
)
product = variant.product
product_channel_listing = product.channel_listings.get()
variant_channel_listing = variant.channel_listings.get()
pricing = get_variant_availability(
variant=variant,
variant_channel_listing=variant_channel_listing,
product=product,
product_channel_listing=product_channel_listing,
collections=[],
discounts=[],
channel=channel_USD,
)
assert pricing.price == taxed_price
assert pricing.price_local_currency is None
monkeypatch.setattr(
"django_prices_openexchangerates.models.get_rates",
lambda c: {"PLN": Mock(rate=2)},
)
settings.DEFAULT_COUNTRY = "PL"
settings.OPENEXCHANGERATES_API_KEY = "fake-key"
pricing = get_variant_availability(
variant=variant,
variant_channel_listing=variant_channel_listing,
product=product,
product_channel_listing=product_channel_listing,
collections=[],
discounts=[],
channel=channel_USD,
local_currency="PLN",
country="US",
)
assert pricing.price_local_currency.currency == "PLN" # type: ignore
pricing = get_variant_availability(
variant=variant,
variant_channel_listing=variant_channel_listing,
product=product,
product_channel_listing=product_channel_listing,
collections=[],
discounts=[],
channel=channel_USD,
)
assert pricing.price.tax.amount
assert pricing.price_undiscounted.tax.amount
|
# -*- coding: utf-8 -*-
import pandas as pd
from windpyplus.utils.tradedate import tradedate
from windpyplus.stockSector.StockSector import allAstock, MSCIAStock
from windpyplus.fundamental.foreCastWind import foreCastWind
from windpyplus.utils.convertToWindCode import convertBQCode, convertCode
from windpyplus.utils.dfToExcel import dftoSameWorkbook, dfToExcel
from windpyplus.fundamental.valucation import valucationWind
from windpyplus.fundamental.fundamentalWind import financialDataWind, netProfit_filter, ROE_filter, growth_filter, cashFlow_filter, multi_filter
allastocks = list(allAstock().index.values)
def fiter_ForecastValucation(qt = '20170930', CHG_MIN= 7):
allastocks = list(allAstock().index.values)
df = foreCastWind(allastocks, qt)
df = df[df['PROFITNOTICE_CHANGEMEAN'] > CHG_MIN]
print(df.head(10))
stocklists = df.index.values
df_V = valucationWind(stocklists)
print(df_V.head(10))
df_f = pd.merge(df_V,df, how='left')
dfToExcel(df_f, "filter_valucation_forecast_"+ str(qt)+"_"+ str(CHG_MIN))
    print('num of filter_forecastValucation : {}'.format(len(df_f)))
return df_f
if __name__ == '__main__':
print(tradedate())
df = fiter_ForecastValucation(qt= '20170930',CHG_MIN= 30)
#stocks_muiltfilter = multi_filter(list(df.index.values), )
#df = financialDataWind(stocks_muiltfilter)
#print(df)
|
#!/usr/bin/env python
# Copyright 2017 Open Source Robotics Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
from simulation.msg import Control
from sensor_msgs.msg import Joy
STEERING_AXIS = 0
THROTTLE_AXIS = 4
class Translator:
def __init__(self):
self.sub = rospy.Subscriber("joy", Joy, self.callback)
self.pub = rospy.Publisher('prius', Control, queue_size=1)
self.last_published_time = rospy.get_rostime()
self.last_published = None
self.timer = rospy.Timer(rospy.Duration(1./20.), self.timer_callback)
    def timer_callback(self, event):
        # Republish the last joystick command if no new Joy message has arrived
        # within one timer period, so the prius topic keeps receiving commands.
        if self.last_published and self.last_published_time < rospy.get_rostime() - rospy.Duration(1.0/20.):
            self.callback(self.last_published)
def callback(self, message):
rospy.logdebug("joy_translater received axes %s",message.axes)
command = Control()
command.header = message.header
if message.axes[THROTTLE_AXIS] >= 0:
command.throttle = message.axes[THROTTLE_AXIS]
command.brake = 0.0
else:
command.brake = message.axes[THROTTLE_AXIS] * -1
command.throttle = 0.0
if message.buttons[3]:
command.shift_gears = Control.FORWARD
elif message.buttons[1]:
command.shift_gears = Control.NEUTRAL
elif message.buttons[0]:
command.shift_gears = Control.REVERSE
else:
command.shift_gears = Control.NO_COMMAND
command.steer = message.axes[STEERING_AXIS]
        self.last_published = message
        self.last_published_time = rospy.get_rostime()
self.pub.publish(command)
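# --- Hypothetical illustration (not part of the original node) ---
# Mirrors the throttle/brake mapping used in Translator.callback so it can be
# sanity-checked without a running ROS master; the helper name below is an
# illustrative assumption, not an existing API.
def _axis_to_throttle_brake(axis_value):
    """Return (throttle, brake) for a signed joystick axis value."""
    if axis_value >= 0:
        return axis_value, 0.0
    return 0.0, -axis_value
assert _axis_to_throttle_brake(0.6) == (0.6, 0.0)
assert _axis_to_throttle_brake(-0.25) == (0.0, 0.25)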
if __name__ == '__main__':
rospy.init_node('joy_translator')
t = Translator()
rospy.spin()
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: consensus_submit_message.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import basic_types_pb2 as basic__types__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='consensus_submit_message.proto',
package='proto',
syntax='proto3',
serialized_options=b'\n\"com.hederahashgraph.api.proto.javaP\001',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1e\x63onsensus_submit_message.proto\x12\x05proto\x1a\x11\x62\x61sic_types.proto\"n\n\x19\x43onsensusMessageChunkInfo\x12\x32\n\x14initialTransactionID\x18\x01 \x01(\x0b\x32\x14.proto.TransactionID\x12\r\n\x05total\x18\x02 \x01(\x05\x12\x0e\n\x06number\x18\x03 \x01(\x05\"\x8e\x01\n%ConsensusSubmitMessageTransactionBody\x12\x1f\n\x07topicID\x18\x01 \x01(\x0b\x32\x0e.proto.TopicID\x12\x0f\n\x07message\x18\x02 \x01(\x0c\x12\x33\n\tchunkInfo\x18\x03 \x01(\x0b\x32 .proto.ConsensusMessageChunkInfoB&\n\"com.hederahashgraph.api.proto.javaP\x01\x62\x06proto3'
,
dependencies=[basic__types__pb2.DESCRIPTOR,])
_CONSENSUSMESSAGECHUNKINFO = _descriptor.Descriptor(
name='ConsensusMessageChunkInfo',
full_name='proto.ConsensusMessageChunkInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='initialTransactionID', full_name='proto.ConsensusMessageChunkInfo.initialTransactionID', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total', full_name='proto.ConsensusMessageChunkInfo.total', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='number', full_name='proto.ConsensusMessageChunkInfo.number', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=60,
serialized_end=170,
)
_CONSENSUSSUBMITMESSAGETRANSACTIONBODY = _descriptor.Descriptor(
name='ConsensusSubmitMessageTransactionBody',
full_name='proto.ConsensusSubmitMessageTransactionBody',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='topicID', full_name='proto.ConsensusSubmitMessageTransactionBody.topicID', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='message', full_name='proto.ConsensusSubmitMessageTransactionBody.message', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='chunkInfo', full_name='proto.ConsensusSubmitMessageTransactionBody.chunkInfo', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=173,
serialized_end=315,
)
_CONSENSUSMESSAGECHUNKINFO.fields_by_name['initialTransactionID'].message_type = basic__types__pb2._TRANSACTIONID
_CONSENSUSSUBMITMESSAGETRANSACTIONBODY.fields_by_name['topicID'].message_type = basic__types__pb2._TOPICID
_CONSENSUSSUBMITMESSAGETRANSACTIONBODY.fields_by_name['chunkInfo'].message_type = _CONSENSUSMESSAGECHUNKINFO
DESCRIPTOR.message_types_by_name['ConsensusMessageChunkInfo'] = _CONSENSUSMESSAGECHUNKINFO
DESCRIPTOR.message_types_by_name['ConsensusSubmitMessageTransactionBody'] = _CONSENSUSSUBMITMESSAGETRANSACTIONBODY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ConsensusMessageChunkInfo = _reflection.GeneratedProtocolMessageType('ConsensusMessageChunkInfo', (_message.Message,), {
'DESCRIPTOR' : _CONSENSUSMESSAGECHUNKINFO,
'__module__' : 'consensus_submit_message_pb2'
# @@protoc_insertion_point(class_scope:proto.ConsensusMessageChunkInfo)
})
_sym_db.RegisterMessage(ConsensusMessageChunkInfo)
ConsensusSubmitMessageTransactionBody = _reflection.GeneratedProtocolMessageType('ConsensusSubmitMessageTransactionBody', (_message.Message,), {
'DESCRIPTOR' : _CONSENSUSSUBMITMESSAGETRANSACTIONBODY,
'__module__' : 'consensus_submit_message_pb2'
# @@protoc_insertion_point(class_scope:proto.ConsensusSubmitMessageTransactionBody)
})
_sym_db.RegisterMessage(ConsensusSubmitMessageTransactionBody)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
from tkinter import *
from videomaker.functions.startThread import startThread
def initButton(focus):
focus.startButton = Button(focus.master, text="Start!", width=42, height=5, command= lambda: startThread(focus))
    # Can't make the text bigger or bolder without distorting the size of the
    # button; even if the button is larger, the text stays proportionate to it.
focus.startButton.place(x=750, y=400)
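# --- Hypothetical sketch (not part of the original module) ---
# One common workaround for the sizing issue noted above: wrap the button in a
# fixed-pixel Frame with pack_propagate(False) so the font can be enlarged or
# bolded without changing the button's footprint. The function name, frame
# dimensions and font below are illustrative assumptions.
def initBigStartButton(focus):
    frame = Frame(focus.master, width=320, height=90)
    frame.pack_propagate(False)  # keep the frame at its pixel size regardless of contents
    focus.startButton = Button(frame, text="Start!", font=("Helvetica", 16, "bold"),
                               command=lambda: startThread(focus))
    focus.startButton.pack(fill="both", expand=True)
    frame.place(x=750, y=400)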
|
from Runner import Runner
from Scheduler import Scheduler
from model import Player, PlayerHadouken
def main():
runner = Runner(
players=(
Player(0),
),
state='ryu_vs_ken_highest_difficulty'
)
scheduler = Scheduler(runner.on_frame, fps=40)
scheduler.run()
if __name__ == '__main__':
main()
|
'''OpenGL extension OES.fbo_render_mipmap
This module customises the behaviour of the
OpenGL.raw.GLES2.OES.fbo_render_mipmap to provide a more
Python-friendly API
Overview (from the spec)
OES_framebuffer_object allows rendering to the base level of a
texture only. This extension removes this limitation by
allowing implementations to support rendering to any mip-level
of a texture(s) that is attached to a framebuffer object(s).
If this extension is supported, FramebufferTexture2DOES, and
FramebufferTexture3DOES can be used to render directly into
any mip level of a texture image
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/fbo_render_mipmap.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.fbo_render_mipmap import *
from OpenGL.raw.GLES2.OES.fbo_render_mipmap import _EXTENSION_NAME
def glInitFboRenderMipmapOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
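# --- Hypothetical usage sketch (not part of the autogenerated module above) ---
# The extension check is only meaningful once a GLES2 context is current;
# context creation is assumed to happen elsewhere and is omitted here. The
# function name below is an illustrative assumption.
def _example_require_fbo_render_mipmap():
    '''Raise if mip-level render targets are unavailable on the current context.'''
    if not glInitFboRenderMipmapOES():
        raise RuntimeError('OES_fbo_render_mipmap is not supported by the current context')
    # With the extension present, any mip level of a texture attached via
    # glFramebufferTexture2D / glFramebufferTexture3D may be used as a render target.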
|
from abc import ABC, abstractmethod
from ungenetico.gene import Gene
#from ungenetico.ga import GeneticAlgorithm
from ungenetico.population import Population
import random
from dataclasses import dataclass
from itertools import accumulate
import copy
import numpy as np
class Mutation(ABC):
"""Abstract class"""
@abstractmethod
def mutate(self, gen: Gene, ag):
pass
class Crossover(ABC):
"""Abstract class"""
@abstractmethod
def exchange(self, gen1: Gene, gen2: Gene, ag):
pass
class Probability(ABC):
"""Abstract class"""
@abstractmethod
def assign_probability(self, pop: Population, ag):
pass
class Selection(ABC):
"""Abstract class"""
@abstractmethod
def select(self, pop: Population, ag):
pass
class Pairing(ABC):
"""Abstract class"""
@abstractmethod
def match(self, pop: Population, ag):
pass
class Reproduction(ABC):
"""Abstract class"""
@abstractmethod
def reproduce(self, pop: Population, ag):
pass
class MutationUniform(Mutation):
def mutate(self, gen: Gene, ag):
"""
:param gen:
:param ag:
:return:
"""
gen.value = [random.uniform(gen.min_val, gen.max_val) for _ in range(gen.length)]
@dataclass
class MutationNotUniform(Mutation):
b: float = 0.5
def mutate(self, gen: Gene, ag):
"""
Parameters
----------
gen
ag
Returns
-------
"""
t = ag.generation
tmax = ag.generation_max
values = [None]*gen.length
        for i in range(gen.length):
            beta = random.randint(0, 1)
            r = random.uniform(0, 1)
            decay = 1 - r**((1 - t/tmax)**self.b)
            if beta == 0:
                # Move towards the upper bound by a fraction of the remaining range.
                value = gen.value[i] + (gen.max_val - gen.value[i]) * decay
            else:
                # Move towards the lower bound so the result stays within the gene's bounds.
                value = gen.value[i] - (gen.value[i] - gen.min_val) * decay
            values[i] = value
gen.value = values
class CrossoverSimple(Crossover):
def exchange(self, gen1: Gene, gen2: Gene, ag):
return gen2.value, gen1.value
class CrossoverRandom(Crossover):
def exchange(self, gen1: Gene, gen2: Gene, ag):
child1 = [None]*gen1.length
child2 = [None]*gen1.length
for i in range(gen1.length):
child1[i] = random.uniform(gen1.value[i], gen2.value[i])
child2[i] = random.uniform(gen1.value[i], gen2.value[i])
return child1, child2
@dataclass
class CrossoverArithmetic(Crossover):
alpha: float = 0.7
def exchange(self, gen1: Gene, gen2: Gene, ag):
child1 = [None]*gen1.length
child2 = [None]*gen1.length
for i in range(gen1.length):
child1[i] = self.alpha*gen1.value[i] + (1-self.alpha)*gen2.value[i]
child2[i] = self.alpha*gen2.value[i] + (1-self.alpha)*gen1.value[i]
return child1, child2
@dataclass
class CrossoverBLX(Crossover):
alpha: float = 0.7
def exchange(self, gen1: Gene, gen2: Gene, ag):
child1 = [None]*gen1.length
child2 = [None]*gen1.length
for i in range(gen1.length):
cmin = min(gen1.value[i], gen2.value[i])
cmax = max(gen1.value[i], gen2.value[i])
d = cmax-cmin
child1[i] = random.uniform(cmin - d*self.alpha, cmax + d*self.alpha)
child2[i] = random.uniform(cmin - d*self.alpha, cmax + d*self.alpha)
return child1, child2
class ProbabilityUniform(Probability):
def assign_probability(self, pop: Population, ag):
prob = 1/pop.size
for ind in pop.population:
ind.survival_probability = prob
class ProbabilityProportional(Probability):
def assign_probability(self, pop: Population, ag):
ga_min = ag.objective_min
sov_max = sum([ind.objective_value + (1-ga_min) for ind in pop.population])
sov_min = sum([1/(ind.objective_value + (1 - ga_min)) for ind in pop.population])
if ag.optimization == 'maximization':
for ind in pop.population:
ind.survival_probability = (ind.objective_value + (1-ga_min)) / sov_max
else:
for ind in pop.population:
ind.survival_probability = (1/(ind.objective_value + (1-ga_min))) / sov_min
class ProbabilityLineal(Probability):
def assign_probability(self, pop: Population, ag):
n = pop.size
if ag.optimization == 'maximization':
pop.sort_population('descending', 'objective_value')
else:
pop.sort_population('ascending', 'objective_value')
for i in range(n):
pop.population[i].survival_probability = 2*(n-i) / (n*(n+1))
class SelectionStochastic(Selection):
def select(self, pop: Population, ag):
pop.sort_population('descending', 'survival_probability')
prob = [ind.survival_probability for ind in pop.population]
angle = list(accumulate(prob))
new_population = Population()
for i in range(pop.size):
roulette = random.uniform(0, 1)
            # Index of the first cumulative probability exceeding the draw; the
            # clamp guards against floating-point round-off in the final bin.
            pos = min(len([1 for jind in angle if roulette >= jind]), pop.size - 1)
new_population.append_individual(copy.deepcopy(pop.population[pos]))
pop.population = new_population.population
class PairingRandom(Pairing):
def match(self, pop: Population, ag):
        # Stored as 'partners' so the Reproduction operators can read it.
        pop.partners = random.sample(range(pop.size), pop.size)
class PairingAdjacent(Pairing):
def match(self, pop: Population, ag):
n = list(range(1, pop.size))
n.append(pop.size-1)
pop.partners = n
class PairingExtremes(Pairing):
def match(self, pop: Population, ag):
pop.partners = list(range(pop.size-1, -1, -1))
class ReproductionSimple(Reproduction):
def reproduce(self, pop: Population, ag):
for ind in pop.population:
ind.paired = False
for index in range(pop.size):
parent1 = pop.population[index]
parent2 = pop.population[pop.partners[index]]
if not parent1.paired and not parent2.paired:
parent1.paired = True
parent2.paired = True
exchange_point = random.randint(0, len(parent1.genome))
for i in range(exchange_point, len(parent1.genome)):
parent1.genome[i].value, parent2.genome[i].value = parent1.genome[i].exchange(parent2.genome[i], ag)
class ReproductionTwoParentsTwoChildren(Reproduction):
def reproduce(self, pop: Population, ag):
for ind in pop.population:
ind.paired = False
for index in range(pop.size):
parent1 = pop.population[index]
parent2 = pop.population[pop.partners[index]]
if not parent1.paired and not parent2.paired:
parent1.paired = True
parent2.paired = True
for i in range(len(parent1.genome)):
parent1.genome[i].value, parent2.genome[i].value = parent1.genome[i].exchange(parent2.genome[i], ag)
class ReproductionBestParentBestChild(Reproduction):
def reproduce(self, pop: Population, ag):
for ind in pop.population:
ind.paired = False
for index in range(pop.size):
parent1 = pop.population[index]
parent2 = pop.population[pop.partners[index]]
if not parent1.paired and not parent2.paired:
parent1.paired = True
parent2.paired = True
child1 = copy.deepcopy(parent1)
child2 = copy.deepcopy(parent2)
for i in range(len(parent1.genome)):
child1.genome[i].value, child2.genome[i].value = parent1.genome[i].exchange(parent2.genome[i], ag)
parent1.calculate_objective_function(ag.objective_function)
parent2.calculate_objective_function(ag.objective_function)
child1.calculate_objective_function(ag.objective_function)
child2.calculate_objective_function(ag.objective_function)
if ag.optimization == 'maximization':
if parent2.objective_value > parent1.objective_value:
#pop.population[index] = copy.deepcopy(parent2)
pop.population[index] = parent2
if child1.objective_value > child2.objective_value:
pop.population[pop.partners[index]] = child1
else:
pop.population[pop.partners[index]] = child2
else:
if parent2.objective_value < parent1.objective_value:
# pop.population[index] = copy.deepcopy(parent2)
pop.population[index] = parent2
if child1.objective_value < child2.objective_value:
pop.population[pop.partners[index]] = child1
else:
pop.population[pop.partners[index]] = child2
class ReproductionBestBetweenParentsChildren(Reproduction):
def reproduce(self, pop: Population, ag):
for ind in pop.population:
ind.paired = False
for index in range(pop.size):
parent1 = pop.population[index]
parent2 = pop.population[pop.partners[index]]
if not parent1.paired and not parent2.paired:
parent1.paired = True
parent2.paired = True
child1 = copy.deepcopy(parent1)
child2 = copy.deepcopy(parent2)
for i in range(len(parent1.genome)):
child1.genome[i].value, child2.genome[i].value = parent1.genome[i].exchange(parent2.genome[i], ag)
parent1.calculate_objective_function(ag.objective_function)
parent2.calculate_objective_function(ag.objective_function)
child1.calculate_objective_function(ag.objective_function)
child2.calculate_objective_function(ag.objective_function)
group = [parent1, parent2, child1, child2]
obj_vals = np.array([parent1.objective_value, parent2.objective_value, child1.objective_value, child2.objective_value])
arg_obj_vals = np.argsort(obj_vals)
if ag.optimization == 'maximization':
pop.population[index] = group[arg_obj_vals[-1]]
pop.population[pop.partners[index]] = group[arg_obj_vals[-2]]
else:
pop.population[index] = group[arg_obj_vals[0]]
pop.population[pop.partners[index]] = group[arg_obj_vals[1]]
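# --- Hypothetical usage sketch (not part of the original module) ---
# The package's real Gene/Population/GeneticAlgorithm classes live elsewhere;
# the stand-ins below only mimic the attributes the operators above actually
# touch (value, min_val, max_val, length, generation, generation_max), so the
# operators can be exercised in isolation. All names prefixed with _Fake are
# illustrative assumptions.
if __name__ == '__main__':
    @dataclass
    class _FakeGene:
        value: list
        min_val: float = 0.0
        max_val: float = 1.0
        @property
        def length(self):
            return len(self.value)
    class _FakeGA:
        generation = 5
        generation_max = 100
    g1 = _FakeGene(value=[0.2, 0.8, 0.5])
    g2 = _FakeGene(value=[0.9, 0.1, 0.4])
    MutationUniform().mutate(g1, _FakeGA())           # resample g1 uniformly within its bounds
    MutationNotUniform(b=0.5).mutate(g2, _FakeGA())   # mutation step shrinks with the generation
    child1, child2 = CrossoverBLX(alpha=0.3).exchange(g1, g2, _FakeGA())
    print(child1, child2)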
|