import re  # regular expression library
def __checkPassword(password):
checker = 0
while True:
if (len(password)<8):
checker = -1
break
elif not re.search("[a-z]", password):
checker = -1
break
elif not re.search("[A-Z]", password):
checker = -1
break
elif not re.search("[0-9]", password):
checker = -1
break
elif re.search("\s", password):
checker = -1
break
else:
checker = 0
return True
if checker ==-1:
return False
def __main__():
password = input("Enter the password: ")
if(__checkPassword(password)):
print("this is a valid password")
else:
print("this is a invalid password")
__main__()
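# Example calls (sketch):
#   __checkPassword("Abcdef12")   -> True
#   __checkPassword("abcdef12")   -> False  (no uppercase letter)
#   __checkPassword("Ab 12cdef")  -> False  (contains whitespace)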
|
# Copyright 2015 Leon Sixt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from conftest import TEST_OUTPUT_DIR
import os
import keras
import keras.initializations
from keras.layers.core import Dense
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Adam
from keras.engine.topology import Input
from keras.engine.training import Model
import math
import pytest
import numpy as np
from diktya.gan import GAN
from diktya.func_api_helpers import sequential
def sample_circle(nb_samples):
center = (0.2, 0.2)
r = np.random.normal(0.5, 0.1, (nb_samples, ))
angle = np.random.uniform(0, 2*math.pi, (nb_samples,))
X = np.zeros((nb_samples, 2))
X[:, 0] = r*np.cos(angle) + center[0]
X[:, 1] = r*np.sin(angle) + center[1]
return X
class Plotter(keras.callbacks.Callback):
def __init__(self, X, outdir):
super().__init__()
self.X = X
self.outdir = outdir
os.makedirs(outdir, exist_ok=True)
def on_epoch_begin(self, epoch, logs={}):
if epoch == 0:
self._plot("on_begin_0.png")
def on_epoch_end(self, epoch, logs={}):
self._plot("on_end_{}.png".format(epoch))
def _plot(self, outname):
import matplotlib.pyplot as plt
ys = []
for i in range(32):
ys.append(self.model.generate(nb_samples=64))
Y = np.concatenate(ys)
fig = plt.figure()
plt.ylim(-1, 1.5)
plt.xlim(-1, 1.5)
plt.scatter(self.X[:, 0], self.X[:, 1], marker='.', c='b', alpha=0.2)
plt.scatter(Y[:, 0], Y[:, 1], marker='.', c='r', alpha=0.2)
fig.savefig(os.path.join(self.outdir, outname))
plt.close()
simple_gan_batch_size = 64
simple_gan_nb_z = 20
simple_gan_nb_out = 2
simple_gan_z_shape = (simple_gan_batch_size, simple_gan_nb_z)
simple_gan_real_shape = (simple_gan_batch_size, simple_gan_nb_out)
@pytest.fixture()
def simple_gan():
z = Input(batch_shape=simple_gan_z_shape, name='z')
generator = sequential([
Dense(4*simple_gan_nb_z, activation='relu', name='g1'),
Dense(4*simple_gan_nb_z, activation='relu', name='g2'),
Dense(simple_gan_nb_out, name='g_loss'),
])(z)
d_input = Input(batch_shape=simple_gan_real_shape, name='data')
discriminator = sequential([
Dense(400, input_dim=2, name='d1'),
LeakyReLU(0.3),
Dense(400, name='d2'),
LeakyReLU(0.3),
Dense(1, activation='sigmoid', name='d_loss')
])(d_input)
g = Model(z, generator)
g.compile(Adam(lr=0.0002, beta_1=0.5), {'g_loss': 'binary_crossentropy'})
d = Model(d_input, discriminator)
d.compile(Adam(lr=0.0002, beta_1=0.5), {'d_loss': 'binary_crossentropy'})
return GAN(g, d)
def test_metrics_names(simple_gan):
assert simple_gan.metrics_names == ['g_loss', 'd_loss']
def test_input_names(simple_gan):
assert simple_gan.input_names == ['z', 'data']
def test_gan_learn_simple_distribution(simple_gan):
gan = simple_gan
def sample_multivariate(nb_samples):
mean = (0.2, 0)
cov = [[0.1, 0.03],
[0.02, 0.04]]
return np.random.multivariate_normal(mean, cov, (nb_samples,))
# dataset = sample_multivariate
dataset = sample_circle
def generator(bs=32):
while True:
X = dataset(bs)
z = np.random.uniform(-1, 1, (bs, simple_gan_nb_z))
yield {'data': X, 'z': z}
X = dataset(5000)
callbacks = [Plotter(X, TEST_OUTPUT_DIR + "/epoches_plot")]
bs = 64
gan.fit_generator(generator(bs), nb_batches_per_epoch=100, nb_epoch=1, verbose=1,
callbacks=callbacks, batch_size=bs)
def test_gan_utility_funcs(simple_gan: GAN):
xy_shp = simple_gan_z_shape[1:]
x = np.zeros(xy_shp, dtype=np.float32)
y = np.zeros(xy_shp, dtype=np.float32)
simple_gan.interpolate(x, y)
z_point = simple_gan.random_z_point()
neighbors = simple_gan.neighborhood(z_point, std=0.05)
diff = np.stack([neighbors[0]]*len(neighbors)) - neighbors
assert np.abs(diff).mean() < 0.1
|
"""InstanceFile and InstanceFileManager."""
import os
import re
import itertools
from pathlib import Path
from schema_enforcer.utils import find_files, load_file
SCHEMA_TAG = "jsonschema"
class InstanceFileManager: # pylint: disable=too-few-public-methods
"""InstanceFileManager."""
def __init__(self, config):
"""Initialize the interface File manager.
The file manager will locate all potential instance files in the search directories.
Args:
config (object): The pydantic config object.
"""
self.instances = []
self.config = config
# Find all instance files
# TODO need to load file extensions from the config
files = find_files(
file_extensions=config.data_file_extensions,
search_directories=config.data_file_search_directories,
excluded_filenames=config.data_file_exclude_filenames,
excluded_directories=[config.main_directory],
return_dir=True,
)
# For each instance file, check if there is a static mapping defined in the config
# Create the InstanceFile object and save it
for root, filename in files:
matches = []
if filename in config.schema_mapping:
matches.extend(config.schema_mapping[filename])
instance = InstanceFile(root=root, filename=filename, matches=matches)
self.instances.append(instance)
def print_schema_mapping(self):
"""Print in CLI the matches for all instance files."""
print("{:50} Schema ID".format("Structured Data File"))
print("-" * 80)
print_strings = []
for instance in self.instances:
filepath = f"{instance.path}/{instance.filename}"
print_strings.append(f"{filepath:50} {instance.matches}")
print("\n".join(sorted(print_strings)))
class InstanceFile:
"""Class to manage an instance file."""
def __init__(self, root, filename, matches=None):
"""Initializes InstanceFile object.
Args:
root (string): Absolute path to the directory where the schema file is located.
filename (string): Name of the file.
matches (list, optional): List of schema IDs that match this instance file. Defaults to None.
"""
self.data = None
self.path = root
self.full_path = os.path.realpath(root)
self.filename = filename
if matches:
self.matches = matches
else:
self.matches = []
self.matches.extend(self._find_matches_inline())
def _find_matches_inline(self, content=None):
"""Find addition matches using the Schema ID decorator comment.
Look for a line with # jsonschema: schema_id,schema_id
Args:
content (string, optional): Content of the file to analyze. Defaults to None.
Returns:
list(string): List of matches found in the file.
"""
if not content:
content = Path(os.path.join(self.full_path, self.filename)).read_text()
matches = []
if SCHEMA_TAG in content:
line_regexp = r"^#.*{0}:\s*(.*)$".format(SCHEMA_TAG)
match = re.search(line_regexp, content, re.MULTILINE)  # search (not match) so the tag can appear on any line
if match:
matches = [x.strip() for x in match.group(1).split(",")]
return matches
def get_content(self):
"""Return the content of the instance file in structured format.
Content returned can be either dict or list depending on the content of the file
Returns:
dict or list: Content of the instance file.
"""
return load_file(os.path.join(self.full_path, self.filename))
def validate(self, schema_manager, strict=False):
"""Validate this instance file with all matching schema in the schema manager.
Args:
schema_manager (SchemaManager): A SchemaManager object.
strict (bool, optional): True if the validation should automatically flag unsupported elements. Defaults to False.
Returns:
iterator: Iterator of ValidationErrors returned by schema.validate.
"""
# TODO need to add something to check if a schema is missing
# Create new iterator chain to be able to aggregate multiple iterators
errs = itertools.chain()
# Go over all schemas and skip any schema not present in the matches
for schema_id, schema in schema_manager.iter_schemas():
if schema_id not in self.matches:
continue
schema.validate(self.get_content(), strict)
results = schema.get_results()
errs = itertools.chain(errs, results)
schema.clear_results()
return errs
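# Example (sketch): a structured data file can declare its schemas inline via the
# tag handled by _find_matches_inline. A hypothetical first line of hostvars.yml:
#   # jsonschema: schemas/ntp, schemas/dns
# would add "schemas/ntp" and "schemas/dns" to InstanceFile.matches.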
|
#!/usr/bin/env python
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
This node's job is to collect position info and send it over /client_robot_data via rosbridge.
To grab data of all other robots as a client, subscribe to /client_robot_data and remap
it to /remapped_client_robot_data. If you are a server, you have all the data in
/client_robot_data already
"""
import rospy
import roslibpy
import json
from std_msgs.msg import String
import tf
from tf.transformations import euler_from_quaternion
from gazebo_msgs.msg import ModelStates
class SendData:
def __init__(self):
self.data_to_rosbridge = {}
self.data_to_rosbridge['name'] = rospy.get_param('ROBOT_NAME')
self.data_to_rosbridge['robot_sdf_file'] = rospy.get_param('ROBOT_SDF_FILE')
self.data_to_rosbridge['navigation_pose'] = {}
self.data_to_rosbridge['gazebo_pose'] = {}
self.rosbridge_ip = rospy.get_param('ROSBRIDGE_IP')
self.rosbridge_state = rospy.get_param('ROSBRIDGE_STATE')
self.client = roslibpy.Ros(host=self.rosbridge_ip, port=9090)
self.client.run()
if self.rosbridge_state == 'CLIENT':
self.remap_pub = rospy.Publisher('remapped_client_robot_data', String, queue_size=1)
self.robot_data_listener = roslibpy.Topic(self.client, 'client_robot_data', 'std_msgs/String')
self.robot_data_listener.subscribe(self.remap_subscriber)
self.gazebo_model_state_sub = rospy.Subscriber('gazebo/model_states', ModelStates, self.model_states_callback, queue_size=1)
self.current_model_state = None
self.init_rosbridge_talkers()
def model_states_callback(self, msg):
self.current_model_state = msg
def init_rosbridge_talkers(self):
self.talker = roslibpy.Topic(self.client, 'client_robot_data', 'std_msgs/String')
def remap_subscriber(self, msg):
data = msg['data']
self.remap_pub.publish(data)
def main(self):
rate = rospy.Rate(10.0)
listener = tf.TransformListener()
while not rospy.is_shutdown():
try:
(trans,rot) = listener.lookupTransform('map', '/base_link', rospy.Time(0))
nav_pose = {}
nav_pose['x'] = trans[0]
nav_pose['y'] = trans[1]
nav_pose['z'] = trans[2]
nav_pose['qx'] = rot[0]
nav_pose['qy'] = rot[1]
nav_pose['qz'] = rot[2]
nav_pose['qw'] = rot[3]
self.data_to_rosbridge['navigation_pose'] = nav_pose
gazebo_pose = {}
gazebo_model_index = self.current_model_state.name.index("/") # Looking for the main robot, which is under the namespace "/"
gz_pose = self.current_model_state.pose[gazebo_model_index]
gazebo_pose['x'] = gz_pose.position.x
gazebo_pose['y'] = gz_pose.position.y
gazebo_pose['z'] = gz_pose.position.z
gazebo_pose['qx'] = gz_pose.orientation.x
gazebo_pose['qy'] = gz_pose.orientation.y
gazebo_pose['qz'] = gz_pose.orientation.z
gazebo_pose['qw'] = gz_pose.orientation.w
self.data_to_rosbridge['gazebo_pose'] = gazebo_pose
self.talker.publish(roslibpy.Message( {'data': json.dumps(self.data_to_rosbridge)} ))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.loginfo("[client_rosbridge] TF exception in gathering current position")
rate.sleep()
if __name__ == '__main__':
rospy.init_node('client_rosbridge')
send_data = SendData()
send_data.main()
|
#!/usr/bin/env python3
with open("hello.txt", "w") as f:
f.write("Hello world.\n")
|
import pygame
def collide(s1: pygame.Surface, x1: float, y1: float, s2: pygame.Surface, x2: float, y2: float) -> bool:
l1, t1 = int(x1), int(y1)
l2, t2 = int(x2), int(y2)
w, h = s1.get_size()
r1, b1 = l1 + w, t1 + h
w, h = s2.get_size()
r2, b2 = l2 + w, t2 + h
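# Cheap axis-aligned bounding-box overlap test first; only if the two rectangles
# intersect do we fall through to the per-pixel alpha comparison below.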
if (((l2 < l1 < r2) or (l2 < r1 < r2)) and ((t2 < t1 < b2) or (t2 < b1 < b2)) or \
((l1 < l2 < r1) or (l1 < r2 < r1)) and ((t1 < t2 < b1) or (t1 < b2 < b1))):
for x in range(max(l1, l2), min(r1, r2)):
for y in range(max(t1, t2), min(b1, b2)):
if (s1.get_at((x - l1, y - t1)).a > 1) and (s2.get_at((x - l2, y - t2)).a > 1):
return True
return False
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-03 16:41
from __future__ import unicode_literals
from django.db import migrations, models
def set_position(apps, schema_editor):
Question = apps.get_model('pretixbase', 'Question')
for q in Question.objects.all():
for i, option in enumerate(q.options.all()):
option.position = i
option.save()
class Migration(migrations.Migration):
dependencies = [
('pretixbase', '0083_auto_20180228_2102'),
]
operations = [
migrations.AlterModelOptions(
name='questionoption',
options={'ordering': ('position', 'id'), 'verbose_name': 'Question option', 'verbose_name_plural': 'Question options'},
),
migrations.AddField(
model_name='questionoption',
name='position',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='question',
name='position',
field=models.PositiveIntegerField(default=0, verbose_name='Position'),
),
migrations.RunPython(
set_position,
reverse_code=migrations.RunPython.noop,
),
]
|
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The junos l3_interfaces fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from copy import deepcopy
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils._text import to_bytes
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
utils,
)
from ansible_collections.junipernetworks.junos.plugins.module_utils.network.junos.argspec.l3_interfaces.l3_interfaces import (
L3_interfacesArgs,
)
from ansible.module_utils.six import iteritems
from ansible.module_utils.six import string_types
try:
from lxml import etree
HAS_LXML = True
except ImportError:
HAS_LXML = False
try:
import xmltodict
HAS_XMLTODICT = True
except ImportError:
HAS_XMLTODICT = False
class L3_interfacesFacts(object):
""" The junos l3_interfaces fact class
"""
def __init__(self, module, subspec="config", options="options"):
self._module = module
self.argument_spec = L3_interfacesArgs.argument_spec
spec = deepcopy(self.argument_spec)
if subspec:
if options:
facts_argument_spec = spec[subspec][options]
else:
facts_argument_spec = spec[subspec]
else:
facts_argument_spec = spec
self.generated_spec = utils.generate_dict(facts_argument_spec)
def get_config(self, connection, config_filter):
"""
:param connection:
:param config_filter:
:return:
"""
return connection.get_configuration(filter=config_filter)
def _get_xml_dict(self, xml_root):
if not HAS_XMLTODICT:
self._module.fail_json(msg=missing_required_lib("xmltodict"))
xml_dict = xmltodict.parse(
etree.tostring(xml_root), dict_constructor=dict
)
return xml_dict
def populate_facts(self, connection, ansible_facts, data=None):
""" Populate the facts for l3_interfaces
:param connection: the device connection
:param ansible_facts: Facts dictionary
:param data: previously collected conf
:rtype: dictionary
:returns: facts
"""
if not HAS_LXML:
self._module.fail_json(msg="lxml is not installed.")
if not data:
config_filter = """
<configuration>
<interfaces/>
</configuration>
"""
data = self.get_config(connection, config_filter)
if isinstance(data, string_types):
data = etree.fromstring(
to_bytes(data, errors="surrogate_then_replace")
)
resources = data.xpath("configuration/interfaces/interface")
config = []
if resources:
config = self.parse_l3_if_resources(resources)
facts = {}
facts["l3_interfaces"] = config
ansible_facts["ansible_network_resources"].update(facts)
return ansible_facts
def parse_l3_if_resources(self, l3_if_resources):
l3_ifaces = []
for iface in l3_if_resources:
int_have = self._get_xml_dict(iface)
int_dict = int_have["interface"]
if "unit" in int_dict.keys() and int_dict.get("unit") is not None:
unit_list = int_dict["unit"]
if isinstance(unit_list, list):
for item in unit_list:
fact_dict = self._render_l3_intf(item, int_dict)
if fact_dict:
l3_ifaces.append(fact_dict)
else:
fact_dict = self._render_l3_intf(unit_list, int_dict)
if fact_dict:
l3_ifaces.append(fact_dict)
return l3_ifaces
def _render_l3_intf(self, unit, int_dict):
"""
:param unit:
:param int_dict:
:return:
"""
interface = {}
ipv4 = []
ipv6 = []
if "family" in unit.keys():
if "inet" in unit["family"].keys():
interface["name"] = int_dict["name"]
interface["unit"] = unit["name"]
inet = unit["family"].get("inet")
if inet is not None and "address" in inet.keys():
if isinstance(inet["address"], dict):
for key, value in iteritems(inet["address"]):
addr = {}
addr["address"] = value
ipv4.append(addr)
else:
for ip in inet["address"]:
addr = {}
addr["address"] = ip["name"]
ipv4.append(addr)
if "inet" in unit["family"].keys():
interface["name"] = int_dict["name"]
interface["unit"] = unit["name"]
inet = unit["family"].get("inet")
if inet is not None and "dhcp" in inet.keys():
addr = {}
addr["address"] = "dhcp"
ipv4.append(addr)
if "inet6" in unit["family"].keys():
interface["name"] = int_dict["name"]
interface["unit"] = unit["name"]
inet6 = unit["family"].get("inet6")
if inet6 is not None and "address" in inet6.keys():
if isinstance(inet6["address"], dict):
for key, value in iteritems(inet6["address"]):
addr = {}
addr["address"] = value
ipv6.append(addr)
else:
for ip in inet6["address"]:
addr = {}
addr["address"] = ip["name"]
ipv6.append(addr)  # inet6 addresses belong in the IPv6 list
interface["ipv4"] = ipv4
interface["ipv6"] = ipv6
return utils.remove_empties(interface)
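# Example (sketch) of a parsed unit dict as consumed by _render_l3_intf:
#   unit = {"name": "0",
#           "family": {"inet": {"address": {"name": "10.1.1.1/24"}},
#                      "inet6": {"address": {"name": "fd00::1/64"}}}}
# which renders to {"name": <interface name>, "unit": "0",
#                   "ipv4": [{"address": "10.1.1.1/24"}],
#                   "ipv6": [{"address": "fd00::1/64"}]}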
|
# ///////////////////////////////////////////////////////////////
#
# BY: WANDERSON M.PIMENTA
# PROJECT MADE WITH: Qt Designer and PySide6
# V: 1.0.0
#
# This project can be used freely for all uses, as long as they maintain the
# respective credits only in the Python scripts, any information in the visual
# interface (GUI) can be modified without any implication.
#
# There are limitations on Qt licenses if you want to use your products
# commercially, I recommend reading them on the official website:
# https://doc.qt.io/qtforpython/licenses.html
#
# ///////////////////////////////////////////////////////////////
import sys
import os
import platform
import pandas as pd
import numpy as np
import time
# IMPORT / GUI AND MODULES AND WIDGETS
# ///////////////////////////////////////////////////////////////
from modules import *
from widgets import *
os.environ["QT_FONT_DPI"] = "96" # FIX Problem for High DPI and Scale above 100%
# SET AS GLOBAL WIDGETS
# ///////////////////////////////////////////////////////////////
widgets = None
class AlertWindow(QMessageBox):
def __init__(self, text):
super().__init__()
QMessageBox.about(self, "test",text)
class MainWindow(QMainWindow):
def __init__(self):
self.w = None
QMainWindow.__init__(self)
# SET AS GLOBAL WIDGETS
# ///////////////////////////////////////////////////////////////
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
global widgets
widgets = self.ui
# USE CUSTOM TITLE BAR | USE AS "False" FOR MAC OR LINUX
# ///////////////////////////////////////////////////////////////
Settings.ENABLE_CUSTOM_TITLE_BAR = True
# APP NAME
# ///////////////////////////////////////////////////////////////
title = "SEMA-VOC Analysis GUI"
description = "SEMA-VOC Analysis GUI tool"
# APPLY TEXTS
self.setWindowTitle(title)
widgets.titleRightInfo.setText(description)
# TOGGLE MENU
# ///////////////////////////////////////////////////////////////
widgets.toggleButton.clicked.connect(lambda: UIFunctions.toggleMenu(self, True))
# SET UI DEFINITIONS
# ///////////////////////////////////////////////////////////////
UIFunctions.uiDefinitions(self)
# QTableWidget PARAMETERS
# ///////////////////////////////////////////////////////////////
widgets.tableWidget.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
# BUTTONS CLICK
# ///////////////////////////////////////////////////////////////
# LEFT MENUS
widgets.btn_home.clicked.connect(self.buttonClick)
widgets.btn_widgets.clicked.connect(self.buttonClick)
# widgets.btn_new.clicked.connect(self.buttonClick)
widgets.btn_save.clicked.connect(self.buttonClick)
widgets.pushButton.clicked.connect(self.buttonClick)
# EXTRA LEFT BOX
def openCloseLeftBox():
UIFunctions.toggleLeftBox(self, True)
# widgets.toggleLeftBox.clicked.connect(openCloseLeftBox)
# widgets.extraCloseColumnBtn.clicked.connect(openCloseLeftBox)
# EXTRA RIGHT BOX
def openCloseRightBox():
UIFunctions.toggleRightBox(self, True)
# widgets.settingsTopBtn.clicked.connect(openCloseRightBox)
# SHOW APP
# ///////////////////////////////////////////////////////////////
self.show()
# SET CUSTOM THEME
# ///////////////////////////////////////////////////////////////
useCustomTheme = False
themeFile = "themes\py_dracula_light.qss"
# SET THEME AND HACKS
if useCustomTheme:
# LOAD AND APPLY STYLE
UIFunctions.theme(self, themeFile, True)
# SET HACKS
AppFunctions.setThemeHack(self)
# SET HOME PAGE AND SELECT MENU
# ///////////////////////////////////////////////////////////////
widgets.stackedWidget.setCurrentWidget(widgets.home)
widgets.btn_home.setStyleSheet(UIFunctions.selectMenu(widgets.btn_home.styleSheet()))
widgets.home.setStyleSheet("background-image: url(images/images/sema-back.png);\n"
"background-position: center;\n"
"background-repeat: no-repeat;\n"
"background-color: #1449a2;")
# BUTTONS CLICK
# Post here your functions for clicked buttons
# ///////////////////////////////////////////////////////////////
def buttonClick(self):
# GET BUTTON CLICKED
btn = self.sender()
btnName = btn.objectName()
# SHOW HOME PAGE
if btnName == "btn_home":
widgets.stackedWidget.setCurrentWidget(widgets.home)
UIFunctions.resetStyle(self, btnName)
btn.setStyleSheet(UIFunctions.selectMenu(btn.styleSheet()))
# SHOW WIDGETS PAGE
if btnName == "btn_widgets":
widgets.stackedWidget.setCurrentWidget(widgets.widgets)
UIFunctions.resetStyle(self, btnName)
btn.setStyleSheet(UIFunctions.selectMenu(btn.styleSheet()))
# # SHOW NEW PAGE
# if btnName == "btn_new":
# widgets.stackedWidget.setCurrentWidget(widgets.new_page) # SET PAGE
# UIFunctions.resetStyle(self, btnName) # RESET ANOTHERS BUTTONS SELECTED
# btn.setStyleSheet(UIFunctions.selectMenu(btn.styleSheet())) # SELECT MENU
if btnName == "btn_save":
print("Save BTN clicked!")
inference(self.filename)
QMessageBox.about(self, "합성", """
합성 종료
""")
if btnName == "pushButton":
print("Open BTN clicked!")
self.filename = QFileDialog.getOpenFileName(self)[0]
UIFunctions.resetStyle(self, btnName)
btn.setStyleSheet(UIFunctions.selectMenu(btn.styleSheet()))
print(self.filename)
widgets.lineEdit.setText(self.filename)
df = pd.read_excel(self.filename)
columns2show = ['아이디', '조사시작시간', 'VOC1', 'VOC2']  # Korean column names: '아이디' = ID, '조사시작시간' = survey start time
df2fill = np.empty(df[columns2show].shape)
df2fill.flags.writeable = True
df2show = df[['아이디', '조사시작시간', 'VOC1', 'VOC2']].values.tolist()
# df[['아이디','조사시작시간','VOC1','VOC2']]
for i in range(df2fill.shape[0]):
for j in range(df2fill.shape[1]):
# df2fill[i][j] = df2show[i][j]
widgets.tableWidget.setItem(i+1,j, QTableWidgetItem(str(df2show[i][j])))
print(str(df2show[i][j]))
# PRINT BTN NAME
print(f'Button "{btnName}" pressed!')
# RESIZE EVENTS
# ///////////////////////////////////////////////////////////////
def resizeEvent(self, event):
# Update Size Grips
UIFunctions.resize_grips(self)
# MOUSE CLICK EVENTS
# ///////////////////////////////////////////////////////////////
def mousePressEvent(self, event):
# SET DRAG POS WINDOW
self.dragPos = event.globalPos()
# PRINT MOUSE EVENTS
if event.buttons() == Qt.LeftButton:
print('Mouse click: LEFT CLICK')
if event.buttons() == Qt.RightButton:
print('Mouse click: RIGHT CLICK')
def inference(file_name):
import cli
cli.SEMA(file_path=file_name).process_analysis_gui()
if __name__ == "__main__":
app = QApplication(sys.argv)
app.setWindowIcon(QIcon("icon.ico"))
window = MainWindow()
sys.exit(app.exec_())
|
"""
Input the account's username and password here.
"""
USERNAME = "USERNAME"
PASSWORD = "PASSWORD"
|
import asyncio
import commands
class ConsoleInterface:
def __init__(self, guild=None):
self.guild = guild # Unused
async def send_text_to_channel(self, msg, channel_name):
print(f"#{channel_name}: {msg}")
return True
async def send_embed_to_channel(self, embed_msg, channel_name):
print(f"#{channel_name}: {embed_msg}")
return True
async def create_channel(self, channel_name):
print(f"{channel_name} created!")
return True
async def delete_channel(self, channel_name):
print(f"{channel_name} deleted!")
return True
async def add_user_to_channel(self, player_id, channel_name, is_read=True, is_send=True):
print(f"Added {player_id} to channel #{channel_name} {is_read} {is_send}")
return True
class DiscordInterface:
def __init__(self, guild, client):
self.guild = guild
self.client = client
async def send_text_to_channel(self, msg, channel_name):
return await commands.admin.send_text_to_channel(self.guild, msg, channel_name)
async def send_embed_to_channel(self, embed_msg, channel_name):
return await commands.admin.send_embed_to_channel(self.guild, embed_msg, channel_name)
async def create_channel(self, channel_name):
return await commands.admin.create_channel(self.guild, self.client.user, channel_name)
async def delete_channel(self, channel_name):
return await commands.admin.delete_channel(self.guild, self.client.user, channel_name)
async def add_user_to_channel(self, player_id, channel_name, is_read=True, is_send=True):
return await commands.admin.add_user_to_channel(self.guild, self.client.get_user(player_id), channel_name, is_read, is_send)
|
from typing import List
import torch
from torch.sparse import FloatTensor
from . import BaseSparseNdArray
if False:
import numpy as np
__all__ = ['SparseNdArray']
class SparseNdArray(BaseSparseNdArray):
"""Pytorch powered sparse ndarray, i.e. FloatTensor
.. seealso::
https://pytorch.org/docs/stable/sparse.html
"""
def __init__(self, transpose_indices: bool = True, *args, **kwargs):
"""
:param transpose_indices: in torch, the input to LongTensor is NOT a list of index tuples.
If you want to write your indices this way, you should transpose before passing them to the sparse constructor
.. note::
To comply with Tensorflow, `transpose_indices` is set to True by default
"""
super().__init__(*args, **kwargs)
self.transpose_indices = transpose_indices
def sparse_constructor(self, indices: 'np.ndarray', values: 'np.ndarray', shape: List[int]) -> 'FloatTensor':
return FloatTensor(torch.LongTensor(indices).T if self.transpose_indices else torch.LongTensor(indices),
torch.FloatTensor(values),
torch.Size(shape))
def sparse_parser(self, value: 'FloatTensor'):
indices = value._indices().numpy()
if self.transpose_indices:
indices = indices.T
return {'indices': indices,
'values': value._values().numpy(),
'shape': list(value.shape)}
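# Example (sketch, assuming a usable BaseSparseNdArray backend): with the default
# transpose_indices=True, indices are written one (row, col) pair per value,
#   indices = np.array([[0, 1], [2, 3]])            # non-zeros at (0, 1) and (2, 3)
#   values = np.array([0.5, 1.5], dtype=np.float32)
#   SparseNdArray().sparse_constructor(indices, values, [4, 4])
# and are transposed internally to the 2 x nnz layout that torch.sparse expects.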
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
from math import exp,log,sqrt
from random import random
import numpy as np
import pylab as P
X=lambda: log(random()*(exp(1)-1)+1 )
av=lambda o: float(sum(o))/len(o)
m=10000
print "\n\nEjercicio 1 pag 81"
print "Se puede obtener la función de acumulación para esta distribución."
print "Luego mediante el metodo de la transformada inversa obtener la formula"
print "que se debe emplear para generar una observación:"
print " log(random()*(exp(1)-1)+1 )"
print "\nEstan a punto de generarse",m,"observaciones de la distribución."
observaciones=[X() for i in range(m)]
promedio=av(observaciones)
print "El promedio de las observaciones es:",promedio
P.hist(observaciones,sqrt(m))
P.show()
|
# This file is part of sequencing.
#
# Copyright (c) 2021, The Sequencing Authors.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import numpy as np
from tqdm import tqdm
import qutip
from .basic import CompiledPulseSequence
from .common import (
Operation,
SyncOperation,
DelayOperation,
DelayChannelsOperation,
ValidatedList,
delay_channels,
get_sequence,
)
class PulseSequence(ValidatedList):
"""A list-like container of Operation, SyncOperation,
DelayOperation, and DelayChannelsOperation objects,
which can be compiled into a CompiledPulseSequence at a later time.
Args:
system (optional, System): The System associated with
the PulseSequence. Default: None.
t0 (optional, int): The start time of the PulseSequence.
Default: 0.
items (optional, iterable): Iterable containing initial
values with which to populate the PulseSequence. Default: None.
"""
VALID_TYPES = (
Operation,
SyncOperation,
DelayOperation,
DelayChannelsOperation,
)
def __init__(self, system=None, t0=0, items=None):
self.system = None
self.t0 = None
super().__init__(items)
self.set_system(system=system, t0=t0)
def set_system(self, system=None, t0=0):
"""Sets the System and start time for the PulseSequence.
Args:
system (optional, System): The System associated with
the PulseSequence. Default: None.
t0 (optional, int): The start time of the PulseSequence.
Default: 0.
"""
self.system = system
self.t0 = t0
self.clear()
def compile(self):
"""Compiles the PulseSequence into a CompiledPulseSequence.
Returns:
CompiledPulseSequence: A new CompiledPulseSequence.
"""
seq = CompiledPulseSequence(system=self.system, t0=self.t0)
for item in self:
item = self._validate(item)
if isinstance(item, Operation):
seq.add_operation(item)
elif isinstance(item, SyncOperation):
seq.sync()
elif isinstance(item, DelayOperation):
seq.delay(
item.length,
sync_before=item.sync_before,
sync_after=item.sync_after,
)
else:
# item is a DelayChannelsOperation
delay_channels(item.channels, item.length, seq=seq)
return seq
@property
def times(self):
"""Array of times."""
return self.compile().times
@property
def channels(self):
"""Dict of Hamiltonian channels."""
return self.compile().channels
def run(
self,
init_state,
c_ops=None,
e_ops=None,
options=None,
progress_bar=None,
):
"""Simulate the sequence using qutip.mesolve.
Args:
init_state (qutip.Qobj): Initial state of the system.
c_ops (optional, list): List of additional collapse operators.
Default: None.
e_ops (optional, list): List of expectation operators.
Default: None.
options (optional, qutip.Options): qutip solver options.
Note: defaults to max_step = 1.
progress_bar (optional, None): Whether to use qutip's progress bar.
Default: None (no progress bar).
Returns:
``qutip.solver.Result``: qutip.solver.Result instance.
"""
return self.compile().run(
init_state,
c_ops=c_ops,
e_ops=e_ops,
options=options,
progress_bar=progress_bar,
)
def propagator(
self,
c_ops=None,
options=None,
unitary_mode="batch",
parallel=False,
progress_bar=None,
):
"""Calculate the propagator using ``qutip.propagator()``.
Args:
c_ops (list[qutip.Qobj]): List of collapse operators.
options (optional, qutip.Options): qutip solver options.
Note: defaults to max_step = 1.
progress_bar (optional, None): Whether to use qutip's progress bar.
Default: None (no progress bar).
unitary_mode (optional, "batch" or "single"): Solve all basis vectors
simulaneously ("batch") or individually ("single").
Default: "batch".
parallel (optional, bool): Run the propagator in parallel mode.
This will override the unitary_mode settings if set to True.
Default: False.
Returns:
np.ndarray[qutip.Qobj]: Array of Qobjs representing the propagator U(t).
"""
return self.compile().propagator(
c_ops=c_ops,
options=options,
unitary_mode=unitary_mode,
parallel=parallel,
progress_bar=progress_bar,
)
def plot_coefficients(self, subplots=True, plot_imag=False, step=False):
"""Plot the Hamiltionian coefficients for all channels.
Args:
subplots (optional, bool): If True, plot each channel
on a different axis. Default: True.
plot_imag (optional, bool): If True, plot both real and imag.
Default: False.
step (optional, bool): If True, use axis.step()
instead of axis.plot(). Default: False.
Returns:
tuple: (fig, ax): matplotlib Figure and axes.
"""
return self.compile().plot_coefficients(
subplots=subplots, plot_imag=plot_imag, step=step
)
class Sequence(ValidatedList):
"""A list-like container of PulseSequence, Operation, and unitary objects,
which can be used to evolve an initial state.
Args:
system (System): System upon which the
sequence will act.
operations (optional, list[qutip.Qobj, PulseSequence, Operation]):
Initial list of Operations or unitaries. Default: None.
"""
VALID_TYPES = (
qutip.Qobj,
Operation,
PulseSequence,
)
def __init__(self, system, operations=None):
self.system = system
self.pulse_sequence = get_sequence(system)
self._t = 0
super().__init__(operations)
def _validate(self, item):
"""Enforces that item is an instance of
one of the types in VALID_TYPES.
If item is the global PulseSequence, this function will
make a deepcopy and then reset the PulseSequence.
Returns:
`qutip.Qobj``, Operation, or PulseSequence:
Returns the item if it is a valid type.
"""
item = super()._validate(item)
if item is self.pulse_sequence:
# If we are appending pulse_sequence,
# make a deepcopy and then reset it.
item = copy.deepcopy(item)
self.reset_pulse_sequence()
elif len(self.pulse_sequence):
# Otherwise, if pulse_sequence is not empty,
# capture its contents and reset it before returning
# the current item.
self.capture()
return item
def reset_pulse_sequence(self):
"""Reset the current pulse sequence."""
self.pulse_sequence = get_sequence(self.system)
def capture(self):
"""Appends the current pulse sequence
if it is not empty.
"""
if len(self.pulse_sequence):
self.append(self.pulse_sequence)
def run(
self,
init_state,
e_ops=None,
options=None,
full_evolution=True,
progress_bar=False,
):
"""Evolves init_state using each PulseSequence, Operation,
or unitary applied in series.
Args:
init_state (qutip.Qobj): Initial state to evolve.
options (optional, qutip.Options): qutip solver options.
Default: None.
full_evolution (optional, bool): Whether to store the states
for every time point in the included Sequences. If False,
only the final state will be stored. Default: True.
progress_bar (optional, bool): If True, displays a progress bar
when iterating through the Sequence. Default: False.
Returns:
SequenceResult: SequenceResult containing the
time evolution of the system.
"""
self.capture()
progbar = tqdm if progress_bar else lambda x, **kw: x
e_ops = e_ops or []
self._t = 0
times = [self._t]
states = [init_state]
for item in progbar(self):
item = self._validate(item)
if isinstance(item, PulseSequence):
if item.system != self.system:
raise ValueError("All operations must have the same system.")
# The CompiledPulseSequence created by PulseSequence.run()
# should start at the current Sequence time.
item.t0 = self._t
seq = item.compile()
seq.sync()
result = seq.run(states[-1], options=options)
if full_evolution:
new_states = result.states
new_times = result.times
else:
new_states = result.states[-1:]
new_times = result.times[-1:]
self._t = seq._t
elif isinstance(item, Operation):
seq = CompiledPulseSequence(self.system, t0=self._t)
seq.add_operation(item)
seq.sync()
result = seq.run(states[-1], options=options)
if full_evolution:
new_states = result.states
new_times = result.times
else:
new_states = result.states[-1:]
new_times = result.times[-1:]
self._t = seq._t
else:
# item is a Qobj
state = states[-1]
if state.isket:
state = item * state
else:
# state is a density matrix
state = item * state * item.dag()
new_states = [state]
new_times = [times[-1]]
# Unitaries take zero time, so self._t
# should be the latest sequence time.
states.extend(new_states)
times.extend(new_times)
times = np.array(times)
ix = np.argsort(times)
times = times[ix]
states = [states[i] for i in ix]
expect = []
for op in e_ops:
expect.append(qutip.expect(op, states))
num_collapse = len(self.system.c_ops(clean=True))
result = SequenceResult(
times=times, states=states, expect=expect, num_collapse=num_collapse
)
return result
def plot_coefficients(self, subplots=True, sharex=True, sharey=True):
"""Plot the Hamiltionian coefficients for all channels.
Unitaries are represented by vertical lines.
Args:
subplots (optional, bool): If True, plot each channel
on a different axis. Default: True.
sharex (optional, bool): Share x axes if subplots is True.
Default: True.
sharey (optional, bool): Share y axes if subplots is True.
Default: True.
Returns:
tuple: (fig, ax): matplotlib Figure and axes.
"""
import matplotlib.pyplot as plt
self._t = 0
channels = {}
for item in self:
item = self._validate(item)
if isinstance(item, PulseSequence):
if item.system != self.system:
raise ValueError("All operations must have the same system.")
# The CompiledPulseSequence created by PulseSequence.run()
# should start at the current Sequence time.
item.t0 = self._t
seq = item.compile()
seq.sync()
new_times = seq.times
self._t = new_times.max()
new_channels = seq.channels
elif isinstance(item, Operation):
seq = CompiledPulseSequence(self.system, t0=self._t)
seq.add_operation(item)
seq.sync()
new_times = seq.times
self._t = new_times.max()
new_channels = seq.channels
else:
new_channels = {"unitary": {"coeffs": np.array([1.0])}}
new_times = [self._t]
# Unitaries take zero time, so self._t
# remains unchanged.
# Assemble the results for this time step
for name, info in new_channels.items():
if name in [] or "coeffs" not in info:
continue
if name in channels:
channel_times = np.concatenate(
(channels[name][0], np.asarray(new_times))
)
channel_coeffs = np.concatenate((channels[name][1], info["coeffs"]))
channels[name] = (channel_times, channel_coeffs)
else:
channels[name] = (new_times, info["coeffs"])
channel_names = [n for n in channels if n not in ["delay", "unitary"]]
if not channel_names:
raise ValueError("There are no channels with coefficients to plot.")
if subplots:
fig, axes = plt.subplots(len(channel_names), sharex=sharex, sharey=sharey)
else:
fig, ax = plt.subplots(1, 1)
axes = [ax] * len(channel_names)
for name, ax in zip(channel_names, axes):
ctimes, coeffs = channels[name]
ax.plot(ctimes, coeffs, label=name)
ax.legend(loc=0)
ax.grid(True)
for ctimes, _ in zip(*channels["unitary"]):
if subplots:
for a in axes:
a.axvline(ctimes, ls="--", lw=1.5, color="k", alpha=0.25)
else:
ax.axvline(ctimes, ls="--", lw=1.5, color="k", alpha=0.25)
fig.suptitle("Hamiltonian coefficients")
fig.tight_layout()
fig.subplots_adjust(top=0.9)
axes[-1].set_xlabel("Time")
if subplots:
return fig, axes
return fig, ax
class SequenceResult(object):
"""An object that mimics qutip.solver.Result
for Sequences.
Attributes:
times (np.ndarray): Array of times.
states (list[qutip.Qobj]): List of states.
expect (list[np.ndarray]): List of arrays of expectation
values.
num_expect (int): Number of expectation operators,
``num_expect == len(expect)``.
num_collapse (int): Number of collapse operators involved
in the Sequence.
solver (str): Name of the 'solver' used to generate the
SequenceResult. This is always 'sequencing.Sequence'.
"""
def __init__(self, times=None, states=None, expect=None, num_collapse=0):
if times is None:
times = np.array([])
self.times = times
if expect is None:
expect = []
self.expect = expect
if states is None:
states = []
self.states = states
self.num_collapse = num_collapse
self.solver = "sequencing.Sequence"
@property
def num_expect(self):
return len(self.expect)
def __str__(self):
s = ["SequenceResult"]
s.append("-" * len(s[-1]))
if self.times is not None and len(self.times) > 0:
s.append(f"Number of times: {len(self.times)}")
if self.states is not None and len(self.states) > 0:
s.append("states = True")
if self.expect is not None and len(self.expect) > 0:
s.append(f"expect = True, num_expect = {self.num_expect}")
s.append(f"num_collapse = {self.num_collapse}")
return "\n".join(s)
def __repr__(self):
return self.__str__()
_global_pulse_sequence = PulseSequence()
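# Typical flow (sketch): populate a PulseSequence (or the global one returned by
# get_sequence) with Operations, then call seq.run(init_state, ...) to simulate it
# with qutip.mesolve, or seq.propagator(...) for U(t); both go through compile()
# to build a CompiledPulseSequence first.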
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Implementation of SS-QCCN algorithm
-------------
Based on paper:
Steganalysis of QIM Steganography in Low-Bit-Rate Speech Signals
-------------
Author: Zinan Lin
Email: zinanl@andrew.cmu.edu
'''
import os, random, pickle, csv, sys
import numpy as np
from sklearn.decomposition import PCA
from sklearn import svm
from tqdm import tqdm
FOLD = 3 # = NUM_SAMPLE / number of testing samples
NUM_PCA_FEATURE = 300 # number of PCA features
NUM_SAMPLE = 3000 # total number of samples used for training
'''
SS-QCCN feature extraction
-------------
input
file
The path to an ASCII file.
Each line contains three integers: x1 x2 x3, which are the three codewords of the frame.
There are (number of frame) lines in total.
output
A numpy vector, which contains the features determined by SS-QCCN algorithm.
'''
def G729_SS_QCCCN(file):
data = []
with open(file, "r") as f:
for line in f:
line = [int(i) for i in line.split()]
data.append(line)
a = np.zeros(shape = (128, 128))
c1 = np.zeros(shape = 128)
p = np.zeros(shape = (32, 32))
c2 = np.zeros(shape = 32)
for i in range(len(data) - 1):
data1 = data[i]
data2 = data[i + 1]
c1[data1[0]] += 1
c2[data1[1]] += 1
a[data1[0], data2[0]] += 1
p[data1[1], data1[2]] += 1
for i in range(a.shape[0]):
for j in range(a.shape[1]):
if c1[i] != 0:
a[i, j] /= c1[i]
for i in range(p.shape[0]):
for j in range(p.shape[1]):
if c2[i] != 0:
p[i, j] /= c2[i]
return np.concatenate([a.reshape(128 * 128), p.reshape(32 * 32)])
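# Example (sketch): a hypothetical input file "frames.txt" with three frames,
# one line of three codewords per frame:
#   17 5 30
#   92 12 3
#   64 7 21
# G729_SS_QCCCN("frames.txt") then returns a vector of 128*128 + 32*32 = 17408
# features: the first-codeword transition probabilities plus the conditional
# distribution of the third codeword given the second.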
'''
SS-QCCN training and testing
-------------
input
positive_data_folder
The folder that contains positive data files.
negative_data_folder
The folder that contains negative data files.
result_folder
The folder that stores the results.
'''
def main(positive_data_folder, negative_data_folder, result_folder):
build_model = G729_SS_QCCCN
positive_data_files = [os.path.join(positive_data_folder, path) for path in os.listdir(positive_data_folder)]
negative_data_files = [os.path.join(negative_data_folder, path) for path in os.listdir(negative_data_folder)]
random.shuffle(positive_data_files)
random.shuffle(negative_data_files)
positive_data_files = positive_data_files[0 : NUM_SAMPLE] # The positive samples for training and testing
negative_data_files = negative_data_files[0 : NUM_SAMPLE] # The negative samples for training and testing
num_files = len(positive_data_files)
with open(os.path.join(result_folder, "file_list.pkl"), "wb") as f:
pickle.dump(positive_data_files, f)
pickle.dump(negative_data_files, f)
test_positive_data_files = positive_data_files[0 : num_files // FOLD] # The positive samples for testing
test_negative_data_files = negative_data_files[0 : num_files // FOLD] # The negative samples for testing
train_positive_data_files = positive_data_files[num_files // FOLD :] # The positive samples for training
train_negative_data_files = negative_data_files[num_files // FOLD :] # The negative samples for training
num_train_files = len(train_negative_data_files)
num_test_files = len(test_negative_data_files)
# calculate PCA matrix
print("Calculating PCA matrix")
feature = []
for i in tqdm(range(num_train_files)):
new_feature = build_model(train_negative_data_files[i])
feature.append(new_feature)
for i in tqdm(range(num_train_files)):
new_feature = build_model(train_positive_data_files[i])
feature.append(new_feature)
feature = np.row_stack(feature)
pca = PCA(n_components = NUM_PCA_FEATURE)
pca.fit(feature)
with open(os.path.join(result_folder, "pca.pkl"), "wb") as f:
pickle.dump(pca, f)
# load train data
print("Loading train data")
X = []
Y = []
for i in tqdm(range(num_train_files)):
new_feature = build_model(train_negative_data_files[i])
X.append(pca.transform(new_feature.reshape(1, -1)))
Y.append(0)
for i in tqdm(range(num_train_files)):
new_feature = build_model(train_positive_data_files[i])
X.append(pca.transform(new_feature.reshape(1, -1)))
Y.append(1)
X = np.row_stack(X)
# train SVM
print("Training SVM")
clf = svm.SVC()
clf.fit(X, Y)
with open(os.path.join(result_folder, "svm.pkl"), "wb") as f:
pickle.dump(clf, f)
# test
print("Testing")
X = []
Y = []
for i in tqdm(range(num_test_files)):
new_feature = build_model(test_negative_data_files[i])
X.append(pca.transform(new_feature.reshape(1, -1)))
Y.append(0)
for i in tqdm(range(num_test_files)):
new_feature = build_model(test_positive_data_files[i])
X.append(pca.transform(new_feature.reshape(1, -1)))
Y.append(1)
X = np.row_stack(X)
Y_predict = clf.predict(X)
with open(os.path.join(result_folder, "Y_predict.pkl"), "wb") as f:
pickle.dump(Y_predict, f)
# output result
correct_negative = 0
correct_positive = 0
print("Outputing result")
with open(os.path.join(result_folder, "result.csv"), "wb") as f:
writer = csv.writer(f)
writer.writerow(["file", "real class", "predict class"])
for i in range(num_test_files):
writer.writerow([test_negative_data_files[i], 0, Y_predict[i]])
if Y_predict[i] == 0:
correct_negative += 1
for i in range(num_test_files):
writer.writerow([test_positive_data_files[i], 1, Y_predict[i + num_test_files]])
if Y_predict[i + num_test_files] == 1:
correct_positive += 1
writer.writerow(["False Positive", 1 - float(correct_negative) / num_test_files])
writer.writerow(["False Negative", 1 - float(correct_positive) / num_test_files])
writer.writerow(["Precision", float(correct_negative + correct_positive) / (num_test_files * 2)])
if __name__ == "__main__":
main('/data1/linzn/data/ch_g729a_100_10000ms_FEAT', '/data1/linzn/data/ch_g729a_0_10000ms_FEAT', '.')
|
#!/usr/bin/env python3
print("Print alphabets ")
lastNumber = 6
asciiNumber = 65
for i in range(0, lastNumber):
for j in range(0, i+1):
character = chr(asciiNumber)
print(character, end=' ')
asciiNumber+=1
print(" ")
|
import torch
import argparse
from utils import *
from models import *
from mode_validation import *
from train_and_eval import *
from torch.optim import lr_scheduler
from torchvision import datasets, transforms
import numpy as np
import time
import pandas as pd
RESULTS_PATH = "Results/"
"""
Build the command-line argument parser used when the file is called.
Each parameter is created with a type and a default (plus a help comment).
Args: none
Returns: the parser
"""
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--arch', default='resnet18',
choices=['resnet18', 'resnet50', 'vgg19_bn','inception_v3',
'resnetusm'],
help='The network architecture')
parser.add_argument('--val_mode', default='k-fold',
choices=['once', 'k-fold', 'randomsampler'],
help='Type of validation you want to use: f.e: k-fold')
parser.add_argument('--batch_size', type=int, default=4,
help='Input batch size for using the model')
parser.add_argument('--workers', type=int, default=4,
help='Number of data loading workers')
parser.add_argument('--k_fold_num', type=int, default=5,
help='Number of folds you want to use for k-fold validation')
parser.add_argument('--val_num', type=int, default=20,
help='Number of times you want to run the model to get a mean')
parser.add_argument('--random_seed', type=int, default=16031997,
help='Random seed to shuffle the dataset')
parser.add_argument('--data_dir', default='/media/Data/saul/PSP_dataset12/',
help='Directory where you take images; they have to be separated by classes')
parser.add_argument('--cuda', type=str2bool, default= torch.cuda.is_available(),
help='Use gpu by cuda')
parser.add_argument('--shuffle_dataset', type=str2bool, default=True,
help='Whether to shuffle the dataset before splitting')
parser.add_argument('--pretrained', type=str2bool, default=True,
help='Whether to start from a pretrained model')
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weights', default="",
help='The .pth doc to load as weights')
parser.add_argument('--train', type=str2bool, default=True,
help='Whether to train the model')
parser.add_argument('--num_epochs', type=int, default=7, help='number of epochs to train for')
parser.add_argument('--save', type=str2bool, default=False,
help='If you want to save weights and csvs from the trained model or evaluation')
parser.add_argument('--image', default="",
help='The source file path of image you want to process')
parser.add_argument('--folder', default="",
help='The folder where you want to save results')
return parser
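# Example invocation (sketch; the script name and paths are placeholders):
#   python main.py --arch resnet18 --val_mode k-fold --k_fold_num 5 \
#       --batch_size 4 --num_epochs 7 --data_dir /path/to/dataset --save True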
"""
Stats for all experiments reported
"""
def writeReport(sensitivities, specificities, accuracies, folds, experiments, args):
# dictionary of lists
mean_acc = np.mean(accuracies)
std_acc = np.std(accuracies)
mean_specificity = np.mean(specificities)
std_specificity = np.std(specificities)
mean_sensitivity = np.mean(sensitivities)
std_sensitivity = np.std(sensitivities)
dictionary = {"Experiment":experiments, "Fold":folds, 'Sensitivity': sensitivities, 'Specificity': specificities, 'Accuracy': accuracies}
timestr = time.strftime("%Y%m%d-%H%M%S")
fileNameReportCSV = RESULTS_PATH + str(args.arch) + "_" + timestr + ".csv"
fileNameReportTXT = RESULTS_PATH + str(args.arch) + "_" + timestr + ".txt"
#write the report
fileReportTXT = open(fileNameReportTXT, "w")
fileReportTXT.write(str(args))
fileReportTXT.close()
dataFrame = pd.DataFrame(dictionary)
dataFrame.to_csv(fileNameReportCSV)
def main():
newTotalAccuracy = []
meanAccuracyExperimentsList = []
meanSensitivityExperimentsList = []
meanSpecificityExperimentsList = []
parser = make_parser() #creates the parser
args = parser.parse_args()
#writeReport([20, 3, 4], [200, 30, 40], [2000, 300, 400], args)
#takes the arguments from console (with the help of the parser)
if(args.weights != "" and args.image != ""): #we have weights and an image -> that means evaluate an image
print("Soon")
else:
size = size_by_archs(args.arch) #get the correspondant size for the architecture
batch_size = args.batch_size #setting variables
num_workers = args.workers
random_seed = args.random_seed
data_transforms = transforms.Compose([ #transformations applied to all images
transforms.Resize(size),
transforms.ToTensor(),
transforms.Normalize([0.0466, 0.1761, 0.3975], [0.8603, 0.8790, 0.8751])
])
data_dir = args.data_dir #where the dataset is located (needs to be separated by class)
dataset = datasets.ImageFolder(data_dir, data_transforms) #A generic data loader where the images are arranged by classes
numberTotalExperiments = args.val_num
print("Total number of experiments: ", numberTotalExperiments)
total = 0
final_acc =0
std = 0 #variables to report
sensitivity = 0
specificity = 0
folds = []
experiments = []
#accumulate accuracies
total = []
for experimentNum in range(0, numberTotalExperiments):
print("Experiment number ", experimentNum, " ----------------------------")
dataset_size = len(dataset)
indices = list(range(dataset_size)) #make an index to later associate them to each item
final_indices = get_indices(indices, args)
fold = 0
specificity = []
sensitivity = [] #depending in the validation mode, there will be different distribuitions and how many sets will be used
for train_indices, val_indices in final_indices: # final_indices is a generator that yields train and validation index sets (with the default k-fold there will be 4 sets of each, and with default random subsampling 10 sets)
experiments += [experimentNum]
folds += [fold]
print("Fold number: ", fold, ".........................")
fold += 1
#print(train_indices)
if(args.val_mode=="k-fold"):
np.random.seed(args.random_seed) #the indices are shuffled again so the training is random (with the seed) because it's in order
np.random.shuffle(train_indices)
np.random.shuffle(val_indices)
train_sampler = SubsetSampler(train_indices) #creates samplers for the dataloaders
valid_sampler = SubsetSampler(val_indices)
train_loader = torch.utils.data.DataLoader(dataset, #takes the sampler and the dataset to make it for pytorch
batch_size=batch_size, sampler=train_sampler,
num_workers=num_workers)
validation_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size, sampler=valid_sampler,
num_workers=num_workers)
dataloaders = {'train':train_loader, "val":validation_loader} #we used the dataloaders in dictionaries for better management
dataset_sizes = {'train':len(train_sampler), "val":len(valid_sampler)}#we used the datasets sizes in dictionaries for better management
print(dataset_sizes)
class_names = ['no', 'yes'] #important to define the classes for prediction
model_ft = get_cnn(len(class_names), args) #retrieves the cnn - architecture to be used
criterion = nn.CrossEntropyLoss() #creates the criterion (used in training and testing)
optimizer_ft = get_optimizer(model_ft, args) #changes the weights based on error (using Stochastic Gradient Descent)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1) # decays the learning rate by gamma every 7 epochs so optimization can settle
print(args.train)
if(args.train==True):
print("About to train for one epoch ")
#bug correction 1
#model, best_acc, epochs_details_list
model_ft, best_acc, epochs_details_list, results = train_model(dataloaders, dataset_sizes, model_ft, criterion, optimizer_ft, exp_lr_scheduler, args) #trains the model
print("Yielded results ")
#save epochs results to harddrive
#save_epochs_results(epochs_details_list, dataset_size, args)
print("best_acc: " + str(best_acc))
print("el acc directo")
acc, sen, spe, confusion_matrix = print_stats(results, class_names) #prints acc, sensitivity, specificity, confusion_matrix
results_detailed_list, acc = eval_model_dataloader(model_ft, dataloaders['val'], dataset_sizes, criterion, class_names, args) #evaluates the model to get acc and lists
total.append(best_acc) #adding to create mean value from: accuracy
sensitivity.append(sen) #sensitivity
specificity.append(spe) #specificity
if(args.save == True): #we save in files the weights(.pth) and epochs results(.csv)
save_model(model_ft, dataset_size, args)
save_epochs_results(epochs_details_list, dataset_size, args)
else:
if(args.weights != "" and args.image == ""): #evaluate the given wieghts with the validation set
model_ft = load_model(model_ft, args.weights) #load the model with weights
results_detailed_list, acc = eval_model_dataloader(model_ft, dataloaders['val'], dataset_sizes, criterion, class_names, args)#evaluate the model
total.append(best_acc) #adding to create mean value from: accuracy
acc, sen, spe, confusion_matrix = print_stats(results_detailed_list, class_names)#prints acc, sensitivity, specificity, confusion_matrix
final_acc = np.mean(total)
std = np.std(total)
if(args.save == True):
save_best_epoch_results(results_detailed_list, dataset_size, args)#save a csv with the best epoch results
else:
print("There are no source file path for the weights")
exit(0)
#estimates the mean depending in the validation model
if(args.val_mode == "once"):
print("once")
elif(args.val_mode == "k-fold"):
print("kfold: " + str(args.k_fold_num))
#totalNp = np.array(total.cpu())
print("Total ................")
print(total)
#results are in the cpu
print("newTotalAccuracy ................")
print(newTotalAccuracy)
mean_acc = np.mean(newTotalAccuracy)
std_acc = np.std(newTotalAccuracy)
specificity_mean = np.mean(specificity)
sensitivity_mean = np.mean(sensitivity)
meanSensitivityExperimentsList += specificity
meanSpecificityExperimentsList += sensitivity
else:
print("Final acc: " + str(mean_acc))
print("Standard deviation: " + str(std))
print("Final specificity: " + str(np.mean(specificity)))
print("Final sensitivity: " + str(np.mean(sensitivity)))
#empty memory
del model_ft
torch.cuda.empty_cache()
# write results
for i in total:
element = i.cpu().numpy()
# print(type(element[0]))
#bug, conversion!!
newTotalAccuracy += [float(element)]
print("Accuracies to send........................")
meanAccuracyExperimentsList += newTotalAccuracy
print(meanAccuracyExperimentsList)
writeReport(meanSensitivityExperimentsList, meanSpecificityExperimentsList, meanAccuracyExperimentsList, folds, experiments, args)
main()
|
from graphviz import Digraph
import numpy as np
import xlrd
import openpyxl
import pandas as pd
def build_dot():
dot = Digraph()
    dot.attr(label='Service Call HotSpot Monitor', labelloc="t")
sheet = pd.read_excel("data/services.xlsx", sheet_name='services')
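    # The sheet is expected to provide "from", "to" and "count" columns,
    # one row per call edge between two services (used by the loop below).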
print(sheet)
for index, row in sheet.iterrows():
print(row)
from_node = str(row['from'])
dot.node(from_node, from_node, shape='circle', color='blue')
to_node = str(row['to'])
dot.node(to_node, to_node, shape='circle', color='blue')
count = str(row['count'])
color = 'blue'
if int(count) > 1000:
color = 'red'
dot.edge(from_node, to_node, label=count, constraint='true', color=color)
dot.render('output/services.gv', view=True)
if __name__ == '__main__':
build_dot()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from conf import load_config
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
engine = create_engine(load_config().DB_ENGINE)
DBSession = sessionmaker(bind=engine)
def transactional(fn):
    """Run the wrapped method inside a DB session that is committed on
    success and rolled back on error; the session is passed to the wrapped
    method as its second argument."""
    def transact(self, *args):
        session = DBSession()
        try:
            result = fn(self, session, *args)
            session.commit()
            return result
        except:
            session.rollback()
            raise
    transact.__name__ = fn.__name__
    return transact
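
# Minimal usage sketch (illustrative only; "Item" and "ItemDAO" are assumed
# names, not part of this module):
#
#   class ItemDAO(object):
#       @transactional
#       def add_item(self, session, item):
#           session.add(item)
#
#   ItemDAO().add_item(Item(name='example'))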
|
"""
@brief Base robot class
@author Josip Delic (delijati.net)
@author Rowland O'Flaherty (rowlandoflaherty.com)
@date 04/23/2014
@version: 1.0
@copyright: Copyright (C) 2014, see the LICENSE file
"""
import sys
import time
import re
import socket
import threading
import Adafruit_BBIO.GPIO as GPIO
import Adafruit_BBIO.PWM as PWM
LEFT = 0
RIGHT = 1
MIN = 0
MAX = 1
DEBUG = False
VERBOSE = True
class BaseBot(object):
"""
    Base robot class. Mainly handles initialization and message passing.
"""
# Parameters
sample_time = 20.0 / 1000.0
pwm_freq = 2000
# Variables
led_flag = True
cmdBuffer = ''
# Motor Pins -- (LEFT, RIGHT)
dir1Pin = ("", "")
dir2Pin = ("", "")
pwmPin = ("", "")
# Led pin
led = ""
# State PWM -- (LEFT, RIGHT)
pwm = [0, 0]
# Constraints
pwmLimits = [-100, 100] # [min, max]
# UDP
port = 5005
robotSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
robotSocket.setblocking(False)
def __init__(self, base_ip, robot_ip):
# Run flag
self.run_flag = True
# Initialize GPIO pins
self._setup_gpio()
# Initialize PWM pins: PWM.start(channel, duty, freq=2000, polarity=0)
self._init_pwm()
# Set motor speed to 0
self.set_pwm([0, 0])
# Set IP addresses
self.base_ip = base_ip
self.robot_ip = robot_ip
self.robotSocket.bind((self.robot_ip, self.port))
# Initialize command parsing thread
        self.cmd_parsing_thread = threading.Thread(target=self.parse_cmd)
self.cmd_parsing_thread.daemon = True
def _setup_gpio(self):
"""Initialize GPIO pins"""
GPIO.setup(self.dir1Pin[LEFT], GPIO.OUT)
GPIO.setup(self.dir2Pin[LEFT], GPIO.OUT)
GPIO.setup(self.dir1Pin[RIGHT], GPIO.OUT)
GPIO.setup(self.dir2Pin[RIGHT], GPIO.OUT)
GPIO.setup(self.led, GPIO.OUT)
def _init_pwm(self):
"""
Initialize PWM pins
"""
        # It is currently not possible to set the frequency of the two PWM
        # channels independently; a possible workaround is to patch pwm_test.c
        # https://github.com/SaadAhmad/beaglebone-black-cpp-PWM
# https://github.com/SaadAhmad/beaglebone-black-cpp-PWM
PWM.start(self.pwmPin[LEFT], 0)
PWM.start(self.pwmPin[RIGHT], 0)
def set_pwm(self, pwm):
""" Set motor PWM values """
# [leftSpeed, rightSpeed]: 0 is off, caps at min and max values
self.set_pwm_left(pwm[LEFT])
self.set_pwm_right(pwm[RIGHT])
def set_pwm_left(self, pwm_left):
""" Set left motor PWM value """
self.pwm[LEFT] = min(
max(pwm_left, self.pwmLimits[MIN]), self.pwmLimits[MAX])
if self.pwm[LEFT] > 0:
GPIO.output(self.dir1Pin[LEFT], GPIO.LOW)
GPIO.output(self.dir2Pin[LEFT], GPIO.HIGH)
PWM.set_duty_cycle(self.pwmPin[LEFT], abs(self.pwm[LEFT]))
elif self.pwm[LEFT] < 0:
GPIO.output(self.dir1Pin[LEFT], GPIO.HIGH)
GPIO.output(self.dir2Pin[LEFT], GPIO.LOW)
PWM.set_duty_cycle(self.pwmPin[LEFT], abs(self.pwm[LEFT]))
else:
GPIO.output(self.dir1Pin[LEFT], GPIO.LOW)
GPIO.output(self.dir2Pin[LEFT], GPIO.LOW)
PWM.set_duty_cycle(self.pwmPin[LEFT], 0)
def set_pwm_right(self, pwm_right):
""" Set right motor PWM value """
self.pwm[RIGHT] = min(
max(pwm_right, self.pwmLimits[MIN]), self.pwmLimits[MAX])
if self.pwm[RIGHT] > 0:
GPIO.output(self.dir1Pin[RIGHT], GPIO.LOW)
GPIO.output(self.dir2Pin[RIGHT], GPIO.HIGH)
PWM.set_duty_cycle(self.pwmPin[RIGHT], abs(self.pwm[RIGHT]))
elif self.pwm[RIGHT] < 0:
GPIO.output(self.dir1Pin[RIGHT], GPIO.HIGH)
GPIO.output(self.dir2Pin[RIGHT], GPIO.LOW)
PWM.set_duty_cycle(self.pwmPin[RIGHT], abs(self.pwm[RIGHT]))
else:
GPIO.output(self.dir1Pin[RIGHT], GPIO.LOW)
GPIO.output(self.dir2Pin[RIGHT], GPIO.LOW)
PWM.set_duty_cycle(self.pwmPin[RIGHT], 0)
def get_pwm(self):
""" Get motor PWM values """
return self.pwm
def start_threads(self):
""" Start all threads """
self.cmd_parsing_thread.start()
def update(self):
""" Update which occures once per cycle of the run loop """
# Flash BBB LED
if self.led_flag is True:
self.led_flag = False
GPIO.output(self.led, GPIO.HIGH)
else:
self.led_flag = True
GPIO.output(self.led, GPIO.LOW)
def run(self):
""" The run loop """
# Start threads
self.start_threads()
# Run loop
while self.run_flag:
self.update()
time.sleep(self.sample_time)
self.cleanup()
return
def end_run(self):
""" End the run loop. Gives time for threads to receive run_flag. """
self.run_flag = False
time.sleep(2*self.sample_time)
def cleanup(self):
""" Clean up before shutting down. """
sys.stdout.write("Shutting down...")
self.set_pwm([0, 0])
self.robotSocket.close()
GPIO.cleanup()
PWM.cleanup()
if DEBUG:
pass
# tictocPrint()
# self.writeBuffersToFile()
sys.stdout.write("Done\n")
# def writeBuffersToFile(self):
# matrix = map(list, zip(*[self.encTimeRec[LEFT], self.encValRec[LEFT],
# self.encPWMRec[LEFT], self.encNNewRec[LEFT],
# self.encTickStateRec[LEFT],
# self.enc_posRec[LEFT],
# self.encVelRec[LEFT],
# self.encThresholdRec[LEFT],
# self.encTimeRec[RIGHT],
# self.encValRec[RIGHT],
# self.encPWMRec[RIGHT], self.encNNewRec[RIGHT],
# self.encTickStateRec[RIGHT],
# self.enc_posRec[RIGHT], self.encVelRec[RIGHT],
# self.encThresholdRec[RIGHT]]))
# s = [[str(e) for e in row] for row in matrix]
# lens = [len(max(col, key=len)) for col in zip(*s)]
# fmt = '\t'.join('{{:{}}}'.format(x) for x in lens)
# table = [fmt.format(*row) for row in s]
# f = open('output.txt', 'w')
# f.write('\n'.join(table))
# f.close()
# print "Wrote buffer to output.txt"
def parse_cmd(self):
""" Command parser """
try:
while self.run_flag:
try:
line = self.robotSocket.recv(1024)
except socket.error as msg:
continue
self.cmdBuffer += line
# String contained within $ and * (with no $ or * symbols in it)
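                # e.g. a complete set-PWM message looks like "$PWM=50,50*"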
buf_pattern = r'\$[^\$\*]*?\*'
buf_regex = re.compile(buf_pattern)
buf_result = buf_regex.search(self.cmdBuffer)
if buf_result:
msg = buf_result.group()
print msg
self.cmdBuffer = ''
cmd_pattern = r'(?P<CMD>[A-Z]{3,})'
set_pattern = r'(?P<SET>=?)'
query_pattern = r'(?P<QUERY>\??)'
arg_pattern = r'(?(2)(?P<ARGS>.*))'
msg_pattern = r'\$' + \
cmd_pattern + \
set_pattern + \
query_pattern + \
arg_pattern + \
r'.*\*'
msg_regex = re.compile(msg_pattern)
msg_result = msg_regex.search(msg)
if msg_result.group('CMD') == 'CHECK':
self.robotSocket.sendto(
'Hello from QuickBot\n', (self.base_ip, self.port))
elif msg_result.group('CMD') == 'PWM':
if msg_result.group('QUERY'):
if VERBOSE:
print str(self.get_pwm())
self.robotSocket.sendto(str(self.get_pwm()) + '\n',
(self.base_ip, self.port))
elif msg_result.group('SET') and msg_result.group('ARGS'):
args = msg_result.group('ARGS')
pwm_pattern = r'(?P<LEFT>[-]?\d+),(?P<RIGHT>[-]?\d+)'
pwm_regex = re.compile(pwm_pattern)
pwm_result = pwm_regex.match(args)
if pwm_result:
pwm = [int(pwm_result.group('LEFT')), \
int(pwm_result.group('RIGHT'))]
self.set_pwm(pwm)
self.robotSocket.sendto(str(self.get_pwm()) + '\n',
(self.base_ip, self.port))
elif msg_result.group('CMD') == 'IRVAL':
if msg_result.group('QUERY'):
reply = '[' + ', '.join(map(str, self.get_ir())) + ']'
print 'Sending: ' + reply
self.robotSocket.sendto(
reply + '\n', (self.base_ip, self.port))
elif msg_result.group('CMD') == 'ULTRAVAL':
if msg_result.group('QUERY'):
reply = '[' + ', '.join(map(str, self.ultraVal)) + ']'
print 'Sending: ' + reply
self.robotSocket.sendto(
reply + '\n', (self.base_ip, self.port))
elif msg_result.group('CMD') == 'WHEELANG':
if msg_result.group('QUERY'):
print 'Sending: ' + str(self.get_wheel_ang())
self.robotSocket.sendto(
str(self.get_wheel_ang()) +
'\n', (self.base_ip, self.port))
elif msg_result.group('SET') and msg_result.group('ARGS'):
args = msg_result.group('ARGS')
arg_pattern = \
r'(?P<LEFT>[-]?\d+[\.]?\d*),(?P<RIGHT>[-]?\d+[\.]?\d*)'
regex = re.compile(arg_pattern)
result = regex.match(args)
if result:
pos = [float(regex.match(args).group('LEFT')), \
float(regex.match(args).group('RIGHT'))]
self.set_wheel_ang(pos)
elif msg_result.group('CMD') == 'ENVAL':
if msg_result.group('QUERY'):
reply = \
'[' + ', '.join(map(str, self.get_enc_val())) + ']'
print 'Sending: ' + reply
self.robotSocket.sendto(
reply + '\n', (self.base_ip, self.port))
elif msg_result.group('SET') and msg_result.group('ARGS'):
args = msg_result.group('ARGS')
arg_pattern = \
r'(?P<LEFT>[-]?\d+[\.]?\d*),(?P<RIGHT>[-]?\d+[\.]?\d*)'
regex = re.compile(arg_pattern)
result = regex.match(args)
if result:
enc_pos = [float(regex.match(args).group('LEFT')), \
float(regex.match(args).group('RIGHT'))]
self.set_enc_val(enc_pos)
elif msg_result.group('CMD') == 'ENRAW':
if msg_result.group('QUERY'):
reply = \
'[' + ', '.join(map(str, self.get_enc_raw())) + ']'
print 'Sending: ' + reply
self.robotSocket.sendto(
reply + '\n', (self.base_ip, self.port))
elif msg_result.group('CMD') == 'ENOFFSET':
if msg_result.group('QUERY'):
reply = '[' + \
', '.join(map(str, self.get_enc_offset())) + ']'
print 'Sending: ' + reply
self.robotSocket.sendto(
reply + '\n', (self.base_ip, self.port))
elif msg_result.group('SET') and msg_result.group('ARGS'):
args = msg_result.group('ARGS')
arg_pattern = \
r'(?P<LEFT>[-]?\d+[\.]?\d*),(?P<RIGHT>[-]?\d+[\.]?\d*)'
regex = re.compile(arg_pattern)
result = regex.match(args)
if result:
offset = [float(regex.match(args).group('LEFT')), \
float(regex.match(args).group('RIGHT'))]
self.set_enc_offset(offset)
elif msg_result.group('CMD') == 'ENVEL':
if msg_result.group('QUERY'):
reply = \
'[' + ', '.join(map(str, self.get_enc_vel())) + ']'
print 'Sending: ' + reply
self.robotSocket.sendto(
reply + '\n', (self.base_ip, self.port))
elif msg_result.group('SET') and msg_result.group('ARGS'):
args = msg_result.group('ARGS')
arg_pattern = \
r'(?P<LEFT>[-]?\d+[\.]?\d*),(?P<RIGHT>[-]?\d+[\.]?\d*)'
regex = re.compile(arg_pattern)
result = regex.match(args)
if result:
enc_vel = [float(regex.match(args).group('LEFT')), \
float(regex.match(args).group('RIGHT'))]
self.set_enc_vel(enc_vel)
elif msg_result.group('CMD') == 'WHEELANGVEL':
if msg_result.group('QUERY'):
reply = \
'[' + ', '.join(map(str, self.get_wheel_ang_vel())) + ']'
print 'Sending: ' + reply
self.robotSocket.sendto(
reply + '\n', (self.base_ip, self.port))
elif msg_result.group('SET') and msg_result.group('ARGS'):
args = msg_result.group('ARGS')
arg_pattern = \
r'(?P<LEFT>[-]?\d+[\.]?\d*),(?P<RIGHT>[-]?\d+[\.]?\d*)'
regex = re.compile(arg_pattern)
result = regex.match(args)
if result:
wheel_ang_vel = [float(regex.match(args).group('LEFT')), \
float(regex.match(args).group('RIGHT'))]
self.set_wheel_ang_vel(wheel_ang_vel)
elif msg_result.group('CMD') == 'ENRESET':
self.reset_enc_val()
reply = \
'[' + ', '.join(map(str, self.get_enc_val())) + ']'
print 'Encoder values reset to ' + reply
elif msg_result.group('CMD') == 'UPDATE':
if msg_result.group('SET') and msg_result.group('ARGS'):
args = msg_result.group('ARGS')
pwm_pattern = r'(?P<LEFT>[-]?\d+),(?P<RIGHT>[-]?\d+)'
pwm_regex = re.compile(pwm_pattern)
pwm_result = pwm_regex.match(args)
if pwm_result:
pwm = [int(pwm_regex.match(args).group('LEFT')), \
int(pwm_regex.match(args).group('RIGHT'))]
self.set_pwm(pwm)
reply = '[' + ', '.join(map(str, self.enc_pos)) + ', ' \
+ ', '.join(map(str, self.encVel)) + ']'
print 'Sending: ' + reply
self.robotSocket.sendto(
reply + '\n', (self.base_ip, self.port))
elif msg_result.group('CMD') == 'END':
self.end_run()
else:
print 'Invalid: ' + msg
except:
self.end_run()
raise
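
# Minimal subclassing sketch (illustrative pin names only; the real values
# depend on how the robot is wired to the BeagleBone headers):
#
#   class MyBot(BaseBot):
#       dir1Pin = ("P8_14", "P8_12")
#       dir2Pin = ("P8_16", "P8_10")
#       pwmPin = ("P9_16", "P9_14")
#       led = "USR1"
#
#   bot = MyBot(base_ip="192.168.7.1", robot_ip="192.168.7.2")
#   bot.run()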
|
import sys
import atexit
import traceback
import threading
def wait_exit():
#Wait for shutdown signal if running in service mode
print("Press Ctrl-C to quit...")
if sys.platform == "win32":
_win32_wait_exit()
else:
import signal
signal.sigwait([signal.SIGTERM,signal.SIGINT])
def wait_exit_callback(callback):
def t_func():
try:
#Wait for shutdown signal if running in service mode
print("Press Ctrl-C to quit...")
if sys.platform == "win32":
hwnd = _win32_create_message_hwnd()
def _stop_loop():
_win32_post_hwnd_close(hwnd)
try:
atexit.register(_stop_loop)
_win32_wait_message_hwnd(hwnd)
finally:
try:
atexit.unregister(_stop_loop)
except Exception: pass
else:
import signal
signal.sigwait([signal.SIGTERM,signal.SIGINT])
except Exception:
traceback.print_exc()
callback()
t = threading.Thread(target=t_func)
t.setDaemon(True)
t.start()
def wait_exit_stop_loop(loop):
wait_exit_callback(lambda: loop.call_soon_threadsafe(loop.stop))
if sys.platform == "win32":
# https://gist.github.com/mouseroot/6128651
import ctypes
import ctypes.wintypes
WNDPROCTYPE = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.wintypes.HWND, ctypes.c_uint, ctypes.wintypes.WPARAM, ctypes.wintypes.LPARAM)
CtrlCHandlerRoutine = ctypes.WINFUNCTYPE(ctypes.wintypes.BOOL, ctypes.wintypes.DWORD)
WM_DESTROY = 2
WM_CLOSE = 16
HWND_MESSAGE = ctypes.wintypes.HWND(-3)
class WNDCLASSEX(ctypes.Structure):
_fields_ = [("cbSize", ctypes.c_uint),
("style", ctypes.c_uint),
("lpfnWndProc", WNDPROCTYPE),
("cbClsExtra", ctypes.c_int),
("cbWndExtra", ctypes.c_int),
("hInstance", ctypes.wintypes.HANDLE),
("hIcon", ctypes.wintypes.HANDLE),
("hCursor", ctypes.wintypes.HANDLE),
("hBrush", ctypes.wintypes.HANDLE),
("lpszMenuName", ctypes.wintypes.LPCWSTR),
("lpszClassName", ctypes.wintypes.LPCWSTR),
("hIconSm", ctypes.wintypes.HANDLE)]
def _PyWndProcedure(hWnd, Msg, wParam, lParam):
if Msg == WM_DESTROY:
ctypes.windll.user32.PostQuitMessage(0)
elif Msg == WM_CLOSE:
ctypes.windll.user32.DestroyWindow(hWnd)
else:
try:
# TODO: Why is this raising on error?
return ctypes.windll.user32.DefWindowProcW(hWnd, Msg, wParam, 0)
except ctypes.ArgumentError:
pass
return 0
WndProc = WNDPROCTYPE(_PyWndProcedure)
def _ctrl_c_empty_handler(code):
return True
_ctrl_c_empty_handler_ptr = CtrlCHandlerRoutine(_ctrl_c_empty_handler)
def _win32_wait_exit():
hwnd = _win32_create_message_hwnd()
_win32_wait_message_hwnd(hwnd)
def _win32_create_message_hwnd():
hInst = ctypes.windll.kernel32.GetModuleHandleW(0)
wclassName = 'pyri_message_window'
wname = 'pyri_hidden_window'
wndClass = WNDCLASSEX()
wndClass.cbSize = ctypes.sizeof(WNDCLASSEX)
wndClass.lpfnWndProc = WndProc
wndClass.lpszClassName = wclassName
wndClass.hInstance = hInst
regRes = ctypes.windll.user32.RegisterClassExW(ctypes.byref(wndClass))
assert regRes, "Could not create win32 message wnd class"
hWnd = ctypes.windll.user32.CreateWindowExW( 0, wclassName, wname, 0, 0, 0, 0, 0, HWND_MESSAGE, None, None, None )
assert hWnd, "Could not create win32 message hwnd"
return hWnd
def _win32_wait_message_hwnd(hWnd):
# Install a ctrl-c handler to send WM_QUIT
def ctrl_c_handler(code):
_win32_post_hwnd_close(hWnd)
return True
ctrl_c_handler_ptr = CtrlCHandlerRoutine(ctrl_c_handler)
ctypes.windll.kernel32.SetConsoleCtrlHandler(ctrl_c_handler_ptr, 1)
msg = ctypes.wintypes.MSG()
lpmsg = ctypes.pointer(msg)
while ctypes.windll.user32.GetMessageA(lpmsg, 0, 0, 0) != 0:
ctypes.windll.user32.TranslateMessage(lpmsg)
ctypes.windll.user32.DispatchMessageA(lpmsg)
ctypes.windll.kernel32.SetConsoleCtrlHandler(_ctrl_c_empty_handler_ptr, 1)
def _win32_post_hwnd_close(hWnd):
ctypes.windll.user32.PostMessageW(hWnd,WM_CLOSE,0,0)
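
# Minimal usage sketch (illustrative; assumes an asyncio event loop):
#
#   import asyncio
#   loop = asyncio.new_event_loop()
#   wait_exit_stop_loop(loop)   # stops the loop on Ctrl-C / SIGTERM
#   loop.run_forever()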
|
# Parse the input from the input file
input_file = 'example_input.txt'
polymer_template = ''
rules = {}
with open(input_file) as input:
# Template line
polymer_template = input.readline().rstrip()
# Empty line
input.readline()
# Rest is rules
for line in input.readlines():
pair, insert = line.rstrip().split(' -> ')
rules.update({pair: insert})
# Do a pair insertion
def pairInsert(polymer, rules):
# Using a list instead of a string is faster but uses more memory
result = [polymer[0]]
for i, second in enumerate(polymer[1:]):
first = polymer[i]
pair = first + second
        # Get the element to be inserted from the rules
insert = rules.get(pair)
# Add to result
result.extend([insert, second])
return ''.join(result)
# Task 1
# Do 10 pair insertions
polymer = polymer_template
for i in range(10):
polymer = pairInsert(polymer, rules)
# Find every unique element of the result polymer
unique_elemets = set(polymer)
# Count every unique element
counts = {}
for element in unique_elemets:
count = polymer.count(element)
counts.update({element: count})
# Find most and least common elements
most_common = max(counts, key=counts.get)
least_common = min(counts, key=counts.get)
substracted = counts.get(most_common) - counts.get(least_common)
print(f'After 10 pair insertions the most common element is {most_common} and the least common element is {least_common}.')
print(f'Subtracting the count of the least common element from the count of the most common element produces {substracted}.')
print()
# Task 2
# This method is not very efficient, but it gets the job done in a reasonable amount of time
print('This takes a moment')
print()
# Do multiple consecutive inserts for a single pair of elements
# Add the count of each element to counts after the inserts
def multiPairInsert(pair, rules, counts, inserts):
# Do inserts for one pair at a time
part_result = pair
for i in range(inserts):
part_result = pairInsert(part_result, rules)
    # Drop the last element to avoid double counting; it is the first element of the next pair
part_result = part_result[:-1]
# Find every unique element of the part result polymer
unique_elemets = set(part_result)
# Add to count
for element in unique_elemets:
count = part_result.count(element) + counts.get(element, 0)
counts.update({element: count})
# Do 10 more pair insertions
for i in range(10):
polymer = pairInsert(polymer, rules)
# Get counts for each pair after 20 more insertions
pair_counts = {}
for pair in rules.keys():
pair_count = {}
multiPairInsert(pair, rules, pair_count, 20)
pair_counts.update({pair: pair_count})
# Add counts
# The last element of the polymer needs to be added manually here
counts = {polymer[-1]: 1}
for index, second in enumerate(polymer[1:]):
first = polymer[index]
pair = first + second
for element, value in pair_counts.get(pair).items():
count = value + counts.get(element, 0)
counts.update({element: count})
# Find most and least common elements
most_common = max(counts, key=counts.get)
least_common = min(counts, key=counts.get)
substracted = counts.get(most_common) - counts.get(least_common)
print(f'After 40 pair insertions the most common element is {most_common} and the least common element is {least_common}.')
print(f'Subtracting the count of the least common element from the count of the most common element produces {substracted}.')
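
# For reference, a much faster approach (a sketch only, not used above) tracks
# pair frequencies instead of building the full polymer string:
#
#   from collections import Counter
#   pairs = Counter(polymer_template[i:i+2]
#                   for i in range(len(polymer_template) - 1))
#   for _ in range(40):
#       new_pairs = Counter()
#       for pair, n in pairs.items():
#           insert = rules[pair]
#           new_pairs[pair[0] + insert] += n
#           new_pairs[insert + pair[1]] += n
#       pairs = new_pairs
#   # Element counts then follow from the first character of every pair,
#   # plus one for the last character of the original template.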
|
# Create a function called kwargs_length() that can receive some keyword arguments and return their length.
# Submit only the function in the judge system.
def kwargs_length(**kwargs):
return len(kwargs)
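
# Example calls (for illustration; submit only the function above):
#   kwargs_length(name='Peter', age=25)   # -> 2
#   kwargs_length()                       # -> 0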
|
class Node:
def __init__(self, data=None, labels=None,
is_leaf=False, split_feature=None, split_kind=None, split_criteria=None,
left=None, right=None,
depth=0):
"""
        :param pandas.DataFrame data: features
        :param pandas.DataFrame labels: labels
:param bool is_leaf: True if the node is a leaf of the tree
:param int split_feature: column of the feature
:param str split_kind: ["<=" or "="]
:param split_criteria: value of the criteria used to split data
:param Node left: node child where criteria is True
:param Node right: node child where criteria is False
:param int depth: depth level of the node in the tree
"""
# data
self.X = data
self.y = labels
# split_info
self.is_leaf = is_leaf
self.split_feature = split_feature
self.split_kind = split_kind
self.split_criteria = split_criteria
if self.is_leaf:
self.content = "Leaf"
else:
self.content = "Feature {} {} {}".format(self.split_feature, self.split_kind, self.split_criteria)
# children
self.left_child = left
self.right_child = right
# meta
self.depth = depth
def __str__(self):
output_print = """{}\nNode depth = {}\n\n""".format(self.content, self.depth)
if self.is_leaf:
output_print += """X =\n{}\n\ny = \n{}\n""".format(self.X, self.y)
return output_print
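
# Illustrative construction (a sketch; X_left/y_left etc. are assumed
# pandas objects, not defined here):
#
#   leaf_true = Node(data=X_left, labels=y_left, is_leaf=True, depth=1)
#   leaf_false = Node(data=X_right, labels=y_right, is_leaf=True, depth=1)
#   root = Node(split_feature=0, split_kind="<=", split_criteria=2.5,
#               left=leaf_true, right=leaf_false, depth=0)
#   print(root)   # "Feature 0 <= 2.5" at depth 0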
|
# same imports as earlier.
from vtkmodules.util.vtkAlgorithm import VTKPythonAlgorithmBase
# new module for ParaView-specific decorators.
from paraview.util.vtkAlgorithm import smproxy, smproperty, smhint, smdomain
@smproxy.source(name="MagnetovisPlane", label="MagnetovisPlane")
@smhint.xml('<ShowInMenu category="Magnetovis"/>')
class CoordinatePlanePlugin(VTKPythonAlgorithmBase):
from magnetovis import extract_script
from magnetovis.Objects.Plane import Script, ScriptRequestInformation
Script = extract_script(Script, None, xml_encode=True)
def __init__(self, **default_values):
VTKPythonAlgorithmBase.__init__(self,
nInputPorts=0,
nOutputPorts=1,
outputType='vtkStructuredGrid')
def RequestData(self, request, inInfo, outInfo):
exec(self.Script)
return 1
def RequestInformation(self, request, inInfoVec, outInfoVec):
from magnetovis.Objects.Plane import ScriptRequestInformation
ScriptRequestInformation(self, Nx=2, Ny=2, Nz=1)
return 1
@smproperty.intvector(name="Npts", label="Npts", documentation="Text", default_values=9)
def SetNpts(self, Npts):
self.Npts = Npts
self.Modified()
@smproperty.stringvector(name="Script", command="SetScript", default_values=Script)
@smhint.xml(r"<Widget type='multi_line' syntax='python'/>")
def SetScript(self, Script):
self.Script = Script
self.Modified()
|
# Generated by Django 2.2 on 2019-04-08 05:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Assignment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('weight', models.FloatField(default=1.0)),
('points_possible', models.IntegerField(default=100)),
('category', models.CharField(choices=[('nbch', 'Notebook Check'), ('essay', 'Essay'), ('test', 'Test'), ('quiz', 'Quiz'), ('hmwk', 'Homework'), ('excr', 'Extra Credit'), ('proj', 'Project')], max_length=20)),
],
),
migrations.CreateModel(
name='Section',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('section_id', models.IntegerField(default=0)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.CreateModel(
name='Student',
fields=[
('student_id', models.IntegerField(primary_key=True, serialize=False)),
('first_name', models.CharField(max_length=200)),
('last_name', models.CharField(max_length=200)),
('age', models.IntegerField()),
('gender', models.CharField(choices=[('f', 'F'), ('m', 'M')], max_length=1)),
('race_ethnicity', models.CharField(choices=[('asian', 'Asian'), ('white', 'White'), ('native', 'American Indian or Alaska Native'), ('black', 'Black or African American'), ('two+', 'Two or more races'), ('hisp', 'Hispanic/Latino'), ('nhpi', 'Native Hawaiian or Other Pacific Islander')], max_length=100)),
('sped_status', models.BooleanField()),
('section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='demo.Section')),
],
),
migrations.CreateModel(
name='Grade',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('points', models.FloatField()),
('assignment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='demo.Assignment')),
('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='demo.Student')),
],
),
migrations.AddField(
model_name='assignment',
name='section',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='demo.Section'),
),
]
|
import click
from sgp30_exporter import app
@click.command()
@click.option(
"--listen-address",
default="0.0.0.0",
help="The address on which to listen for HTTP requests.",
show_default=True,
)
@click.option(
"--listen-port",
default=9895,
help="The port on which to listen for HTTP requests.",
show_default=True,
)
def main(listen_address, listen_port):
app.create_app().run(host=listen_address, port=listen_port)
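
if __name__ == "__main__":
    # Allow running the module directly; a console-script entry point defined
    # in the packaging metadata (if any) is unaffected by this guard.
    main()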
|
#!/usr/bin/python
from gi.repository import Gtk, WebKit, Notify
import json
import traceback
from pickle import Pickler, Unpickler
import os
import sys
from os.path import expanduser
home = expanduser("~")
gskype_settings_file = os.path.join(home, ".gskype", "settings")
gskype_state_file = os.path.join(home, ".gskype", "state")
class Application:
def __init__(self):
self._restore_settings()
self._restore_state()
if self.state['logged_out']:
self.state['logged_out'] = False
self._save_state()
self.input_username = ""
self.input_password = ""
self.logged_in = False
self.icon = Gtk.Image.new_from_file("skype_icon.png").get_pixbuf()
self.builder = self.create_builder()
self.win = self.builder.get_object("applicationwindow")
self._make_window_smaller()
self.scroll_view = self.builder.get_object("scrolledwindow")
self.web_view = WebKit.WebView()
self.scroll_view.add(self.web_view)
self._connect_signals()
def _connect_signals(self):
self.win.connect("delete-event", self.stop)
self.builder.get_object("switch_autologin").connect("toggled", self._on_autologin_toggled)
self.builder.get_object("btn_logout").connect("activate", self._on_logout_clicked)
    def show(self, *args):
        # Also used as a notification action callback, which passes extra args
        self.win.present()
def start(self):
Notify.init("GSkype")
self.win.show_all()
    def stop(self, *args):
        # Accept the extra (widget, event) arguments passed by "delete-event"
        Notify.uninit("GSkype")
        Gtk.main_quit()
def load_skype_url(self):
self.web_view.connect("resource-load-finished", self._on_resource_load_finished)
self.web_view.connect("load-finished", self._on_page_load_finished)
self.web_view.connect("user-changed-contents", self._on_user_changed_contents)
self.web_view.load_uri("https://web.skype.com/en/")
def _make_window_smaller(self):
self.win.set_size_request(512, 512)
def _make_window_bigger(self):
self.win.set_size_request(1024, 768)
def _restore_settings(self):
try:
with open(gskype_settings_file, "rb") as f:
self.settings = Unpickler(f).load()
except:
self.settings = {'autologin': True, 'username': '', 'password': ''}
def _restore_state(self):
try:
with open(gskype_state_file, "rb") as f:
self.state = Unpickler(f).load()
except:
self.state = {'logged_out': False}
def _save_settings(self):
os.makedirs(os.path.dirname(gskype_settings_file), exist_ok=True)
with open(gskype_settings_file, "wb+") as f:
Pickler(f).dump(self.settings)
def _save_state(self):
os.makedirs(os.path.dirname(gskype_state_file), exist_ok=True)
with open(gskype_state_file, "wb+") as f:
Pickler(f).dump(self.state)
def _on_logout_clicked(self, btn):
self.state['logged_out'] = True
self._save_state()
os.execv(sys.executable, [sys.executable] + sys.argv)
def _on_autologin_toggled(self, toggle):
self.settings['autologin'] = toggle.get_active()
self._save_settings()
def _on_user_changed_contents(self, web_view):
username, password, btn_sign_in = self._get_login_form_parts(web_view)
self.input_username = username.get_value()
self.input_password = password.get_value()
def _save_autofill_data(self):
self.settings['username'] = self.input_username
self.settings['password'] = self.input_password
self._save_settings()
def _get_login_form_parts(self, web_view):
doc = web_view.get_dom_document()
username = doc.get_element_by_id("username")
if not username:
username = doc.get_element_by_id("i0116")
password = doc.get_element_by_id("password")
if not password:
password = doc.get_element_by_id("i0118")
btn_sign_in = doc.get_element_by_id("signIn")
if not btn_sign_in:
btn_sign_in = doc.get_element_by_id("idSIButton9")
return username, password, btn_sign_in
def _on_page_load_finished(self, web_view, frame):
uri = str(web_view.get_uri())
if "login.skype.com" in uri or "login.live.com" in uri:
self._autofill_data(web_view)
if uri.startswith("https://web.skype.com"):
if self.input_username and self.input_password:
self.logged_in = True
self.state['logged_out'] = False
self._save_autofill_data()
self._save_state()
if self.logged_in:
self._make_window_bigger()
def _autofill_data(self, web_view):
if self.logged_in:
return
if not self.settings or not self.settings['username'] or not self.settings['password']:
return
username, password, btn_sign_in = self._get_login_form_parts(web_view)
try:
username.set_value(self.settings['username'])
password.set_value(self.settings['password'])
if not self.state['logged_out']:
btn_sign_in.click()
self.logged_in = True
else:
password.focus()
except:
pass
# print("Can't autofill credentials")
# print(traceback.format_exc())
def _on_resource_load_finished(self, web_view, web_frame, web_resource):
uri = str(web_resource.get_uri())
if uri.endswith("poll"):
response_json = web_resource.get_data().str
parsed_response = json.loads(response_json)
if parsed_response:
msg = parsed_response["eventMessages"][0]
if msg["resourceType"] == "NewMessage":
res = msg["resource"]
if res["messagetype"] == "Text":
self.notify_new_message(res["imdisplayname"], res["content"], res["originalarrivaltime"])
print(response_json)
def notify_new_message(self, author, message, time):
notification = Notify.Notification.new(author, message, None)
notification.add_action("1337", "", self.show)
notification.set_image_from_pixbuf(self.icon)
notification.show()
@staticmethod
def create_builder():
builder = Gtk.Builder()
builder.add_from_file("layout.glade")
return builder
app = Application()
app.start()
app.load_skype_url()
Gtk.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Script to generate a dummy SRAM cache for DFFRAM (RAM*) and Sky130 OpenRAM macros.
#
# See LICENSE for licence details.
import sys
import re
from typing import List
def main(args: List[str]) -> int:
if len(args) != 3:
print("Usage: ./sram-cache-gen.py list-of-srams-1-per-line.txt output-file.json")
print("E.g.: ./sram-cache-gen.py srams.txt sram-cache.json")
return 1
list_of_srams = [] # type: List[str]
with open(sys.argv[1]) as f:
for line in f:
list_of_srams.append(line)
print(str(len(list_of_srams)) + " SRAMs to cache")
json = [] # type: List[str]
for sram_name in list_of_srams:
# DFFRAM-generated 1-port rams
if sram_name.startswith("RAM"):
match = re.match(r"RAM(\d+)x(\d+)", sram_name)
if match:
json.append("""{{
"type" : "sram",
"name" : "{n}",
"depth" : "{d}",
"width" : {w},
"family" : "1rw",
"mask" : "true",
"ports" : [ {{
"address port name" : "A",
"address port polarity" : "active high",
"clock port name" : "CLK",
"clock port polarity" : "positive edge",
"output port name" : "Do",
"output port polarity" : "active high",
"input port name" : "Di",
"input port polarity" : "active high",
"chip enable port name" : "EN",
"chip enable port polarity" : "active high",
"mask port name" : "WE",
"mask port polarity" : "active high",
"mask granularity" : 8
}} ],
"extra ports" : []
}}""".format(n=sram_name.strip(), d=match.group(1), w=match.group(2)))
else:
print("Unsupported memory: {n}".format(n=sram_name), file=sys.stderr)
return 1
# OpenRAM-generated 2-port rams
elif sram_name.startswith("sky130_sram"):
match = re.match(r"sky130_sram_(\d+)kbyte_1rw1r_(\d+)x(\d+)_(\d+)", sram_name)
if match:
json.append("""
{{
"type" : "sram",
"name" : "{n}",
"depth" : "{d}",
"width" : {w},
"family" : "1rw1r",
"mask" : "true",
"ports": [ {{
"address port name" : "addr0",
"address port polarity" : "active high",
"clock port name" : "clk0",
"clock port polarity" : "positive edge",
"write enable port name" : "web0",
"write enable port polarity" : "active low",
"output port name" : "dout0",
"output port polarity" : "active high",
"input port name" : "din0",
"input port polarity" : "active high",
"chip enable port name" : "csb0",
"chip enable port polarity" : "active low",
"mask port name" : "wmask0",
"mask port polarity" : "active high",
"mask granularity" : {m}
}}, {{
"address port name" : "addr1",
"address port polarity" : "active high",
"clock port name" : "clk1",
"clock port polarity" : "positive edge",
"output port name" : "dout1",
"output port polarity" : "active high",
"chip enable port name" : "csb1",
"chip enable port polarity" : "active low"
}} ],
"extra ports" : []
}}""".format(n=sram_name.strip(), w=match.group(2), d=match.group(3), m=match.group(4)))
else:
print("Unsupported memory: {n}".format(n=sram_name), file=sys.stderr)
return 1
else:
print("Unsupported memory: {n}".format(n=sram_name), file=sys.stderr)
return 1
json_str = "[\n" + ",\n".join(json) + "]\n"
with open(sys.argv[2], "w") as f:
f.write(json_str)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
#!/usr/bin/env python
import os
import sys
from setuptools import setup
from pip.download import PipSession
from pip.req import parse_requirements
# work around the combination of http://bugs.python.org/issue8876 and
# https://www.virtualbox.org/ticket/818 since it doesn't really have ill
# effects and there will be a lot of virtualbox users.
del os.link
# As of pip 6, parse_requirements requires a 'session' argument. This is required
# for remote files, but not local ones. In prior versions of pip, a blank
# PipSession object was used if no 'session' object was passed.
reqs = [str(r.req) for r in parse_requirements('requirements.txt', session=PipSession()) if r.req is not None]
if sys.version_info < (3, 3):
reqs.append('backports.lzma')
if sys.version_info < (3, 4):
reqs.append('enum34')
setup(
name='libweasyl',
description='common code across weasyl projects',
author='Weasyl LLC',
packages=[
'libweasyl', 'libweasyl.models',
'libweasyl.test', 'libweasyl.models.test',
],
package_data={
'libweasyl': [
'alembic/*.py', 'alembic/versions/*.py',
'test/data/*',
],
},
install_requires=reqs,
extras_require={
'development': [
'coverage',
'flake8',
'pytest',
'sphinx',
'sphinxcontrib-napoleon',
'tox',
'vcversioner',
],
},
setup_requires=['vcversioner'],
vcversioner={
'version_module_paths': ['libweasyl/_version.py'],
# The git repo root is one directory above this setup.py.
'root': os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
},
)
|
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import uuid
import random
from functionaltests.api.v2.models.blacklist_model import BlacklistModel
from functionaltests.api.v2.models.pool_model import PoolModel
from functionaltests.api.v2.models.transfer_requests_model import \
TransferRequestsModel
from functionaltests.api.v2.models.transfer_accepts_model import \
TransferAcceptsModel
from functionaltests.api.v2.models.recordset_model import RecordsetModel
from functionaltests.api.v2.models.zone_model import ZoneModel
from functionaltests.api.v2.models.tld_model import TLDModel
def random_ip():
return ".".join(str(random.randrange(0, 256)) for _ in range(4))
def random_ipv6():
def hexes(n):
return "".join(random.choice("1234567890abcdef") for _ in range(n))
result = ":".join(hexes(4) for _ in range(8))
return result.replace("0000", "0")
def random_uuid():
return uuid.uuid4()
def random_string(prefix='rand', n=8, suffix=''):
"""Return a string containing random digits
:param prefix: the exact text to start the string. Defaults to "rand"
:param n: the number of random digits to generate
:param suffix: the exact text to end the string
"""
digits = "".join(str(random.randrange(0, 10)) for _ in range(n))
return prefix + digits + suffix
def random_zone_data(name=None, email=None, ttl=None, description=None):
"""Generate random zone data, with optional overrides
:return: A ZoneModel
"""
if name is None:
name = random_string(prefix='testdomain', suffix='.com.')
if email is None:
email = ("admin@" + name).strip('.')
if description is None:
description = random_string(prefix='Description ')
    if ttl is None:
        ttl = random.randint(1200, 8400)
    return ZoneModel.from_dict({
        'name': name,
        'email': email,
        'ttl': ttl,
        'description': description})
def random_transfer_request_data(description=None, target_project_id=None):
"""Generate random zone data, with optional overrides
:return: A TransferRequestModel
"""
data = {}
if description is None:
data['description'] = random_string(prefix='Description ')
if target_project_id:
data['target_project_id'] = target_project_id
return TransferRequestsModel.from_dict(data)
def random_transfer_accept_data(key=None, zone_transfer_request_id=None):
"""Generate random zone data, with optional overrides
:return: A TransferRequestModel
"""
if key is None:
key = random_string()
if zone_transfer_request_id is None:
zone_transfer_request_id = random_uuid()
return TransferAcceptsModel.from_dict({
'key': key,
'zone_transfer_request_id': zone_transfer_request_id})
def random_recordset_data(record_type, zone_name, name=None, records=None,
ttl=None):
"""Generate random recordset data, with optional overrides
:return: A RecordsetModel
"""
if name is None:
name = random_string(prefix=record_type, suffix='.' + zone_name)
if records is None:
records = [random_ip()]
if ttl is None:
ttl = random.randint(1200, 8400)
return RecordsetModel.from_dict({
'type': record_type,
'name': name,
'records': records,
'ttl': ttl})
def random_a_recordset(zone_name, ip=None, **kwargs):
if ip is None:
ip = random_ip()
return random_recordset_data('A', zone_name, records=[ip], **kwargs)
def random_aaaa_recordset(zone_name, ip=None, **kwargs):
if ip is None:
ip = random_ipv6()
return random_recordset_data('AAAA', zone_name, records=[ip], **kwargs)
def random_cname_recordset(zone_name, cname=None, **kwargs):
if cname is None:
cname = zone_name
return random_recordset_data('CNAME', zone_name, records=[cname], **kwargs)
def random_mx_recordset(zone_name, pref=None, host=None, **kwargs):
if pref is None:
pref = str(random.randint(0, 65535))
if host is None:
host = random_string(prefix='mail', suffix='.' + zone_name)
data = "{0} {1}".format(pref, host)
return random_recordset_data('MX', zone_name, records=[data], **kwargs)
def random_blacklist_data():
data = {
"pattern": random_string()
}
return BlacklistModel.from_dict(data)
def random_pool_data():
ns_zone = random_zone_data().name
data = {
"name": random_string(),
}
records = []
for i in range(0, 2):
records.append("ns%s.%s" % (i, ns_zone))
ns_records = [{"hostname": x, "priority": random.randint(1, 999)}
for x in records]
data["ns_records"] = ns_records
return PoolModel.from_dict(data)
def random_zonefile_data(name=None, ttl=None):
"""Generate random zone data, with optional overrides
:return: A ZoneModel
"""
zone_base = ('$ORIGIN &\n& # IN SOA ns.& nsadmin.& # # # # #\n'
'& # IN NS ns.&\n& # IN MX 10 mail.&\nns.& 360 IN A 1.0.0.1')
if name is None:
name = random_string(prefix='testdomain', suffix='.com.')
if ttl is None:
ttl = str(random.randint(1200, 8400))
return zone_base.replace('&', name).replace('#', ttl)
def random_spf_recordset(zone_name, data=None, **kwargs):
data = data or "v=spf1 +all"
return random_recordset_data('SPF', zone_name, records=[data], **kwargs)
def random_srv_recordset(zone_name, data=None):
data = data or "10 0 8080 %s.%s" % (random_string(), zone_name)
return random_recordset_data('SRV', zone_name,
name="_sip._tcp.%s" % zone_name,
records=[data])
def random_sshfp_recordset(zone_name, algorithm_number=None,
fingerprint_type=None, fingerprint=None,
**kwargs):
algorithm_number = algorithm_number or 2
fingerprint_type = fingerprint_type or 1
fingerprint = fingerprint or \
"123456789abcdef67890123456789abcdef67890"
data = "%s %s %s" % (algorithm_number, fingerprint_type, fingerprint)
return random_recordset_data('SSHFP', zone_name, records=[data], **kwargs)
def random_txt_recordset(zone_name, data=None, **kwargs):
data = data or "v=spf1 +all"
return random_recordset_data('TXT', zone_name, records=[data], **kwargs)
def random_tld_data():
data = {
"name": random_string(prefix='tld')
}
return TLDModel.from_dict(data)
def wildcard_ns_recordset(zone_name):
name = "*.{0}".format(zone_name)
records = ["ns.example.com."]
return random_recordset_data('NS', zone_name, name, records)
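
# Illustrative usage inside a functional test (sketch only):
#   zone = random_zone_data()
#   a_recordset = random_a_recordset(zone.name)
#   transfer_request = random_transfer_request_data()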
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions used in kubeflow_v2_run_executor.py."""
import hashlib
from typing import Any, Dict, List, Mapping, MutableMapping, Optional
from absl import logging
from kfp.pipeline_spec import pipeline_spec_pb2 as pipeline_pb2
from tfx.components.evaluator import constants
from tfx.orchestration.kubeflow.v2 import compiler_utils
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import import_utils
import yaml
from google.protobuf import struct_pb2
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
# Old execution property name. This is mapped to utils.INPUT_BASE_KEY.
_OLD_INPUT_BASE_PROPERTY_NAME = 'input_base_uri'
# Modulus used to keep hashed IDs within the int64 range
_INT64_MAX = 1 << 63
def parse_raw_artifact_dict(
inputs_dict: Any,
name_from_id: Optional[MutableMapping[int, str]] = None
) -> Dict[str, List[artifact.Artifact]]:
"""Parses a map from key to a list of a single Artifact from pb objects.
Parses a mapping field in a protobuf message, whose value is an
ExecutorInput.ArtifactList message, to a Python dict, whose value is a list of
TFX Artifact Python objects.
Args:
inputs_dict: the mapping field in the proto message.
name_from_id: the dict used to store the id to string-typed name mapping.
Returns:
dictionary of the parsed Python Artifact objects.
"""
if name_from_id is None:
name_from_id = {}
result = {}
for k, v in inputs_dict.items():
result[k] = [
_parse_raw_artifact(single_artifact, name_from_id)
for single_artifact in v.artifacts
]
return result
def _get_hashed_id(full_name: str, name_from_id: MutableMapping[int,
str]) -> int:
"""Converts the string-typed name to int-typed ID."""
# Built-in hash function will not exceed the range of int64, which is the
# type of id in metadata artifact proto.
result = int(hashlib.sha256(full_name.encode('utf-8')).hexdigest(),
16) % _INT64_MAX
name_from_id[result] = full_name
return result
def _get_full_name(artifact_id: int, name_from_id: Mapping[int, str]) -> str:
"""Converts the int-typed id to full string name."""
return name_from_id[artifact_id]
# TODO(b/169583143): Remove this workaround when TFX migrates to use str-typed
# id/name to identify artifacts.
# Currently the contract is:
# - In TFX stack, artifact IDs are integers.
# - In pipeline stack, artifact IDs are strings, with format determined by the
# type and implementation of the metadata store in use.
# Therefore conversion is needed when parsing RuntimeArtifact populated by
# pipeline and also when writing out ExecutorOutput.
# This function is expected to be executed right before the TFX container
# writes out ExecutorOutput pb message. It converts the int-typed ID fields to
# string-typed ones conforming the contract with the metadata store being used.
def refactor_model_blessing(model_blessing: artifact.Artifact,
name_from_id: Mapping[int, str]) -> None:
"""Changes id-typed custom properties to string-typed runtime artifact name."""
if model_blessing.has_custom_property(
constants.ARTIFACT_PROPERTY_BASELINE_MODEL_ID_KEY):
model_blessing.set_string_custom_property(
constants.ARTIFACT_PROPERTY_BASELINE_MODEL_ID_KEY,
_get_full_name(
artifact_id=model_blessing.get_int_custom_property(
constants.ARTIFACT_PROPERTY_BASELINE_MODEL_ID_KEY),
name_from_id=name_from_id))
if model_blessing.has_custom_property(
constants.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY):
model_blessing.set_string_custom_property(
constants.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY,
_get_full_name(
artifact_id=model_blessing.get_int_custom_property(
constants.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY),
name_from_id=name_from_id))
def parse_execution_properties(exec_properties: Any) -> Dict[str, Any]:
"""Parses a map from key to Value proto as execution properties.
Parses a mapping field in a protobuf message, whose value is a Kubeflow Value
proto messages, to a Python dict, whose value is a Python primitive object.
Args:
exec_properties: the mapping field in the proto message, representing the
execution properties of the component.
Returns:
dictionary of the parsed execution properties.
"""
result = {}
for k, v in exec_properties.items():
# TODO(b/159835994): Remove this once pipeline populates INPUT_BASE_KEY
if k == _OLD_INPUT_BASE_PROPERTY_NAME:
k = standard_component_specs.INPUT_BASE_KEY
# Translate each field from Value pb to plain value.
result[k] = getattr(v, v.WhichOneof('value'))
if result[k] is None:
raise TypeError('Unrecognized type encountered at field %s of execution'
' properties %s' % (k, exec_properties))
return result
def translate_executor_output(
output_dict: Mapping[str, List[artifact.Artifact]],
name_from_id: Mapping[int,
str]) -> Dict[str, pipeline_pb2.ArtifactList]:
"""Translates output_dict to a Kubeflow ArtifactList mapping."""
result = {}
for k, v in output_dict.items():
result[k] = pipeline_pb2.ArtifactList(artifacts=[
to_runtime_artifact(
artifact_utils.get_single_instance(v), name_from_id)
])
return result
def _get_json_value_mapping(
mlmd_value_mapping: Dict[str, metadata_store_pb2.Value]) -> Dict[str, Any]:
"""Converts a mapping field with MLMD Value to JSON Value."""
def get_json_value(
mlmd_value: metadata_store_pb2.Value) -> artifact.JsonValueType:
if not mlmd_value.HasField('value'):
return None
elif mlmd_value.WhichOneof('value') == 'int_value':
return float(mlmd_value.int_value)
elif mlmd_value.WhichOneof('value') == 'double_value':
return mlmd_value.double_value
elif mlmd_value.WhichOneof('value') == 'string_value':
return mlmd_value.string_value
elif mlmd_value.WhichOneof('value') == 'struct_value':
return artifact._decode_struct_value(mlmd_value.struct_value) # pylint: disable=protected-access
else:
raise TypeError('Get unknown type of value: {}'.format(mlmd_value))
return {k: get_json_value(v) for k, v in mlmd_value_mapping.items()}
def _get_json_metadata_mapping(
artifact_instance: artifact.Artifact) -> Dict[str, Any]:
"""Converts Artifact properties to a JSON dictionary."""
properties_json = _get_json_value_mapping(
artifact_instance.mlmd_artifact.properties)
custom_properties_json = _get_json_value_mapping(
artifact_instance.mlmd_artifact.custom_properties)
metadata_dict = {}
for key, value in properties_json.items():
metadata_dict[key] = value
for key, value in custom_properties_json.items():
if key in artifact_instance.artifact_type.properties:
key = artifact.CUSTOM_PROPERTIES_PREFIX + key
metadata_dict[key] = value
return metadata_dict
def to_runtime_artifact(
artifact_instance: artifact.Artifact,
name_from_id: Mapping[int, str]) -> pipeline_pb2.RuntimeArtifact:
"""Converts TFX artifact instance to RuntimeArtifact proto message."""
metadata = struct_pb2.Struct()
json_format.ParseDict(_get_json_metadata_mapping(artifact_instance), metadata)
result = pipeline_pb2.RuntimeArtifact(
uri=artifact_instance.uri, metadata=metadata)
# TODO(b/135056715): Change to a unified getter/setter of Artifact type
# once it's ready.
# Try convert tfx artifact id to string-typed name. This should be the case
# when running on an environment where metadata access layer is not running
# in user space.
id_or_none = getattr(artifact_instance, 'id', None)
if (id_or_none is not None and id_or_none in name_from_id):
result.name = name_from_id[id_or_none]
else:
logging.warning('Cannot convert ID back to runtime name for artifact %s',
artifact_instance)
return result
def _retrieve_class_path(type_schema: pipeline_pb2.ArtifactTypeSchema) -> str:
"""Gets the class path from an artifact type schema."""
if type_schema.WhichOneof('kind') == 'schema_title':
title = type_schema.schema_title
if type_schema.WhichOneof('kind') == 'instance_schema':
data = yaml.safe_load(type_schema.instance_schema)
title = data.get('title', 'tfx.Artifact')
if title in compiler_utils.TITLE_TO_CLASS_PATH:
# For first party types, the actual import path is maintained in
# TITLE_TO_CLASS_PATH map.
return compiler_utils.TITLE_TO_CLASS_PATH[title]
else:
# For custom types, the import path is encoded as the schema title.
return title
def _parse_raw_artifact(
artifact_pb: pipeline_pb2.RuntimeArtifact,
name_from_id: MutableMapping[int, str]) -> artifact.Artifact:
"""Parses RuntimeArtifact proto message without artifact_type."""
# This parser can only reserve what's inside the RuntimeArtifact pb message.
# Recovers the type information from artifact type schema.
# TODO(b/170261670): Replace this workaround by a more resilient
# implementation. Currently custom artifact type can hardly be supported.
assert artifact_pb.type, 'RuntimeArtifact is expected to have a type.'
# 1. Import the artifact class from preloaded TFX library.
type_path = _retrieve_class_path(artifact_pb.type)
artifact_cls = import_utils.import_class_by_path(type_path)
# 2. Copy properties and custom properties to the MLMD artifact pb.
mlmd_artifact = metadata_store_pb2.Artifact()
# TODO(b/135056715): Change to a unified getter/setter of Artifact type
# once it's ready.
if artifact_pb.name:
# TODO(b/169583143): Remove this workaround when TFX migrates to use
# str-typed id/name to identify artifacts.
# Convert and populate the MLMD artifact ID.
mlmd_artifact.id = _get_hashed_id(artifact_pb.name, name_from_id)
mlmd_artifact.uri = artifact_pb.uri
for k, v in artifact_pb.properties.items():
mlmd_artifact.properties[k].CopyFrom(compiler_utils.get_mlmd_value(v))
for k, v in artifact_pb.custom_properties.items():
mlmd_artifact.custom_properties[k].CopyFrom(
compiler_utils.get_mlmd_value(v))
# Translate metadata items into properties and custom properties.
mlmd_artifact_type = artifact_cls().artifact_type
metadata_dict = json_format.MessageToDict(artifact_pb.metadata)
for k, v in metadata_dict.items():
if k in mlmd_artifact_type.properties:
property_type = mlmd_artifact_type.properties[k]
if property_type == metadata_store_pb2.INT and isinstance(v, float):
mlmd_artifact.properties[k].int_value = int(v)
continue
elif property_type == metadata_store_pb2.DOUBLE and isinstance(v, float):
mlmd_artifact.properties[k].double_value = v
continue
elif property_type == metadata_store_pb2.STRING and isinstance(v, str):
mlmd_artifact.properties[k].string_value = v
continue
elif property_type == metadata_store_pb2.STRUCT:
mlmd_artifact.properties[k].struct_value.CopyFrom(
artifact._encode_struct_value(v)) # pylint: disable=protected-access
continue
# We fell through, which means the property doesn't actually fit the
# schema. Therefore, we treat it as a custom property.
# First, we drop the custom property prefix if we had to drop it because
# of a property name conflict.
if k.startswith(artifact.CUSTOM_PROPERTIES_PREFIX):
stripped_k = k[len(artifact.CUSTOM_PROPERTIES_PREFIX):]
if stripped_k in mlmd_artifact_type.properties:
k = stripped_k
mlmd_artifact.custom_properties[k].struct_value.CopyFrom(
artifact._encode_struct_value(v)) # pylint: disable=protected-access
# 3. Instantiate the artifact Python object.
result = artifact_cls()
result.set_mlmd_artifact(mlmd_artifact)
return result
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import copy
import mock
import pytest
from datadog_checks.snowflake import SnowflakeCheck, queries
from .conftest import CHECK_NAME
PROXY_CONFIG = {'http': 'http_host', 'https': 'https_host', 'no_proxy': 'uri1,uri2;uri3,uri4'}
INVALID_PROXY = {'http': 'unused', 'https': 'unused', 'no_proxy': 'unused'}
def test_config():
# Test missing account
user_config = {
'username': 'TestGuy',
'password': 'badpass',
}
with pytest.raises(Exception, match='Must specify an account'):
SnowflakeCheck(CHECK_NAME, {}, [user_config])
# Test missing user and pass
account_config = {'account': 'TEST123'}
with pytest.raises(Exception, match='Must specify a user and password'):
SnowflakeCheck(CHECK_NAME, {}, [account_config])
def test_default_authentication(instance):
# Test default auth
check = SnowflakeCheck(CHECK_NAME, {}, [instance])
assert check.config.authenticator == 'snowflake'
def test_invalid_auth(instance):
# Test oauth
oauth_inst = copy.deepcopy(instance)
oauth_inst['authenticator'] = 'oauth'
with pytest.raises(Exception, match='If using OAuth, you must specify a token'):
SnowflakeCheck(CHECK_NAME, {}, [oauth_inst])
oauth_inst['authenticator'] = 'testauth'
with pytest.raises(Exception, match='The Authenticator method set is invalid: testauth'):
SnowflakeCheck(CHECK_NAME, {}, [oauth_inst])
def test_default_auth(instance):
check = SnowflakeCheck(CHECK_NAME, {}, [instance])
check._conn = mock.MagicMock()
check._query_manager = mock.MagicMock()
with mock.patch('datadog_checks.snowflake.check.sf') as sf:
check.check(instance)
sf.connect.assert_called_with(
user='testuser',
password='pass',
account='test_acct.us-central1.gcp',
database='SNOWFLAKE',
schema='ACCOUNT_USAGE',
warehouse=None,
role='ACCOUNTADMIN',
passcode_in_password=False,
passcode=None,
client_prefetch_threads=4,
login_timeout=60,
ocsp_response_cache_filename=None,
authenticator='snowflake',
token=None,
client_session_keep_alive=False,
proxy_host=None,
proxy_port=None,
proxy_user=None,
proxy_password=None,
)
def test_oauth_auth(instance):
# Test oauth
oauth_inst = copy.deepcopy(instance)
oauth_inst['authenticator'] = 'oauth'
oauth_inst['token'] = 'testtoken'
with mock.patch('datadog_checks.snowflake.check.sf') as sf:
check = SnowflakeCheck(CHECK_NAME, {}, [oauth_inst])
check._conn = mock.MagicMock()
check._query_manager = mock.MagicMock()
check.check(oauth_inst)
sf.connect.assert_called_with(
user='testuser',
password='pass',
account='test_acct.us-central1.gcp',
database='SNOWFLAKE',
schema='ACCOUNT_USAGE',
warehouse=None,
role='ACCOUNTADMIN',
passcode_in_password=False,
passcode=None,
client_prefetch_threads=4,
login_timeout=60,
ocsp_response_cache_filename=None,
authenticator='oauth',
token='testtoken',
client_session_keep_alive=False,
proxy_host=None,
proxy_port=None,
proxy_user=None,
proxy_password=None,
)
def test_proxy_settings(instance):
init_config = {
'proxy_host': 'testhost',
'proxy_port': 8000,
'proxy_user': 'proxyuser',
'proxy_password': 'proxypass',
}
with mock.patch('datadog_checks.snowflake.check.sf') as sf:
check = SnowflakeCheck(CHECK_NAME, init_config, [instance])
check._conn = mock.MagicMock()
check._query_manager = mock.MagicMock()
check.check(instance)
sf.connect.assert_called_with(
user='testuser',
password='pass',
account='test_acct.us-central1.gcp',
database='SNOWFLAKE',
schema='ACCOUNT_USAGE',
warehouse=None,
role='ACCOUNTADMIN',
passcode_in_password=False,
passcode=None,
client_prefetch_threads=4,
login_timeout=60,
ocsp_response_cache_filename=None,
authenticator='snowflake',
token=None,
client_session_keep_alive=False,
proxy_host='testhost',
proxy_port=8000,
proxy_user='proxyuser',
proxy_password='proxypass',
)
def test_default_metric_groups(instance):
check = SnowflakeCheck(CHECK_NAME, {}, [instance])
assert check.config.metric_groups == [
'snowflake.query',
'snowflake.billing',
'snowflake.storage',
'snowflake.logins',
]
assert check.metric_queries == [
queries.WarehouseLoad,
queries.QueryHistory,
queries.CreditUsage,
queries.WarehouseCreditUsage,
queries.StorageUsageMetrics,
queries.LoginMetrics,
]
def test_mixed_metric_group(instance):
instance = copy.deepcopy(instance)
instance['metric_groups'] = ['fake.metric.group', 'snowflake.logins']
check = SnowflakeCheck(CHECK_NAME, {}, [instance])
assert check.metric_queries == [queries.LoginMetrics]
def test_additional_metric_groups(instance):
instance = copy.deepcopy(instance)
instance['metric_groups'] = ['snowflake.logins', 'snowflake.data_transfer']
check = SnowflakeCheck(CHECK_NAME, {}, [instance])
assert check.config.metric_groups == ['snowflake.logins', 'snowflake.data_transfer']
assert check.metric_queries == [
queries.LoginMetrics,
queries.DataTransferHistory,
]
def test_metric_group_exceptions(instance):
instance = copy.deepcopy(instance)
instance['metric_groups'] = ['fake.metric.group']
with pytest.raises(Exception, match='No valid metric_groups configured, please list at least one.'):
check = SnowflakeCheck(CHECK_NAME, {}, [instance])
check.log = mock.MagicMock()
check.log.warning.assert_called_once_with(
"Invalid metric_groups found in snowflake conf.yaml: fake.metric.group"
)
|
def paint_fill(image, target_color, location, init_color=None):
dims = image.shape
if location[0] >= dims[0] or location[1] >= dims[1] or location[0] < 0 or location[1] < 0:
return # end recursion (1) if the location is invalid
if init_color is None:
init_color = image[location[0], location[1]]
if image[location[0], location[1]] == target_color or image[location[0], location[1]] != init_color:
return # end recursion (2) if cell color is already the target color or (3) if cell color is not initial color
image[location[0], location[1]] = target_color
paint_fill(image, target_color, (location[0] - 1, location[1]), init_color)
paint_fill(image, target_color, (location[0] + 1, location[1]), init_color)
paint_fill(image, target_color, (location[0], location[1] - 1), init_color)
paint_fill(image, target_color, (location[0], location[1] + 1), init_color)
return
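# Minimal usage sketch (an assumption, not part of the original function): paint_fill
# indexes the image with tuples, so a NumPy 2D array is assumed here.
if __name__ == "__main__":
    import numpy as np

    image = np.array([[0, 0, 1],
                      [0, 1, 1],
                      [1, 1, 0]])
    paint_fill(image, target_color=2, location=(0, 0))
    print(image)  # the connected block of 0s around (0, 0) is now 2; the isolated 0 stays 0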
|
# coding=utf-8
import re
import sys
import pkuseg
def split(filename):
new_f = filename + '.new'
c = []
pattern = re.compile(r'[!?。\.\!\?]')
with open(filename) as f1, open(new_f, 'w') as f2:
for line in f1:
m = re.findall(pattern, line)
cc = re.split(pattern, line.strip('\n').strip())
for i, t in enumerate(cc):
if len(t) > 0 and len(m) > i:
c.append(t + m[i] + '\n')
elif len(t) > 0:
print(t)
c.append(t + '\n')
if len(c) > 300:
f2.writelines(c)
c = []
if len(c) > 0:
f2.writelines(c)
if __name__ == '__main__':
pkuseg.test(sys.argv[1], sys.argv[2], nthread=20)
# split("/home/user_data55/wangdq/code/mosesdecoder/scripts/ems/support/tt.txt")
|
s = 'azcbobobegghakl'
count = 0
threeCharStringCount = len(s)-2
for i in range(0, threeCharStringCount):
print(i)
print(s[i:i+3])
if s[i:i+3] == "bob":
count += 1
print("yep")
else:
print("nope")
print("Number of times bob occurs is: " + str(count))
|
#!/usr/bin/python3
"""Simple Flask app, with additional route"""
from flask import Flask, abort, render_template
from models import storage
from models.state import State
from models.amenity import Amenity
from models.place import Place
from uuid import uuid4
app = Flask(__name__)
app.url_map.strict_slashes = False
# begins rendering
@app.route('/4-hbnb')
def filters():
"""load filters"""
cache_id = uuid4()
states = storage.all(State).values()
amenities = storage.all(Amenity).values()
places = storage.all(Place).values()
return render_template('4-hbnb.html',
states=states,
amenities=amenities,
places=places,
cache_id=cache_id)
@app.teardown_appcontext
def do_teardown(exception):
"""Closes session"""
storage.close()
if __name__ == '__main__':
"""Main Flask App"""
app.run(host='0.0.0.0', port=5000)
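# Usage sketch (assumes the storage engine is configured and populated):
# run this module directly, then request the route, e.g.
#   curl http://0.0.0.0:5000/4-hbnb
# which renders 4-hbnb.html with every State, Amenity and Place plus a fresh cache_id.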
|
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template, abort, g, request, redirect, \
url_for, jsonify
from .models import Group, Member, GameLog
from fossil.fb import Graph
from fossil.auth.decorators import login_required, LoginRequired
from google.appengine.ext import db
import random
import json
blue_groups = Blueprint('groups', __name__, url_prefix='/groups')
@blue_groups.route('/')
@login_required
def group_list():
groups = Group.get_groups_by_user(g.user)
return render_template('group_list.html', groups=groups)
@blue_groups.route('/add', methods=['GET', 'POST'])
@login_required
def group_add():
graph = Graph(g.fb_session.token)
if request.method == 'GET':
id_list = []
for group in Group.get_groups_by_user(g.user):
id_list.append(group.group_id)
groups = graph.get_groups()
groups = filter(lambda x: x['id'] not in id_list, groups)
return render_template('group_add.html', groups=groups)
elif request.method == 'POST':
group_id = request.form.get('group_id', False)
if not group_id:
return abort(404)
group_info = graph.get('/{0}'.format(group_id), {})
group = Group()
group.name = group_info['name']
group.cover_image = 'http://nodong.org/files/attach/images/903928/904/088/46c43230115f3db7b254f1d0ec7b2e81.jpg'
group.group_id = group_info['id']
group.put()
group_members = graph.get('/{0}/members'.format(group_id), {})
# NOTE: fetching every member's profile one request at a time like this should never be done in practice; it will almost certainly time out
for user in group_members['data']:
user_info = graph.get('/{0}'.format(user['id']), {})
member = Member()
member.group = group
member.name = user_info['name']
image_url = graph.get('/{0}/picture'.format(user_info['id']), {
'redirect': False,
'type': 'large',
})['data']['url']
member.profile_image = image_url
member.facebook_id = user_info['id']
member.put()
return redirect(url_for('groups.group_list'))
else:
abort(403)
@blue_groups.route('/<int:group_id>')
@blue_groups.route('/<int:group_id>/members')
@login_required
def group_members(group_id):
group = Group.get_by_id_with_user(group_id, g.user)
if not group:
abort(404)
members = Member.all().filter('group =', group.key())
return render_template('group_members.html', group=group, members=members)
# @blue_groups.route('/<int:group_id>/rank')
# @login_required
# def group_rank(group_id):
# group = Group.get_by_id_with_user(group_id, g.user)
# if not group:
# abort(404)
# GameLog.all().filter('group =', group.key()).filter('')
# return render_template('/group_rank.html', group=group)
@blue_groups.route('/<int:group_id>/exam')
@login_required
def group_exam(group_id):
group = Group.get_by_id_with_user(group_id, g.user)
if not group:
abort(404)
return render_template('/group_exam.html', group=group)
@blue_groups.route('/<int:group_id>/exam/get_question')
@login_required
def group_exam_get_question(group_id):
group = Group.get_by_id_with_user(group_id, g.user)
if not group:
abort(404)
last_game = GameLog.all() \
.filter('group =', group.key()) \
.filter('user =', g.user.key()) \
.filter('status =', GameLog.NOT_SOLVED) \
.get()
if last_game:
print "######## GAME ALREADY EXISTS!!!! ############"
game = last_game
answers_ids = json.loads(game.answers)
answers_names = []
for member_facebook_id in answers_ids:
member = Member.all() \
.filter('group =', group.key()) \
.filter('facebook_id =', member_facebook_id) \
.get()
answers_names.append(member.name)
else:
members = []
for member in group.member_set:
members.append(member)
if members is None or len(members) == 0:
return 'No members!!'
while True:
question_member = random.choice(members)
if question_member.facebook_id == g.user.facebook_id:
continue
break
answer_members = []
for i in range(4):
while True:
member = random.choice(members)
if member.facebook_id == g.user.facebook_id:
continue
if member.facebook_id == question_member.facebook_id:
continue
if member in answer_members:
continue
break
answer_members.append(member)
if random.randint(1, 5) != 1:
# Case: the correct answer is among the four choices
answer_num = random.randint(0, 3)
answer_members[answer_num] = question_member
answers_ids = [
answer_member.facebook_id for answer_member in answer_members]
answers_names = [
answer_member.name for answer_member in answer_members]
game = GameLog(group=group,
user=g.user,
question_member=question_member,
answers=json.dumps(answers_ids),
status=GameLog.NOT_SOLVED)
game.put()
return jsonify({'game_id': game.key().id_or_name(),
'question_image': game.question_member.profile_image,
'answers': answers_names})
@db.transactional
def end_game(game, status, user_select_member=None):
if user_select_member:
game.user_select_member = user_select_member
game.status = status
game.put()
if status == GameLog.INCORRECT:
message = "Blehhh~ I'm so hopelessly dumb I don't even know my seniors' names or faces~~~~"
graph = Graph(g.fb_session['token'])
graph.post('/{0}/feed'.format(game.group.group_id), {'message': message})
@blue_groups.route('/<int:group_id>/exam/<int:game_id>/check',
methods=['POST'])
@login_required
def group_exam_check(group_id, game_id):
group = Group.get_by_id_with_user(group_id, g.user)
if not group:
abort(404)
game = GameLog.get_by_id(game_id)
if not game or game.user.key() != g.user.key() or \
game.group.key() != group.key() or \
game.status != GameLog.NOT_SOLVED:
abort(404)
selected = int(request.form['selected']) - 1 # 0(Not exists) 1 2 3 4
question_member = game.question_member
answers = json.loads(game.answers)
if question_member.facebook_id in answers:
# The correct answer is among the choices
if selected >= 0 and answers[selected] == question_member.facebook_id:
# Correct!
end_game(game, GameLog.CORRECT, question_member)
return jsonify({'result': 'correct'})
else:
# Incorrect!
if selected >= 0:
user_select_member = Member.all() \
.filter('group =', group.key()) \
.filter('facebook_id =', answers[selected]) \
.get()
end_game(game, GameLog.INCORRECT, user_select_member)
else:
end_game(game, GameLog.INCORRECT, question_member)
return jsonify({'result': 'incorrect',
'answer_name': question_member.name})
else:
# The correct answer is not among the choices
if selected == -1:
# Correct!
end_game(game, GameLog.CORRECT, question_member)
return jsonify({'result': 'correct'})
else:
# Incorrect!
user_select_member = Member.all() \
.filter('group =', group.key()) \
.filter('facebook_id =', answers[selected]) \
.get()
end_game(game, GameLog.INCORRECT, user_select_member)
# TODO: Post article on group.
return jsonify({'result': 'incorrect',
'answer_name': question_member.name})
|
import numpy as np
def tau(pv: float, compr_total: float, pi: float):
""" Time Constant
Parameters
---
pv : float
pore volume
compr_total : float
total compressibility
pi : float
productivity index
"""
return pv*compr_total/pi
def pvct(pv: float, compr_total: float):
""" Pore Volume times Total Compressibility
Parameters
---
pv : float
pore volume
compr_total : float
total compressibility
Return
---
pvct : float
pore volume total compressibility
"""
return pv*compr_total
def icrm(rate: list, bhp: list, pres_init: float, tau: float, pvct: float):
""" Integrated Capacitance-Resistive Model (Nguyen, 2012)
Parameters
---
rate : list
production rate [V/T]
bhp : list
bottom-hole pressure [P]
pres_init : float
initial reservoir pressure [P]
tau : float
time constant [T]
pvct : float
pore volume total compressibility [V/P]
Return
---
cumprod : ndarray
cumulative production [V]
"""
rate = np.asarray(rate)
bhp = np.asarray(bhp)
cumprod = pvct*pres_init - tau*rate - pvct*bhp
return cumprod
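# Minimal usage sketch (illustrative numbers only, not from the original module):
# compute the time constant and storage term, then the ICRM cumulative production.
if __name__ == "__main__":
    pore_volume = 1.0e6        # pore volume [V]
    ct = 1.0e-5                # total compressibility [1/P]
    prod_index = 5.0           # productivity index [V/(T*P)]

    time_constant = tau(pore_volume, ct, prod_index)
    storage = pvct(pore_volume, ct)

    rates = [100.0, 120.0, 90.0]       # production rates [V/T]
    bhps = [2900.0, 2850.0, 2800.0]    # bottom-hole pressures [P]
    cum = icrm(rates, bhps, pres_init=3000.0, tau=time_constant, pvct=storage)
    print(cum)  # cumulative production per time step [V]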
|
#!/usr/bin/env python3
"""
Push file to robot
"""
import argparse
import requests
address = 'http://192.168.49.1:8080/'
javafilesave = 'java/file/save?f='
def main(args):
with open(args.file, 'r') as f:
code = f.read()
cookieReq = requests.get(address)
# TODO: cache this cookie
consoleSession = cookieReq.cookies
print('Pushing:', address + javafilesave + args.basepackage + args.file)
saveReq = requests.post(address + javafilesave + args.basepackage +
args.file, data={'data': code},
cookies=consoleSession)
# TODO: check for errors
print(saveReq)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-p', dest='basepackage',
default='/src/org/firstinspires/ftc/teamcode/',
help='package directory, default = src/org/firstinspires/ftc/teamcode/')
parser.add_argument('file', help='filename to push')
main(parser.parse_args())
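# Example invocation (hypothetical script and file names, default package path):
#   python push.py TeleOp.java
#   python push.py -p /src/org/firstinspires/ftc/teamcode/ TeleOp.java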
|
# -*- coding: utf-8 -*-
"""
Main module.
"""
import os
import sys
from .util import click_funcs
from .util import shell_funcs
def run_conda_create(name):
"""
The program's main routine.
"""
# Defaults
env_path = shell_funcs.get_default_conda_env_path()
py_version = '3.6'
libs = {'pylint': 'y',
'requests': 'y'}
env_path = click_funcs.click_prompt_custom_var('Path to environment',
"" if env_path == '0' else env_path)
if not os.path.isdir(env_path):
print(env_path + ' is not a valid path. Exiting.')
sys.exit(1)
py_version = click_funcs.click_prompt_custom_var('Python version', py_version)
for key in libs.keys():
tmp = click_funcs.click_prompt_yes_no('Install {}?'.format(key), default=libs.get(key))
libs[key] = tmp
# add options for kivy
tmp_lst = [key for key in libs if libs[key] == True]
cmd = 'conda create -y -q -p {} -c conda-forge python={} {}'
shell_funcs.run_in_shell_without_output(cmd.format(os.path.join(env_path, name),
py_version,
" ".join(tmp_lst)))
create_dirs = click_funcs.click_prompt_yes_no('Create de-/activate.d directories?', 'y')
if create_dirs:
shell_funcs.create_env_dirs(env_path, name)
# print de-/activate-script directory
|
from datetime import datetime
import json
import unittest
from elasticsearch.helpers import bulk
from elasticsearch_dsl import Index
from elasticsearch_dsl.connections import connections
from elasticsearch_dsl.document import DocType
from elasticsearch_dsl.field import String
from pyspark.conf import SparkConf
from pyspark.rdd import RDD
from pyspark_elastic.context import EsSparkContext
class PysparkElasticTestCase(unittest.TestCase):
class TestDoc(DocType):
title = String()
@classmethod
def setUpClass(cls):
conf = SparkConf()
conf.set('spark.ui.showConsoleProgress', 'false')
cls.sc = EsSparkContext(conf=conf.setAppName("PySpark Elastic Test"))
@classmethod
def tearDownClass(cls):
cls.sc.stop()
def setUp(self):
self.index = index = Index('pyspark_elastic')
index.settings(number_of_shards=4)
index.create(ignore=400)
index.doc_type(self.TestDoc)
self.resource = self.index._name + '/' + self.TestDoc._doc_type.name
def tearDown(self):
self.index.delete()
def rdd(self, query='', doc_type=None, cache=True, as_json=True, **kwargs):
doc_type = doc_type or self.TestDoc._doc_type.name
rdd = self.sc.esRDD(self.index._name + '/' + doc_type, query, **kwargs)
if as_json:
rdd = rdd.loads()
if cache:
rdd = rdd.cache()
return rdd
class TestsWithData(PysparkElasticTestCase):
def setUp(self):
super(TestsWithData, self).setUp()
self.docs = [
self.TestDoc(title='doc-' + str(i))
for i in range(1000)
]
actions = [d.to_dict(include_meta=True) for d in self.docs]
inserted, errors = bulk(connections.get_connection(), actions=actions, refresh=True)
self.assertEqual(inserted, len(actions))
self.assertEqual(len(errors), 0)
class ReadTests(TestsWithData):
def test_first(self):
doc = self.rdd().first()
self.assertTrue(doc != None)
self.assertEqual(len(doc), 2)
k, v = doc
self.assertIsInstance(k, basestring)
self.assertIsInstance(v, dict)
self.assertEqual(len(v), 1)
self.assertTrue('title' in v)
title = v['title']
self.assertIsInstance(title, basestring)
def test_take(self):
self.assertEquals(len(self.rdd().take(10)), 10)
def test_count(self):
self.assertEquals(self.rdd().count(), len(self.docs))
def test_read_metadata(self):
read = self.rdd(
read_metadata=True,
read_metadata_field='_meta',
read_metadata_version=True,
).collect()
for _, doc in read:
self.assertIn('_meta', doc)
meta = doc['_meta']
self.assertIn('_score', meta)
self.assertIn('_index', meta)
self.assertIn('_type', meta)
self.assertIn('_id', meta)
self.assertIn('_version', meta)
def test_default_resource(self):
self.assertEqual(self.rdd(resource=self.resource).count(), len(self.docs))
# es.index.read.missing.as.empty
# def test_read_missing_index(self):
# es.field.read.empty.as.null
# def test_empty_fields(self):
# class QueryTests(PysparkElasticTestCase):
# test querying with ?uri_query
# def test_uri_query(self):
# test querying with { dsl }
# def test_dsl_query(self):
# test querying with an external json file containing the query dsl
# def test_ext_res_query(self):
# es.field.read.validate.presence
# def test_query_check(self):
class WriteTests(PysparkElasticTestCase):
def setUp(self):
super(WriteTests, self).setUp()
self.docs = self.sc.parallelize(xrange(100)).map(lambda i: dict(title='doc-' + str(i)))
def assertWritten(self, docs=None):
docs = docs or self.docs
if isinstance(docs, RDD):
docs = docs.collect()
read = self.rdd().collect()
self.assertEqual(set(str(d[1]['title']) for d in read), set(str(d['title']) for d in docs))
return read
def test_save_dicts(self):
self.docs.saveToEs(self.resource)
self.assertWritten()
def test_save_json(self):
self.docs.map(json.dumps).saveJsonToEs(self.resource)
self.assertWritten()
def test_save_binary_json(self):
self.docs.map(lambda d: json.dumps(d).encode()).saveJsonToEs(self.resource)
self.assertWritten()
def test_save_with_id(self):
self.docs = self.sc.parallelize(xrange(100)).map(
lambda i: dict(
id=str(i),
title='doc-' + str(i),
)
)
self.docs.saveToEs(
self.index._name + '/' + self.TestDoc._doc_type.name,
mapping_id='id'
)
self.assertWritten()
written = self.docs.collect()
read = self.rdd().collectAsMap()
self.assertEqual(len(written), len(read))
for doc in written:
self.assertEqual(str(doc['title']), read[doc['id']]['title'])
# def test_create(self):
# pass
#
# def test_update(self):
# pass
#
# def test_upsert(self):
# pass
#
# def test_save_with_parent(self):
# pass
#
# def test_save_with_version(self):
# pass
#
# def test_save_with_routing(self):
# pass
#
# def test_save_with_ttl(self):
# pass
#
# def test_save_with_timestamp(self):
# pass
#
# def test_save_include_fields(self):
# # es.mapping.include
# pass
#
def test_save_exclude_fields(self):
docs = [
dict(title='1', body='a'),
dict(title='2', body='b'),
dict(title='1', body='c'),
]
self.sc.parallelize(docs).saveToEs(self.resource, mapping_exclude='body')
read = self.rdd().collect()
self.assertEqual(len(read), 3)
for doc in read:
self.assertNotIn('body', doc)
# def test_save_with_script(self):
# # es.update.script
# # es.update.script.lang
# # es.update.script.params
# pass
#
# TODO
# def test_autocreate_index(self):
# index = Index('pyspark_elastic_non_existing')
# index.delete(ignore=404)
#
# def save():
# self.docs.saveToEs(index._name + '/doc_type', index_auto_create='no')
# self.assertRaises(Exception, save)
def test_default_resource(self):
self.docs.saveToEs(resource=self.resource)
self.assertWritten()
def test_dynamic_resource(self):
Index('test-1').delete(ignore=404)
Index('test-2').delete(ignore=404)
docs1 = [
dict(idx='test-1', body='something'),
dict(idx='test-1', body='else'),
]
docs2 = [
dict(idx='test-2', body='abra'),
dict(idx='test-2', body='ca'),
dict(idx='test-2', body='dabra'),
]
self.sc.parallelize(docs1 + docs2).saveToEs(resource_write='{idx}/docs')
self.assertEqual(self.sc.esRDD('test-1/docs').count(), 2)
self.assertEqual(self.sc.esRDD('test-2/docs').count(), 3)
self.assertEqual(
set(d['body'] for d in self.sc.esRDD('test-1/docs').loads().collectAsMap().values()),
set(d['body'] for d in docs1)
)
def test_dynamic_resource_timestamp(self):
Index('test-2015-11').delete(ignore=404)
Index('test-2015-12').delete(ignore=404)
docs_nov = [
dict(timestamp=datetime.fromtimestamp(1448363875).isoformat(), body='Lorem'),
dict(timestamp=datetime.fromtimestamp(1448363876).isoformat(), body='ipsum'),
dict(timestamp=datetime.fromtimestamp(1448363877).isoformat(), body='dolor'),
]
docs_dec = [
dict(timestamp=datetime.fromtimestamp(1449400621).isoformat(), body='fee'),
dict(timestamp=datetime.fromtimestamp(1449400622).isoformat(), body='fi'),
dict(timestamp=datetime.fromtimestamp(1449400623).isoformat(), body='fo'),
dict(timestamp=datetime.fromtimestamp(1449400623).isoformat(), body='fum'),
]
self.sc.parallelize(docs_nov + docs_dec).saveToEs(resource_write='test-{timestamp:YYYY-MM}/docs')
self.assertEqual(self.sc.esRDD('test-2015-11/docs').count(), 3)
self.assertEqual(self.sc.esRDD('test-2015-12/docs').count(), 4)
self.assertEqual(
set(d['body'] for d in self.sc.esRDD('test-2015-11/docs').loads().collectAsMap().values()),
set(d['body'] for d in docs_nov)
)
# def test_serialization_configuration(self):
# # es.batch.size.bytes
# # es.batch.size.entries
# # es.batch.write.refresh
# # es.batch.write.retry.count
# # es.batch.write.retry.wait
# pass
# class ConfTests(PysparkElasticTestCase):
#
# def test_timeout(self):
# # es.http.timeout
# pass
#
# def test_retries(self):
# # es.http.timeout
# pass
#
# def test_scroll_keepalive(self):
# # es.scroll.keepalive
# pass
#
# def test_scroll_size(self):
# # es.scroll.size
# pass
#
# def test_task_timeout(self):
# # es.action.heart.beat.lead
# pass
#
#
# class SecurityTests(PysparkElasticTestCase):
# def test_authentication(self):
# # es.net.http.auth.user
# # es.net.http.auth.pass
# pass
if __name__ == '__main__':
connections.create_connection()
unittest.main()
# suite = unittest.TestLoader().loadTestsFromTestCase(PushDownTests)
# unittest.TextTestRunner().run(suite)
|
"""
=============================================
Processes PPG Cycles (:mod:`pypg.cycles`)
=============================================
Identify characteristics and extract features
from PPG Cycles.
"""
import numpy as np
import pandas as pd
from scipy import signal
from .plots import marks_plot
def find_onset(ppg, sampling_frequency, factor=0.667, distance=None, height=None,
threshold=None, prominence=None, width=None, wlen=None,
rel_height=0.5, plateau_size=None, verbose=False):
"""
Finds the local minima that correspond to the onsets/start of the cardiac cycle(s).
Parameters
----------
ppg : pandas.Series, ndarray
The PPG signal.
sampling_frequency : int
The sampling frequency of the signal in Hz.
factor: float, optional
Number that is used to calculate the distance in relation to
the sampling_frequency, by default 0.667 (or 66.7%).
distance : number, optional
Minimum horizontal distance (>=1) between the cycles start points, by default None.
However, the function assumes (factor * sampling_frequency) when None is given.
For more check the SciPy documentation.
height : number or ndarray or sequence, optional
Required height of peaks. Either a number, None, an array matching x or
a 2-element sequence of the former. For more check the SciPy documentation.
threshold : number or ndarray or sequence, optional
Required threshold of peaks, the vertical distance to its neighboring samples.
Either a number, None, an array matching x or a 2-element sequence of the former,
by default None. For more check the SciPy documentation.
prominence : number or ndarray or sequence, optional
Required prominence of peaks. Either a number, None, an array matching x or
a 2-element sequence of the former, by default None.
For more check the SciPy documentation.
width : number or ndarray or sequence, optional
Required width of peaks in samples. Either a number, None, an array matching x
or a 2-element sequence of the former, by default None.
For more check the SciPy documentation.
wlen : int, optional
Used for calculation of the peaks prominences, thus it is only used if one of
the arguments prominence or width is given, by default None.
For more check the SciPy documentation.
rel_height : float, optional
Used for calculation of the peaks width, thus it is only used if width
is given, by default 0.5 as defined by SciPy.
For more check the SciPy documentation.
plateau_size : number or ndarray or sequence, optional
Required size of the flat top of peaks in samples. Either a number, None,
an array matching x or a 2-element sequence of the former, by default None.
For more check the SciPy documentation.
verbose : bool, optional
If True, plots the signal with the detected onsets marked, by default False.
Returns
-------
minima: np.ndarray
Indices in the PPG signal corresponding to the start/onset of each cycle.
Raises
----------
Exception
When PPG values are neither pandas.Series nor ndarray.
References
----------
https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html
"""
if isinstance(ppg, pd.core.series.Series):
signal_values = ppg.values
elif isinstance(ppg, np.ndarray):
signal_values = ppg
else:
raise Exception('PPG values not accepted, enter either'
+' pandas.Series or ndarray.')
if distance is None:
distance=factor*sampling_frequency
# invert the signal so that the onsets (local minima) become detectable peaks
peaks_data = signal.find_peaks(-signal_values, distance=distance, height=height,
threshold=threshold, prominence=prominence, width=width,
wlen=wlen, rel_height=rel_height, plateau_size=plateau_size)
minima = peaks_data[0]
if verbose:
marks_plot(ppg, minima, figure_path=None)
return minima
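# Minimal usage sketch (not part of the original module): a synthetic sine wave
# standing in for a PPG signal, with an assumed sampling frequency of 100 Hz.
if __name__ == "__main__":
    fs = 100
    t = np.arange(0, 10, 1 / fs)
    synthetic_ppg = np.sin(2 * np.pi * 1.2 * t)  # ~72 "cycles" per minute
    onsets = find_onset(synthetic_ppg, fs)
    print(onsets)  # indices where each synthetic cycle starts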
|
import unittest
import os
import cProfile
import celescope.snp.variant_calling as vc
from tests.unittests.__init__ import TEST_DIR_ROOT
class Test_snp(unittest.TestCase):
def setUp(self):
os.chdir(f'{TEST_DIR_ROOT}/snp/')
self.sample = 'test1'
self.vcf_file = f'{self.sample}/07.variant_calling/{self.sample}_merged.vcf'
def test_cell_UMI(self):
cid = 7996
outdir = f'{self.sample}/07.variant_calling/'
df_UMI = vc.cell_UMI(cid, outdir, self.vcf_file)
print(df_UMI)
@unittest.skip('skip')
def test_cell_UMI_2(self):
cid = 2301
os.chdir('/SGRNJ03/randd/RD20081701_SCOPEv2_Dynaseq/20210820/')
sample = 'Mito_9geneMix_0812'
outdir = f'{sample}/07.variant_calling/'
vcf_file = f'{sample}/07.variant_calling/{sample}_merged.vcf'
df_UMI = vc.cell_UMI(cid, outdir, vcf_file)
print(df_UMI)
if __name__ == '__main__':
unittest.main()
|
"""
app.recipe_users.tests.test_recipe_users_admin
----------------------------------------------
Tests cases for recipe_users admin functionality
"""
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class TestRecipeUsersAdmin(TestCase):
"""
Test cases for recipe_users admin
"""
def setUp(self) -> None:
self.test_client = Client()
self.test_admin_user = get_user_model().objects.create_superuser(
email='test_admin.foo@bar.test',
password='Passwd123'
)
self.test_client.force_login(self.test_admin_user)
self.test_user = get_user_model().objects.create_user(
email='foo@bar.test',
password='Passwd123',
name='Foo Bar Test'
)
def test_users_admin_listed(self):
"""
Test that the users are listed on the users admin page
:return: None
"""
test_url = reverse('admin:recipe_users_user_changelist')
test_response = self.test_client.get(test_url)
self.assertContains(test_response, self.test_user.name)
self.assertContains(test_response, self.test_user.email)
def test_admin_users_page_change(self):
"""
Test the admin user page works
:return: None
"""
test_url = reverse(
'admin:recipe_users_user_change',
args=[self.test_user.id]
)
test_response = self.test_client.get(test_url)
self.assertEqual(test_response.status_code, 200)
def test_admin_create_user(self):
"""
Test that the admin create user page works
:return: None
"""
test_url = reverse('admin:recipe_users_user_add')
test_response = self.test_client.get(test_url)
self.assertEqual(test_response.status_code, 200)
|
from django.db import models
from django_extensions.db.fields.json import JSONField
from django_extensions.tests.fields import FieldTestCase
class TestModel(models.Model):
a = models.IntegerField()
j_field = JSONField()
class JsonFieldTest(FieldTestCase):
def testCharFieldCreate(self):
j = TestModel.objects.create(a=6, j_field=dict(foo='bar'))
self.assertEqual(j.a, 6)
def testDefault(self):
j = TestModel.objects.create(a=1)
self.assertEqual(j.j_field, {})
def testEmptyList(self):
j = TestModel.objects.create(a=6, j_field=[])
self.assertTrue(isinstance(j.j_field, list))
self.assertEqual(j.j_field, [])
|
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, make_scorer
from data_helper import DataHelper
import numpy as np
import pickle
import random
class TreeModelBase():
"""
Base class for all models based on Decision trees like Random Forest or Extra Trees
"""
def __init__(self, double_model=False):
"""
Constructor
:param double_model: Bool indicating whether the model is single or double. A single model uses one tree model to predict all days.
A double model uses one model for weekday prediction and another for weekend prediction.
"""
self.dh = DataHelper()
self.double_model = double_model
def load_model(self, pickle_path=None):
"""
Loads the model from a pickle at pickle_path. If pickle_path is None, loads from the default location defined by self.name.
:param pickle_path: path where to load model from
"""
if pickle_path is None:
pickle_path = 'data/'+self.name+'.pickle'
if self.double_model:
with open(pickle_path, 'rb') as f:
self.weekend_model, self.weekday_model = pickle.load(f)
else:
with open(pickle_path, 'rb') as f:
self.model = pickle.load(f)
def save_model(self, pickle_path=None):
"""
Saves the model to a pickle at pickle_path. If pickle_path is None, saves to the default location defined by self.name.
:param pickle_path: path where to save model with file name
"""
if pickle_path is None:
pickle_path = 'data/'+self.name+'.pickle'
if self.double_model:
with open(pickle_path, 'wb') as f:
pickle.dump([self.weekend_model, self.weekday_model], f)
else:
with open(pickle_path, 'wb') as f:
pickle.dump(self.model, f)
def fit_on_training_set(self):
"""
Fit model (or models for double predictor) on training data.
"""
if self.double_model:
self.fit_two_models()
else:
x_train, y_train, x_test, y_test = self.dh.generate_feature_vectors(self.columns, self.time_steps_back)
self.model.fit(x_train, y_train.ravel())
def get_mse(self):
"""
Computes the mean squared error of the model on the testing days.
:return: mean squared error
"""
if self.double_model:
columns_to_drop = self.dh.columns_to_drop_from_columns_to_keep(self.columns)
weekdays, weekends = self.split_data_to_weekend_and_weekday(self.dh.get_testing_days())
mse_weekday = list()
mse_weekend = list()
for day in weekdays:
x, y = self.dh.get_feature_vectors_from_days([day], columns_to_drop, self.time_steps_back,1,True)
mse_weekday.append(self.dh.mse_on_day(x, y, self.weekday_model, self.time_steps_back))
for day in weekends:
x, y = self.dh.get_feature_vectors_from_days([day], columns_to_drop, self.time_steps_back,1,True)
mse_weekend.append(self.dh.mse_on_day(x, y, self.weekend_model, self.time_steps_back))
print('Weekday MSE = %.1f, Weekend MSE = %.1f'%( (sum(mse_weekday)/len(weekdays)), (sum(mse_weekend)/len(weekends))))
return (sum(mse_weekday)+sum(mse_weekend))/(len(weekdays) + len(weekends))
else:
return self.dh.mse_on_testing_days(self.model, self.columns, self.time_steps_back)
def show_n_predictions(self, n):
"""
Plots `n` predictions on testing data using this model.
:param n: If integer then represents number of random testing days. If list of integers
then represents day ids from testing days. Last possible option is
string `all` that will plot all testing days.
"""
if self.double_model:
weekdays = list()
weekends = list()
for i, day in enumerate(self.dh.get_testing_days()):
if day.data['day_of_week'].iloc[0] < 5:
weekdays.append(i)
else:
weekends.append(i)
random.shuffle(weekdays)
random.shuffle(weekends)
print('Weekdays')
self.dh.show_n_days_prediction(self.weekday_model, self.columns, weekdays[:n], self.time_steps_back, False, True)
print('\n\nWeekends')
self.dh.show_n_days_prediction(self.weekend_model, self.columns, weekends[:n], self.time_steps_back, False, True)
else:
self.dh.show_n_days_prediction(self.model, self.columns, n, self.time_steps_back, False, True)
def print_mse(self):
"""
Prints the mean squared error.
"""
print('MSE = %.2f'%(self.get_mse()))
def fit_two_models(self):
all_days = self.dh.get_training_days(True)
weekdays, weekends = self.split_data_to_weekend_and_weekday(all_days)
# columns_to_drop = self.dh.columns_to_drop_from_columns_to_keep(self.columns)
columns_to_drop = ['time']
print('columns to drop: ',columns_to_drop)
x_weekdays, y_weekdays = self.dh.get_feature_vectors_from_days(weekdays, columns_to_drop, self.time_steps_back)
x_weekends, y_weekends = self.dh.get_feature_vectors_from_days(weekends, columns_to_drop, self.time_steps_back)
self.weekday_model.fit(x_weekdays, y_weekdays.ravel())
self.weekend_model.fit(x_weekends, y_weekends.ravel())
def split_data_to_weekend_and_weekday(self, days_list):
weekdays = list()
weekends = list()
for day in days_list:
if day.data['day_of_week'].iloc[0] < 5:
weekdays.append(day)
else:
weekends.append(day)
return weekdays, weekends
class MyExtraTreesRegressor(TreeModelBase):
def __init__(self):
super(MyExtraTreesRegressor, self).__init__()
self.model = ExtraTreesRegressor(random_state=17, n_estimators=50, max_depth=35, min_samples_split=2, min_samples_leaf=1, max_features=40, max_leaf_nodes=None)
self.time_steps_back = 9
self.columns = ['pool', 'lines_reserved', 'day_of_week', 'month', 'day', 'hour', 'minute', 'holiday', 'reserved_Lavoda', 'reserved_Club Junior', 'reserved_Elab', 'reserved_Vodnik', 'reserved_Spirala', 'reserved_Amalka', 'reserved_Dukla', 'reserved_Lodicka', 'reserved_Elab team', 'reserved_Sports Team', 'reserved_Modra Hvezda', 'reserved_VSC MSMT', 'reserved_Orka', 'reserved_Activity', 'reserved_Aquamen', 'reserved_Zralok', 'reserved_SK Impuls', 'reserved_Motylek', 'reserved_3fit', 'reserved_Jitka Vachtova', 'reserved_Hodbod', 'reserved_DUFA', 'reserved_The Swim', 'reserved_Neptun', 'reserved_Strahov Cup', 'reserved_Apneaman', 'reserved_Michovsky', 'reserved_Betri', 'reserved_Pospisil', 'reserved_Vachtova', 'reserved_Riverside', 'reserved_Vodni polo Sparta', 'reserved_Road 2 Kona', 'reserved_Water Polo Sparta Praha', 'reserved_Sucha', 'reserved_Totkovicova', 'reserved_DDM Spirala', 'reserved_PS Perla', 'reserved_Dufkova - pulka drahy', 'reserved_Pavlovec', 'reserved_Sidorovich', 'reserved_OS DUFA', 'temperature_binned', 'wind_binned', 'humidity_binned', 'precipitation_binned', 'pressure_binned', 'reserved_other', 'minute_of_day', 'year']
self.name = 'MyExtraTreesRegressor'
class MyExtraTreesClassifier(TreeModelBase):
def __init__(self):
super(MyExtraTreesClassifier, self).__init__()
self.model = ExtraTreesClassifier(random_state=17, n_estimators=10, max_depth=50, min_samples_split=5, min_samples_leaf=2)
self.time_steps_back = 10
self.columns = ['pool','day_of_week','month','minute_of_day', 'year', 'reserved_Vodnik','lines_reserved']
self.name = 'MyExtraTreesClassifier'
class MyRandomForestRegressor(TreeModelBase):
def __init__(self):
super(MyRandomForestRegressor, self).__init__()
self.model = RandomForestRegressor(random_state=17, n_estimators=30, max_depth=20, min_samples_split=5, min_samples_leaf=1, max_features=20, max_leaf_nodes=None)
self.time_steps_back = 10
self.columns = ['pool', 'lines_reserved', 'day_of_week', 'month', 'day', 'hour', 'minute', 'holiday', 'reserved_Lavoda', 'reserved_Club Junior', 'reserved_Elab', 'reserved_Vodnik', 'reserved_Spirala', 'reserved_Amalka', 'reserved_Dukla', 'reserved_Lodicka', 'reserved_Elab team', 'reserved_Sports Team', 'reserved_Modra Hvezda', 'reserved_VSC MSMT', 'reserved_Orka', 'reserved_Activity', 'reserved_Aquamen', 'reserved_Zralok', 'reserved_SK Impuls', 'reserved_Motylek', 'reserved_3fit', 'reserved_Jitka Vachtova', 'reserved_Hodbod', 'reserved_DUFA', 'reserved_The Swim', 'reserved_Neptun', 'reserved_Strahov Cup', 'reserved_Apneaman', 'reserved_Michovsky', 'reserved_Betri', 'reserved_Pospisil', 'reserved_Vachtova', 'reserved_Riverside', 'reserved_Vodni polo Sparta', 'reserved_Road 2 Kona', 'reserved_Water Polo Sparta Praha', 'reserved_Sucha', 'reserved_Totkovicova', 'reserved_DDM Spirala', 'reserved_PS Perla', 'reserved_Dufkova - pulka drahy', 'reserved_Pavlovec', 'reserved_Sidorovich', 'reserved_OS DUFA', 'temperature_binned', 'wind_binned', 'humidity_binned', 'precipitation_binned', 'pressure_binned', 'reserved_other', 'minute_of_day', 'year']
self.name = 'MyRandomForestRegressor'
class MyRandomForestClassifier(TreeModelBase):
def __init__(self):
super(MyRandomForestClassifier, self).__init__()
self.model = RandomForestClassifier(random_state=17, n_estimators=10, max_depth=30, min_samples_split=2, min_samples_leaf=2)
self.time_steps_back = 5
self.columns = ['pool','day_of_week','month','minute_of_day', 'year', 'reserved_Vodnik','lines_reserved']
self.name = 'MyRandomForestClassifier'
class DoubleExtraTreesRegressor(TreeModelBase):
def __init__(self):
super(DoubleExtraTreesRegressor, self).__init__(True)
self.weekend_model = ExtraTreesRegressor(random_state=17, n_estimators=50, max_depth=35, min_samples_split=2, min_samples_leaf=1, max_features=40, max_leaf_nodes=None)
self.weekday_model = ExtraTreesRegressor(random_state=17, n_estimators=50, max_depth=35, min_samples_split=2, min_samples_leaf=1, max_features=40, max_leaf_nodes=None)
self.time_steps_back = 9
self.columns = ['pool', 'lines_reserved', 'day_of_week', 'year', 'month', 'day', 'minute_of_day', 'holiday', 'reserved_Lavoda', 'reserved_Club Junior', 'reserved_Elab', 'reserved_Vodnik', 'reserved_Spirala', 'reserved_Amalka', 'reserved_Dukla', 'reserved_Lodicka', 'reserved_Elab team', 'reserved_Sports Team', 'reserved_Modra Hvezda', 'reserved_VSC MSMT', 'reserved_Orka', 'reserved_Activity', 'reserved_Aquamen', 'reserved_Zralok', 'reserved_SK Impuls', 'reserved_Motylek', 'reserved_3fit', 'reserved_Jitka Vachtova', 'reserved_Hodbod', 'reserved_DUFA', 'reserved_The Swim', 'reserved_Neptun', 'reserved_Apneaman', 'reserved_Michovsky', 'reserved_Betri', 'reserved_Pospisil', 'reserved_Vachtova', 'reserved_Riverside', 'reserved_Vodni polo Sparta', 'reserved_Road 2 Kona', 'reserved_Water Polo Sparta Praha', 'reserved_Sucha', 'reserved_Totkovicova', 'reserved_DDM Spirala', 'reserved_PS Perla', 'reserved_Dufkova - pulka drahy', 'reserved_Pavlovec', 'reserved_Sidorovich', 'reserved_OS DUFA', 'reserved_SK Neptun', 'temperature_binned', 'wind_binned', 'humidity_binned', 'precipitation_binned', 'pressure_binned', 'reserved_other']
self.name = 'DoubleExtraTreesRegressor'
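# Minimal usage sketch (an assumption: the DataHelper training/testing data is
# available locally and a data/ directory exists for the pickle).
if __name__ == "__main__":
    model = MyExtraTreesRegressor()
    model.fit_on_training_set()
    model.print_mse()
    model.save_model()  # defaults to data/MyExtraTreesRegressor.pickle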
|
import torch.nn as nn
import torch.nn.functional as F
from layers.autogcn_layer import AUTOGCNLayer
class AUTOGCNNet(nn.Module):
def __init__(self, net_params):
super().__init__()
in_dim = net_params['in_dim']
hidden_dim = net_params['hidden_dim']
n_classes = net_params['n_classes']
graph_norm = net_params['graph_norm']
batch_norm = net_params['batch_norm']
residual = net_params['residual']
dropout = net_params['dropout']
K = net_params['K']
n_layers = net_params['L']
num_filters = net_params['num_filters']
opt = net_params['opt']
gate = net_params['gate']
self.dropout = dropout
self.device = net_params['device']
self.layers = nn.ModuleList()
#self.layers.append(nn.Linear(in_dim,hidden_dim))
self.layers.append(AUTOGCNLayer(in_dim, hidden_dim, F.relu, dropout, graph_norm, batch_norm, num_filters=num_filters, K=K, residual=residual, gate=gate, opt=opt))
self.layers.extend(nn.ModuleList([AUTOGCNLayer(hidden_dim, hidden_dim, F.relu, dropout, graph_norm, batch_norm, num_filters=num_filters, K=K, residual=residual, gate=gate, opt=opt) for _ in range(n_layers - 1)]))
self.layers.append(AUTOGCNLayer(hidden_dim, n_classes, None, dropout, graph_norm, batch_norm, num_filters=num_filters, K=K, residual=residual, gate=gate, opt=opt))
def forward(self, g, h, e, snorm_n, snorm_e):
# h = self.layers[0](h)
# h = self.layers[1](g, h, snorm_n)
for conv in self.layers:
h = conv(g, h, snorm_n)
return h
def loss(self, pred, label):
criterion = nn.CrossEntropyLoss()
loss = criterion(pred, label)
return loss
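# Usage note (sketch only): AUTOGCNNet reads these keys from net_params in __init__:
# 'in_dim', 'hidden_dim', 'n_classes', 'graph_norm', 'batch_norm', 'residual',
# 'dropout', 'K', 'L', 'num_filters', 'opt', 'gate' and 'device'. The forward pass
# expects a graph g, node features h and node norm snorm_n (e and snorm_e are unused);
# how those are built depends on the surrounding project and is not shown here.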
|
from code_katas import array_plus_array
import unittest
"""
Test.assert_equals(array_plus_array([1, 2, 3], [4, 5, 6]), 21)
Test.assert_equals(array_plus_array([-1, -2, -3], [-4, -5, -6]), -21)
Test.assert_equals(array_plus_array([0, 0, 0], [4, 5, 6]), 15)
Test.assert_equals(array_plus_array([100, 200, 300], [400, 500, 600]), 2100)
"""
class TestIt(unittest.TestCase):
def test_1(self):
self.assertEqual(array_plus_array([1, 2, 3], [4, 5, 6]), 21)
def test_2(self):
self.assertEqual(array_plus_array([-1, -2, -3], [-4, -5, -6]), -21)
def test_3(self):
self.assertEqual(array_plus_array([0, 0, 0], [4, 5, 6]), 15)
def test_4(self):
self.assertEqual(array_plus_array([100, 200, 300], [400, 500, 600]), 2100)
def test_5(self):
self.assertEqual(array_plus_array([10, 3, 2], [4, 6, 3]), 28)
def test_6(self):
self.assertEqual(array_plus_array([3, 20, 7], [6, 3, 1]), 40)
def test_7(self):
self.assertEqual(array_plus_array([2,10,8], [2, 4, 5]), 31)
def test_8(self):
self.assertEqual(array_plus_array([3, 7, 30], [1, 7, 9]), 57)
if __name__ == '__main__':
unittest.main()
|
# coding=utf-8
from OTLMOW.OTLModel.Classes.VerlichtingstoestelConnector import VerlichtingstoestelConnector
from OTLMOW.OTLModel.Classes.Verlichtingstoestel import Verlichtingstoestel
# Generated with OTLClassCreator. To modify: extend, do not edit
class VerlichtingstoestelHgLP(VerlichtingstoestelConnector, Verlichtingstoestel):
"""Het geheel van de lagedruk kwik lamp (of fluorescentielamp) (HgLP),, voorschakelapparatuur en de behuizing die werden samengesteld met als doel:
* de lichtstroom van de lichtbronnen hoofdzakelijk op het te verlichten oppervlak (doorlopende wegsectie, conflictgebied,...) te richten, teneinde de zichtbaarheid te verhogen;
* de lichtstroom te beheersen zodat de weggebruikers niet verblind worden en de lichthinder beperkt wordt;
* het optisch systeem, de lichtbronnen en de hulpapparatuur tegen uitwendige invloeden te beschermen"""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#VerlichtingstoestelHgLP'
"""De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
def __init__(self):
Verlichtingstoestel.__init__(self)
VerlichtingstoestelConnector.__init__(self)
|
'''
Dataset and DataLoader adapted from
https://www.kaggle.com/pinocookie/pytorch-dataset-and-dataloader
'''
import pickle
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
def rotate_img(img, rot):
if rot == 0: # 0 degrees rotation
return img
elif rot == 90: # 90 degrees rotation
return np.flipud(np.transpose(img, (1, 0, 2)))
elif rot == 180: # 90 degrees rotation
return np.fliplr(np.flipud(img))
elif rot == 270: # 270 degrees rotation / or -90
return np.transpose(np.flipud(img), (1, 0, 2))
else:
raise ValueError('rotation should be 0, 90, 180, or 270 degrees')
class RotateDataset(Dataset):
def __init__(self, dataset):
self.dataset = dataset
self.transform = transforms.Compose([
transforms.ToTensor()
])
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
img = self.dataset[idx]
rotated_imgs = [
self.transform(img),
self.transform(rotate_img(img, 90).copy()),
self.transform(rotate_img(img, 180).copy()),
self.transform(rotate_img(img, 270).copy())
]
rotation_labels = torch.LongTensor([0, 1, 2, 3])
return torch.stack(rotated_imgs, dim=0), rotation_labels
def load_mnist(batch_size,
data_dir='./data',
val_size=0.1,
shuffle=True,
seed=1):
"""Load MNIST data into train/val/test data loader"""
num_workers = 4
(x_train, y_train), (x_valid, y_valid), (x_test, y_test) = load_mnist_all(
data_dir=data_dir, val_size=val_size, shuffle=shuffle, seed=seed)
trainset = torch.utils.data.TensorDataset(x_train, y_train)
validset = torch.utils.data.TensorDataset(x_valid, y_valid)
testset = torch.utils.data.TensorDataset(x_test, y_test)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
validloader = torch.utils.data.DataLoader(
validset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
testloader = torch.utils.data.DataLoader(
testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
return trainloader, validloader, testloader
def load_mnist_all(data_dir='./data', val_size=0.1, shuffle=True, seed=1):
"""Load entire MNIST dataset into tensor"""
transform = transforms.Compose([
transforms.ToTensor(),
])
trainset = torchvision.datasets.MNIST(
root=data_dir, train=True, download=True, transform=transform)
testset = torchvision.datasets.MNIST(
root=data_dir, train=False, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=len(trainset), shuffle=False)
testloader = torch.utils.data.DataLoader(
testset, batch_size=len(testset), shuffle=False)
x, y = next(iter(trainloader))
x_test, y_test = next(iter(testloader))
x_train, x_valid, y_train, y_valid = train_test_split(
x.numpy(), y.numpy(), test_size=val_size, shuffle=shuffle,
random_state=seed, stratify=y)
# scale up
# scale = 2
# x_train = x_train.repeat(scale, axis=2).repeat(scale, axis=3)
# x_valid = x_valid.repeat(scale, axis=2).repeat(scale, axis=3)
# x_test = x_test.numpy().repeat(scale, axis=2).repeat(scale, axis=3)
# x_test = torch.tensor(x_test)
return ((torch.tensor(x_train), torch.tensor(y_train)),
(torch.tensor(x_valid), torch.tensor(y_valid)), (x_test, y_test))
def load_mnist_rot(batch_size, data_dir='./data', val_size=0.1, shuffle=True,
seed=1):
(x_train, _), (x_valid, _), (x_test, _) = load_mnist_all(
data_dir, val_size=val_size, seed=seed)
traindataset = RotateDataset(x_train.numpy().transpose(0, 2, 3, 1))
trainloader = torch.utils.data.DataLoader(
traindataset, batch_size=batch_size, shuffle=shuffle, num_workers=4)
validdataset = RotateDataset(x_valid.numpy().transpose(0, 2, 3, 1))
validloader = torch.utils.data.DataLoader(
validdataset, batch_size=batch_size, shuffle=False, num_workers=4)
testdataset = RotateDataset(x_test.numpy().transpose(0, 2, 3, 1))
testloader = torch.utils.data.DataLoader(
testdataset, batch_size=batch_size, shuffle=False, num_workers=4)
return trainloader, validloader, testloader
def load_cifar10(batch_size,
data_dir='./data',
val_size=0.1,
normalize=True,
augment=True,
shuffle=True,
seed=1):
"""Load CIFAR-10 data into train/val/test data loader"""
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)
num_workers = 4
transform = transforms.Compose([
transforms.ToTensor()
])
if augment:
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.RandomAffine(
5, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=5),
transforms.ColorJitter(brightness=0.1),
transforms.ToTensor()
])
else:
transform_train = transform
if normalize:
transform = transforms.Compose([
transform,
transforms.Normalize(mean, std)
])
transform_train = transforms.Compose([
transform_train,
transforms.Normalize(mean, std)
])
trainset = torchvision.datasets.CIFAR10(
root=data_dir, train=True, download=True, transform=transform_train)
validset = torchvision.datasets.CIFAR10(
root=data_dir, train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(
root=data_dir, train=False, download=True, transform=transform)
# Random split train and validation sets
num_train = len(trainset)
indices = list(range(num_train))
split = int(np.floor(val_size * num_train))
if shuffle:
np.random.seed(seed)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=batch_size, sampler=train_sampler,
num_workers=num_workers)
validloader = torch.utils.data.DataLoader(
validset, batch_size=batch_size, sampler=valid_sampler,
num_workers=num_workers)
testloader = torch.utils.data.DataLoader(
testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
return trainloader, validloader, testloader
def load_cifar10_all(data_dir='./data', val_size=0.1, shuffle=True, seed=1):
"""Load entire CIFAR-10 dataset into tensor"""
transform = transforms.Compose([
transforms.ToTensor(),
])
trainset = torchvision.datasets.CIFAR10(
root=data_dir, train=True, download=True, transform=transform)
validset = torchvision.datasets.CIFAR10(
root=data_dir, train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(
root=data_dir, train=False, download=True, transform=transform)
# Random split train and validation sets
num_train = len(trainset)
indices = list(range(num_train))
split = int(np.floor(val_size * num_train))
if shuffle:
np.random.seed(seed)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=(num_train - split), sampler=train_sampler)
validloader = torch.utils.data.DataLoader(
validset, batch_size=split, sampler=valid_sampler)
testloader = torch.utils.data.DataLoader(
testset, batch_size=len(testset), shuffle=False)
x_train = next(iter(trainloader))
x_valid = next(iter(validloader))
x_test = next(iter(testloader))
return x_train, x_valid, x_test
def load_cifar10_noise(batch_size, data_dir='./data', val_size=0.1, sd=0,
shuffle=True, seed=1):
(x_train, y_train), (x_valid, y_valid), (x_test, y_test) = load_cifar10_all(
data_dir, val_size=val_size, seed=seed)
x_train += torch.randn_like(x_train) * sd
trainset = torch.utils.data.TensorDataset(x_train, y_train)
validset = torch.utils.data.TensorDataset(x_valid, y_valid)
testset = torch.utils.data.TensorDataset(x_test, y_test)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=batch_size, shuffle=shuffle, num_workers=4)
validloader = torch.utils.data.DataLoader(
validset, batch_size=batch_size, shuffle=False, num_workers=4)
testloader = torch.utils.data.DataLoader(
testset, batch_size=batch_size, shuffle=False, num_workers=4)
return trainloader, validloader, testloader
def load_cifar10_rot(batch_size, data_dir='./data', val_size=0.1, shuffle=True,
seed=1):
(x_train, _), (x_valid, _), (x_test, _) = load_cifar10_all(
data_dir, val_size=val_size, seed=seed)
traindataset = RotateDataset(x_train.numpy().transpose(0, 2, 3, 1))
trainloader = torch.utils.data.DataLoader(
traindataset, batch_size=batch_size, shuffle=shuffle, num_workers=4)
validdataset = RotateDataset(x_valid.numpy().transpose(0, 2, 3, 1))
validloader = torch.utils.data.DataLoader(
validdataset, batch_size=batch_size, shuffle=False, num_workers=4)
testdataset = RotateDataset(x_test.numpy().transpose(0, 2, 3, 1))
testloader = torch.utils.data.DataLoader(
testdataset, batch_size=batch_size, shuffle=False, num_workers=4)
return trainloader, validloader, testloader
def load_gtsrb(data_dir='./data', gray=False, train_file_name=None):
"""
Load GTSRB data as a (datasize) x (channels) x (height) x (width) numpy
matrix. Each pixel is rescaled to lie in [0,1].
"""
def load_pickled_data(file, columns):
"""
Loads pickled training and test data.
Parameters
----------
file : string
Name of the pickle file.
columns : list of strings
List of columns in pickled data we're interested in.
Returns
-------
A tuple of datasets for given columns.
"""
with open(file, mode='rb') as f:
dataset = pickle.load(f)
return tuple(map(lambda c: dataset[c], columns))
def preprocess(x, gray):
"""
Preprocess dataset: turn images into grayscale if specified, normalize
input space to [0,1], reshape array to appropriate shape for NN model
"""
if not gray:
# Scale features to be in [0, 1]
x = (x / 255.).astype(np.float32)
else:
# Convert to grayscale, e.g. single Y channel
x = 0.299 * x[:, :, :, 0] + 0.587 * x[:, :, :, 1] + \
0.114 * x[:, :, :, 2]
# Scale features to be in [0, 1]
x = (x / 255.).astype(np.float32)
x = x[:, :, :, np.newaxis]
return x
# Load pickle dataset
if train_file_name is None:
x_train, y_train = load_pickled_data(
data_dir + 'train.p', ['features', 'labels'])
else:
x_train, y_train = load_pickled_data(
data_dir + train_file_name, ['features', 'labels'])
x_val, y_val = load_pickled_data(
data_dir + 'valid.p', ['features', 'labels'])
x_test, y_test = load_pickled_data(
data_dir + 'test.p', ['features', 'labels'])
# Preprocess loaded data
x_train = preprocess(x_train, gray)
x_val = preprocess(x_val, gray)
x_test = preprocess(x_test, gray)
return x_train, y_train, x_val, y_val, x_test, y_test
class GtsrbDataset(torch.utils.data.Dataset):
def __init__(self, x_np, y_np, mean=None, std=None, augment=False):
self.x_pil = [Image.fromarray(
(x * 255).astype(np.uint8)) for x in x_np]
self.y_np = y_np.astype(np.int64)
if mean is None:
mean = (0, 0, 0)
std = (1, 1, 1)
if augment:
self.transform = transforms.Compose([
transforms.RandomCrop(32, padding=4, padding_mode='edge'),
transforms.RandomAffine(
5, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=5),
transforms.ColorJitter(brightness=0.1),
transforms.ToTensor(),
# transforms.Normalize(mean, std),
])
else:
self.transform = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize(mean, std),
])
def __getitem__(self, index):
# apply the transformations and return tensors
return self.transform(self.x_pil[index]), self.y_np[index]
def __len__(self):
return len(self.x_pil)
def load_gtsrb_dataloader(data_dir, batch_size, num_workers=4):
x_train, y_train, x_val, y_val, x_test, y_test = load_gtsrb(
data_dir=data_dir)
# Standardization
mean = np.mean(x_train, (0, 1, 2))
std = np.std(x_train, (0, 1, 2))
trainset = GtsrbDataset(x_train, y_train, mean, std, augment=True)
validset = GtsrbDataset(x_val, y_val, mean, std, augment=False)
testset = GtsrbDataset(x_test, y_test, mean, std, augment=False)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
validloader = torch.utils.data.DataLoader(
validset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
testloader = torch.utils.data.DataLoader(
testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
return trainloader, validloader, testloader
def create_planes(d=1000, k=10, num_total=10000, bound=(0, 1), test_size=0.2,
val_size=0.1, seed=1):
"""
Create plane dataset: two planes with dimension k in space of dimension d.
The first k dimensions are random numbers within the bound, dimensions
k + 1 to d - 1 are 0, and d-th dimension is bound[0] or bound[1] which
determines the class.
"""
assert bound[0] < bound[1]
np.random.seed(seed)
planes = torch.zeros((num_total, d))
planes[:, :k] = torch.rand(num_total, k) * (bound[1] - bound[0]) + bound[0]
# planes[:num_total // 2, -1] = bound[0]
# planes[num_total // 2:, -1] = bound[1]
planes[:num_total // 2, -1] = 0.3
planes[num_total // 2:, -1] = 0.7
indices = np.arange(num_total)
np.random.shuffle(indices)
train_idx = int(num_total * (1 - test_size - val_size))
test_idx = int(num_total * (1 - test_size))
x_train = planes[indices[:train_idx]]
x_valid = planes[indices[train_idx:test_idx]]
x_test = planes[indices[test_idx:]]
y_train = torch.tensor(
(indices[:train_idx] >= num_total // 2).astype(np.int64))
y_valid = torch.tensor(
(indices[train_idx:test_idx] >= num_total // 2).astype(np.int64))
y_test = torch.tensor(
(indices[test_idx:] >= num_total // 2).astype(np.int64))
return (x_train, y_train), (x_valid, y_valid), (x_test, y_test)
def load_planes(batch_size, d=1000, k=10, num_total=10000, bound=(0, 1),
test_size=0.2, val_size=0.1, shuffle=True, seed=1):
num_workers = 4
(x_train, y_train), (x_valid, y_valid), (x_test, y_test) = create_planes(
d=d, k=k, num_total=num_total, bound=bound, test_size=test_size,
val_size=val_size, seed=seed)
trainset = torch.utils.data.TensorDataset(x_train, y_train)
validset = torch.utils.data.TensorDataset(x_valid, y_valid)
testset = torch.utils.data.TensorDataset(x_test, y_test)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
validloader = torch.utils.data.DataLoader(
validset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
testloader = torch.utils.data.DataLoader(
testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
return trainloader, validloader, testloader
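# Minimal smoke test (not part of the original module): downloads MNIST into ./data
# on first use, so it is guarded behind __main__.
if __name__ == "__main__":
    train_loader, valid_loader, test_loader = load_mnist(batch_size=128)
    images, labels = next(iter(train_loader))
    print(images.shape, labels.shape)  # expected: torch.Size([128, 1, 28, 28]) torch.Size([128])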
|
import itertools
import numpy as np
from typing import Dict, Optional
from autoconf import cached_property
from autoarray.inversion.mappers.abstract import AbstractMapper
from autoarray.structures.arrays.two_d.array_2d import Array2D
from autoarray.numba_util import profile_func
from autoarray.structures.arrays.two_d import array_2d_util
from autoarray.structures.grids.two_d import grid_2d_util
class MapperRectangular(AbstractMapper):
def __init__(
self,
source_grid_slim,
source_pixelization_grid,
data_pixelization_grid=None,
hyper_image=None,
profiling_dict: Optional[Dict] = None,
):
"""
Class representing a rectangular mapper, which maps unmasked pixels on a masked 2D array (in the form of
a grid, see the *hyper_galaxies.array.grid* module) to pixels discretized on a rectangular grid.
The uniform geometry of the rectangular grid is used to perform efficient pixel pairings.
Parameters
----------
pixels
The number of pixels in the rectangular pixelization (y_pixels*x_pixels).
source_grid_slim : gridStack
A stack of grid describing the observed image's pixel coordinates (e.g. an image-grid, sub-grid, etc.).
shape_native
The dimensions of the rectangular grid of pixels (y_pixels, x_pixel)
geometry
The geometry (e.g. y / x edge locations, pixel-scales) of the rectangular pixelization.
"""
super().__init__(
source_grid_slim=source_grid_slim,
source_pixelization_grid=source_pixelization_grid,
data_pixelization_grid=data_pixelization_grid,
hyper_image=hyper_image,
profiling_dict=profiling_dict,
)
@property
def shape_native(self):
return self.source_pixelization_grid.shape_native
@cached_property
@profile_func
def pixelization_index_for_sub_slim_index(self):
"""
The `Mapper` contains:
1) The traced grid of (y,x) source pixel coordinate centres.
2) The traced grid of (y,x) image pixel coordinates.
        The function below pairs every image-pixel coordinate to the source pixel it falls within.
In the API, the `pixelization_index` refers to the source pixel index (e.g. source pixel 0, 1, 2 etc.) whereas
the sub_slim index refers to the index of a sub-gridded image pixel (e.g. sub pixel 0, 1, 2 etc.).
"""
return grid_2d_util.grid_pixel_indexes_2d_slim_from(
grid_scaled_2d_slim=self.source_grid_slim,
shape_native=self.source_pixelization_grid.shape_native,
pixel_scales=self.source_pixelization_grid.pixel_scales,
origin=self.source_pixelization_grid.origin,
).astype("int")
def reconstruction_from(self, solution_vector):
"""
Given the solution vector of an inversion (see *inversions.LinearEqn*), determine the reconstructed
pixelization of the rectangular pixelization by using the mapper.
"""
recon = array_2d_util.array_2d_native_from(
array_2d_slim=solution_vector,
mask_2d=np.full(
fill_value=False, shape=self.source_pixelization_grid.shape_native
),
sub_size=1,
)
return Array2D.manual(
array=recon,
sub_size=1,
pixel_scales=self.source_pixelization_grid.pixel_scales,
origin=self.source_pixelization_grid.origin,
)
|
import logging
import os
from .utils import load_checkpoint, save_checkpoint
logger = logging.getLogger(__name__)
def train_loop(model, mutator, criterion, optimizer, scheduler,
train_loader, sanitize_loader, valid_loader,
train_fn, valid_fn, writer, args):
last_epoch = 0
auto_resume = os.path.join(args.output_dir, "checkpoints", "latest.pth.tar")
if os.path.exists(auto_resume):
auto_resume_ckpt = load_checkpoint(model, auto_resume, args=args, optimizer=optimizer, scheduler=scheduler)
if auto_resume_ckpt is not None:
last_epoch = auto_resume_ckpt["epoch"]
logger.info("Resume from checkpoint. Proceeding from epoch %d...", last_epoch)
for epoch in range(last_epoch + 1, args.epochs + 1):
train_fn(model, mutator, train_loader, criterion, optimizer, scheduler, writer, args, epoch)
if (args.eval_every and epoch % args.eval_every == 0) or epoch == args.epochs:
valid_fn(model, mutator, sanitize_loader, valid_loader, criterion, writer, args, epoch)
if epoch % 20 == 0:
save_checkpoint(args, model, auto_resume, optimizer=optimizer.state_dict(),
scheduler=scheduler.state_dict(), epoch=epoch)
save_checkpoint(args, model, os.path.join(args.output_dir, "checkpoints", "final.pth.tar"))
|
import serial #From pyserial package https://anaconda.org/anaconda/pyserial
import sqlite3 #From sqlite3 package https://anaconda.org/anaconda/sqlite3
from sqlite3 import Error
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import collections
from drawnow import drawnow, figure
#System Variables
arduino_port = "COM5" #serial port of Arduino on local machine
baud = 115200 #Needs to match arduino
i=1
firstround=1
graphdata1=collections.deque(np.zeros(10))
timedata=collections.deque(np.zeros(10))
#create sqlite database (raw file location from my test machine)
con = sqlite3.connect(r'C:\Users\NBSwi\Documents\GitHub\Senior-Design-Project\Sqlite\graphtesting.sqlite3')
cur = con.cursor()
# cur.execute('''CREATE TABLE current
# (date text, current real)''')
try: #Add another column here for voltage: , voltage real
cur.execute('''CREATE TABLE PowerMeasurement
(date text, wattage real)''')
except Error as e:
print(e)
con.commit()
#create_connection(r"C:\Users\NBSwi\Documents\GitHub\RaceTime\Sqlite\datatest1.db")
ser = serial.Serial(arduino_port, baud)
#plt.autoscale()
#plt.ion()
#plt.show(block=False)
def plotgraph():
    # drawnow re-calls this to redraw the rolling window of the last 10 samples.
    plt.plot(timedata, graphdata1)
print('Press "a" to start recording data')
textin=input()
if textin =='a':
starttime=datetime.now()
print('Data started recording at: ', str(starttime))
while i==1:
#read data: add functionality here to parse string for voltage
    # Strip the bytes-literal artifacts (b'...', backslashes, trailing \r\n) from the serial line.
    Data=str(ser.readline())
    currenttime=datetime.now()
    Data=Data.replace("b'", "")
    Data=Data.replace("\'", "")
    Data=Data.replace("\\", "")
    Data=Data.replace("rn", "")
#write data to file database:
print('DateTime: ',str(currenttime),'Serial: ', Data )
    DataValue = (str(currenttime), Data)  # parameters for a parameterised INSERT (avoids quoting issues)
try:
graphdata=float(Data)
except ValueError as e:
print(e)
graphdata=0
timedata.append(currenttime.timestamp())
graphdata1.append(graphdata)
    drawnow(plotgraph)  # drawnow expects a callable; plotgraph reads the deques above
#plt.gca().relim
#plt.gca().autoscale_view()
#plt.show(block=False)
# if firstround==1:
# plt.show(block=False)
# firstround=0
    cur.execute("INSERT INTO PowerMeasurement VALUES (?, ?)", DataValue)
con.commit()
con.close()
|
"""
This module produces the outputs/plots.
Marina von Steinkirch, spring/2013
"""
import pylab
import numpy
from matplotlib import pyplot
def plotPhaseSpace( b, aTheta, aOmega, t, power, k):
pylab.clf()
pylab.cla()
label = str(b)
pylab.subplot(221)
pylab.plot( aTheta, aOmega, color="m", lw = 2)
pylab.xlabel(r"$\theta$ (radians) ", fontsize=10)
    pylab.ylabel(r'$\omega$ (radians/seconds)', fontsize=10)
pylab.grid(True)
pylab.subplot(222)
pylab.plot( t, aTheta, color="g", lw = 2)
pylab.ylabel(r"$\theta$ (radians)", fontsize=10)
pylab.xlabel('t (seconds)', fontsize=10)
pylab.grid(True)
pylab.subplot(223)
pyplot.grid(True)
pyplot.plot(k, power, color="c", lw = 2)
pyplot.ylabel("|F(k)$|^{2}$", fontsize=10)
pyplot.xlabel(r"$\nu_k$ ($s^{-1}$)", fontsize=10)
pylab.subplot(224)
pyplot.yscale('log')
pyplot.plot(2.0*numpy.pi*k, power, color="b", lw = 1)
pylab.xlim(0,6)
pyplot.grid(True)
pyplot.xlabel(r"$\nu_k$ ($s^{-1}$)", fontsize=10)
pyplot.ylabel("log |F(k)$|^{2}$", fontsize=10)
pylab.savefig("plots/b-%s_phase_space.png" % (label) )
return 0
def plotDFT( b, bDFT, k, bDFT_inv, aTheta, t):
pylab.clf()
pylab.cla()
label = str(b)
ymax = max( bDFT.real )
imax = numpy.where(bDFT.real == ymax )[0]
xmax = k[imax]
pylab.subplot(221)
pylab.plot(t, aTheta.real, color="g", lw = 2)
pylab.ylabel( r"$\theta$ (t)", fontsize=10)
pylab.xlabel('t (seconds)', fontsize=10)
pylab.grid(True)
pylab.subplot(222)
pylab.annotate("Frequency has the most power", xy=(xmax, ymax), xycoords='data', xytext=(+10, +30), textcoords='offset points', fontsize=10, arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
pylab.plot(k, bDFT.real, color="r", lw = 2, label="Real F(k)")
pylab.plot(k, bDFT.imag, color="b", lw = 1, label="Imaginary F(k)")
leg = pylab.legend(loc=4,labelspacing=0.0005)
ltext = leg.get_texts()
pylab.setp(ltext, fontsize='small')
leg.draw_frame(0)
pylab.ylabel("F(k)", fontsize=10)
pylab.xlabel(r"$\nu_k$ ($s^{-1}$)", fontsize=10)
pylab.grid(True)
pylab.subplot(223)
pylab.plot(k, abs(bDFT.real), color="r", lw = 2)
pylab.xlabel(r"$\nu_k$ ($s^{-1}$)", fontsize=10)
pylab.ylabel("|F(k)|", fontsize=10)
pylab.grid(True)
pylab.subplot(224)
pylab.plot(t, bDFT_inv.real, color="y", lw = 2)
pylab.ylabel("Inverse F(k)", fontsize=10)
pylab.xlabel('t (seconds)', fontsize=10)
pylab.grid(True)
pylab.savefig("plots/b-%s_dft.png" % (label) )
return 0
|
"""
Common
======
Expressions common to both :mod:`stream` and :mod:`sequence`.
"""
import numpy as np
from scipy.special import erf
from scipy.fftpack import dct
def saturation_distance(mean_mu_squared, wavenumber, scale):
"""Saturation distance according to Wenzel.
:param mean_mu_squared: Mean mu squared.
:param wavenumber: Wavenumber.
:param scale: Outer length scale.
See Daigle, 1987: equation 5
    .. math:: r_s = \\frac{1}{2 \\langle \\mu^2 \\rangle k^2 L}
"""
return 1.0 / (2.0 * mean_mu_squared * wavenumber*wavenumber * scale)
def saturation_factor(distance, wavenumber, scale, mean_mu_squared):
"""Factor to multiply log-amplitude (co-)variance with to include log-amplitude saturation.
    .. math:: x = \\frac{1}{1 + r/r_s}
"""
sat_distance = saturation_distance(mean_mu_squared, wavenumber, scale)
factor = ( 1.0 / (1.0 + distance/sat_distance) )
return factor
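# Illustrative check (not part of the original module): the parameter values below
# are placeholders, not measured data. Because saturation_factor is 1/(1 + r/r_s),
# evaluating it exactly at r = r_s gives 0.5 regardless of the chosen parameters:
#   r_s = saturation_distance(mean_mu_squared=1e-6, wavenumber=18.0, scale=1.1)
#   saturation_factor(distance=r_s, wavenumber=18.0, scale=1.1, mean_mu_squared=1e-6)  # -> 0.5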
#def impulse_response_fluctuations(covariance, ntaps, window=None):
#"""Impulse response describing fluctuations.
#:param covariance: Covariance vector.
#:param fs: Sample frequency
#:param window: Window to apply to impulse response. If passed `None`, no window is applied.
#:returns: Impulse response of fluctuations filter.
#"""
#if window is not None:
#nsamples = covariance.shape[-1]
#covariance = covariance * window(nsamples)[...,:] # Not inplace!
## The covariance is a symmetric, real function.
#autospectrum = np.abs(np.fft.rfft(covariance, n=ntaps))#/df**2.0 # Autospectrum
##autospectrum[..., 0] = 0.0 # Remove DC component from spectrum.
## The autospectrum is real-valued. Taking the square root given an amplitude spectrum.
## Because we have a symmetric spectrum, taking the inverse DFT results in an even, real-valued
## impulse response. Furthermore, because we have zero phase the impulse response is even as well.
#ir = np.fft.ifftshift(np.fft.irfft(np.sqrt(autospectrum), n=ntaps).real)
#return ir
# Uses DCT
def impulse_response_fluctuations(covariance, ntaps, window=None):
"""Impulse response describing fluctuations.
:param covariance: Covariance vector.
    :param ntaps: Number of filter taps.
:param window: Window to apply to impulse response. If passed `None`, no window is applied.
:returns: Impulse response of fluctuations filter.
"""
if window is not None:
nsamples = covariance.shape[-1]
covariance = covariance * window(nsamples)[...,:] # Not inplace!
# The covariance is a symmetric, real function.
autospectrum = np.abs(dct(covariance, type=1))#/df**2.0 # Autospectrum
#autospectrum[..., 0] = 0.0 # Remove DC component from spectrum.
    # The autospectrum is real-valued. Taking the square root gives an amplitude spectrum.
# Because we have a symmetric spectrum, taking the inverse DFT results in an even, real-valued
# impulse response. Furthermore, because we have zero phase the impulse response is even as well.
ir = np.fft.ifftshift(np.fft.irfft(np.sqrt(autospectrum), n=ntaps).real)
return ir
def tau(ntaps, fs):
"""Time lag :math:`\\tau` for autocorrelation :math:`B(\\tau)`.
:param ntaps: Amount of taps.
:param fs: Sample frequency.
"""
return np.fft.rfftfreq(ntaps, fs/ntaps)
def correlation_spherical_wave(spatial_separation, correlation_length):
"""Correlation of spherical waves.
:param spatial_separation: Spatial separation :math:`\\rho`.
    :param correlation_length: Correlation length :math:`L_0`.
:returns: Correlation
.. math:: \\frac{\\sqrt{\\pi}}{2} \\frac{\\mathrm{erf}{(\\rho/L_0)}}{(\\rho/L_0)}
.. note:: Instead of spatial separation and correlation length, you can also use time lag and correlation time.
"""
x = np.atleast_1d(spatial_separation/correlation_length)
cor = np.sqrt(np.pi) / 2.0 * erf(x)/x
cor[x==0.0] = 1.0
return cor
def _correlation_spherical_wave(x):
cor = np.sqrt(np.pi) / 2.0 * erf(x)/x
cor[x==0.0] = 1.0
return cor
def _fluctuations_with_variance(variance_func, fluctuations, wavenumber, distance, correlation_length, mean_mu_squared, include_saturation):
"""Add correct variance to fluctuations.
:returns: Fluctuations with correct variance.
"""
variance = variance_func(distance, wavenumber, correlation_length, mean_mu_squared, include_saturation)
return fluctuations * np.sqrt(variance)
#def logamp_fluctuations(variance_func, fluctuations, wavenumber, distance, correlation_length, mean_mu_squared, include_saturation=True):
#"""Determine log-amplitude fluctuations.
#:param fluctuations: Fluctuations with variance of one.
#:param frequency: Frequency to compute fluctuations for.
#:returns: Log-amplitude fluctuations with correct variance.
#"""
#variance_logamp = variance_gaussian(distance, wavenumber, correlation_length, mean_mu_squared, include_saturation=include_saturation)
#logamp = fluctuations * np.sqrt(variance_logamp)
#return logamp
#def phase_fluctuations(fluctuations, wavenumber, distance, correlation_length, mean_mu_squared):
#"""Determine phase fluctuations for given frequency.
#:param fluctuations: Fluctuations with variance of one.
#:param frequency: Frequency to compute fluctuations for.
#:returns: Phase fluctuations with correct variance.
#"""
#variance_phase = variance_gaussian(distance, wavenumber, correlation_length,
#mean_mu_squared, include_saturation=False)
#phase = fluctuations * np.sqrt(variance_phase)
#return phase
def amplitude_fluctuations(logamp):
"""Return amplitude fluctuations given log-amplitude fluctuations.
:param logamp: Log-amplitude fluctuations.
:returns: Amplitude fluctuations.
.. math:: A = \\exp{\\chi}
"""
return np.exp(logamp)
def delay_fluctuations(phase, fs, frequency=1.0):
"""Return propagation delay fluctuations given phase fluctuations for the specified frequency.
:param phase: Phase fluctuations for given `frequency`.
:param frequency: Frequency.
:returns: Propagation delay fluctuations.
.. math:: \\mathrm{d}t = -S / (2 \\pi f)
.. note:: Note the minus sign! This is according to the definition.
"""
omega = (2.0*np.pi*frequency)
return phase/omega * (-1)
#return (phase/omega) / fs * (-1) # We explicitly do a multiplication because Stream does not yet support unary ops
def complex_fluctuations(logamp, phase):
"""Complex fluctuations.
:param logamp: Log-amplitude fluctuations :math:`\\chi`.
:param phase: Phase fluctuations :math:`S`.
:returns: Complex fluctuations :math:`\\Psi`.
    .. math:: \\Psi = e^{\\chi} e^{jS}
"""
return amplitude_fluctuations(logamp) * np.exp(1j*phase)
def transverse_coherence_expected(variance, correlation):
    """Transverse coherence of spherical waves and Gaussian fluctuations.
See Daigle, equation 11.
"""
return np.exp(-2.0*variance * (1.0 - correlation))
def transverse_coherence_expected_large_spatial_separation(variance):
    """Transverse coherence of spherical waves and Gaussian fluctuations when the spatial separation is much larger than the correlation length.
See Daigle, equation 12.
"""
return np.exp(-2.0 * variance)
def transverse_coherence(logamp_structure, phase_structure):
"""Transverse coherence as function of structure functions.
See Daigle, equation 6.
"""
return np.exp(-0.5 * (logamp_structure+phase_structure))
def longitudinal_coherence(logamp_variance, phase_variance):
    """Longitudinal coherence.
See Daigle, equation 13.
"""
return np.exp(-logamp_variance - phase_variance)
def transverse_speed(velocity_source, orientation):
"""Transverse speed computed from source velocity and source-receiver orientation.
:param velocity_source: Source velocity.
:param orientation: Unit vector pointing from source to receiver
Each row is a sample and each column a spatial dimension.
The transverse speed is the cross product of the velocity and orientation.
.. note:: It does not matter whether source-receiver or receiver-source unit vector is given.
"""
return np.linalg.norm(np.cross(orientation, velocity_source))
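# Illustrative example (not part of the original module): with zero correlation the
# expected transverse coherence (Daigle eq. 11) reduces to the large-separation
# limit (eq. 12); both give exp(-2 * variance). The variance value is a placeholder.
#   transverse_coherence_expected(variance=0.5, correlation=0.0)          # exp(-1) ~ 0.368
#   transverse_coherence_expected_large_spatial_separation(variance=0.5)  # exp(-1) ~ 0.368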
|
# -*- coding: utf-8 -*-
from datetime import timedelta
from copy import deepcopy
from openprocurement.api.utils import get_now
# ContractNoItemsChangeTest
def no_items_contract_change(self):
data = deepcopy(self.initial_data)
del data['items']
response = self.app.post_json('/contracts', {"data": data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
contract = response.json['data']
self.assertEqual(contract['status'], 'active')
self.assertNotIn('items', contract)
tender_token = data['tender_token']
response = self.app.patch_json('/contracts/{}/credentials?acc_token={}'.format(contract['id'], tender_token),
{'data': ''})
self.assertEqual(response.status, '200 OK')
token = response.json['access']['token']
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(contract['id'], token),
{'data': {'rationale': u'причина зміни укр',
'rationaleTypes': ['qualityImprovement']}})
self.assertEqual(response.status, '201 Created')
change = response.json['data']
self.assertEqual(change['status'], 'pending')
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(contract['id'], change['id'], token),
{'data': {'status': 'active', 'dateSigned': get_now().isoformat()}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
response = self.app.patch_json('/contracts/{}?acc_token={}'.format(contract['id'], token),
{"data": {"status": "terminated", "amountPaid": {"amount": 100, "valueAddedTaxIncluded": True, "currency": "UAH"}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'terminated')
response = self.app.get('/contracts/{}'.format(contract['id']))
self.assertNotIn('items', response.json['data'])
# ContactChangesResourceTest
def not_found(self):
response = self.app.get('/contracts/some_id/changes', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'contract_id'}
])
response = self.app.get('/contracts/{}/changes'.format(self.contract['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
response = self.app.get('/contracts/{}/changes/some_id'.format(self.contract['id']), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'change_id'}
])
response = self.app.patch_json(
'/contracts/{}/changes/some_id'.format(self.contract['id']), {'data': {}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'change_id'}
])
def get_change(self):
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'Принцеси не какають.',
'rationale_ru': u'ff',
'rationale_en': 'asdf',
'contractNumber': 12,
'rationaleTypes': ['priceReduction']}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
change = response.json['data']
self.assertEqual(change['status'], 'pending')
self.assertIn('date', change)
response = self.app.get('/contracts/{}/changes'.format(self.contract['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/contracts/{}/changes/{}'.format(self.contract['id'], change['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
change_data = response.json['data']
self.assertEqual(change_data, change)
response = self.app.get('/contracts/{}'.format(self.contract['id']))
self.assertEqual(response.status, '200 OK')
self.assertIn('changes', response.json['data'])
self.assertEqual(len(response.json['data']['changes']), 1)
self.assertEqual(set(response.json['data']['changes'][0].keys()),
set(['id', 'date', 'status', 'rationaleTypes', 'rationale', 'rationale_ru', 'rationale_en', 'contractNumber']))
self.app.authorization = None
response = self.app.get('/contracts/{}/changes'.format(self.contract['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 1)
self.assertEqual(set(response.json['data'][0].keys()),
set(['id', 'date', 'status', 'rationaleTypes', 'rationale', 'rationale_ru', 'rationale_en', 'contractNumber']))
def create_change_invalid(self):
response = self.app.post('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
'data', status=415)
self.assertEqual(response.status, '415 Unsupported Media Type')
self.assertEqual(response.json['errors'], [
{u'description':
u"Content-Type header should be one of ['application/json']", u'location': u'header', u'name': u'Content-Type'}
])
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {}}, status=422)
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "rationaleTypes", "description": ["This field is required."]},
{"location": "body", "name": "rationale", "description": ["This field is required."]}
])
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': "", 'rationaleTypes': ['volumeCuts']}}, status=422)
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "rationale", "description": ["String value is too short."]}
])
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale_ua': ""}}, status=422)
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "rationale_ua", "description": "Rogue field"}
])
self.app.authorization = None
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale_ua': "aaa"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.app.authorization = ('Basic', ('broker', ''))
response = self.app.post_json('/contracts/{}/changes'.format(self.contract['id']),
{'data': {'rationale_ua': "aaa"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
response = self.app.patch_json('/contracts/{}?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'changes': [{'rationale': "penguin", 'rationaleTypes': ['volumeCuts']}]}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.body, 'null')
response = self.app.get('/contracts/{}?acc_token={}'.format(self.contract['id'], self.contract_token))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('changes', response.json['data'])
def create_change(self):
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'причина зміни укр',
'rationale_en': 'change cause en',
'rationaleTypes': ['qualityImprovement']}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
change = response.json['data']
self.assertEqual(change['status'], 'pending')
self.assertIn('date', change)
response = self.app.get('/contracts/{}/changes'.format(self.contract['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 1)
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'трататата', 'rationaleTypes': ['priceReduction']}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "data", "description": "Can't create new contract change while any (pending) change exists"}
])
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'active', 'dateSigned': get_now().isoformat()}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'трататата', 'rationaleTypes': ['non-existing-rationale']}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "rationaleTypes", "description": [["Value must be one of ['volumeCuts', 'itemPriceVariation', 'qualityImprovement', 'thirdParty', 'durationExtension', 'priceReduction', 'taxRate', 'fiscalYearExtension']."]]}
])
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'трататата', 'rationaleTypes': ['priceReduction']}})
self.assertEqual(response.status, '201 Created')
change2 = response.json['data']
self.assertEqual(change2['status'], 'pending')
response = self.app.get('/contracts/{}/changes'.format(self.contract['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 2)
def patch_change(self):
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'причина зміни укр',
'rationale_en': u'change cause en',
'rationaleTypes': ['priceReduction'],
'contractNumber': u'№ 146'}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
change = response.json['data']
self.assertEqual(change['status'], 'pending')
self.assertEqual(change['contractNumber'], u'№ 146')
creation_date = change['date']
now = get_now().isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'date': now}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.body, 'null')
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'rationale_ru': 'шота на руськом'}})
self.assertEqual(response.status, '200 OK')
self.assertIn('rationale_ru', response.json['data'])
first_patch_date = response.json['data']['date']
self.assertEqual(first_patch_date, creation_date)
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
                                   {'data': {'rationale_en': 'another cause description'}})
self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['rationale_en'], 'another cause description')
second_patch_date = response.json['data']['date']
self.assertEqual(first_patch_date, second_patch_date)
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'rationaleTypes': ['fiscalYearExtension', 'priceReduction']}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['rationaleTypes'], ['fiscalYearExtension', 'priceReduction'])
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'rationaleTypes': ['fiscalYearExtension', 'volumeCuts', 'taxRate']}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['rationaleTypes'], ['fiscalYearExtension', 'volumeCuts', 'taxRate'])
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'rationaleTypes': 'fiscalYearExtension'}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['rationaleTypes'], ['fiscalYearExtension'])
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'rationaleTypes': 'fiscalYearExtension, volumeCuts'}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "rationaleTypes", "description": [["Value must be one of ['volumeCuts', 'itemPriceVariation', 'qualityImprovement', 'thirdParty', 'durationExtension', 'priceReduction', 'taxRate', 'fiscalYearExtension']."]]}
])
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'rationaleTypes': []}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "rationaleTypes", "description": ["Please provide at least 1 item."]}
])
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'id': '1234' * 8}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.body, 'null')
self.app.authorization = None
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'rationale_en': 'la-la-la'}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.app.authorization = ('Basic', ('broker', ''))
response = self.app.patch_json('/contracts/{}/changes/{}'.format(self.contract['id'], change['id']),
{'data': {'rationale_en': 'la-la-la'}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'active', 'dateSigned': get_now().isoformat()}})
self.assertEqual(response.status, '200 OK')
self.assertNotEqual(response.json['data']['date'], creation_date)
self.assertNotEqual(response.json['data']['date'], first_patch_date)
self.assertNotEqual(response.json['data']['date'], second_patch_date)
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'pending'}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
def change_date_signed(self):
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'причина зміни укр',
'rationale_en': u'change cause en',
'rationaleTypes': ['priceReduction'],
'contractNumber': u'№ 146'}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
change = response.json['data']
self.assertEqual(change['status'], 'pending')
self.assertEqual(change['contractNumber'], u'№ 146')
self.app.authorization = ('Basic', ('broker', ''))
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'active'}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "data", "description": "Can't update contract change status. 'dateSigned' is required."}
])
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'dateSigned': "12-14-11"}}, status=422)
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "dateSigned", "description": ["Could not parse 12-14-11. Should be ISO8601."]}
])
valid_date1_raw = get_now()
valid_date1 = valid_date1_raw.isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'dateSigned': valid_date1}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['dateSigned'], valid_date1)
one_day_in_past = (get_now() - timedelta(days=1)).isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'dateSigned': one_day_in_past}}, status=403)
self.assertIn("can't be earlier than contract dateSigned", response.json['errors'][0]["description"])
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'active'}})
self.assertEqual(response.status, '200 OK')
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'dateSigned': get_now().isoformat()}}, status=403)
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "data", "description": "Can't update contract change in current (active) status"}
])
response = self.app.get('/contracts/{}/changes/{}'.format(self.contract['id'], change['id']))
change1 = response.json['data']
self.assertEqual(change1['dateSigned'], valid_date1)
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'iнша причина зміни укр',
'rationale_en': u'another change cause en',
'rationaleTypes': ['priceReduction'],
'contractNumber': u'№ 147'}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
change2 = response.json['data']
    self.assertEqual(change2['status'], 'pending')
one_day_in_future = (get_now() + timedelta(days=1)).isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change2['id'], self.contract_token),
{'data': {'dateSigned': one_day_in_future}}, status=422)
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "dateSigned", "description": [u"Contract signature date can't be in the future"]}
])
smaller_than_last_change = (valid_date1_raw - timedelta(seconds=1)).isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change2['id'], self.contract_token),
{'data': {'dateSigned': smaller_than_last_change}}, status=403)
self.assertEqual("Change dateSigned ({}) can't be earlier than last active change dateSigned ({})".format(smaller_than_last_change, valid_date1), response.json['errors'][0]["description"])
date = get_now().isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change2['id'], self.contract_token),
{'data': {'dateSigned': date}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['dateSigned'], date)
# date update request
valid_date2_raw = get_now()
valid_date2 = valid_date2_raw.isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change2['id'], self.contract_token),
{'data': {'dateSigned': valid_date2}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['dateSigned'], valid_date2)
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change2['id'], self.contract_token),
{'data': {'status': 'active'}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['dateSigned'], valid_date2)
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'третя причина зміни укр',
'rationale_en': u'third change cause en',
'rationaleTypes': ['priceReduction'],
'contractNumber': u'№ 148'}})
self.assertEqual(response.status, '201 Created')
change3 = response.json['data']
    self.assertEqual(change3['status'], 'pending')
smaller_than_last_change = (valid_date2_raw - timedelta(seconds=1)).isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change3['id'], self.contract_token),
{'data': {'dateSigned': smaller_than_last_change}}, status=403)
self.assertEqual("Change dateSigned ({}) can't be earlier than last active change dateSigned ({})".format(smaller_than_last_change, valid_date2), response.json['errors'][0]["description"])
date = get_now().isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change3['id'], self.contract_token),
{'data': {'dateSigned': date}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['dateSigned'], date)
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change3['id'], self.contract_token),
{'data': {'status': 'active'}})
self.assertEqual(response.status, '200 OK')
response = self.app.patch_json('/contracts/{}?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'status': 'terminated', "amountPaid": {"amount": 15}}})
self.assertEqual(response.status, '200 OK')
def date_signed_on_change_creation(self):
# test create change with date signed
one_day_in_past = (get_now() - timedelta(days=1)).isoformat()
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'причина зміни укр', 'rationale_en': u'change cause en',
'dateSigned': one_day_in_past,
'rationaleTypes': ['priceReduction'], 'contractNumber': u'№ 146'}}, status=403)
self.assertIn("can't be earlier than contract dateSigned", response.json['errors'][0]["description"])
one_day_in_future = (get_now() + timedelta(days=1)).isoformat()
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'причина зміни укр', 'rationale_en': u'change cause en',
'dateSigned': one_day_in_future,
'rationaleTypes': ['priceReduction'], 'contractNumber': u'№ 146'}}, status=422)
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "dateSigned", "description": [u"Contract signature date can't be in the future"]}
])
date = get_now().isoformat()
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'причина зміни укр', 'rationale_en': u'change cause en',
'dateSigned': date,
'rationaleTypes': ['priceReduction'], 'contractNumber': u'№ 146'}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
change = response.json['data']
self.assertEqual(change['dateSigned'], date)
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'active'}})
self.assertEqual(response.status, '200 OK')
def change_date_signed_very_old_contracts_data(self):
# prepare old contract data
contract = self.db.get(self.contract['id'])
contract['dateSigned'] = None
self.db.save(contract)
response = self.app.get('/contracts/{}?acc_token={}'.format(self.contract['id'], self.contract_token))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('dateSigned', response.json['data'])
self.app.authorization = ('Basic', ('broker', ''))
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'причина зміни укр',
'rationale_en': u'change cause en',
'rationaleTypes': ['priceReduction'],
'contractNumber': u'№ 146'}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
change = response.json['data']
self.assertEqual(change['status'], 'pending')
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'active'}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "data", "description": "Can't update contract change status. 'dateSigned' is required."}
])
one_day_in_past = (get_now() - timedelta(days=1)).isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'active', 'dateSigned': one_day_in_past}})
self.assertEqual(response.json['data']['status'], 'active')
self.assertEqual(response.json['data']['dateSigned'], one_day_in_past)
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'iнша причина зміни укр',
'rationale_en': u'another change cause en',
'rationaleTypes': ['priceReduction'],
'contractNumber': u'№ 147'}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
change2 = response.json['data']
    self.assertEqual(change2['status'], 'pending')
two_days_in_past = (get_now() - timedelta(days=2)).isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change2['id'], self.contract_token),
{'data': {'dateSigned': two_days_in_past}}, status=403)
self.assertEqual("Change dateSigned ({}) can't be earlier than last active change dateSigned ({})".format(two_days_in_past, one_day_in_past), response.json['errors'][0]["description"])
valid_date = get_now().isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change2['id'], self.contract_token),
{'data': {'status': 'active', 'dateSigned': valid_date}})
self.assertEqual(response.json['data']['status'], 'active')
self.assertEqual(response.json['data']['dateSigned'], valid_date)
# prepare old contract change data
contract = self.db.get(self.contract['id'])
last_change = contract['changes'][-1]
last_change['dateSigned'] = None
self.db.save(contract)
response = self.app.get('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], last_change['id'], self.contract_token))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('dateSigned', response.json['data'])
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'третя причина зміни укр',
'rationale_en': u'third change cause en',
'rationaleTypes': ['priceReduction'],
'contractNumber': u'№ 148'}})
self.assertEqual(response.status, '201 Created')
change3 = response.json['data']
    self.assertEqual(change3['status'], 'pending')
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change3['id'], self.contract_token),
{'data': {'dateSigned': two_days_in_past}}, status=403)
self.assertEqual("Change dateSigned ({}) can't be earlier than last active change dateSigned ({})".format(two_days_in_past, last_change['date']), response.json['errors'][0]["description"])
valid_date2 = get_now().isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change3['id'], self.contract_token),
{'data': {'status': 'active', 'dateSigned': valid_date2}})
self.assertEqual(response.json['data']['status'], 'active')
self.assertEqual(response.json['data']['dateSigned'], valid_date2)
def date_signed_on_change_creation_for_very_old_contracts_data(self):
# prepare old contract data
contract = self.db.get(self.contract['id'])
contract['dateSigned'] = None
self.db.save(contract)
response = self.app.get('/contracts/{}?acc_token={}'.format(self.contract['id'], self.contract_token))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('dateSigned', response.json['data'])
self.app.authorization = ('Basic', ('broker', ''))
one_day_in_past = (get_now() - timedelta(days=1)).isoformat()
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'причина зміни укр', 'rationale_en': u'change cause en',
'rationaleTypes': ['priceReduction'], 'contractNumber': u'№ 146',
'dateSigned': one_day_in_past}})
self.assertEqual(response.json['data']['dateSigned'], one_day_in_past)
change = response.json['data']
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'active'}})
self.assertEqual(response.json['data']['status'], 'active')
# prepare old contract change data
contract = self.db.get(self.contract['id'])
last_change = contract['changes'][-1]
last_change['dateSigned'] = None
self.db.save(contract)
response = self.app.get('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], last_change['id'], self.contract_token))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('dateSigned', response.json['data'])
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'третя причина зміни укр', 'rationale_en': u'third change cause en',
'rationaleTypes': ['priceReduction'], 'contractNumber': u'№ 148',
'dateSigned': one_day_in_past}}, status=403)
self.assertEqual("Change dateSigned ({}) can't be earlier than last active change dateSigned ({})".format(one_day_in_past, last_change['date']), response.json['errors'][0]["description"])
valid_date = get_now().isoformat()
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'третя причина зміни укр', 'rationale_en': u'third change cause en',
'rationaleTypes': ['priceReduction'], 'contractNumber': u'№ 148',
'dateSigned': valid_date}})
self.assertEqual(response.json['data']['dateSigned'], valid_date)
|
import sqlite3
from datetime import datetime, timedelta
import dateutil.relativedelta
def initialize():
    '''Initialise the database with the 'registros' table.'''
try:
con = sqlite3.connect('facemask.db')
cur = con.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS registros
(estado text, fecha text)''')
except sqlite3.Error as error:
raise
finally:
con.close()
def save_state(state: str, date:str):
    '''Save the state in the 'registros' table of the database.'''
try:
con = sqlite3.connect('facemask.db')
cur = con.cursor()
        cur.execute("INSERT INTO registros VALUES (?, ?)", (state, date))  # parameterised to avoid quoting/injection issues
con.commit()
except sqlite3.Error as error:
print(error)
raise
finally:
con.close()
def get_last_state():
    '''Get the state and date of the most recent record in the 'registros' table.'''
try:
con = sqlite3.connect('facemask.db')
cur = con.cursor()
cur.execute("SELECT * FROM registros ORDER BY fecha DESC LIMIT 1 ")
row = cur.fetchone()
        if row is not None:
[state, time] = row
if state == 'con_mascara':
state = True
else:
state= False
time = datetime.strptime(time, "%Y-%m-%d %H:%M:%S.%f")
return state, time
except sqlite3.Error as error:
raise
finally:
con.close()
def get_session_data(start_date: datetime):
    '''Get the recorded states from the start of the session until now.'''
try:
con = sqlite3.connect('facemask.db')
cur = con.cursor()
cur.execute(f"SELECT estado FROM registros WHERE fecha > '{start_date}'")
session_data = []
data = cur.fetchall()
data = [x[0] for x in data]
for st in data:
if st == 'con_mascara':
state = True
else:
state = False
session_data.append(state)
return session_data
except sqlite3.Error as error:
raise
finally:
con.close()
def get_month_data():
    '''Get the records from the last 30 days.'''
try:
con = sqlite3.connect('facemask.db')
cur = con.cursor()
today = datetime.now().replace(hour=0)
last_month = datetime.now() + dateutil.relativedelta.relativedelta(months=-1)
cur.execute(f'''SELECT * FROM registros WHERE fecha > '{last_month}' AND fecha < '{today}'
ORDER BY date(fecha) DESC''')
data_month = cur.fetchall()
return data_month
except sqlite3.Error as error:
raise
finally:
con.close()
def get_week_data():
    '''Get the records from last Monday up to the current day.'''
try:
con = sqlite3.connect('facemask.db')
cur = con.cursor()
today = datetime.now().replace(hour=0)
last_monday = today - timedelta(days = today.weekday())
cur.execute(f"SELECT * FROM registros WHERE fecha > '{last_monday}'")
data_week = cur.fetchall()
return data_week
except sqlite3.Error as error:
raise
finally:
con.close()
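# Usage sketch (illustrative addition, not part of the original module). It creates
# or reuses 'facemask.db' in the working directory; the saved timestamp must include
# microseconds so that get_last_state() can parse it back.
if __name__ == "__main__":
    initialize()
    save_state('con_mascara', str(datetime.now()))
    state, time = get_last_state()
    print(state, time)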
|
#!/usr/bin/env python
import os
from collections import deque
def find_mismatch(xmas: list, preamble: int = 25) -> tuple:
running_list = deque(xmas[:preamble])
mismatch = None
i = preamble
while mismatch is None:
next_number = xmas[i]
        for number in running_list:
            complement = next_number - number
            # A valid pair needs two distinct entries that sum to next_number.
            if complement in running_list and (complement != number or running_list.count(number) > 1):
                break
else:
mismatch = next_number
return mismatch, i + 1
running_list.popleft()
running_list.append(next_number)
i += 1
def find_weakness(xmas: list, number: int, min_length: int = 2) -> int:
running_list = deque(xmas[:min_length])
i = min_length
while not (s := sum(running_list)) == number:
if s < number:
running_list.append(xmas[i])
i += 1
else:
running_list.popleft()
return sum([min(running_list), max(running_list)])
if __name__ == '__main__':
cwd = os.path.dirname(__file__)
with open(f'{cwd}/puzzle_input.txt', 'r') as file:
contents = file.read()[:-1].split('\n')
xmas = [int(n) for n in contents]
mismatch, lineno = find_mismatch(xmas)
print(f"The first number that isn't a sum of two prior numbers is: {mismatch} on line {lineno}")
encryption_weakness = find_weakness(xmas, mismatch)
print(f"The encryption weakness is: {encryption_weakness}")
|
from django.test import TestCase, Client
from django.core.cache import cache
from apps.accounts.models import Account
class AccountTest(TestCase):
"""General Tests for Accounts Verification"""
def setUp(self):
"""Fixtures"""
self.username = 'new_user'
self.username2 = 'new_user2'
self.role = 'Administrator'
self.email = 'newuser@university-schedule.com'
self.email2 = 'newuser2@university-schedule.com'
self.passwd = '1234'
        # create a Client() instance, a web-browser emulator, for the authorized user
        self.auth_client = Client()
        # create a Client() instance, a web-browser emulator, for the unauthorized user
self.unauth_client = Client()
# create ACCOUNT objects (authorized / unauthorized)
self.user_auth = Account.objects.create(
username=self.username2,
password=self.passwd,
email=self.email2,
is_active=True,
is_staff=True,
role=self.role,
)
self.user_unauth = Account.objects.create(username='anonimus')
# emulation of authorization for self.user_auth
self.auth_client.force_login(self.user_auth)
# clear cache before running tests
cache.clear()
def test_model_user(self):
"""test for checking the Account model"""
# requests for the initial number of ACCOUNT objects
total_count_obj = Account.objects.all().count()
total_count_obj2 = Account.objects.filter(username=self.username).count()
# checking the number of objects ACCOUNT
self.assertEqual(2, total_count_obj)
self.assertEqual(0, total_count_obj2)
# creating a new user and checking
new_user = Account.objects.create(username=self.username, role=self.role, email=self.email)
self.assertEqual(new_user.username, self.username)
self.assertEqual(new_user.role, self.role)
self.assertEqual(new_user.email, self.email)
# queries for the number of ACCOUNT objects after creating a new one and checking
now_total_count_obj = Account.objects.all().count()
now_total_count_obj2 = Account.objects.filter(username=self.username).filter(email=self.email).count()
self.assertEqual(now_total_count_obj, total_count_obj + 1)
self.assertEqual(now_total_count_obj2, total_count_obj2 + 1)
# queries from the database and checking a new user
get_user = Account.objects.get(username=self.username, email=self.email)
self.assertEqual(get_user.username, self.username)
self.assertEqual(get_user.role, self.role)
self.assertEqual(get_user.email, self.email)
def test_login_auth_user(self):
"""test for checking login by an authorized user"""
# check login from admin panel
response = self.auth_client.post('/secureadmin/', {'username': self.username2, 'password': self.passwd})
self.assertEqual(response.status_code, 200)
# # logout
# self.auth_client.logout()
# # checking the authorization of the registered user
# response2 = self.auth_client.login(email=self.email2, password=self.passwd)
# self.assertEqual(response2, True)
def test_login_unauth_user(self):
"""test for checking login by unauthorized user"""
response = self.unauth_client.login(username='anonimus', password=self.passwd)
self.assertEqual(response, False)
class PageTest(AccountTest):
"""Common tests for checking pages"""
def test_index_page(self):
"""home page request test"""
response = self.auth_client.get('/')
self.assertEqual(response.status_code, 200)
def test_admin_page(self):
"""admin page request test"""
response = self.auth_client.get('/secureadmin/')
self.assertEqual(response.status_code, 200)
def test_404_page(self):
"""404 error request test"""
response = self.auth_client.get('/stud/', follow=True)
self.assertEqual(response.status_code, 404)
|
from collections import deque
from typing import List
# DP - Bottom up (two passes: top-left then bottom-right)
def updateMatrix_dp(mat: List[List[int]]) -> List[List[int]]:
m, n = len(mat), len(mat[0])
for r in range(m):
for c in range(n):
if mat[r][c] > 0:
top = mat[r - 1][c] if r > 0 else float('inf')
left = mat[r][c - 1] if c > 0 else float('inf')
mat[r][c] = min(top, left) + 1
for r in range(m - 1, -1, -1):
for c in range(n - 1, -1, -1):
if mat[r][c] > 0:
bottom = mat[r + 1][c] if r < m - 1 else float('inf')
right = mat[r][c + 1] if c < n - 1 else float('inf')
mat[r][c] = min(mat[r][c], bottom + 1, right + 1)
return mat
# BFS (multi-source, starting from every 0 cell)
def updateMatrix_bfs(mat: List[List[int]]) -> List[List[int]]:
m, n = len(mat), len(mat[0])
DIR = [0, 1, 0, -1, 0]
q = deque([])
for r in range(m):
for c in range(n):
if mat[r][c] == 0:
q.append((r, c))
else:
mat[r][c] = -1 # Marked as not processed yet!
while q:
r, c = q.popleft()
for i in range(4):
nr, nc = r + DIR[i], c + DIR[i + 1]
if nr < 0 or nr == m or nc < 0 or nc == n or mat[nr][nc] != -1: continue
mat[nr][nc] = mat[r][c] + 1
q.append((nr, nc))
return mat
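# Quick check (illustrative addition, not part of the original solution): both
# approaches should agree on the classic LeetCode 542 example grid.
if __name__ == "__main__":
    grid = [[0, 0, 0], [0, 1, 0], [1, 1, 1]]
    print(updateMatrix_dp([row[:] for row in grid]))   # [[0, 0, 0], [0, 1, 0], [1, 2, 1]]
    print(updateMatrix_bfs([row[:] for row in grid]))  # [[0, 0, 0], [0, 1, 0], [1, 2, 1]]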
|
import json
def generate_dict_from_json(file_path: str) -> dict:
"""
Returns a dictionary filled with JSON data loaded from the file provided.
:param file_path: Path to file containing JSON to load
:return: Dictionary with JSON retrieved from file
"""
with open(file_path) as file:
data = json.load(file)
return data
def load_file_contents(file_path: str) -> str:
"""
Returns the contents of a file
:param file_path: Path to the file to load
:return: String of file's contents
"""
with open(file_path) as file:
contents = file.read()
return contents
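# Usage sketch (illustrative addition, not part of the original module); 'config.json'
# is a placeholder path, not a file referenced elsewhere in this repository.
if __name__ == "__main__":
    settings = generate_dict_from_json("config.json")
    raw_text = load_file_contents("config.json")
    print(type(settings), len(raw_text))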
|
# coding: utf-8
from oslo_config import cfg
CONF = cfg.CONF
def base(request):
namespace = request.resolver_match.namespace
view_name = request.resolver_match.view_name
livereload_js = 'http://{0}:35729/livereload.js'.format(
CONF.web.node_public_host)
chat_socketio_js = 'http://{0}:{1}/socket.io/socket.io.js'.format(
CONF.web.node_public_host, CONF.web.node_public_port)
return {
'debug': CONF.web.debug,
'namespace': namespace,
'view_name': view_name,
'livereload_js': livereload_js,
'chat_socketio_js': chat_socketio_js,
'web_hostname': CONF.web.node_public_host,
'chat_port': CONF.web.node_public_port,
}
|
from pathlib import Path
from typing import Dict
from typing import List
import pandas as pd
import drem
def _convert_dataframe_to_dict_of_lists(
vo_cibse_link: pd.DataFrame, category_col: str, uses_col: str,
) -> Dict[str, List[str]]:
"""Convert pandas DataFrame into a Dictionary of Lists.
Args:
vo_cibse_link (pd.DataFrame): Data to be converted
category_col (str): Column name of CIBSE 'category' column
uses_col (str): Column name of Valuation Office 'uses' column
Returns:
        Dict[str, List[str]]: Converted output
Example output:
{"General Retail (TM:46)": ["KIOSK CLOTHES SHOP", ...]}
"""
return {
idx: group[uses_col].drop_duplicates().tolist()
for idx, group in vo_cibse_link.groupby(category_col)
}
def _save_dict_to_text_file(vo_by_category: Dict[str, List[str]], savedir: Path) -> None:
for key in vo_by_category.keys():
filepath: Path = savedir / "".join([key, ".txt"])
data: List[str] = ["".join([str(item), "\n"]) for item in vo_by_category[key]]
with open(filepath, "a") as text_file:
text_file.writelines(data)
def copy_vo_cibse_link_from_excel_to_text_files(
filepath: Path, savedir: Path, category_col: str, uses_col: str,
) -> None:
    """Copy Valuation-Office-to-CIBSE-Benchmark links from Excel to text files.
    Each text file represents a different CIBSE category. New categories can be added
    to these files in the future as they are added to the Valuation Office data.
Args:
filepath (Path): Filepath to Excel file containing labels
savedir (Path): Path to directory where data will be saved
category_col (str): Column name of CIBSE 'category' column
uses_col (str): Column name of Valuation Office 'uses' column
"""
vo_cibse_link: pd.DataFrame = pd.read_excel(filepath, engine="openpyxl")
vo_cibse_link.loc[:, category_col] = vo_cibse_link[category_col].str.replace(
"/", " or ", regex=True,
)
    vo_by_category: Dict[str, List[str]] = _convert_dataframe_to_dict_of_lists(
vo_cibse_link, category_col, uses_col,
)
_save_dict_to_text_file(vo_by_category, savedir)
if __name__ == "__main__":
copy_vo_cibse_link_from_excel_to_text_files(
drem.filepaths.RAW_DIR / "cibse-vo.xlsx",
drem.filepaths.LINKS_DIR,
category_col="Reference Benchmark Used",
uses_col="Property Use",
)
copy_vo_cibse_link_from_excel_to_text_files(
drem.filepaths.ROUGHWORK_DIR / "vo-rows-not-caught-by-cibse.xlsx",
drem.filepaths.LINKS_DIR,
category_col="CATEGORY",
uses_col="Uses",
)
|
#! /usr/bin/env python
import numpy as np
import numpy.linalg as LA
# test passed
def allocate_time(path, max_vel, max_acc):
    """Heuristically allocate a traversal time to each segment of an (N, dim) waypoint path."""
    pathlength = path.shape[0]
    # Euclidean length of each segment between consecutive waypoints.
    distance = path[1:pathlength, :] - path[0:pathlength-1, :]
    distance = LA.norm(distance, axis=1)
    time_segment = np.multiply(3.0 * (distance * 2 / max_vel), (1 + np.exp(-2*distance/max_vel)*6.5*max_vel/max_acc))
    return time_segment
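# Usage sketch (illustrative addition, not part of the original module): allocate
# times for a short 2D waypoint path; max_vel/max_acc values are placeholders.
if __name__ == "__main__":
    path = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 2.0]])
    print(allocate_time(path, max_vel=2.0, max_acc=1.0))  # one time per segment (2 segments here)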
|
from arm.logicnode.arm_nodes import *
class SetTraitPausedNode(ArmLogicTreeNode):
"""Sets the paused state of the given trait."""
bl_idname = 'LNSetTraitPausedNode'
bl_label = 'Set Trait Paused'
arm_version = 1
def init(self, context):
super(SetTraitPausedNode, self).init(context)
self.add_input('ArmNodeSocketAction', 'In')
self.add_input('NodeSocketShader', 'Trait')
self.add_input('NodeSocketBool', 'Paused')
self.add_output('ArmNodeSocketAction', 'Out')
|
import os
import sys
from setuptools import setup, find_packages
if sys.argv[-1] == "publish":
os.system("python setup.py sdist upload")
sys.exit()
setup_args = dict(
name="dlgr.demos",
version="6.5.0",
description="Demonstration experiments for Dallinger",
url="http://github.com/Dallinger/Dallinger",
maintainer="Jordan Suchow",
maintainer_email="suchow@berkeley.edu",
license="MIT",
keywords=["science", "cultural evolution", "experiments", "psychology"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
packages=find_packages("."),
package_dir={"": "."},
namespace_packages=["dlgr"],
include_package_data=True,
zip_safe=False,
install_requires=["setuptools"],
entry_points={
"dallinger.experiments": [
"Bartlett1932 = dlgr.demos.bartlett1932.experiment:Bartlett1932",
"TwentyFortyEight = dlgr.demos.twentyfortyeight.experiment:TwentyFortyEight",
"CoordinationChatroom = dlgr.demos.chatroom.experiment:CoordinationChatroom",
"ConcentrationGame = dlgr.demos.concentration.experiment:ConcentrationGame",
"FunctionLearning = dlgr.demos.function_learning.experiment:FunctionLearning",
"IteratedDrawing = dlgr.demos.iterated_drawing.experiment:IteratedDrawing",
"MCMCP = dlgr.demos.mcmcp.experiment:MCMCP",
"RogersExperiment = dlgr.demos.rogers.experiment:RogersExperiment",
"SheepMarket = dlgr.demos.sheep_market.experiment:SheepMarket",
"SnakeGame = dlgr.demos.snake.experiment:SnakeGame",
"VoxPopuli = dlgr.demos.vox_populi.experiment:VoxPopuli",
]
},
)
setup(**setup_args)
|
from nonebot import on_command
from nonebot.adapters.cqhttp import Message
from io import BytesIO
from nonebot.typing import T_State
from nonebot.adapters.cqhttp.permission import GROUP, PRIVATE
from nonebot.adapters.cqhttp.event import Event
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.message import MessageSegment
import httpx
from ....nonebot_plugin_ff14 import config
PLUGIN_COMMAND = "/成就"
PLUGIN_INFO = "查询成就"
HELP_INFO = """/成就 ID
"""
achievement = on_command("achievement", permission=GROUP | PRIVATE, priority=5, aliases={"成就"})
@achievement.handle()
async def _(bot: Bot, event: Event, state: T_State):
args = str(event.get_message()).strip()
if args:
state["id"] = args
@achievement.got("id", prompt="想要查询的道具ID是多少?")
async def _get(bot: Bot, event: Event, state: T_State):
item_id: str = state['id']
if not item_id.isdecimal():
if item_id == '取消':
await achievement.finish()
await achievement.reject("请输入成就ID,搜索请使用命令/搜索,取消请输入“取消”")
item_info = await get_item_info(item_id)
await achievement.finish(item_info)
async def get_item_info(_id):
async with httpx.AsyncClient() as client:
try:
response = await client.get("{}/achievement/{}".format(config.kafeapi, _id))
except httpx.ConnectTimeout:
return "请求超时,请检查服务器网络!"
if not response.status_code == 200:
return "查询失败,请检查ID是否正确!"
else:
try:
_json = response.json()
except Exception as e:
return "请求失败!{}".format(e)
name = _json['Name']
description = _json['Description']
version = _json['GamePatch']['Version']
icon_url = _json['Icon']
text = "名称:{}\n说明:{}\n版本:{}".format(name, description, version)
try:
res: httpx.Response = await client.get("{}{}".format(config.kafeapi, icon_url))
except Exception as e:
message = Message.template(MessageSegment.text(text))
else:
message = Message.template(MessageSegment.image(BytesIO(res.content)) + MessageSegment.text(text))
return message
|
import logging
import sys
import argparse
# pylint: disable=import-error
from .upstream.server.entrypoint import configure_parser as server_parser
from .upstream.util.cli import *
from .upstream.__init__ import __version__
from .upstream.util.log import *
# setting up custom TRACE debug level
LOGGER = logging.getLogger(__name__)
logging.addLevelName(logging.DEBUG - 5, 'TRACE')
addLoggingLevel('TRACE', logging.DEBUG - 5)
def main():
log_levels = ['TRACE', 'DEBUG', 'INFO']
parser = argparse.ArgumentParser(
description='server/client utility for usage as a mock upstream')
parser.add_argument(
"-l",
"--log",
dest="logLevel",
type=MakeCaseAgnostic(log_levels),
choices=log_levels,
help="Set the logging level (default Empty)")
sub_parsers = parser.add_subparsers(
title='Commands',
description='Available Upstream Subcommands',
        help='Choose an upstream subcommand')
# adding subcommand parsers
server_parser(sub_parsers)
args = vars(parser.parse_args())
if args:
logLevel = args['logLevel']
if logLevel:
logging.basicConfig(
format='%(asctime)s [%(threadName)-12.12s] [%(levelname)-8.8s] %(message)s',
stream=sys.stdout,
level=getattr(logging, logLevel)
)
subcommand = args.pop('func', '')
if subcommand:
subcommand(args)
else:
parser.print_usage()
LOGGER.debug(
f"main() - "
f"version={__version__} ")
if __name__ == "__main__":
main()
|
import requests
import postageapp
from ostruct import OpenStruct
import json
from json import JSONEncoder
class RequestEncoder(JSONEncoder):
def default(self, o):
return o.__dict__
class Request:
def __init__(self, method = None):
self._method = method or 'send_message'
self._arguments = OpenStruct()
@property
def method(self):
return self._method
@method.setter
def method(self, _method):
self._method = _method
return self._method
@property
def arguments(self):
return self._arguments
@arguments.setter
def arguments(self, _arguments):
self._arguments = _arguments
return self._arguments
def api_url(self):
if (postageapp.config.port):
return "%s://%s:%d/v.1.0/%s.json" % (
postageapp.config.proto,
postageapp.config.host,
postageapp.config.port,
self.method
)
else:
return "%s://%s/v.1.0/%s.json" % (
postageapp.config.proto,
postageapp.config.host,
self.method
)
def headers(self):
return {
'User-Agent': 'PostageApp Python %s (%s)' % (postageapp.__version__, postageapp.config.framework),
'Content-Type': 'application/json'
}
def send(self):
body = {
'api_key': postageapp.config.api_key,
'arguments': self._arguments
}
r = requests.post(self.api_url(), data=RequestEncoder().encode(body), headers=self.headers())
return r.json()
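# Illustrative usage sketch (assumes postageapp.config has already been populated with
# proto/host/api_key as this module expects; the argument names shown are examples for a
# send_message call, not an exhaustive reference):
#
#     req = Request('send_message')
#     req.arguments.recipients = 'user@example.com'
#     req.arguments.content = {'text/plain': 'Hello from PostageApp'}
#     response = req.send()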
|
# coding=utf-8
"""Annotate python syntax trees with formatting from the source file."""
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import ast
import contextlib
import functools
import itertools
import numbers
import six
from six.moves import zip
import sys
import token
from pasta.base import ast_constants
from pasta.base import ast_utils
from pasta.base import formatting as fmt
from pasta.base import token_generator
# ==============================================================================
# == Helper functions for decorating nodes with prefix + suffix ==
# ==============================================================================
def _gen_wrapper(f, scope=True, prefix=True, suffix=True, max_suffix_lines=None,
semicolon=False, comment=False, statement=False):
  @functools.wraps(f)
def wrapped(self, node, *args, **kwargs):
with (self.scope(node, trailing_comma=False) if scope else _noop_context()):
if prefix:
self.prefix(node, default=self._indent if statement else '')
f(self, node, *args, **kwargs)
if suffix:
self.suffix(node, max_lines=max_suffix_lines, semicolon=semicolon,
comment=comment, default='\n' if statement else '')
return wrapped
@contextlib.contextmanager
def _noop_context():
yield
def expression(f):
"""Decorates a function where the node is an expression."""
return _gen_wrapper(f, max_suffix_lines=0)
def fstring_expression(f):
"""Decorates a function where the node is a FormattedValue in an fstring."""
return _gen_wrapper(f, scope=False)
def space_around(f):
"""Decorates a function where the node has whitespace prefix and suffix."""
return _gen_wrapper(f, scope=False)
def space_left(f):
"""Decorates a function where the node has whitespace prefix."""
return _gen_wrapper(f, scope=False, suffix=False)
def statement(f):
"""Decorates a function where the node is a statement."""
return _gen_wrapper(f, scope=False, max_suffix_lines=1, semicolon=True,
comment=True, statement=True)
def module(f):
"""Special decorator for the module node."""
return _gen_wrapper(f, scope=False, comment=True)
def block_statement(f):
"""Decorates a function where the node is a statement with children."""
  @functools.wraps(f)
def wrapped(self, node, *args, **kwargs):
self.prefix(node, default=self._indent)
f(self, node, *args, **kwargs)
if hasattr(self, 'block_suffix'):
last_child = ast_utils.get_last_child(node)
# Workaround for ast.Module which does not have a lineno
if last_child and last_child.lineno != getattr(node, 'lineno', 0):
indent = (fmt.get(last_child, 'prefix') or '\n').splitlines()[-1]
self.block_suffix(node, indent)
else:
self.suffix(node, comment=True)
return wrapped
# ==============================================================================
# == NodeVisitors for annotating an AST ==
# ==============================================================================
class BaseVisitor(ast.NodeVisitor):
"""Walks a syntax tree in the order it appears in code.
This class has a dual-purpose. It is implemented (in this file) for annotating
an AST with formatting information needed to reconstruct the source code, but
it also is implemented in pasta.base.codegen to reconstruct the source code.
Each visit method in this class specifies the order in which both child nodes
and syntax tokens appear, plus where to account for whitespace, commas,
parentheses, etc.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
self._stack = []
self._indent = ''
self._indent_diff = ''
self._default_indent_diff = ' '
def visit(self, node):
self._stack.append(node)
super(BaseVisitor, self).visit(node)
assert node is self._stack.pop()
def prefix(self, node, default=''):
"""Account for some amount of whitespace as the prefix to a node."""
self.attr(node, 'prefix', [lambda: self.ws(comment=True)], default=default)
def suffix(self, node, max_lines=None, semicolon=False, comment=False,
default=''):
"""Account for some amount of whitespace as the suffix to a node."""
def _ws():
return self.ws(max_lines=max_lines, semicolon=semicolon, comment=comment)
self.attr(node, 'suffix', [_ws], default=default)
def indented(self, node, children_attr):
children = getattr(node, children_attr)
prev_indent = self._indent
prev_indent_diff = self._indent_diff
new_diff = fmt.get(children[0], 'indent_diff')
if new_diff is None:
new_diff = self._default_indent_diff
self._indent_diff = new_diff
self._indent = prev_indent + self._indent_diff
for child in children:
yield child
self.attr(node, 'block_suffix_%s' % children_attr, [])
self._indent = prev_indent
self._indent_diff = prev_indent_diff
def set_default_indent_diff(self, indent):
self._default_indent_diff = indent
@contextlib.contextmanager
def scope(self, node, attr=None, trailing_comma=False, default_parens=False):
"""Context manager to handle a parenthesized scope.
Arguments:
node: (ast.AST) Node to store the scope prefix and suffix on.
attr: (string, optional) Attribute of the node contained in the scope, if
any. For example, as `None`, the scope would wrap the entire node, but
as 'bases', the scope might wrap only the bases of a class.
trailing_comma: (boolean) If True, allow a trailing comma at the end.
default_parens: (boolean) If True and no formatting information is
present, the scope would be assumed to be parenthesized.
"""
if attr:
self.attr(node, attr + '_prefix', [],
default='(' if default_parens else '')
yield
if attr:
self.attr(node, attr + '_suffix', [],
default=')' if default_parens else '')
def token(self, token_val):
"""Account for a specific token."""
def attr(self, node, attr_name, attr_vals, deps=None, default=None):
"""Handles an attribute on the given node."""
def ws(self, max_lines=None, semicolon=False, comment=True):
"""Account for some amount of whitespace.
Arguments:
max_lines: (int) Maximum number of newlines to consider.
semicolon: (boolean) If True, parse up to the next semicolon (if present).
comment: (boolean) If True, look for a trailing comment even when not in
a parenthesized scope.
"""
return ''
def dots(self, num_dots):
"""Account for a number of dots."""
return '.' * num_dots
def ws_oneline(self):
"""Account for up to one line of whitespace."""
return self.ws(max_lines=1)
def optional_token(self, node, attr_name, token_val, default=False):
"""Account for a suffix that may or may not occur."""
def one_of_symbols(self, *symbols):
"""Account for one of the given symbols."""
return symbols[0]
# ============================================================================
# == BLOCK STATEMENTS: Statements that contain a list of statements ==
# ============================================================================
# Keeps the entire suffix, so @block_statement is not useful here.
@module
def visit_Module(self, node):
try:
self.attr(
node, 'bom',
[lambda: self.tokens.eat_tokens(lambda t: t.type == token.ERRORTOKEN)],
default='')
except:
pass
self.generic_visit(node)
@block_statement
def visit_If(self, node):
tok = 'elif' if fmt.get(node, 'is_elif') else 'if'
self.attr(node, 'open_if', [tok, self.ws], default=tok + ' ')
self.visit(node.test)
self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline],
default=':\n')
for stmt in self.indented(node, 'body'):
self.visit(stmt)
if node.orelse:
if (len(node.orelse) == 1 and isinstance(node.orelse[0], ast.If) and
self.check_is_elif(node.orelse[0])):
fmt.set(node.orelse[0], 'is_elif', True)
self.visit(node.orelse[0])
else:
self.attr(node, 'elseprefix', [self.ws])
self.token('else')
self.attr(node, 'open_else', [self.ws, ':', self.ws_oneline],
default=':\n')
for stmt in self.indented(node, 'orelse'):
self.visit(stmt)
@abc.abstractmethod
def check_is_elif(self, node):
"""Return True if the node continues a previous `if` statement as `elif`.
In python 2.x, `elif` statements get parsed as If nodes. E.g, the following
two syntax forms are indistinguishable in the ast in python 2.
if a:
do_something()
elif b:
do_something_else()
if a:
do_something()
else:
if b:
do_something_else()
This method should return True for the 'if b' node if it has the first form.
"""
@block_statement
def visit_While(self, node):
self.attr(node, 'while_keyword', ['while', self.ws], default='while ')
self.visit(node.test)
self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline],
default=':\n')
for stmt in self.indented(node, 'body'):
self.visit(stmt)
if node.orelse:
self.attr(node, 'else', [self.ws, 'else', self.ws, ':', self.ws_oneline],
default=self._indent + 'else:\n')
for stmt in self.indented(node, 'orelse'):
self.visit(stmt)
@block_statement
def visit_For(self, node, is_async=False):
if is_async:
self.attr(node, 'for_keyword', ['async', self.ws, 'for', self.ws],
default='async for ')
else:
self.attr(node, 'for_keyword', ['for', self.ws], default='for ')
self.visit(node.target)
self.attr(node, 'for_in', [self.ws, 'in', self.ws], default=' in ')
self.visit(node.iter)
self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline],
default=':\n')
for stmt in self.indented(node, 'body'):
self.visit(stmt)
if node.orelse:
self.attr(node, 'else', [self.ws, 'else', self.ws, ':', self.ws_oneline],
default=self._indent + 'else:\n')
for stmt in self.indented(node, 'orelse'):
self.visit(stmt)
def visit_AsyncFor(self, node):
return self.visit_For(node, is_async=True)
@block_statement
def visit_With(self, node, is_async=False):
if hasattr(node, 'items'):
return self.visit_With_3(node, is_async)
if not getattr(node, 'is_continued', False):
self.attr(node, 'with', ['with', self.ws], default='with ')
self.visit(node.context_expr)
if node.optional_vars:
self.attr(node, 'with_as', [self.ws, 'as', self.ws], default=' as ')
self.visit(node.optional_vars)
if len(node.body) == 1 and self.check_is_continued_with(node.body[0]):
node.body[0].is_continued = True
self.attr(node, 'with_comma', [self.ws, ',', self.ws], default=', ')
else:
self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline],
default=':\n')
for stmt in self.indented(node, 'body'):
self.visit(stmt)
def visit_AsyncWith(self, node):
return self.visit_With(node, is_async=True)
@abc.abstractmethod
def check_is_continued_try(self, node):
pass
@abc.abstractmethod
def check_is_continued_with(self, node):
"""Return True if the node continues a previous `with` statement.
In python 2.x, `with` statements with many context expressions get parsed as
a tree of With nodes. E.g, the following two syntax forms are
indistinguishable in the ast in python 2.
with a, b, c:
do_something()
with a:
with b:
with c:
do_something()
This method should return True for the `with b` and `with c` nodes.
"""
def visit_With_3(self, node, is_async=False):
if is_async:
self.attr(node, 'with', ['async', self.ws, 'with', self.ws],
default='async with ')
else:
self.attr(node, 'with', ['with', self.ws], default='with ')
for i, withitem in enumerate(node.items):
self.visit(withitem)
if i != len(node.items) - 1:
self.token(',')
self.attr(node, 'with_body_open', [':', self.ws_oneline], default=':\n')
for stmt in self.indented(node, 'body'):
self.visit(stmt)
@space_around
def visit_withitem(self, node):
self.visit(node.context_expr)
if node.optional_vars:
self.attr(node, 'as', [self.ws, 'as', self.ws], default=' as ')
self.visit(node.optional_vars)
@block_statement
def visit_ClassDef(self, node):
for i, decorator in enumerate(node.decorator_list):
self.attr(node, 'decorator_prefix_%d' % i, [self.ws, '@'], default='@')
self.visit(decorator)
self.attr(node, 'decorator_suffix_%d' % i, [self.ws],
default='\n' + self._indent)
self.attr(node, 'class_def', ['class', self.ws, node.name, self.ws],
default='class %s' % node.name, deps=('name',))
class_args = getattr(node, 'bases', []) + getattr(node, 'keywords', [])
with self.scope(node, 'bases', trailing_comma=bool(class_args),
default_parens=True):
for i, base in enumerate(node.bases):
self.visit(base)
self.attr(node, 'base_suffix_%d' % i, [self.ws])
if base != class_args[-1]:
self.attr(node, 'base_sep_%d' % i, [',', self.ws], default=', ')
if hasattr(node, 'keywords'):
for i, keyword in enumerate(node.keywords):
self.visit(keyword)
self.attr(node, 'keyword_suffix_%d' % i, [self.ws])
if keyword != node.keywords[-1]:
self.attr(node, 'keyword_sep_%d' % i, [',', self.ws], default=', ')
self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline],
default=':\n')
for stmt in self.indented(node, 'body'):
self.visit(stmt)
@block_statement
def visit_FunctionDef(self, node, is_async=False):
for i, decorator in enumerate(node.decorator_list):
self.attr(node, 'decorator_symbol_%d' % i, [self.ws, '@', self.ws],
default='@')
self.visit(decorator)
self.attr(node, 'decorator_suffix_%d' % i, [self.ws_oneline],
default='\n' + self._indent)
if is_async:
self.attr(node, 'function_def',
[self.ws, 'async', self.ws, 'def', self.ws, node.name, self.ws],
deps=('name',), default='async def %s' % node.name)
else:
self.attr(node, 'function_def',
[self.ws, 'def', self.ws, node.name, self.ws],
deps=('name',), default='def %s' % node.name)
# In Python 3, there can be extra args in kwonlyargs
kwonlyargs = getattr(node.args, 'kwonlyargs', [])
args_count = sum((len(node.args.args + kwonlyargs),
1 if node.args.vararg else 0,
1 if node.args.kwarg else 0))
with self.scope(node, 'args', trailing_comma=args_count > 0,
default_parens=True):
self.visit(node.args)
if getattr(node, 'returns', None):
self.attr(node, 'returns_prefix', [self.ws, '->', self.ws],
deps=('returns',), default=' -> ')
self.visit(node.returns)
self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline],
default=':\n')
for stmt in self.indented(node, 'body'):
self.visit(stmt)
def visit_AsyncFunctionDef(self, node):
return self.visit_FunctionDef(node, is_async=True)
@block_statement
def visit_TryFinally(self, node):
# Try with except and finally is a TryFinally with the first statement as a
# TryExcept in Python2
self.attr(node, 'open_try', ['try', self.ws, ':', self.ws_oneline],
default='try:\n')
# TODO(soupytwist): Find a cleaner solution for differentiating this.
if len(node.body) == 1 and self.check_is_continued_try(node.body[0]):
node.body[0].is_continued = True
self.visit(node.body[0])
else:
for stmt in self.indented(node, 'body'):
self.visit(stmt)
self.attr(node, 'open_finally',
[self.ws, 'finally', self.ws, ':', self.ws_oneline],
default='finally:\n')
for stmt in self.indented(node, 'finalbody'):
self.visit(stmt)
@block_statement
def visit_TryExcept(self, node):
if not getattr(node, 'is_continued', False):
self.attr(node, 'open_try', ['try', self.ws, ':', self.ws_oneline],
default='try:\n')
for stmt in self.indented(node, 'body'):
self.visit(stmt)
for handler in node.handlers:
self.visit(handler)
if node.orelse:
self.attr(node, 'open_else',
[self.ws, 'else', self.ws, ':', self.ws_oneline],
default='else:\n')
for stmt in self.indented(node, 'orelse'):
self.visit(stmt)
@block_statement
def visit_Try(self, node):
# Python 3
self.attr(node, 'open_try', [self.ws, 'try', self.ws, ':', self.ws_oneline],
default='try:\n')
for stmt in self.indented(node, 'body'):
self.visit(stmt)
for handler in node.handlers:
self.visit(handler)
if node.orelse:
self.attr(node, 'open_else',
[self.ws, 'else', self.ws, ':', self.ws_oneline],
default='else:\n')
for stmt in self.indented(node, 'orelse'):
self.visit(stmt)
if node.finalbody:
self.attr(node, 'open_finally',
[self.ws, 'finally', self.ws, ':', self.ws_oneline],
default='finally:\n')
for stmt in self.indented(node, 'finalbody'):
self.visit(stmt)
@block_statement
def visit_ExceptHandler(self, node):
self.token('except')
if node.type:
self.visit(node.type)
if node.type and node.name:
self.attr(node, 'as', [self.ws, self.one_of_symbols("as", ","), self.ws],
default=' as ')
if node.name:
if isinstance(node.name, ast.AST):
self.visit(node.name)
else:
self.token(node.name)
self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline],
default=':\n')
for stmt in self.indented(node, 'body'):
self.visit(stmt)
@statement
def visit_Raise(self, node):
if hasattr(node, 'cause'):
return self.visit_Raise_3(node)
self.token('raise')
if node.type:
self.attr(node, 'type_prefix', [self.ws], default=' ')
self.visit(node.type)
if node.inst:
self.attr(node, 'inst_prefix', [self.ws, ',', self.ws], default=', ')
self.visit(node.inst)
if node.tback:
self.attr(node, 'tback_prefix', [self.ws, ',', self.ws], default=', ')
self.visit(node.tback)
def visit_Raise_3(self, node):
if node.exc:
self.attr(node, 'open_raise', ['raise', self.ws], default='raise ')
self.visit(node.exc)
if node.cause:
self.attr(node, 'cause_prefix', [self.ws, 'from', self.ws],
default=' from ')
self.visit(node.cause)
else:
self.token('raise')
# ============================================================================
# == STATEMENTS: Instructions without a return value ==
# ============================================================================
@statement
def visit_Assert(self, node):
self.attr(node, 'assert_open', ['assert', self.ws], default='assert ')
self.visit(node.test)
if node.msg:
self.attr(node, 'msg_prefix', [',', self.ws], default=', ')
self.visit(node.msg)
@statement
def visit_Assign(self, node):
for i, target in enumerate(node.targets):
self.visit(target)
self.attr(node, 'equal_%d' % i, [self.ws, '=', self.ws], default=' = ')
self.visit(node.value)
@statement
def visit_AugAssign(self, node):
self.visit(node.target)
op_token = '%s=' % ast_constants.NODE_TYPE_TO_TOKENS[type(node.op)][0]
self.attr(node, 'operator', [self.ws, op_token, self.ws],
default=' %s ' % op_token)
self.visit(node.value)
@statement
def visit_AnnAssign(self, node):
# TODO: Check default formatting for different values of "simple"
self.visit(node.target)
self.attr(node, 'colon', [self.ws, ':', self.ws], default=': ')
self.visit(node.annotation)
if node.value:
self.attr(node, 'equal', [self.ws, '=', self.ws], default=' = ')
self.visit(node.value)
@expression
def visit_Await(self, node):
self.attr(node, 'await', ['await', self.ws], default='await ')
self.visit(node.value)
@statement
def visit_Break(self, node):
self.token('break')
@statement
def visit_Continue(self, node):
self.token('continue')
@statement
def visit_Delete(self, node):
self.attr(node, 'del', ['del', self.ws], default='del ')
for i, target in enumerate(node.targets):
self.visit(target)
if target is not node.targets[-1]:
self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ')
@statement
def visit_Exec(self, node):
# If no formatting info is present, will use parenthesized style
self.attr(node, 'exec', ['exec', self.ws], default='exec')
with self.scope(node, 'body', trailing_comma=False, default_parens=True):
self.visit(node.body)
if node.globals:
self.attr(node, 'in_globals',
[self.ws, self.one_of_symbols('in', ','), self.ws],
default=', ')
self.visit(node.globals)
if node.locals:
self.attr(node, 'in_locals', [self.ws, ',', self.ws], default=', ')
self.visit(node.locals)
@statement
def visit_Expr(self, node):
self.visit(node.value)
@statement
def visit_Global(self, node):
self.token('global')
identifiers = []
for ident in node.names:
if ident != node.names[0]:
identifiers.extend([self.ws, ','])
identifiers.extend([self.ws, ident])
self.attr(node, 'names', identifiers)
@statement
def visit_Import(self, node):
self.token('import')
for i, alias in enumerate(node.names):
self.attr(node, 'alias_prefix_%d' % i, [self.ws], default=' ')
self.visit(alias)
if alias != node.names[-1]:
self.attr(node, 'alias_sep_%d' % i, [self.ws, ','], default=',')
@statement
def visit_ImportFrom(self, node):
self.token('from')
self.attr(node, 'module_prefix', [self.ws], default=' ')
module_pattern = []
if node.level > 0:
module_pattern.extend([self.dots(node.level), self.ws])
if node.module:
parts = node.module.split('.')
for part in parts[:-1]:
module_pattern += [self.ws, part, self.ws, '.']
module_pattern += [self.ws, parts[-1]]
self.attr(node, 'module', module_pattern,
deps=('level', 'module'),
default='.' * node.level + (node.module or ''))
self.attr(node, 'module_suffix', [self.ws], default=' ')
self.token('import')
with self.scope(node, 'names', trailing_comma=True):
for i, alias in enumerate(node.names):
self.attr(node, 'alias_prefix_%d' % i, [self.ws], default=' ')
self.visit(alias)
if alias is not node.names[-1]:
self.attr(node, 'alias_sep_%d' % i, [self.ws, ','], default=',')
@expression
def visit_NamedExpr(self, node):
self.visit(node.target)
self.attr(node, 'equal', [self.ws, ':=', self.ws], default=' := ')
self.visit(node.value)
@statement
def visit_Nonlocal(self, node):
self.token('nonlocal')
identifiers = []
for ident in node.names:
if ident != node.names[0]:
identifiers.extend([self.ws, ','])
identifiers.extend([self.ws, ident])
self.attr(node, 'names', identifiers)
@statement
def visit_Pass(self, node):
self.token('pass')
@statement
def visit_Print(self, node):
self.attr(node, 'print_open', ['print', self.ws], default='print ')
if node.dest:
self.attr(node, 'redirection', ['>>', self.ws], default='>>')
self.visit(node.dest)
if node.values:
self.attr(node, 'values_prefix', [self.ws, ',', self.ws], default=', ')
elif not node.nl:
self.attr(node, 'trailing_comma', [self.ws, ','], default=',')
for i, value in enumerate(node.values):
self.visit(value)
if value is not node.values[-1]:
self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ')
elif not node.nl:
self.attr(node, 'trailing_comma', [self.ws, ','], default=',')
@statement
def visit_Return(self, node):
self.token('return')
if node.value:
self.attr(node, 'return_value_prefix', [self.ws], default=' ')
self.visit(node.value)
@expression
def visit_Yield(self, node):
self.token('yield')
if node.value:
self.attr(node, 'yield_value_prefix', [self.ws], default=' ')
self.visit(node.value)
@expression
def visit_YieldFrom(self, node):
self.attr(node, 'yield_from', ['yield', self.ws, 'from', self.ws],
default='yield from ')
self.visit(node.value)
# ============================================================================
# == EXPRESSIONS: Anything that evaluates and can be in parens ==
# ============================================================================
@expression
def visit_Attribute(self, node):
self.visit(node.value)
self.attr(node, 'dot', [self.ws, '.', self.ws], default='.')
self.token(node.attr)
@expression
def visit_BinOp(self, node):
op_symbol = ast_constants.NODE_TYPE_TO_TOKENS[type(node.op)][0]
self.visit(node.left)
self.attr(node, 'op', [self.ws, op_symbol, self.ws],
default=' %s ' % op_symbol, deps=('op',))
self.visit(node.right)
@expression
def visit_BoolOp(self, node):
op_symbol = ast_constants.NODE_TYPE_TO_TOKENS[type(node.op)][0]
for i, value in enumerate(node.values):
self.visit(value)
if value is not node.values[-1]:
self.attr(node, 'op_%d' % i, [self.ws, op_symbol, self.ws],
default=' %s ' % op_symbol, deps=('op',))
@expression
def visit_Call(self, node):
self.visit(node.func)
with self.scope(node, 'arguments', default_parens=True):
# python <3.5: starargs and kwargs are in separate fields
# python 3.5+: starargs args included as a Starred nodes in the arguments
# and kwargs are included as keywords with no argument name.
if sys.version_info[:2] >= (3, 5):
any_args = self.visit_Call_arguments35(node)
else:
any_args = self.visit_Call_arguments(node)
if any_args:
self.optional_token(node, 'trailing_comma', ',')
def visit_Call_arguments(self, node):
def arg_location(tup):
arg = tup[1]
if isinstance(arg, ast.keyword):
arg = arg.value
return (getattr(arg, "lineno", 0), getattr(arg, "col_offset", 0))
if node.starargs:
sorted_keywords = sorted(
[(None, kw) for kw in node.keywords] + [('*', node.starargs)],
key=arg_location)
else:
sorted_keywords = [(None, kw) for kw in node.keywords]
all_args = [(None, n) for n in node.args] + sorted_keywords
if node.kwargs:
all_args.append(('**', node.kwargs))
for i, (prefix, arg) in enumerate(all_args):
if prefix is not None:
self.attr(node, '%s_prefix' % prefix, [self.ws, prefix], default=prefix)
self.visit(arg)
if arg is not all_args[-1][1]:
self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ')
return bool(all_args)
def visit_Call_arguments35(self, node):
def arg_compare(a1, a2):
"""Old-style comparator for sorting args."""
def is_arg(a):
return not isinstance(a, (ast.keyword, ast.Starred))
# No kwarg can come before a regular arg (but Starred can be wherever)
if is_arg(a1) and isinstance(a2, ast.keyword):
return -1
elif is_arg(a2) and isinstance(a1, ast.keyword):
return 1
# If no lineno or col_offset on one of the args, they compare as equal
# (since sorting is stable, this should leave them mostly where they
# were in the initial list).
def get_pos(a):
if isinstance(a, ast.keyword):
a = a.value
return (getattr(a, 'lineno', None), getattr(a, 'col_offset', None))
pos1 = get_pos(a1)
pos2 = get_pos(a2)
if None in pos1 or None in pos2:
return 0
# If both have lineno/col_offset set, use that to sort them
return -1 if pos1 < pos2 else 0 if pos1 == pos2 else 1
# Note that this always sorts keywords identically to just sorting by
# lineno/col_offset, except in cases where that ordering would have been
# a syntax error (named arg before unnamed arg).
all_args = sorted(node.args + node.keywords,
key=functools.cmp_to_key(arg_compare))
for i, arg in enumerate(all_args):
self.visit(arg)
if arg is not all_args[-1]:
self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ')
return bool(all_args)
def visit_Starred(self, node):
self.attr(node, 'star', ['*', self.ws], default='*')
self.visit(node.value)
@expression
def visit_Compare(self, node):
self.visit(node.left)
for i, (op, comparator) in enumerate(zip(node.ops, node.comparators)):
self.attr(node, 'op_prefix_%d' % i, [self.ws], default=' ')
self.visit(op)
self.attr(node, 'op_suffix_%d' % i, [self.ws], default=' ')
self.visit(comparator)
@expression
def visit_Constant(self, node):
if hasattr(node, 'kind') and node.kind:
self.attr(node, 'content', [self.tokens.str],
default='%s"%s"' % (node.kind, node.value), deps=('value',))
elif isinstance(node.value, bool):
self.attr(node, 'content', [str(node.value)], default=str(node.value),
deps=('value',))
elif node.value is Ellipsis:
self.token('...')
elif isinstance(node.value, numbers.Number):
token_number_type = token_generator.TOKENS.NUMBER
self.attr(node, 'content',
[lambda: self.tokens.next_of_type(token_number_type).src],
deps=('value',), default=str(node.value))
elif isinstance(node.value, six.text_type) or isinstance(node.value, bytes):
self.attr(node, 'content', [self.tokens.str], deps=('value',),
default=node.value)
else:
self.token(str(node.value))
@expression
def visit_Dict(self, node):
self.token('{')
for i, key, value in zip(range(len(node.keys)), node.keys, node.values):
if key is None:
# Handle Python 3.5+ dict unpacking syntax (PEP-448)
self.attr(node, 'starstar_%d' % i, [self.ws, '**'], default='**')
else:
self.visit(key)
self.attr(node, 'key_val_sep_%d' % i, [self.ws, ':', self.ws],
default=': ')
self.visit(value)
if value is not node.values[-1]:
self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ')
self.optional_token(node, 'extracomma', ',', allow_whitespace_prefix=True)
self.attr(node, 'close_prefix', [self.ws, '}'], default='}')
@expression
def visit_DictComp(self, node):
self.attr(node, 'open_dict', ['{', self.ws], default='{')
self.visit(node.key)
self.attr(node, 'key_val_sep', [self.ws, ':', self.ws], default=': ')
self.visit(node.value)
for comp in node.generators:
self.visit(comp)
self.attr(node, 'close_dict', [self.ws, '}'], default='}')
@expression
def visit_GeneratorExp(self, node):
self._comp_exp(node)
@expression
def visit_IfExp(self, node):
self.visit(node.body)
self.attr(node, 'if', [self.ws, 'if', self.ws], default=' if ')
self.visit(node.test)
self.attr(node, 'else', [self.ws, 'else', self.ws], default=' else ')
self.visit(node.orelse)
@expression
def visit_Lambda(self, node):
self.attr(node, 'lambda_def', ['lambda', self.ws], default='lambda ')
self.visit(node.args)
self.attr(node, 'open_lambda', [self.ws, ':', self.ws], default=': ')
self.visit(node.body)
@expression
def visit_List(self, node):
self.attr(node, 'list_open', ['[', self.ws], default='[')
for i, elt in enumerate(node.elts):
self.visit(elt)
if elt is not node.elts[-1]:
self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ')
if node.elts:
self.optional_token(node, 'extracomma', ',', allow_whitespace_prefix=True)
self.attr(node, 'list_close', [self.ws, ']'], default=']')
@expression
def visit_ListComp(self, node):
self._comp_exp(node, open_brace='[', close_brace=']')
def _comp_exp(self, node, open_brace=None, close_brace=None):
if open_brace:
self.attr(node, 'compexp_open', [open_brace, self.ws], default=open_brace)
self.visit(node.elt)
for i, comp in enumerate(node.generators):
self.visit(comp)
if close_brace:
self.attr(node, 'compexp_close', [self.ws, close_brace],
default=close_brace)
@expression
def visit_Name(self, node):
self.token(node.id)
@expression
def visit_NameConstant(self, node):
self.token(str(node.value))
@expression
def visit_Repr(self, node):
self.attr(node, 'repr_open', ['`', self.ws], default='`')
self.visit(node.value)
self.attr(node, 'repr_close', [self.ws, '`'], default='`')
@expression
def visit_Set(self, node):
self.attr(node, 'set_open', ['{', self.ws], default='{')
for i, elt in enumerate(node.elts):
self.visit(elt)
if elt is not node.elts[-1]:
self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ')
else:
self.optional_token(node, 'extracomma', ',',
allow_whitespace_prefix=True)
self.attr(node, 'set_close', [self.ws, '}'], default='}')
@expression
def visit_SetComp(self, node):
self._comp_exp(node, open_brace='{', close_brace='}')
@expression
def visit_Subscript(self, node):
self.visit(node.value)
self.attr(node, 'slice_open', [self.ws, '[', self.ws], default='[')
self.visit(node.slice)
self.attr(node, 'slice_close', [self.ws, ']'], default=']')
@expression
def visit_Tuple(self, node):
with self.scope(node, 'elts', default_parens=True):
for i, elt in enumerate(node.elts):
self.visit(elt)
if elt is not node.elts[-1]:
self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws],
default=', ')
else:
self.optional_token(node, 'extracomma', ',',
allow_whitespace_prefix=True,
default=len(node.elts) == 1)
@expression
def visit_UnaryOp(self, node):
op_symbol = ast_constants.NODE_TYPE_TO_TOKENS[type(node.op)][0]
self.attr(node, 'op', [op_symbol, self.ws], default=op_symbol, deps=('op',))
self.visit(node.operand)
# ============================================================================
# == OPERATORS AND TOKENS: Anything that's just whitespace and tokens ==
# ============================================================================
@space_around
def visit_Ellipsis(self, node):
self.token('...')
def visit_And(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_Or(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_Add(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_Sub(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_Mult(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_Div(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_MatMult(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_Mod(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_Pow(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_LShift(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_RShift(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_BitAnd(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_BitOr(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_BitXor(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_FloorDiv(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_Invert(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_Not(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_UAdd(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_USub(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_Eq(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_NotEq(self, node):
self.attr(node, 'operator', [self.one_of_symbols('!=', '<>')])
def visit_Lt(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_LtE(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_Gt(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_GtE(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_Is(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_IsNot(self, node):
self.attr(node, 'content', ['is', self.ws, 'not'], default='is not')
def visit_In(self, node):
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
def visit_NotIn(self, node):
self.attr(node, 'content', ['not', self.ws, 'in'], default='not in')
# ============================================================================
# == MISC NODES: Nodes which are neither statements nor expressions ==
# ============================================================================
def visit_alias(self, node):
name_pattern = []
parts = node.name.split('.')
for part in parts[:-1]:
name_pattern += [self.ws, part, self.ws, '.']
name_pattern += [self.ws, parts[-1]]
self.attr(node, 'name', name_pattern,
deps=('name',),
default=node.name)
if node.asname is not None:
self.attr(node, 'asname', [self.ws, 'as', self.ws], default=' as ')
self.token(node.asname)
@space_around
def visit_arg(self, node):
self.token(node.arg)
if node.annotation is not None:
self.attr(node, 'annotation_prefix', [self.ws, ':', self.ws],
default=': ')
self.visit(node.annotation)
@space_around
def visit_arguments(self, node):
# In Python 3, args appearing after *args must be kwargs
kwonlyargs = getattr(node, 'kwonlyargs', [])
kw_defaults = getattr(node, 'kw_defaults', [])
assert len(kwonlyargs) == len(kw_defaults)
total_args = sum((len(node.args + kwonlyargs),
len(getattr(node, 'posonlyargs', [])),
1 if node.vararg else 0,
1 if node.kwarg else 0))
arg_i = 0
pos_args = getattr(node, 'posonlyargs', []) + node.args
positional = pos_args[:-len(node.defaults)] if node.defaults else pos_args
keyword = node.args[-len(node.defaults):] if node.defaults else node.args
for arg in positional:
self.visit(arg)
arg_i += 1
if arg_i < total_args:
self.attr(node, 'comma_%d' % arg_i, [self.ws, ',', self.ws],
default=', ')
if arg_i == len(getattr(node, 'posonlyargs', [])):
self.attr(node, 'posonly_sep', [self.ws, '/', self.ws, ',', self.ws],
default='/, ')
for i, (arg, default) in enumerate(zip(keyword, node.defaults)):
self.visit(arg)
self.attr(node, 'default_%d' % i, [self.ws, '=', self.ws],
default='=')
self.visit(default)
arg_i += 1
if arg_i < total_args:
self.attr(node, 'comma_%d' % arg_i, [self.ws, ',', self.ws],
default=', ')
if node.vararg:
self.attr(node, 'vararg_prefix', [self.ws, '*', self.ws], default='*')
if isinstance(node.vararg, ast.AST):
self.visit(node.vararg)
else:
self.token(node.vararg)
self.attr(node, 'vararg_suffix', [self.ws])
arg_i += 1
if arg_i < total_args:
self.token(',')
elif kwonlyargs:
# If no vararg, but we have kwonlyargs, insert a naked *, which will
# definitely not be the last arg.
      self.attr(node, 'kwonly_sep', [self.ws, '*', self.ws, ',', self.ws])
for i, (arg, default) in enumerate(zip(kwonlyargs, kw_defaults)):
self.visit(arg)
if default is not None:
self.attr(node, 'kw_default_%d' % i, [self.ws, '=', self.ws],
default='=')
self.visit(default)
arg_i += 1
if arg_i < total_args:
self.attr(node, 'comma_%d' % arg_i, [self.ws, ',', self.ws],
default=', ')
if node.kwarg:
self.attr(node, 'kwarg_prefix', [self.ws, '**', self.ws], default='**')
if isinstance(node.kwarg, ast.AST):
self.visit(node.kwarg)
else:
self.token(node.kwarg)
self.attr(node, 'kwarg_suffix', [self.ws])
@space_around
def visit_comprehension(self, node):
if getattr(node, 'is_async', False):
self.attr(node, 'for', [self.ws, 'async', self.ws, 'for', self.ws],
default=' async for ')
else:
self.attr(node, 'for', [self.ws, 'for', self.ws], default=' for ')
self.visit(node.target)
self.attr(node, 'in', [self.ws, 'in', self.ws], default=' in ')
self.visit(node.iter)
for i, if_expr in enumerate(node.ifs):
self.attr(node, 'if_%d' % i, [self.ws, 'if', self.ws], default=' if ')
self.visit(if_expr)
@space_around
def visit_keyword(self, node):
if node.arg is None:
self.attr(node, 'stars', ['**', self.ws], default='**')
else:
self.token(node.arg)
self.attr(node, 'eq', [self.ws, '='], default='=')
self.visit(node.value)
@space_left
def visit_Index(self, node):
self.visit(node.value)
@space_left
def visit_ExtSlice(self, node):
for i, dim in enumerate(node.dims):
self.visit(dim)
if dim is not node.dims[-1]:
self.attr(node, 'dim_sep_%d' % i, [self.ws, ',', self.ws], default=', ')
self.optional_token(node, 'trailing_comma', ',', default=False)
@space_left
def visit_Slice(self, node):
if node.lower:
self.visit(node.lower)
self.attr(node, 'lowerspace', [self.ws, ':', self.ws], default=':')
if node.upper:
self.visit(node.upper)
self.attr(node, 'stepspace1', [self.ws])
self.optional_token(node, 'step_colon', ':')
self.attr(node, 'stepspace2', [self.ws])
if node.step and self.check_slice_includes_step(node):
self.optional_token(node, 'step_colon_2', ':', default=True)
node.step.is_explicit_step = True
self.visit(node.step)
def check_slice_includes_step(self, node):
"""Helper function for Slice node to determine whether to visit its step."""
# This is needed because of a bug in the 2.7 parser which treats
# a[::] as Slice(lower=None, upper=None, step=Name(id='None'))
# but also treats a[::None] exactly the same.
if not node.step:
return False
if getattr(node.step, 'is_explicit_step', False):
return True
return not (isinstance(node.step, ast.Name) and node.step.id == 'None')
@fstring_expression
def visit_FormattedValue(self, node):
self.visit(node.value)
if node.conversion != -1:
self.attr(node, 'conversion',
[self.ws, '!', chr(node.conversion)], deps=('conversion',),
default='!%c' % node.conversion)
if node.format_spec:
self.attr(node, 'format_spec_prefix', [self.ws, ':', self.ws],
default=':')
self.visit(node.format_spec)
class AnnotationError(Exception):
"""An exception for when we failed to annotate the tree."""
class AstAnnotator(BaseVisitor):
def __init__(self, source):
super(AstAnnotator, self).__init__()
self.tokens = token_generator.TokenGenerator(source)
def visit(self, node):
try:
fmt.set(node, 'indent', self._indent)
fmt.set(node, 'indent_diff', self._indent_diff)
fmt.set(node, 'start_line', self.tokens.peek().start[0])
fmt.set(node, 'start_col', self.tokens.peek().start[1])
super(AstAnnotator, self).visit(node)
fmt.set(node, 'end_line', self.tokens.peek().end[0])
fmt.set(node, 'end_col', self.tokens.peek().end[1])
except (TypeError, ValueError, IndexError, KeyError) as e:
raise AnnotationError(e)
def indented(self, node, children_attr):
"""Generator which annotates child nodes with their indentation level."""
children = getattr(node, children_attr)
cur_loc = self.tokens._loc
next_loc = self.tokens.peek_non_whitespace().start
# Special case: if the children are on the same line, then there is no
# indentation level to track.
if cur_loc[0] == next_loc[0]:
indent_diff = self._indent_diff
self._indent_diff = None
for child in children:
yield child
self._indent_diff = indent_diff
return
prev_indent = self._indent
prev_indent_diff = self._indent_diff
# Find the indent level of the first child
indent_token = self.tokens.peek_conditional(
lambda t: t.type == token_generator.TOKENS.INDENT)
new_indent = indent_token.src
new_diff = _get_indent_diff(prev_indent, new_indent)
if not new_diff:
new_diff = ' ' * 4 # Sensible default
print('Indent detection failed (line %d); inner indentation level is not '
'more than the outer indentation.' % cur_loc[0], file=sys.stderr)
# Set the indent level to the child's indent and iterate over the children
self._indent = new_indent
self._indent_diff = new_diff
for child in children:
yield child
# Store the suffix at this indentation level, which could be many lines
fmt.set(node, 'block_suffix_%s' % children_attr,
self.tokens.block_whitespace(self._indent))
# Dedent back to the previous level
self._indent = prev_indent
self._indent_diff = prev_indent_diff
@expression
def visit_Num(self, node):
"""Annotate a Num node with the exact number format."""
token_number_type = token_generator.TOKENS.NUMBER
contentargs = [lambda: self.tokens.next_of_type(token_number_type).src]
if self.tokens.peek().src == '-':
contentargs.insert(0, '-')
self.attr(node, 'content', contentargs, deps=('n',), default=str(node.n))
@expression
def visit_Str(self, node):
"""Annotate a Str node with the exact string format."""
self.attr(node, 'content', [self.tokens.str], deps=('s',), default=node.s)
@expression
def visit_JoinedStr(self, node):
"""Annotate a JoinedStr node with the fstr formatting metadata."""
fstr_iter = self.tokens.fstr()()
res = ''
values = (v for v in node.values if isinstance(v, ast.FormattedValue))
while True:
res_part, tg = next(fstr_iter)
res += res_part
if tg is None:
break
prev_tokens = self.tokens
self.tokens = tg
self.visit(next(values))
self.tokens = prev_tokens
self.attr(node, 'content', [lambda: res], default=res)
@expression
def visit_Bytes(self, node):
"""Annotate a Bytes node with the exact string format."""
self.attr(node, 'content', [self.tokens.str], deps=('s',), default=node.s)
@space_around
def visit_Ellipsis(self, node):
# Ellipsis is sometimes split into 3 tokens and other times a single token
# Account for both forms when parsing the input.
if self.tokens.peek().src == '...':
self.token('...')
else:
for i in range(3):
self.token('.')
def check_is_elif(self, node):
"""Return True iff the If node is an `elif` in the source."""
next_tok = self.tokens.next_name()
return isinstance(node, ast.If) and next_tok.src == 'elif'
def check_is_continued_try(self, node):
"""Return True iff the TryExcept node is a continued `try` in the source."""
return (isinstance(node, ast.TryExcept) and
self.tokens.peek_non_whitespace().src != 'try')
def check_is_continued_with(self, node):
"""Return True iff the With node is a continued `with` in the source."""
return isinstance(node, ast.With) and self.tokens.peek().src == ','
def check_slice_includes_step(self, node):
"""Helper function for Slice node to determine whether to visit its step."""
# This is needed because of a bug in the 2.7 parser which treats
# a[::] as Slice(lower=None, upper=None, step=Name(id='None'))
# but also treats a[::None] exactly the same.
return self.tokens.peek_non_whitespace().src not in '],'
def ws(self, max_lines=None, semicolon=False, comment=True):
"""Parse some whitespace from the source tokens and return it."""
next_token = self.tokens.peek()
if semicolon and next_token and next_token.src == ';':
result = self.tokens.whitespace() + self.token(';')
next_token = self.tokens.peek()
if next_token.type in (token_generator.TOKENS.NL,
token_generator.TOKENS.NEWLINE):
result += self.tokens.whitespace(max_lines=1)
return result
return self.tokens.whitespace(max_lines=max_lines, comment=comment)
def dots(self, num_dots):
"""Parse a number of dots."""
def _parse_dots():
return self.tokens.dots(num_dots)
return _parse_dots
def block_suffix(self, node, indent_level):
fmt.set(node, 'suffix', self.tokens.block_whitespace(indent_level))
def token(self, token_val):
"""Parse a single token with exactly the given value."""
token = self.tokens.next()
if token.src != token_val:
print(type(token.src), type(token_val))
raise AnnotationError("Expected %r but found %r\nline %d: %s" % (
token_val, token.src, token.start[0], token.line))
# If the token opens or closes a parentheses scope, keep track of it
if token.src in '({[':
self.tokens.hint_open()
elif token.src in ')}]':
self.tokens.hint_closed()
return token.src
def optional_token(self, node, attr_name, token_val,
allow_whitespace_prefix=False, default=False):
"""Try to parse a token and attach it to the node."""
del default
fmt.append(node, attr_name, '')
token = (self.tokens.peek_non_whitespace()
if allow_whitespace_prefix else self.tokens.peek())
if token and token.src == token_val:
parsed = ''
if allow_whitespace_prefix:
parsed += self.ws()
fmt.append(node, attr_name,
parsed + self.tokens.next().src + self.ws())
def one_of_symbols(self, *symbols):
"""Account for one of the given symbols."""
def _one_of_symbols():
next_token = self.tokens.next()
found = next((s for s in symbols if s == next_token.src), None)
if found is None:
raise AnnotationError(
'Expected one of: %r, but found: %r' % (symbols, next_token.src))
return found
return _one_of_symbols
def attr(self, node, attr_name, attr_vals, deps=None, default=None):
"""Parses some source and sets an attribute on the given node.
Stores some arbitrary formatting information on the node. This takes a list
attr_vals which tell what parts of the source to parse. The result of each
function is concatenated onto the formatting data, and strings in this list
are a shorthand to look for an exactly matching token.
For example:
self.attr(node, 'foo', ['(', self.ws, 'Hello, world!', self.ws, ')'],
deps=('s',), default=node.s)
is a rudimentary way to parse a parenthesized string. After running this,
the matching source code for this node will be stored in its formatting
dict under the key 'foo'. The result might be `(\n 'Hello, world!'\n)`.
This also keeps track of the current value of each of the dependencies.
In the above example, we would have looked for the string 'Hello, world!'
because that's the value of node.s, however, when we print this back, we
want to know if the value of node.s has changed since this time. If any of
the dependent values has changed, the default would be used instead.
Arguments:
node: (ast.AST) An AST node to attach formatting information to.
attr_name: (string) Name to store the formatting information under.
attr_vals: (list of functions/strings) Each item is either a function
that parses some source and return a string OR a string to match
exactly (as a token).
deps: (optional, set of strings) Attributes of the node which attr_vals
depends on.
default: (string) Unused here.
"""
del default # unused
if deps:
for dep in deps:
fmt.set(node, dep + '__src', getattr(node, dep, None))
attr_parts = []
for attr_val in attr_vals:
if isinstance(attr_val, six.string_types):
attr_parts.append(self.token(attr_val))
else:
attr_parts.append(attr_val())
fmt.set(node, attr_name, ''.join(attr_parts))
def scope(self, node, attr=None, trailing_comma=False, default_parens=False):
"""Return a context manager to handle a parenthesized scope.
Arguments:
node: (ast.AST) Node to store the scope prefix and suffix on.
attr: (string, optional) Attribute of the node contained in the scope, if
any. For example, as `None`, the scope would wrap the entire node, but
as 'bases', the scope might wrap only the bases of a class.
trailing_comma: (boolean) If True, allow a trailing comma at the end.
default_parens: (boolean) If True and no formatting information is
present, the scope would be assumed to be parenthesized.
"""
del default_parens
return self.tokens.scope(node, attr=attr, trailing_comma=trailing_comma)
def _optional_token(self, token_type, token_val):
token = self.tokens.peek()
if not token or token.type != token_type or token.src != token_val:
return ''
else:
self.tokens.next()
return token.src + self.ws()
def _get_indent_width(indent):
width = 0
for c in indent:
if c == ' ':
width += 1
elif c == '\t':
width += 8 - (width % 8)
return width
def _ltrim_indent(indent, remove_width):
width = 0
for i, c in enumerate(indent):
if width == remove_width:
break
if c == ' ':
width += 1
elif c == '\t':
if width + 8 - (width % 8) <= remove_width:
width += 8 - (width % 8)
else:
return ' ' * (width + 8 - remove_width) + indent[i + 1:]
return indent[i:]
def _get_indent_diff(outer, inner):
"""Computes the whitespace added to an indented block.
Finds the portion of an indent prefix that is added onto the outer indent. In
most cases, the inner indent starts with the outer indent, but this is not
necessarily true. For example, the outer block could be indented to four
spaces and its body indented with one tab (effectively 8 spaces).
Arguments:
outer: (string) Indentation of the outer block.
inner: (string) Indentation of the inner block.
Returns:
The string whitespace which is added to the indentation level when moving
from outer to inner.
"""
outer_w = _get_indent_width(outer)
inner_w = _get_indent_width(inner)
diff_w = inner_w - outer_w
if diff_w <= 0:
return None
return _ltrim_indent(inner, inner_w - diff_w)
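# Worked examples for _get_indent_diff (inputs chosen purely for illustration):
#
#     _get_indent_diff('    ', '\t')   # -> '    '  (the tab expands to width 8, so the
#                                      #             added width of 4 comes back as spaces)
#     _get_indent_diff('  ', '    ')   # -> '  '    (inner simply extends outer)
#     _get_indent_diff('    ', '  ')   # -> None    (inner is not deeper than outer)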
|
#!/usr/bin/python3
# Download the latest KaoriYa Vim from the GitHub release
import argparse
import calendar
import io
import json
import os
import sys
import time
import urllib.request, urllib.error
# Repository Name
repo_name = 'koron/vim-kaoriya'
gh_release_url = 'https://api.github.com/repos/' + repo_name + '/releases/latest'
# Parse arguments
parser = argparse.ArgumentParser(
description='Download the latest KaoriYa Vim from the GitHub release')
parser.add_argument('-f', '--force', action='store_true',
help='overwrite the download file')
parser.add_argument('-n', '--filename', type=str, action='store',
help='filename to save')
parser.add_argument('-a', '--arch', type=str, action='store',
choices=['all', 'win32', 'win64'], default='all',
help='architecture to download')
args = parser.parse_args()
if args.filename and args.arch == 'all':
parser.error('-a must be specified when you specify -n.')
# Get information of GitHub release
# see: https://developer.github.com/v3/repos/releases/
try:
response = urllib.request.urlopen(gh_release_url)
except urllib.error.HTTPError:
    print('GitHub release not found.', file=sys.stderr)
exit(1)
rel_info = json.load(io.StringIO(str(response.read(), 'utf-8')))
print('Last release:', rel_info['name'])
print('Created at:', rel_info['created_at'])
# Download the files
for asset in rel_info['assets']:
if args.filename:
name = args.filename
else:
name = asset['name']
if args.arch != 'all' and asset['name'].find(args.arch) < 0:
continue
if os.path.isfile(name) and not args.force:
print('File exists:', name)
continue
print('Downloading to:', name)
urllib.request.urlretrieve(asset['browser_download_url'], name)
# Set timestamp
asset_time = time.strptime(asset['updated_at'], '%Y-%m-%dT%H:%M:%SZ')
os.utime(name, times=(time.time(), calendar.timegm(asset_time)))
|
from flask import Flask, render_template, request, jsonify
import pymongo
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
'''
Objective:
- Create a UI to connect to cloud DBs (MongoDb and Cassandra)
- Create APIs to do CRUD operations to these DBs
'''
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST']) # To render Homepage
def home_page():
return render_template('index.html')
@app.route('/do_operation', methods=['POST'])
def do_operation():
if (request.method=='POST'):
operation=request.form['operation']
db_type=request.form['db_type']
username=request.form['username']
password=request.form['password']
table_name=request.form['table_name']
column_names=request.form['column_names']
column_datatypes=request.form['column_datatypes']
column_values=request.form['column_values']
file_location=request.form['file_location']
download_table=request.form['download_table']
l1=column_names.split(',')
l2=column_datatypes.split(',')
l3=column_values.split(',')
if (operation=='CreateTable'):
l4=""
for i in range(len(l1)):
l4=l4+" "+str(l1[i])+" "+l2[i]+","
#create DB table
try:
if (db_type=='MongoDB'):
#create connection
conn="mongodb+srv://"+username+":"+password+"@cluster0.93mth.mongodb.net/myFirstDatabase?retryWrites=true&w=majority"
client = pymongo.MongoClient(conn)
#create table/collection inside 'myDB'
db_cloud=client['myDB']
coll1=db_cloud[table_name]
result = "Table "+table_name+" in "+db_type+" created cuccessfully!"
return render_template('results.html',result=result)
elif(db_type=='Cassandra'):
#cassandra code
cloud_config= {'secure_connect_bundle': '/home/adi01/01_Code/Ineuron_Course/Databases/secure-connect-testdb.zip'}
auth_provider = PlainTextAuthProvider(username, password)
cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider)
session = cluster.connect()
#create Keyspace
row=session.execute("CREATE KEYSPACE home WITH replication={'class':'SimpleStrategy','replication_factor':4};")
#use keyspace
row=session.execute("use home;")
#creating table in the keyspace
query="CREATE TABLE "+table_name+" ("+l3+");"
row=session.execute(query)
result = "Table "+table_name+" in "+db_type+" created successfully!"
return render_template('results.html',result=result)
except Exception as e:
print(str(e))
elif(operation=='Update'):
record={}
if (db_type=='MongoDB'):
for i in range(len(l1)):
record[l1[i]]=l3[i]
#create connection
conn="mongodb+srv://"+username+":"+password+"@cluster0.93mth.mongodb.net/myFirstDatabase?retryWrites=true&w=majority"
client = pymongo.MongoClient(conn)
#create table/collection inside 'myDB'
db_cloud=client['myDB']
coll1=db_cloud[table_name]
#coll1.updateOne({
# {
# $set: record
# }
#})
result = "Table "+table_name+" in "+db_type+" updated successfully!"
return render_template('results.html',result=result)
elif(db_type=='Cassandra'):
#cassandra code
cloud_config= {'secure_connect_bundle': '<file_location>'}
auth_provider = PlainTextAuthProvider(username, password)
cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider)
session = cluster.connect()
#create Keyspace
row=session.execute("CREATE KEYSPACE home WITH replication={'class':'SimpleStrategy','replication_factor':4};")
#use keyspace
row=session.execute("use home;")
                #creating table in the keyspace (mirrors the CreateTable path)
                cols=",".join(str(l1[i])+" "+l2[i] for i in range(len(l1)))
                query="CREATE TABLE "+table_name+" ("+cols+");"
                row=session.execute(query)
                result = "Table "+table_name+" in "+db_type+" updated successfully!"
return render_template('results.html',result=result)
elif(operation=='Insert'):
if (db_type=='MongoDB'):
                record={}
                for i in range(len(l1)):
                    record[l1[i]]=l3[i]
                #create connection
                conn="mongodb+srv://"+username+":"+password+"@cluster0.93mth.mongodb.net/myFirstDatabase?retryWrites=true&w=majority"
                client = pymongo.MongoClient(conn)
                #get table/collection inside 'myDB'
                db_cloud=client['myDB']
                coll1=db_cloud[table_name]
coll1.insert_one(record)
result = "Table "+table_name+" in "+db_type+" inserted successfully!"
return render_template('results.html',result=result)
elif(db_type=='Cassandra'):
result = "Table "+table_name+" in "+db_type+" inserted successfully!"
return render_template('results.html',result=result)
elif(operation=='Delete'):
if (db_type=='MongoDB'):
                record={}
                for i in range(len(l1)):
                    record[l1[i]]=l3[i]
                #create connection
                conn="mongodb+srv://"+username+":"+password+"@cluster0.93mth.mongodb.net/myFirstDatabase?retryWrites=true&w=majority"
                client = pymongo.MongoClient(conn)
                #get table/collection inside 'myDB'
                db_cloud=client['myDB']
                coll1=db_cloud[table_name]
coll1.delete_one(record)
result = "Table "+table_name+" in "+db_type+" deleted successfully!"
return render_template('results.html',result=result)
elif(db_type=='Cassandra'):
result = "Table "+table_name+" in "+db_type+" deleted successfully!"
return render_template('results.html',result=result)
elif(operation=='BulkInsert'):
if (db_type=='MongoDB'):
result = "Table "+table_name+" in "+db_type+" bulkInserted successfully!"
return render_template('results.html',result=result)
elif(db_type=='Cassandra'):
result = "Table "+table_name+" in "+db_type+" bulkinserted successfully!"
return render_template('results.html',result=result)
elif(operation=='Download'):
if (db_type=='MongoDB'):
result = "Table "+table_name+" in "+db_type+" downloaded successfully!"
return render_template('results.html',result=result)
elif(db_type=='Cassandra'):
result = "Table "+table_name+" in "+db_type+" downloaded successfully!"
return render_template('results.html',result=result)
if __name__ == '__main__':
app.run()
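# Example request against the /do_operation endpoint (a minimal sketch; the host,
# port and field values are assumptions for illustration, not part of this app):
#
#   import requests
#   requests.post("http://localhost:5000/do_operation", data={
#       "operation": "CreateTable", "db_type": "MongoDB",
#       "username": "user", "password": "pass",
#       "table_name": "employees",
#       "column_names": "id,name", "column_datatypes": "int,text",
#       "column_values": "", "file_location": "", "download_table": "",
#   })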
|
# -*- python -*-
"""@file
@brief I2C-serial transport for pato
Copyright (c) 2014-2015 Dimitry Kloper <kloper@users.sf.net>.
All rights reserved.
@page License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation
are those of the authors and should not be interpreted as representing
official policies, either expressed or implied, of the Pato Project.
"""
from __future__ import absolute_import
import serial
import time
from util.protocol import ProtocolException
from bridge import Bridge as BridgeProtocol
from bridge.protocol import Cmd
class Bridge(object):
"""
@brief Communication transport using Pato's UART-TWI bridge
A simple transport that allows python code running on PC to talk
with Pato via UART, while Pato is compiled with TWI (I2C) interface
only.
This requires python pyserial package to be installed.
    The main purpose of this transport is automated end-to-end (E2E) testing of various
features.
"""
def __init__(self, slave_addr, timeout=0.3, serial_timeout=10,
*args, **kwargs):
"""
@brief Constructor
@param[in] slave_addr TWI address of target Pato device
@param[in] timeout timeout value in time.clock() units for
the query sequence to be completed.
@param[in] serial_timeout timeout value for pyserial communication.
@param[in] args extra arguments for pyserial
@param[in] kwargs extra keyword arguments for pyserial
"""
self.serial = serial.Serial(timeout=serial_timeout, *args, **kwargs)
self.slave_addr = slave_addr
self.bridge = BridgeProtocol(self.serial)
self.timeout = timeout
def query(self, request):
"""
@brief Generic query (request/reply) method for UART-TWI bridge.
Schedule TWI Write of request packet supplied as a parameter. The write
is performed without stop condition. Then, TWI Read for a
single reply packet is scheduled.
        If send and/or receive return an unexpected result, the function retries
        the send/receive pair until the timeout specified in the constructor is
        reached. If send and receive did not succeed within the timeout period,
        @ref ProtocolException is thrown.
@param[in] request regular list of bytes representing packet to be sent
via the bridge.
@returns Received reply packet
@throws ProtocolException upon send or receive timeout
"""
now = time.monotonic()
elapsed = now
while elapsed - now < self.timeout:
(send_status, send_remaining) = \
self.bridge.execute(Cmd.TWI_MASTER_SEND,
self.slave_addr,
request,
0)
(recv_status, recv_remaining, reply) = \
self.bridge.execute(
Cmd.TWI_MASTER_RECV,
self.slave_addr,
5, 1, 1)
elapsed = time.monotonic()
if send_remaining + recv_remaining != 0:
print("send_remaining: {} status {:02x}".\
format(send_remaining, send_status))
print("recv_remaining: {} status {:02x}".\
format(recv_remaining, recv_status))
continue
return reply
raise ProtocolException("Failed to query")
def close(self):
"""
@brief Close serial line to bridge
"""
self.serial.close()
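# Minimal usage sketch (assumed values: the serial port, baud rate and TWI slave
# address depend on the actual hardware setup and are not defined in this module):
#
#   bridge = Bridge(slave_addr=0x20, port='/dev/ttyUSB0', baudrate=9600)
#   try:
#       reply = bridge.query([0x01])  # request packet as a plain list of bytes
#   finally:
#       bridge.close()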
|
"""
Tests gen3.file.Gen3File for calls
"""
from unittest.mock import patch
import json
import pytest
from requests import HTTPError
NO_UPLOAD_ACCESS_MESSAGE = """
You do not have access to upload data.
You either need general file uploader permissions or
create and write-storage permissions on the
authz resources you specified (if you specified any).
"""
def test_get_presigned_url(gen3_file, supported_protocol):
"""
Get a presigned url for a file
:param gen3.file.Gen3File gen3_file:
Gen3File object
:param str supported_protocol:
a protocol from ["s3", "http", "ftp", "https", "gs", "az"]
"""
gen3_file._auth_provider._refresh_token = {"api_key": "123"}
sample_presigned_url = "https://fakecontainer/some/path/file.txt?k=v"
with patch("gen3.file.requests") as mock_request:
mock_request.status_code = 200
mock_request.get().text = json.dumps({"url": sample_presigned_url})
res = gen3_file.get_presigned_url(guid="123", protocol=supported_protocol)
assert "url" in res
assert res["url"] == sample_presigned_url
def test_get_presigned_url_no_refresh_token(gen3_file, supported_protocol):
"""
Get a presigned url for a file without a refresh token, which should raise an HTTPError
:param gen3.file.Gen3File gen3_file:
Gen3File object
:param str supported_protocol:
a protocol from ["s3", "http", "ftp", "https", "gs", "az"]
"""
gen3_file._auth_provider._refresh_token = None
with patch("gen3.file.requests.get", side_effect=HTTPError):
with pytest.raises(HTTPError):
res = gen3_file.get_presigned_url(guid="123", protocol=supported_protocol)
assert res == "Failed"
def test_get_presigned_url_no_api_key(gen3_file, supported_protocol):
"""
Get a presigned url for a file without an api_key
in the refresh token, which should return a 401
:param gen3.file.Gen3File gen3_file:
Gen3File object
:param str supported_protocol:
a protocol from ["s3", "http", "ftp", "https", "gs", "az"]
"""
gen3_file._auth_provider._refresh_token = {"not_api_key": "123"}
with patch("gen3.file.requests") as mock_request:
mock_request.status_code = 401
mock_request.get().text = "Failed"
res = gen3_file.get_presigned_url(guid="123", protocol=supported_protocol)
assert res == "Failed"
def test_get_presigned_url_wrong_api_key(gen3_file, supported_protocol):
"""
Get a presigned url for a file with the wrong value for the api_key
in the refresh token, which should return a 401
:param gen3.file.Gen3File gen3_file:
Gen3File object
:param str supported_protocol:
a protocol from ["s3", "http", "ftp", "https", "gs", "az"]
"""
gen3_file._auth_provider._refresh_token = {"api_key": "wrong_value"}
with patch("gen3.file.requests") as mock_request:
mock_request.status_code = 401
mock_request.get().text = "Failed"
res = gen3_file.get_presigned_url(guid="123", protocol=supported_protocol)
assert res == "Failed"
@pytest.mark.parametrize(
"guid,status_code,response_text,expected_response",
[
("123", 204, "", ""),
(None, 500, "Failed to delete data file.", "Failed to delete data file."),
],
)
def test_delete_file(gen3_file, guid, status_code, response_text, expected_response):
"""
Delete files for a Gen3File using a guid
:param gen3.file.Gen3File gen3_file:
Gen3File object
:param str guid:
file id to use for delete
:param int status_code:
mock status code
:param str response_text:
mock response text
:param str expected_response:
expected response to compare with mock
"""
with patch("gen3.file.requests") as mock_request:
mock_request.status_code = status_code
mock_request.delete().text = response_text
res = gen3_file.delete_file(guid=guid)
assert res == expected_response
def test_delete_file_no_refresh_token(gen3_file):
"""
Delete files for a Gen3File without a refresh token, which should raise an HTTPError
:param gen3.file.Gen3File gen3_file:
Gen3File object
"""
gen3_file._auth_provider._refresh_token = None
with patch("gen3.file.requests.delete", side_effect=HTTPError):
with pytest.raises(HTTPError):
res = gen3_file.delete_file(guid="123")
assert res == "Failed to delete data file."
def test_delete_file_no_api_key(gen3_file):
"""
Delete files for a Gen3File without an api_key in the refresh token, which should return a 401
:param gen3.file.Gen3File gen3_file:
Gen3File object
"""
gen3_file._auth_provider._refresh_token = {"not_api_key": "123"}
with patch("gen3.file.requests") as mock_request:
mock_request.status_code = 401
mock_request.delete().text = "Failed to delete data file."
res = gen3_file.delete_file(guid="123")
assert res == "Failed to delete data file."
def test_delete_file_wrong_api_key(gen3_file):
"""
Delete files for a Gen3File with the wrong value for the api_key
in the refresh token, which should return a 401
:param gen3.file.Gen3File gen3_file:
Gen3File object
"""
gen3_file._auth_provider._refresh_token = {"api_key": "wrong_value"}
with patch("gen3.file.requests") as mock_request:
mock_request.status_code = 401
mock_request.delete().text = "Failed to delete data file."
res = gen3_file.delete_file(guid="123")
assert res == "Failed to delete data file."
@pytest.mark.parametrize(
"supported_protocol",
["s3", "az"],
indirect=True,
)
@pytest.mark.parametrize(
"authz,expires_in,status_code,response_text,expected_response",
[
(
None,
200,
201,
'{ "url": "https://fakecontainer/some/path/file.txt" }',
{"url": "https://fakecontainer/some/path/file.txt"},
),
(
["/programs"],
200,
201,
'{ "url": "https://fakecontainer/some/path/file.txt" }',
{"url": "https://fakecontainer/some/path/file.txt"},
),
(
["/programs"],
0,
201,
'{ "url": "https://fakecontainer/some/path/file.txt" }',
{"url": "https://fakecontainer/some/path/file.txt"},
),
(
"[/programs]",
200,
400,
NO_UPLOAD_ACCESS_MESSAGE,
"You do not have access to upload data.",
),
(
"[/programs]",
-200,
400,
"Requested expiry must be a positive integer; instead got -200",
"Requested expiry must be a positive integer; instead got",
),
(
"[]",
200,
403,
NO_UPLOAD_ACCESS_MESSAGE,
"You do not have access to upload data.",
),
],
)
def test_upload_file(
gen3_file,
supported_protocol,
authz,
expires_in,
status_code,
response_text,
expected_response,
):
"""
Upload files for a Gen3File given a protocol, authz, and expires_in
:param gen3.file.Gen3File gen3_file:
Gen3File object
:param str supported_protocol:
a protocol from ["s3", "http", "ftp", "https", "gs", "az"]
:param [] authz:
Authz list, for example [] or ['/programs']
:param int expires_in:
The signed URL should expire_in seconds from datetime.utcnow(),
this should be a positive int
:param int status_code:
mock status code
:param str response_text:
mock response text
:param str expected_response:
expected response to compare with mock
"""
with patch("gen3.file.requests") as mock_request:
mock_request.status_code = status_code
mock_request.post().text = response_text
res = gen3_file.upload_file(
file_name="file.txt",
authz=authz,
protocol=supported_protocol,
expires_in=expires_in,
)
if status_code == 201:
            # check that the SDK returns the presigned URL from the mocked fence response
assert res.get("url") == expected_response["url"]
else:
# check the error message
assert expected_response in res
@pytest.mark.parametrize(
"supported_protocol",
["s3", "az"],
indirect=True,
)
@pytest.mark.parametrize(
"authz,expires_in",
[
(None, 200),
(["/programs"], 200),
(["/programs"], 0),
("[/programs]", 200),
("[/programs]", -200),
("[]", 200),
],
)
def test_upload_file_no_refresh_token(gen3_file, supported_protocol, authz, expires_in):
"""
Upload files for a Gen3File given a protocol, authz, and expires_in
without a refresh token, which should raise an HTTPError
:param gen3.file.Gen3File gen3_file:
Gen3File object
:param str supported_protocol:
a protocol from ["s3", "http", "ftp", "https", "gs", "az"]
:param [] authz:
Authz list, for example [] or ['/programs']
:param int expires_in:
The signed URL should expire_in seconds from datetime.utcnow(),
this should be a positive int
"""
gen3_file._auth_provider._refresh_token = None
with patch("gen3.file.requests.post", side_effect=HTTPError):
with pytest.raises(HTTPError):
gen3_file.upload_file(
file_name="file.txt",
authz=authz,
protocol=supported_protocol,
expires_in=expires_in,
)
@pytest.mark.parametrize(
"supported_protocol",
["s3", "az"],
indirect=True,
)
@pytest.mark.parametrize(
"authz,expires_in",
[
(None, 200),
(["/programs"], 200),
(["/programs"], 0),
("[/programs]", 200),
("[/programs]", -200),
("[]", 200),
],
)
def test_upload_file_no_api_key(gen3_file, supported_protocol, authz, expires_in):
"""
Upload files for a Gen3File given a protocol, authz, and expires_in
without an api_key in the refresh token, which should return a 401
:param gen3.file.Gen3File gen3_file:
Gen3File object
:param str supported_protocol:
a protocol from ["s3", "http", "ftp", "https", "gs", "az"]
:param [] authz:
Authz list, for example [] or ['/programs']
:param int expires_in:
The signed URL should expire_in seconds from datetime.utcnow(),
this should be a positive int
"""
gen3_file._auth_provider._refresh_token = {"not_api_key": "123"}
with patch("gen3.file.requests") as mock_request:
mock_request.status_code = 401
mock_request.post().text = "Failed to upload data file."
res = gen3_file.upload_file(
file_name="file.txt",
authz=authz,
protocol=supported_protocol,
expires_in=expires_in,
)
assert res == "Failed to upload data file."
@pytest.mark.parametrize(
"supported_protocol",
["s3", "az"],
indirect=True,
)
@pytest.mark.parametrize(
"authz,expires_in",
[
(None, 200),
(["/programs"], 200),
(["/programs"], 0),
("[/programs]", 200),
("[/programs]", -200),
("[]", 200),
],
)
def test_upload_file_wrong_api_key(gen3_file, supported_protocol, authz, expires_in):
"""
Upload files for a Gen3File given a protocol, authz, and expires_in
with the wrong value for the api_key in the refresh token, which should return a 401
:param gen3.file.Gen3File gen3_file:
Gen3File object
:param str supported_protocol:
a protocol from ["s3", "http", "ftp", "https", "gs", "az"]
:param [] authz:
Authz list, for example [] or ['/programs']
:param int expires_in:
The signed URL should expire_in seconds from datetime.utcnow(),
this should be a positive int
"""
gen3_file._auth_provider._refresh_token = {"api_key": "wrong_value"}
with patch("gen3.file.requests") as mock_request:
mock_request.status_code = 401
mock_request.post().text = "Failed to upload data file."
res = gen3_file.upload_file(
file_name="file.txt",
authz=authz,
protocol=supported_protocol,
expires_in=expires_in,
)
assert res == "Failed to upload data file."
|
"""urbanpiper URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from delivery.views import (CurUserTaskView, DvrCurrentTaskView,
ModCurrentTaskView, ModifyTaskView,
PreviousTaskView, TaskActivityView, TaskHtmlView,
UpdateTaskView)
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^raise_task/$', TaskHtmlView.as_view(), name='raise_task'),
url(r'^get_tasks/$', TaskActivityView.as_view(), name='get_tasks'),
url(r'^modify_task/(?P<task_uuid>[0-9a-z-]+)/$',
ModifyTaskView.as_view(), name='modify_task'),
url(r'^update_task/(?P<task_uuid>[0-9a-z-]+)/$',
ModifyTaskView.as_view(), name='update_task'),
url(r'^get_cur_task/$', DvrCurrentTaskView.as_view(), name='get_cur_task'),
url(r'^view_task/(?P<task_uuid>[0-9a-z-]+)/$',
ModCurrentTaskView.as_view(), name='view_task'),
url(r'^edit_task/(?P<task_uuid>[0-9a-z-]+)/$',
ModCurrentTaskView.as_view(), name='edit_task'),
url(r'^previous_task/$', PreviousTaskView.as_view(), name='previous_task'),
url(r'^cur_user_task/$', CurUserTaskView.as_view(), name='cur_user_task'),
url(r'^update_dvr_task/(?P<task_act_uuid>[0-9a-z-]+)/$',
UpdateTaskView.as_view(), name='update_dvr_task'),
url(r'^login/$', auth_views.login, {'template_name': 'login.html'},
name='login'),
url(r'^logout/$', auth_views.logout, {'template_name': 'logged_out.html'},
name='logout'),
]
|
#!/usr/bin/env python3
import sys, subprocess, time, os, getopt, copy, signal
from datetime import datetime
from ast import literal_eval
from ctypes import *
import numpy as np
# =================================================================================
reproduceFile = ""
memInfoFile = ""
TaskReproduce = 0
LoadMemInfoFile = 0
opts = []
args = []
try:
opts, args = getopt.getopt(sys.argv[1:], "hm:", ["help", "memInfoFile="])
except:
pass
print("opts:", opts)
print("args:", args)
for opt, arg in opts:
if opt in ("-h", "--help"):
print('./run_DBDS.py -m <memInfoFile>')
sys.exit()
elif opt in ("-m", "--memInfoFile"):
memInfoFile = arg
print("memInfoFile:", memInfoFile)
# load npz data for memory Info
pureSoList = []
memGroupList = []
memPairList = []
if not os.path.exists(memInfoFile):
print("memInfoFile " + memInfoFile + " not exists.")
sys.exit()
NPZ = np.load(memInfoFile,allow_pickle=True)
pureSoList = NPZ["arr_0"].tolist()
memGroupList = NPZ["arr_1"].tolist()
memPairList = NPZ["arr_2"].tolist()
print("\033[34mpureSoList: \033[0m")
for each in pureSoList:
print(each)
print()
print("\033[34mmemGroupList: \033[0m")
for each in memGroupList:
print(each)
print()
print("\033[34mmemPairList: \033[0m")
for each in memPairList:
print(each)
print()
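# Note on the expected input: the .npz is read positionally (arr_0/arr_1/arr_2),
# so it can be produced with positional np.savez arguments, e.g. (a sketch, the
# file name is an assumption):
#
#   np.savez("memInfo.npz", pureSoList, memGroupList, memPairList)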
|
#!/usr/bin/env python
# a stacked bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
import sys
import math
import collections
from matplotlib.backends.backend_pdf import PdfPages
if __name__ == '__main__':
'''
Need an argument with the data file where each row has the format
"'benchmark': CLASS: EXEC_TIME"
e.g.
plot_indbar.py file.data n "x label" "y label" A B C D
'BT' : A: 0.1
'CG' : A: 0.2
    Comments: C++-style inline comments (//) may be used
'''
args = sys.argv
USAGE = '''USAGE: python plot_indbar.py <data file> op xlabel ylabel Class [Class..].
    op = operation to perform: "n" (normal), "l2" (log2), "l10" (log10)
xlabel = label to show in the x axis
ylabel = label to show in the y axis
data file = has to have a pair of rows for each benchmark
e.g. python plot_indbar.py file.data l x y A B C D, will show the values in log scale'''
if len(args) < 6:
print USAGE
sys.exit(-1)
dataFile = args[1]
op = args[2]
x_label = args[3]
y_label = args[4]
classes = args[5:]
if op not in ["n", "l2", "l10"]:
print USAGE
sys.exit(-1)
print "Processing dataFile: " + dataFile + ", classes: " + str(classes)
benchmarks = collections.OrderedDict()
benchmarks_array = list()
runtimes_array = list()
# reading the data file to build the dict
data = open(dataFile, "r")
line = data.readline().strip()
while len(line) > 0:
        # remove comments (only when a comment marker is actually present)
        comment_pos = line.find("//")
        if comment_pos != -1:
            line = line[0:comment_pos]
split_line = line.split(":")
tmp = split_line[0].strip('\'')
tmp_split = tmp.split(";")
b_name = tmp_split[0]
b_runtime = tmp_split[1] if len(tmp_split) == 2 else None
b_class = split_line[1].strip()
tmp = split_line[2].strip()
b_exectime = float(tmp if len(tmp) > 0 else -1)
if b_class in classes:
if b_name not in benchmarks:
# creating benchmark info
# dict for benchmark data
benchmarks_array.append(b_name)
tmp_dict = dict()
for key in classes:
tmp_dict[key] = dict()
benchmarks[b_name] = tmp_dict
benchmark_info = benchmarks[b_name]
if b_runtime not in runtimes_array:
runtimes_array.append(b_runtime)
if b_runtime != None:
benchmark_info[b_class][b_runtime] = b_exectime
line = data.readline().strip()
# number of columns in the plot
N = len(benchmarks)
print "Benchmarks considered: " + str(benchmarks.keys())
#print "Data matrix [number of benchmarks, number of classes] = [" + str(N) + ", " + str(len(classes)) + "]"
dataInfo = "Data matrix [# classes, # runtimes, # benchmarks] = [" + str(len(classes)) + ", " + str(len(runtimes_array)) + ", " + str(N) + "]"
print dataInfo
print str(benchmarks)
# data
data = np.zeros((len(classes), len(runtimes_array), N))
for bnc_pos in range(len(benchmarks_array)):
for class_pos in range(len(classes)):
b_name = benchmarks_array[bnc_pos]
b_class = classes[class_pos]
for run_pos in range(len(runtimes_array)):
b_runtime = runtimes_array[run_pos]
# executing the operation
x1 = benchmarks[b_name][b_class][b_runtime]
res = 0
if x1 != None:
# perform op
if op == "n":
res = x1
elif op == "l2":
res = math.log(x1)/math.log(2)
elif op == "l10":
res = math.log(x1)/math.log(10)
else:
print "ERROR: op not defined!: " + op
sys.exit(-1)
data[class_pos][run_pos][bnc_pos] = res
# saving processed data
print str(data)
proFile = open(dataFile + ".op", "w")
proFile.write("Benchmarks considered: " + str(benchmarks.keys()) + "\n")
proFile.write("Classes: " + str(classes) + "\n")
proFile.write("op: " + op + "\n")
proFile.write(str(benchmarks) + "\n")
proFile.write(dataInfo + "\n")
proFile.write(str(data))
proFile.close()
num_columns = N #len(classes)
plt.grid(True, lw=0.5, c='.5')
ind = np.arange(num_columns) # the x locations for the groups
width = 0.1
#colors = ['b', 'g', 'r', 'c', 'k', 'y', 'm', 'tan', 'darkorange']
hatches = ['///', '\\\\\\', '|||', '---', '+++', 'xxx', 'ooo', 'OOO', '...', '***']
plus_width = 0
axs = []
class_pos = 0
for run_pos in range(len(runtimes_array)):
x = ind + plus_width
axs.append(plt.bar(x, data[class_pos][run_pos], width, fill=True, color='w', edgecolor='k', hatch=hatches[run_pos]))
#axs.append(plt.bar(x, data[bnc_pos], width, color=colors[bnc_pos]))
plus_width += width
print "Max: " + str(data.max())
if op != "n":
maxY = data.max()
indY = np.arange(start=0, stop=maxY+0.5, step=0.5)
indYn = [""] * len(indY)
for i in range(int(math.ceil(maxY))):
indYn[2*i] = str(i)
plt.yticks(indY, indYn)
plt.xticks(ind + plus_width/2, benchmarks_array)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.minorticks_on()
plt.legend(axs, runtimes_array, loc='upper right', framealpha=1.0)
#plt.legend(axs, benchmarks_array, loc=2, framealpha=0.5, borderaxespad=0., bbox_to_anchor=(1.05, 1))
fig = plt.figure(1)
plt.show()
ax = plt.gca()
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.relim()
ax.autoscale()
try:
# Initialize:
pp = PdfPages(dataFile + ".pdf")
pp.savefig(fig)
plt.close()
pp.close()
except IOError, e:
print e
|
"""
Generating data from the CarRacing gym environment.
!!! DOES NOT WORK ON TITANIC, DO IT AT HOME, THEN SCP !!!
"""
import argparse
from os.path import join, exists
import gym
import numpy as np
from utils.misc import sample_continuous_policy
def generate_data(rollouts, data_dir, noise_type): # pylint: disable=R0914
""" Generates data """
assert exists(data_dir), "The data directory does not exist..."
env = gym.make("CarRacing-v0")
seq_len = 1000
for i in range(rollouts):
env.reset()
env.env.viewer.window.dispatch_events()
if noise_type == 'white':
a_rollout = [env.action_space.sample() for _ in range(seq_len)]
elif noise_type == 'brown':
a_rollout = sample_continuous_policy(env.action_space, seq_len, 1. / 50)
s_rollout = []
r_rollout = []
d_rollout = []
t = 0
while True:
action = a_rollout[t]
t += 1
# The CarRacing-v0 environment has a step limit of 1000, this can be seen in env.spec.max_episode_steps
s, r, done, _ = env.step(action)
env.env.viewer.window.dispatch_events()
s_rollout += [s]
r_rollout += [r]
d_rollout += [done]
if done:
# Because these are random policies, most of them will not be done before the step limit of 1000
print("> End of rollout {}, {} frames...".format(i, len(s_rollout)))
np.savez(join(data_dir, 'rollout_{}'.format(i)),
observations=np.array(s_rollout),
rewards=np.array(r_rollout),
actions=np.array(a_rollout),
terminals=np.array(d_rollout))
break
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--rollouts', type=int, help="Number of rollouts")
parser.add_argument('--dir', type=str, help="Where to place rollouts")
parser.add_argument('--policy', type=str, choices=['white', 'brown'],
help='Noise type used for action sampling.',
default='brown')
args = parser.parse_args()
generate_data(args.rollouts, args.dir, args.policy)
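# Example invocation (a sketch; the script name and output directory are
# assumptions, and the directory must already exist since generate_data asserts on it):
#
#   python generation_script.py --rollouts 1000 --dir datasets/carracing --policy brown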
|
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.ext.hybrid import hybrid_property
from logic_bank.rule_type.abstractrule import AbstractRule
class Derivation(AbstractRule):
def __init__(self, derive: InstrumentedAttribute):
# names = derive.split('.')
if not isinstance(derive, InstrumentedAttribute) and \
not isinstance(derive.descriptor, hybrid_property):
raise Exception("'derive' attribute not a class.attribute: " + str(derive))
super(Derivation, self).__init__(derive.class_)
self._column = derive.key
self._derive = derive
def __str__(self):
return f'Derive {self.table}.{self._column} as '
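# Usage sketch (hypothetical: `Customer.balance` stands for any SQLAlchemy
# InstrumentedAttribute; it is not defined in this module):
#
#   rule = Derivation(Customer.balance)
#   str(rule)   # -> something like "Derive customer.balance as "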
|
from django.contrib.auth.models import Group, User
from django.db import models
class Draft(models.Model):
title = models.CharField(max_length=100)
text = models.TextField()
owner = models.ForeignKey(User, on_delete=models.CASCADE)
|
from tkinter import *
from tkinter.ttk import Style
import cv2
from PokikiAPI import Pokiki
from PIL import Image, ImageTk
from tkinter import filedialog as fd
from threading import Thread
root = Tk()
root.title("Pokiki - Mosaic Video Maker")
root.resizable(False, False)
style = Style(root)
style.theme_use('clam')
# Change this to 0 if your camera doesn't open
capture = cv2.VideoCapture(0)
CANVAS_W, CANVAS_H = 1024, 576
CANVAS_H_W_RATIO = CANVAS_H / CANVAS_W
########## UI VARIABLES ############
# Tkinter functions work on the main thread
split_x = IntVar(root, value=30)
split_y = IntVar(root, value=20)
# Atomic vars to share data between threads
atom_split_x = split_x.get()
atom_split_y = split_y.get()
atom_image = None
resize_canvas = False
def set_split_x(x):
global atom_split_x
split_x.set(x)
atom_split_x = int(x)
def set_split_y(y):
global atom_split_y
split_y.set(y)
atom_split_y = int(y)
def open_file():
global capture, resize_canvas
filename: str = fd.askopenfilename()
# List of video file extensions
valid_extensions = ["mp4", "mov", "ogg", "wmv", "avi", "flv", "gif"]
if filename:
# Get opened file extension
ext = filename.split(".").pop()
if ext.lower() in valid_extensions:
capture = cv2.VideoCapture(filename)
resize_canvas = True
def open_camera():
global capture, resize_canvas
capture = cv2.VideoCapture(0)
resize_canvas = True
if __name__ == '__main__':
########## INSTANCES ############
pokiki = Pokiki()
########## UI SETUP ############
canvas = Canvas(root, width=CANVAS_W, height=CANVAS_H)
canvas.pack()
options_frame = LabelFrame(root, text="Options")
options_frame.pack(fill="both", expand="yes")
controls_frame = Frame(options_frame)
controls_frame.pack(side=LEFT)
label = Label(controls_frame, text="Split X")
x_scale = Scale(controls_frame, from_=5, to=250, orient=HORIZONTAL,
length=250, resolution=5, command=set_split_x, var=split_x)
label.pack()
x_scale.pack()
label = Label(controls_frame, text="Split Y")
y_scale = Scale(controls_frame, from_=5, to=250, orient=HORIZONTAL,
length=250, resolution=5, command=set_split_y, var=split_y)
label.pack()
y_scale.pack()
file_button = Button(options_frame, text="Open File", command=open_file)
file_button.pack(side=RIGHT)
camera_button = Button(options_frame, text="Use Camera", command=open_camera)
camera_button.pack(side=RIGHT)
########## VISUALIZATION LOOP ############
running = True
def run_video():
global atom_image, CANVAS_W, resize_canvas
while running:
# Read the next frame
retval, frame = capture.read()
# Check if there is a valid frame.
if not retval:
continue
            if resize_canvas:
                # match the canvas width to the frame's aspect ratio, keeping CANVAS_H fixed
                frame_h, frame_w, _ = frame.shape
                CANVAS_W = int(CANVAS_H * frame_w / frame_h)
                resize_canvas = False
# Convert the original image to tiled
tile_img = pokiki.convertFromImage(frame, atom_split_x, atom_split_y)
tile_img = cv2.resize(tile_img, (CANVAS_W, CANVAS_H))
# Display the resulting frame
atom_image = tile_img
video_thread = Thread(target=run_video, args=())
video_thread.start()
def exit_app():
global running
running = False
video_thread.join()
root.destroy()
root.protocol("WM_DELETE_WINDOW", exit_app)
while running:
root.update_idletasks()
# Display the resulting frame
if atom_image is not None:
canvas.configure(width=CANVAS_W)
img = ImageTk.PhotoImage(image=Image.fromarray(atom_image))
canvas.create_image(0,0, anchor="nw", image=img)
atom_image = None
root.update()
|
# encoding: UTF-8
"""
双均线策略
注意事项:
1. 作者不对交易盈利做任何保证,策略代码仅供参考
2. 本策略需要用到talib,没有安装的用户请先参考www.vnpy.org上的教程安装
"""
from __future__ import division
from ctaBase import *
from ctaTemplate import *
########################################################################
class DMAStrategy(CtaTemplate):
"""双均线交易策略"""
vtSymbol = 'rb1801'
exchange = 'SHFE'
className = 'DMAStrategy'
author = u'binbinwei'
    name = EMPTY_UNICODE  # strategy instance name
    # Strategy parameters
    N = 5  # fast moving average period
    P = 20  # slow moving average period
    A = 0.005  # stop-loss ratio
    mPrice = 5  # price of one tick
    V = 1  # order volume (lots)
    nMin = 1  # bar period in minutes
    initDays = 10  # number of days of data used for initialization
    # Strategy variables
    ma0 = 0  # slow MA value of the current bar
    ma1 = 0  # fast MA value of the current bar
    ma00 = 0  # slow MA value of the previous bar
    ma10 = 0  # fast MA value of the previous bar
    # Parameter list, holding the parameter names
paramList = ['N',
'P',
'A',
'V',
'nMin',
'mPrice']
    # Variable list, holding the variable names
varList = ['trading',
'ma0',
'ma1',
'pos']
    # Parameter display-name map (labels shown in the GUI)
paramMap = {'N': u'快均线周期',
'P': u'慢均线周期',
'A': u'止损指标',
                'V': u'下单手数',
'nMin': u'K线分钟',
'exchange': u'交易所',
'vtSymbol': u'合约'}
    # Variable display-name map (labels shown in the GUI)
varMap = {'trading': u'交易中',
'ma0': u'慢均线',
'ma1': u'快均线'}
# ----------------------------------------------------------------------
def __init__(self, ctaEngine=None, setting={}):
"""Constructor"""
super(DMAStrategy, self).__init__(ctaEngine, setting)
self.widgetClass = KLWidget
self.widget = None
self.bm = BarManager(self.onBar, self.nMin)
        self.cost = 0  # position (holding) cost
        self.V = 1  # order volume (lots)
        self.ma0 = 0  # slow MA value of the current bar
        self.ma1 = 0  # fast MA value of the current bar
        self.ma00 = 0  # slow MA value of the previous bar
        self.ma10 = 0  # fast MA value of the previous bar
        # Launch the GUI
        self.signal = 0  # buy/sell signal flag
        self.mainSigs = ['ma0', 'ma1', 'cost']  # shown on the main chart
        self.subSigs = []  # shown on the sub chart
self.getGui()
        # Note: mutable object attributes of the strategy class (usually list, dict, etc.)
        # must be re-created when the strategy is initialized; otherwise data would be
        # shared between multiple strategy instances, which could lead to subtle strategy
        # logic errors. These mutable attributes may be omitted from the class body and
        # declared only under __init__; declaring them at class level is mainly for
        # readability when reading the strategy (more of a coding-style choice).
# ----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送"""
super(DMAStrategy, self).onTick(tick)
        # Filter out limit-up/limit-down prices and the call auction
if tick.lastPrice == 0 or tick.askPrice1 == 0 or tick.bidPrice1 == 0:
return
self.bm.updateTick(tick)
# ----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送(必须由用户继承实现)"""
self.bar = bar
if self.tradeDate != bar.date:
self.tradeDate = bar.date
        # Record the bar data
if not self.am.updateBar(bar):
return
        # Compute indicators
        self.getCtaIndictor(bar)
        # Compute signals
        self.getCtaSignal(bar)
        # Simple signal execution
        self.execSignal(self.V)
        # Emit a status update event
if (not self.widget is None) and (not self.bar is None):
data = {'bar': self.bar, 'sig': self.signal, 'ma0': self.ma0, 'ma1': self.ma1, 'cost': self.cost}
self.widget.addBar(data)
if self.trading:
self.putEvent()
# ----------------------------------------------------------------------
def getCtaIndictor(self, bar):
"""计算指标数据"""
# 计算指标数值
ma = self.am.sma(self.P, True)
ma1 = self.am.sma(self.N, True)
self.ma0, self.ma00 = ma[-1], ma[-2]
self.ma1, self.ma10 = ma1[-1], ma1[-2]
# ----------------------------------------------------------------------
def getCtaSignal(self, bar):
"""计算交易信号"""
close = bar.close
hour = bar.datetime.hour
minute = bar.datetime.minute
        # Define the end-of-day window: no trading and positions flattened near the close
self.endOfDay = hour == 14 and minute >= 40
        # Decide whether to trade
self.buySig = self.ma1 > self.ma0 and self.ma10 < self.ma00
self.shortSig = self.ma1 < self.ma0 and self.ma10 > self.ma00
self.coverSig = self.buySig or close >= self.cost + self.A * close
self.sellSig = self.shortSig or close <= self.cost - self.A * close
        # Trade prices
self.longPrice = bar.close
self.shortPrice = bar.close
# ----------------------------------------------------------------------
def execSignal(self, volume):
"""简易交易信号执行"""
pos = self.pos[self.vtSymbol]
endOfDay = self.endOfDay
        # Pending order not yet filled: cancel it
if not self.orderID is None:
self.cancelOrder(self.orderID)
self.signal = 0
        # Currently no position
if pos == 0 and not self.endOfDay:
            # Open long / open short
if self.shortSig:
self.signal = -self.shortPrice
self.orderID = self.short(self.shortPrice, volume)
elif self.buySig:
self.signal = self.longPrice
self.orderID = self.buy(self.longPrice, volume)
        # Holding a long position
elif pos > 0 and (self.sellSig or self.endOfDay):
self.signal = -self.shortPrice
self.orderID = self.sell(self.shortPrice, pos)
        # Holding a short position
elif pos < 0 and (self.coverSig or self.endOfDay):
self.signal = self.longPrice
self.orderID = self.cover(self.longPrice, -pos)
# ----------------------------------------------------------------------
def onTrade(self, trade):
self.cost = trade.price
super(DMAStrategy, self).onTrade(trade, log=True)
# ----------------------------------------------------------------------
def onStart(self):
self.loadBar(3)
super(DMAStrategy, self).onStart()
self.getGui()
# ----------------------------------------------------------------------
def onStop(self):
super(DMAStrategy, self).onStop()
if not self.widget is None:
self.widget.clear()
self.closeGui()
|
from prolog.interpreter.signature import Signature, SignatureFactory
def test_eq():
sig1 = Signature("a", 0)
assert sig1.eq(sig1)
sig2 = Signature("a", 0)
assert sig1.eq(sig2)
sig3 = Signature("a", 1)
assert not sig1.eq(sig3)
sig4 = Signature("b", 0)
assert not sig1.eq(sig4)
def test_cache():
factory = SignatureFactory()
sig1 = factory.getsignature("a", 0)
sig2 = factory.getsignature("a", 0)
assert sig1 is sig2
assert sig1.cached
assert sig1.ensure_cached() is sig1
sig2 = factory.getsignature("abc", 0, cache=False)
sig1 = factory.getsignature("abc", 0)
assert not sig2.cached
assert sig2.ensure_cached() is sig1
sig3 = factory.getsignature("xyz", 0, cache=False)
assert not sig3.cached
assert sig3.ensure_cached() is sig3
assert sig3.cached
def test_extra_attr():
factory = SignatureFactory()
factory.register_extr_attr("foo", default=5)
sig1 = factory.getsignature("a", 0)
assert sig1.get_extra("foo") == 5
sig1.set_extra("foo", 6)
assert sig1.get_extra("foo") == 6
sig1 = factory.getsignature("b", 0, cache=False)
sig2 = factory.getsignature("b", 0)
assert sig2.get_extra("foo") == 5
sig2.set_extra("foo", 6)
assert sig1.get_extra("foo") == 6
def test_extra_attr_engine():
factory = SignatureFactory()
factory.register_extr_attr("foo", engine=True)
sig1 = factory.getsignature("a", 0)
e1 = "e1"
e2 = "e2"
sig1.set_extra_engine_local("foo", 6, e1)
assert sig1.get_extra_engine_local("foo", e1) == 6
assert sig1.get_extra_engine_local("foo", e2) is None
assert sig1.get_extra_engine_local("foo", e1) is None
sig1.set_extra_engine_local("foo", 8, e2)
assert sig1.get_extra_engine_local("foo", e2) == 8
def test_atom_signature():
factory = SignatureFactory()
factory.register_extr_attr("foo", engine=True)
sig1 = factory.getsignature("a", 0)
assert sig1.atom_signature is sig1
sig2 = factory.getsignature("a", 5)
assert sig2.atom_signature is sig1
|
## Evaluator for evaluating net accuracy against test data
import numpy as np
import os
import LabPicsMaterialInstanceReader as LabPicsInstanceReader
import torch
CatName={}
CatName[0]='Empty'
CatName[1]='Vessel'
CatName[2]='V Label'
CatName[3]='V Cork'
CatName[4]='V Parts GENERAL'
CatName[5]='Ignore'
CatName[6]='Liquid GENERAL'
CatName[7]='Liquid Suspension'
CatName[8]='Foam'
CatName[9]='Gel'
CatName[10]='Solid GENERAL'
CatName[11]='Granular'
CatName[12]='Powder'
CatName[13]='Solid Bulk'
CatName[14]='Vapor'
CatName[15]='Other Material'
CatName[16]='Filled'
############################################################################################################
####################################Create data Reader and statistics file#####################################################################################
class Evaluator:
def __init__(self, AnnDir,OutFile):
self.AnnDir = AnnDir
self.OutFile=OutFile
if not os.path.exists(OutFile):
f=open(OutFile,"w")
f.close()
print("-------------------------------------Creating test evaluator------------------------------------------------------")
self.Reader = LabPicsInstanceReader.Reader(MainDir=self.AnnDir, TrainingMode=False)
    #################################################Evaluate net accuracy####################################
def Eval(self,Net,itr):
print("Evaluating")
Finished=False
IOUSum = 0
InterSum = 0
UnionSum = 0
ImSum=0
IOUDif = 0
CatTP = 0
CatFP = 0
CatFN = 0
IOUSumCat = np.zeros([20])
InterSumCat = np.zeros([20])
UnionSumCat = np.zeros([20])
ImSumCat = np.zeros([20])
IOUDifCat = np.zeros([20])
CatTPCat= np.zeros([20])
CatFPCat= np.zeros([20])
CatFNCat = np.zeros([20])
while (not Finished):
Imgs, AnnMapGt, BG,ROI,PointerMap, Ignore, Cats, Finished=self.Reader.LoadSingle()
# --------------------------------------
# Imgs[:, :, 0] *= 1 - AnnMapGt.astype(np.uint8)
# Imgs[:, :, 1] *= 1 - Ignore.astype(np.uint8)
# print(Cats)
# misc.imshow(Imgs)
# misc.imshow((ROI + AnnMapGt * 2 + PointerMap * 3).astype(np.uint8) * 40)
# print(ROI.shape)
# ----------------------------------------------
Imgs=np.expand_dims(Imgs,axis=0)
PointerMap = np.expand_dims(PointerMap,axis=0)
ROI = np.expand_dims(ROI, axis=0)
with torch.autograd.no_grad():
Prob, LbPred, PredIOU, Predclasslist = Net.forward(Images=Imgs, Pointer=PointerMap,ROI=ROI) # Run net inference and get prediction
PredIOU=np.squeeze(PredIOU.data.cpu().numpy())
Pred= LbPred.data.cpu().numpy()[0]*(1-Ignore)
GT=AnnMapGt*(1-Ignore)
Inter=(Pred*GT).sum()
Union=(Pred).sum()+(GT).sum()-Inter
if Union>0:
IOUSum += Inter/Union
InterSum += Inter
UnionSum += Union
IOUDif+=np.abs(Inter/Union-PredIOU)
ImSum += 1
for k in range(len(Cats)):
if Cats[k]>0:
k=int(k)
IOUSumCat[k] += Inter / Union
InterSumCat[k] += Inter
UnionSumCat[k]+= Union
ImSumCat[k] += 1
IOUDifCat[k]+=np.abs(Inter/Union-PredIOU)
if Cats[int(k)]>0:
if (Predclasslist[int(k)][0][1] > 0.5).data.cpu().numpy()>0:
CatTPCat[k]+=1
CatTP+=1
else:
CatFNCat[k] += 1
CatFN += 1
else:
if (Predclasslist[int(k)][0][1] > 0.5).data.cpu().numpy()>0:
CatFPCat[k]+=1
CatFP+=1
# if GT.sum()>0:
# print(k)
# Im=Imgs[0].copy()
# print( Inter / Union)
# Im[:, :, 0] *= 1 - GT.astype(np.uint8)
# Im[:, :, 2] *= (1-Ignore).astype(np.uint8)
# Im[:, :, 1] *= 1 - Pred.astype(np.uint8)
# misc.imshow(Im)
# break
f = open(self.OutFile, "a")
txt="\n=================================================================================\n"
txt+=str(itr)+"\n"
PerPixelPerCat = []
PerImagePerCat = []
MeanDifIOUPerCat =[]
for nm in range(IOUSumCat.shape[0]):
if UnionSumCat[nm]>0:
txt += str(nm) + "\t" +CatName[nm]+"\t"
txt += "IOU Average Per Pixel=\t"+str(InterSumCat[nm]/UnionSumCat[nm])+"\t"
txt += "IOU Average Per Image=\t" + str(IOUSumCat[nm]/ImSumCat[nm])+"\tNum Examples\t"+str(ImSumCat[nm])+"\t"
txt += "IOU Eval Pred Error=\t" + str(IOUDifCat[nm]/ImSumCat[nm])+"\t"
txt += "Accuracy Rate Cat=\t"+str(CatTPCat[nm]/(CatTPCat[nm]+CatFNCat[nm]+CatFPCat[nm]+0.0001)) + "Recall Rate Cat=\t"+str(CatTPCat[nm]/(CatTPCat[nm]+CatFNCat[nm]+0.0001)) + "Precision Rate Cat=\t"+str(CatTPCat[nm]/(CatTPCat[nm]+CatFPCat[nm]+0.0001)) + "\n"
PerPixelPerCat.append(InterSumCat[nm]/UnionSumCat[nm])
PerImagePerCat.append(IOUSumCat[nm]/ImSumCat[nm])
MeanDifIOUPerCat.append(IOUDifCat[nm]/ImSumCat[nm])
txt += "\n------Segmentation Accuracy------\n"
txt += "\n\n Total IOU Average Per Pixel=\t" + str(InterSum / UnionSum) + "\t"
txt += "Total IOU Average Per Image=\t" + str(IOUSum / ImSum) + "\n"
txt += "\n\n Cat Total IOU Average Per Pixel=\t" + str(np.mean(PerPixelPerCat)) + "\t"
txt += "Cat Total IOU Average Per Image=\t" + str(np.mean(PerImagePerCat)) + "\n"
txt +="\n------EVAL------\n"
txt += "\n\n Dif Pred IOU=\t" + str(IOUDif / ImSum) + "\t"
txt += "Dif Pred IOU Per Cat=\t" + str(np.mean(MeanDifIOUPerCat)) + "\n"
txt += "\n------Category------\n"
txt += "Accuracy Rate Cat=\t" + str(
CatTP / (CatTP + CatFN + CatFP+0.0001)) + "\tRecall Rate Cat=\t" + str(
CatTP / (CatTP + CatFN+0.0001)) + "\tPrecision Rate Cat=\t" + str(
CatTP / (CatTP + CatFP+0.0001)) + "\n"
f.write(txt)
f.close()
print(txt)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import requests
from flask import current_app
from flod_common.session.utils import (unsign_auth_token,
verify_superuser_auth_token)
USERS_URL = os.environ.get('USERS_URL', 'http://localhost:4000')
USERS_VERSION = os.environ.get('USERS_VERSION', 'v1')
def get_user_id_for_user(cookies=None):
if cookies:
        if cookies.get('auth_token'):
username = unsign_auth_token(cookies['auth_token'])
super_user_name = os.environ["AUTH_ADMIN_USER_ID"]
if username != super_user_name:
return username
return None
def get_user_by_id(user_id, cookies):
url = '%s/api/%s/users/%s' % (USERS_URL, USERS_VERSION, user_id)
response = requests.get(url, cookies=cookies)
return response
def get_user(cookies):
if 'auth_token' not in cookies:
current_app.logger.info('auth_token not found in cookies')
return None
auth_token = cookies['auth_token']
username = unsign_auth_token(auth_token)
if username is None:
current_app.logger.info(('auth_token could not be '
'unsigned: auth_token=%s'), auth_token)
return None
url = '%s/api/%s/users/%s' % (USERS_URL, USERS_VERSION, username)
r = requests.get(url, cookies=cookies)
return r.json()
def has_role(user, name):
return name in (role['name'] for role in user.get('roles', []))
def is_administrator(user):
return has_role(user, 'flod_brukere')
def make_credential_id(facility_id):
return 'CAN_EDIT_FACILITY' + '_' + str(facility_id)
def has_edit_credentials(data, facility_id):
if "credentials" not in data:
return False
target_cred = make_credential_id(facility_id)
for cred in data["credentials"]:
if cred["id"] == target_cred:
return True
return False
def can_user_edit_facility(user_id, facility_id, cookies):
response = get_user_by_id(user_id, cookies)
data = response.json()
return has_edit_credentials(data, facility_id)
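# Small illustration of the credential check above (values are made up):
#
#   has_edit_credentials({"credentials": [{"id": "CAN_EDIT_FACILITY_42"}]}, 42)  # -> True
#   has_edit_credentials({"credentials": []}, 42)                                # -> False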
|
with open('answer.data', 'rb') as a:
answer = a.read(8192)
with open('cloud.data', 'ab') as c:
while answer:
c.write(answer)
answer = a.read(8192)
|
import os
from os import path
import sys
import subprocess
import requests
def inHDInsightClusterNode():
if path.exists('/usr/hdp/current') :
print('Please don\'t run this script on your existing cluster node, yet!')
sys.exit()
return True
else:
#print('Running on an Azure Linux VM')
return False
def executeCommand(cmdName, params = ''):
#myProc = subprocess.run( cmdName + ' ' + params , shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
#return myProc.stdout
myProc = subprocess.Popen( cmdName + ' ' + params , stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = myProc.communicate()
if ( len(str(stdout)) > len(str(stderr)) ):
return stdout
else:
return stderr
#netcat
def ncCheck(target, port , protocol = 'TCP', timeOut = '5'):
cr = CheckResult()
#nc -vz -w 5 $line 443 2>&1
if (protocol=='TCP'):
cr.result = executeCommand('nc -vz -w ' + str(timeOut) + ' ' + target + ' ' + str(port))
elif (protocol=='UDP'):
cr.result = executeCommand('nc -vzu -w ' + str(timeOut) + ' ' + target + ' ' + str(port))
if ('succeeded!') in str(cr.result):
cr.isOk = True
return cr
#nslookup
def nslCheck(target):
cr = CheckResult()
cr.result = executeCommand('nslookup ' + target)
if ('succeeded!') in str(cr.result):
cr.isOk = True
return cr
#db specific netcat
def dbCheck(dbServerName, dbServerSuffix = 'database.windows.net', port= '1433'):
    return ncCheck(dbServerName + '.' + dbServerSuffix, port)
def readParamsFromTxt():
if not( path.exists('params.txt')):
print('ERROR: Cannot find \'params.txt\' in the current folder')
sys.exit()
##Dictionary to store params
params = {}
f = open('params.txt','r')
for line in f:
if ( line.startswith('#') or line == '' or line == '\n'):
            continue
else:
#print('Line: ' + line)
tmpArr = line.split('=')
tmpKey = tmpArr[0]
tmpValue = (tmpArr[1])
#if line ends with \n, remove it
if tmpValue.endswith('\n'):
tmpValue = tmpValue[0:-1]
#strip the double quotes
tmpValue = tmpValue[1:-1]
params[tmpKey] = tmpValue
f.close()
#print(params)
return(params)
class CheckResult(object):
isOk = False
result = ''
class Validation:
def __init__(self, id, name, type, hostname, protocol, port, timeout):
self.id = id
self.name = name
self.type = type
self.hostname = hostname
self.protocol = protocol
self.port = port
self.timeout = timeout
self.succeeded = False
self.cmdout = ""
def doValidation(v):
cr = CheckResult()
if ( v.type == 'nc'):
cr = ncCheck(v.hostname, v.port, v.protocol, v.timeout)
if cr.isOk:
v.succeeded = True
v.cmdout = cr.result
else:
#failedValidationCount = failedValidationCount+1
v.succeeded = False
v.cmdout = cr.result
#TODO
    elif ( v.type == 'nsl'):
        cr = nslCheck(v.hostname)
        if cr.isOk:
v.succeeded = True
v.cmdout = cr.result
else:
#failedValidationCount = failedValidationCount+1
v.succeeded = False
v.cmdout = cr.result
return v
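# Minimal usage sketch (hypothetical host name, port and timeout, for illustration only):
#
#   v = Validation(id=1, name='sql check', type='nc',
#                  hostname='myserver.database.windows.net',
#                  protocol='TCP', port=1433, timeout=5)
#   v = doValidation(v)
#   print(v.succeeded, v.cmdout)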
|
# a heap structure designed for external sort
import random
import sys
import collections
item = collections.namedtuple('item', ['listname', 'index', 'value'])
class Heap:
def __init__(self):
self.h = []
self.currsize = 0
def leftChild(self,i):
if 2*i+1 < self.currsize:
return 2*i+1
return None
def rightChild(self,i):
if 2*i+2 < self.currsize:
return 2*i+2
return None
def maxHeapify(self,node):
if node < self.currsize:
m = node
lc = self.leftChild(node)
rc = self.rightChild(node)
if lc is not None and self.h[lc].value < self.h[m].value:
m = lc
if rc is not None and self.h[rc].value < self.h[m].value:
m = rc
if m!=node:
temp = self.h[node]
self.h[node] = self.h[m]
self.h[m] = temp
self.maxHeapify(m)
def getTop(self):
if self.currsize >= 1:
me = self.h[0]
temp = self.h[0]
self.h[0] = self.h[self.currsize-1]
# self.h[self.currsize-1] = temp
del self.h[self.currsize-1]
self.currsize -= 1
self.maxHeapify(0)
return me
return None
    def insert(self,data):
        self.h.append(data)
        curr = self.currsize
        self.currsize+=1
        # sift the new item up; in a 0-indexed heap the parent of index i is (i-1)//2
        while curr > 0 and self.h[curr].value < self.h[(curr-1)//2].value:
            parent = (curr-1)//2
            self.h[curr], self.h[parent] = self.h[parent], self.h[curr]
            curr = parent
def display(self):
print(self.h)
def main():
listsize = 1000
listcount = 1000
l = [[random.randrange(1, 10000) for i in range(listsize)] for index in range(listcount)]
list(map(lambda a : a.sort(), l))
h = Heap()
for i in range(len(l)):
h.insert(item(i, 0, l[i][0]))
output = []
flag = 0
while flag < listcount * listsize:
temp = h.getTop()
output.append(temp.value)
if temp.index < listsize - 1:
h.insert(item(temp.listname, temp.index+1, l[temp.listname][temp.index+1]))
flag += 1
print(len(output))
if __name__=='__main__':
main()
|
#python train.py --solver SFD/solver.prototxt --gpu 0,1,2,3
from __future__ import print_function
import argparse
import os
import time
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from caffenet import CaffeNet
from prototxt import parse_solver
import caffe
class ParallelCaffeNet(nn.Module):
def __init__(self, caffe_module, device_ids):
super(ParallelCaffeNet, self).__init__()
self.device_ids = device_ids
self.module = nn.DataParallel(caffe_module, device_ids)
def convert2batch(self, label, batch_size, ngpus):
if ngpus > 1:
num = label.size(2)
label = label.expand(ngpus, 1, num, 8).contiguous()
sub_sz = batch_size/ngpus
for i in range(ngpus):
sub_label = label[i,0,:, 0]
sub_label[sub_label > (i+1)*sub_sz] = -1
sub_label[sub_label < i*sub_sz] = -1
sub_label = sub_label - sub_sz * i
label[i,0,:, 0] = sub_label
return label
def forward(self):
self.module.module.set_forward_data_only(True)
data, label = self.module.module()
label_data = self.convert2batch(label.data, data.size(0), len(self.device_ids))
label = Variable(label_data)
self.module.module.set_forward_net_only(True)
return self.module(data.cuda(), label.cuda())
def adjust_learning_rate(optimizer, batch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = base_lr
for i in range(len(stepvalues)):
if batch >= stepvalues[i]:
lr = lr * gamma
else:
break
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def logging(message):
print('%s %s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), message))
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Train Caffe Example')
parser.add_argument('--gpu', type=str, help='gpu ids e.g "0,1,2,3"')
parser.add_argument('--solver', type=str, help='the solver prototxt')
parser.add_argument('--model', type=str, help='the network definition prototxt')
parser.add_argument('--snapshot', type=str, help='the snapshot solver state to resume training')
parser.add_argument('--weights', type=str, help='the pretrained weight')
parser.add_argument('--lr', type=float, help='base learning rate')
args = parser.parse_args()
print(args)
solver = parse_solver(args.solver)
protofile = solver['train_net']
base_lr = float(solver['base_lr'])
gamma = float(solver['gamma'])
momentum = float(solver['momentum'])
weight_decay = float(solver['weight_decay'])
display = int(solver['display'])
test_iter = 0
max_iter = int(solver['max_iter'])
test_interval = 99999999
snapshot = int(solver['snapshot'])
snapshot_prefix = solver['snapshot_prefix']
stepvalues = solver['stepvalue']
stepvalues = [int(item) for item in stepvalues]
if args.lr != None:
base_lr = args.lr
#torch.manual_seed(int(time.time()))
#if args.gpu:
# torch.cuda.manual_seed(int(time.time()))
net = CaffeNet(protofile)
if args.weights:
net.load_weights(args.weights)
net.set_verbose(False)
net.set_train_outputs('mbox_loss')
if args.gpu:
device_ids = args.gpu.split(',')
device_ids = [int(i) for i in device_ids]
print('device_ids', device_ids)
if len(device_ids) > 1:
print('---- Multi GPUs ----')
net = ParallelCaffeNet(net.cuda(), device_ids=device_ids)
else:
print('---- Single GPU ----')
net.cuda()
print(net)
optimizer = optim.SGD(net.parameters(), lr=base_lr, momentum=momentum, weight_decay=weight_decay)
if args.snapshot:
state = torch.load(args.snapshot)
start_epoch = state['batch']+1
net.load_state_dict(state['state_dict'])
optimizer.load_state_dict(state['optimizer'])
print('loaded state %s' % (args.snapshot))
net.train()
lr = adjust_learning_rate(optimizer, 0)
logging('[0] init_lr = %f' % lr)
for batch in range(max_iter):
if batch in stepvalues:
lr = adjust_learning_rate(optimizer, batch)
logging('[%d] lr = %f' % (batch, lr))
if (batch+1) % test_interval == 0:
net.eval()
average_accuracy = 0.0
average_loss = 0.0
for i in range(test_iter):
loss, accuracy = net()
average_accuracy += accuracy.data.mean()
average_loss += loss.data.mean()
average_accuracy /= test_iter
average_loss /= test_iter
logging('[%d] test loss: %f\ttest accuracy: %f' % (batch+1, average_loss, average_accuracy))
net.train()
else:
optimizer.zero_grad()
loss = net().mean()
loss.backward()
optimizer.step()
if (batch+1) % display == 0:
logging('[%d] train loss: %f' % (batch+1, loss.data[0]))
if (batch+1) % snapshot == 0:
savename = '%s_batch%08d.pth' % (snapshot_prefix, batch+1)
logging('save state %s' % (savename))
state = {'batch': batch+1,
'state_dict': net.state_dict(),
'optimizer': optimizer.state_dict()}
torch.save(state, savename)
|
#!/usr/bin/env python3
# Written by Ivan Anishchuk (anishchuk.ia@gmail.com) in 2015
"""
Solution for APU: Init Phase.
It's actually too complex because I've done it all the wrong way:
I just copy-pasted the beginning of my solution for APU: Improvement.
During contest I obviously did something much simpler but I lost it
somewhere and needed more medium games solved on CG, so...
"""
import sys
from collections import OrderedDict, deque
class Node():
"""
Basic structure for nodes/islands.
Contains the information about position and neighbors
    and very little logic.
"""
right = None
left = None
down = None
up = None
def __init__(self, pos, amount, number):
self.pos = pos
self.amount = amount
self.x = pos[0]
self.y = pos[1]
self.number = number
def after(self):
return(int(self.right is not None) + int(self.down is not None))
def before(self):
return(int(self.left is not None) + int(self.up is not None))
    def __str__(self):
        return 'Node at {0.pos}'.format(self)
# Read the input.
data = []
width = int(input()) # the number of cells on the X axis
height = int(input()) # the number of cells on the Y axis
for i in range(height):
line = input() # width characters, each either a number or a '.'
data.append(list(line))
nodes = OrderedDict()
nodenum = 0
# Generate `Node` object for the nodes.
for y in range(height):
for x in range(width):
if data[y][x].isnumeric():
nodes[(x, y)] = Node((x, y), int(data[y][x]), nodenum)
nodenum += 1
# The total number of the nodes.
N = len(nodes)
# Find all the neighbors for each node.
for (x, y), node in nodes.items():
for xx in range(x+1, width):
if (xx, y) in nodes:
node.right = nodes[(xx, y)]
nodes[(xx, y)].left = node
break
for yy in range(y+1, height):
if (x, yy) in nodes:
node.down = nodes[(x, yy)]
nodes[(x, yy)].up = node
break
for node in nodes.values():
print(
node.x, node.y,
node.right.x if node.right else -1,
node.right.y if node.right else -1,
node.down.x if node.down else -1,
node.down.y if node.down else -1,
)
|
# -*- coding: utf-8 -*
from copy import deepcopy
from typing import Dict
from videoanalyst.utils import Registry
TRACK_MONITORS = Registry('TRACK_MONITOR')
VOS_MONITORS = Registry('VOS_MONITOR')
TASK_MONITORS = dict(
track=TRACK_MONITORS,
vos=VOS_MONITORS,
)
class MonitorBase:
r"""
Monitor base class for engine monitoring (e.g. visualization / tensorboard / training info logging)
"""
# Define your default hyper-parameters here in your sub-class.
default_hyper_params = dict()
def __init__(self, ):
self._hyper_params = deepcopy(
self.default_hyper_params) # mapping-like object
self._state = dict() # pipeline state
def get_hps(self) -> Dict:
r"""
Getter function for hyper-parameters
Returns
-------
dict
hyper-parameters
"""
return self._hyper_params
def set_hps(self, hps: Dict) -> None:
r"""
Set hyper-parameters
Arguments
---------
hps: dict
            dict of hyper-parameters; every key must already exist in self._hyper_params
"""
for key in hps:
if key not in self._hyper_params:
raise KeyError
self._hyper_params[key] = hps[key]
    def update_params(self):
        r"""
        An interface for updating parameters derived from the hyper-parameters.
        """
def init(self, engine_state: Dict):
r"""register engine state & initialize monitor
"""
self._state["engine_state"] = engine_state
def update(self, engine_data: Dict):
"""an interface to update with engine_data and update iteration data for monitoring
Execution result will be saved in engine_state
Parameters
----------
engine_state : Dict
_state attribute of engine
engine_data : Dict
data given by engine at each iteration
"""
|
# ref: https://github.com/waymo-research/waymo-open-dataset/blob/master/waymo_open_dataset/utils/frame_utils.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pathlib import Path
import os
import time
from glob import glob
import numpy as np
import tensorflow as tf
from waymo_open_dataset import dataset_pb2
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
def parse_range_image_and_camera_projection(frame):
"""Parse range images and camera projections given a frame.
Args:
frame: open dataset frame proto
Returns:
range_images: A dict of {laser_name,
[range_image_first_return, range_image_second_return]}.
camera_projections: A dict of {laser_name,
[camera_projection_from_first_return,
camera_projection_from_second_return]}.
range_image_top_pose: range image pixel pose for top lidar.
"""
range_images = {}
camera_projections = {}
range_image_top_pose = None
for laser in frame.lasers:
if len(laser.ri_return1.range_image_compressed) > 0: # pylint: disable=g-explicit-length-test
range_image_str_tensor = tf.io.decode_compressed(
laser.ri_return1.range_image_compressed, 'ZLIB')
ri = dataset_pb2.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
range_images[laser.name] = [ri]
if laser.name == dataset_pb2.LaserName.TOP:
range_image_top_pose_str_tensor = tf.io.decode_compressed(
laser.ri_return1.range_image_pose_compressed, 'ZLIB')
range_image_top_pose = dataset_pb2.MatrixFloat()
range_image_top_pose.ParseFromString(
bytearray(range_image_top_pose_str_tensor.numpy()))
camera_projection_str_tensor = tf.io.decode_compressed(
laser.ri_return1.camera_projection_compressed, 'ZLIB')
cp = dataset_pb2.MatrixInt32()
cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
camera_projections[laser.name] = [cp]
if len(laser.ri_return2.range_image_compressed) > 0: # pylint: disable=g-explicit-length-test
range_image_str_tensor = tf.io.decode_compressed(
laser.ri_return2.range_image_compressed, 'ZLIB')
ri = dataset_pb2.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
range_images[laser.name].append(ri)
camera_projection_str_tensor = tf.io.decode_compressed(
laser.ri_return2.camera_projection_compressed, 'ZLIB')
cp = dataset_pb2.MatrixInt32()
cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
camera_projections[laser.name].append(cp)
return range_images, camera_projections, range_image_top_pose
def convert_range_image_to_cartesian(frame,
range_images,
range_image_top_pose,
ri_index=0,
keep_polar_features=False):
"""Convert range images from polar coordinates to Cartesian coordinates.
Args:
frame: open dataset frame
range_images: A dict of {laser_name, [range_image_first_return,
range_image_second_return]}.
range_image_top_pose: range image pixel pose for top lidar.
ri_index: 0 for the first return, 1 for the second return.
keep_polar_features: If true, keep the features from the polar range image
(i.e. range, intensity, and elongation) as the first features in the
output range image.
Returns:
dict of {laser_name, (H, W, D)} range images in Cartesian coordinates. D
will be 3 if keep_polar_features is False (x, y, z) and 6 if
keep_polar_features is True (range, intensity, elongation, x, y, z).
"""
cartesian_range_images = {}
frame_pose = tf.convert_to_tensor(
value=np.reshape(np.array(frame.pose.transform), [4, 4]))
# [H, W, 6]
range_image_top_pose_tensor = tf.reshape(
tf.convert_to_tensor(value=range_image_top_pose.data),
range_image_top_pose.shape.dims)
# [H, W, 3, 3]
range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(
range_image_top_pose_tensor[...,
0], range_image_top_pose_tensor[..., 1],
range_image_top_pose_tensor[..., 2])
range_image_top_pose_tensor_translation = range_image_top_pose_tensor[..., 3:]
range_image_top_pose_tensor = transform_utils.get_transform(
range_image_top_pose_tensor_rotation,
range_image_top_pose_tensor_translation)
for c in frame.context.laser_calibrations:
range_image = range_images[c.name][ri_index]
if len(c.beam_inclinations) == 0: # pylint: disable=g-explicit-length-test
beam_inclinations = range_image_utils.compute_inclination(
tf.constant([c.beam_inclination_min, c.beam_inclination_max]),
height=range_image.shape.dims[0])
else:
beam_inclinations = tf.constant(c.beam_inclinations)
beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])
range_image_tensor = tf.reshape(
tf.convert_to_tensor(value=range_image.data), range_image.shape.dims)
pixel_pose_local = None
frame_pose_local = None
if c.name == dataset_pb2.LaserName.TOP:
pixel_pose_local = range_image_top_pose_tensor
pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)
frame_pose_local = tf.expand_dims(frame_pose, axis=0)
range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image(
tf.expand_dims(range_image_tensor[..., 0], axis=0),
tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(tf.convert_to_tensor(
value=beam_inclinations), axis=0),
pixel_pose=pixel_pose_local,
frame_pose=frame_pose_local)
range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
if keep_polar_features:
# If we want to keep the polar coordinate features of range, intensity,
# and elongation, concatenate them to be the initial dimensions of the
# returned Cartesian range image.
range_image_cartesian = tf.concat(
[range_image_tensor[..., 0:3], range_image_cartesian], axis=-1)
cartesian_range_images[c.name] = range_image_cartesian
return cartesian_range_images
def convert_range_image_to_point_cloud(frame,
range_images,
camera_projections,
range_image_top_pose,
ri_index=0,
keep_polar_features=False):
"""Convert range images to point cloud.
Args:
frame: open dataset frame
range_images: A dict of {laser_name, [range_image_first_return,
range_image_second_return]}.
camera_projections: A dict of {laser_name,
[camera_projection_from_first_return,
camera_projection_from_second_return]}.
range_image_top_pose: range image pixel pose for top lidar.
ri_index: 0 for the first return, 1 for the second return.
keep_polar_features: If true, keep the features from the polar range image
(i.e. range, intensity, and elongation) as the first features in the
output range image.
Returns:
points: {[N, 3]} list of 3d lidar points of length 5 (number of lidars).
(NOTE: Will be {[N, 6]} if keep_polar_features is true.
cp_points: {[N, 6]} list of camera projections of length 5
(number of lidars).
"""
calibrations = sorted(
frame.context.laser_calibrations, key=lambda c: c.name)
points = []
cp_points = []
cartesian_range_images = convert_range_image_to_cartesian(
frame, range_images, range_image_top_pose, ri_index, keep_polar_features)
for c in calibrations:
range_image = range_images[c.name][ri_index]
range_image_tensor = tf.reshape(
tf.convert_to_tensor(value=range_image.data), range_image.shape.dims)
range_image_mask = range_image_tensor[..., 0] > 0
range_image_cartesian = cartesian_range_images[c.name]
points_tensor = tf.gather_nd(range_image_cartesian,
tf.compat.v1.where(range_image_mask))
cp = camera_projections[c.name][ri_index]
cp_tensor = tf.reshape(tf.convert_to_tensor(
value=cp.data), cp.shape.dims)
cp_points_tensor = tf.gather_nd(cp_tensor,
tf.compat.v1.where(range_image_mask))
points.append(points_tensor.numpy())
cp_points.append(cp_points_tensor.numpy())
return points, cp_points
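# Minimal usage sketch (assumes eager execution and an already-decoded ``frame``
# proto, as produced in extract_onesegment_toframe below):
#   range_images, camera_projections, range_image_top_pose = (
#       parse_range_image_and_camera_projection(frame))
#   points, cp_points = convert_range_image_to_point_cloud(
#       frame, range_images, camera_projections, range_image_top_pose, ri_index=0)
#   points_all = np.concatenate(points, axis=0)  # [N, 3] lidar points from all five lidars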
def convert_frame_to_dict(frame):
"""Convert the frame proto into a dict of numpy arrays.
The keys, shapes, and data types are:
POSE: 4x4 float32 array
For each lidar:
<LIDAR_NAME>_BEAM_INCLINATION: H float32 array
<LIDAR_NAME>_LIDAR_EXTRINSIC: 4x4 float32 array
<LIDAR_NAME>_RANGE_IMAGE_FIRST_RETURN: HxWx6 float32 array
<LIDAR_NAME>_RANGE_IMAGE_SECOND_RETURN: HxWx6 float32 array
<LIDAR_NAME>_CAM_PROJ_FIRST_RETURN: HxWx6 int64 array
<LIDAR_NAME>_CAM_PROJ_SECOND_RETURN: HxWx6 float32 array
(top lidar only) TOP_RANGE_IMAGE_POSE: HxWx6 float32 array
For each camera:
<CAMERA_NAME>_IMAGE: HxWx3 uint8 array
<CAMERA_NAME>_INTRINSIC: 9 float32 array
<CAMERA_NAME>_EXTRINSIC: 4x4 float32 array
<CAMERA_NAME>_WIDTH: int64 scalar
<CAMERA_NAME>_HEIGHT: int64 scalar
NOTE: This function only works in eager mode for now.
See the LaserName.Name and CameraName.Name enums in dataset.proto for the
valid lidar and camera name strings that will be present in the returned
dictionaries.
Args:
frame: open dataset frame
Returns:
Dict from string field name to numpy ndarray.
"""
    # Laser name definition (see dataset.proto): TOP = 1; FRONT = 2; SIDE_LEFT = 3;
    # SIDE_RIGHT = 4; REAR = 5.
    # ref: https://github.com/waymo-research/waymo-open-dataset/blob/master/waymo_open_dataset/dataset.proto
    # The dataset contains data from five lidars: one mid-range lidar (top) and four
    # short-range lidars (front, side left, side right and rear).
    # ref: https://waymo.com/open/data/perception/
    # The point cloud of each lidar is encoded as a range image. Two range images are
    # provided for each lidar, one for each of the two strongest returns. Each range
    # image has 4 channels:
    #   channel 0: range (see spherical coordinate system definition)
    #   channel 1: lidar intensity
    #   channel 2: lidar elongation
    #   channel 3: is_in_nlz (1 = in, -1 = not in)
range_images, camera_projection_protos, range_image_top_pose = (
parse_range_image_and_camera_projection(frame))
# Convert range images from polar coordinates to Cartesian coordinates
# dict of {laser_name, (H, W, D)} range images in Cartesian coordinates. D
# will be 3 if keep_polar_features is False (x, y, z) and 6 if
# keep_polar_features is True (range, intensity, elongation, x, y, z).
first_return_cartesian_range_images = convert_range_image_to_cartesian(
frame, range_images, range_image_top_pose, ri_index=0,
keep_polar_features=True)
second_return_cartesian_range_images = convert_range_image_to_cartesian(
frame, range_images, range_image_top_pose, ri_index=1,
keep_polar_features=True)
data_dict = {}
# Save the beam inclinations, extrinsic matrices, first/second return range
# images, and first/second return camera projections for each lidar.
for c in frame.context.laser_calibrations:
laser_name_str = dataset_pb2.LaserName.Name.Name(c.name)
beam_inclination_key = f'{laser_name_str}_BEAM_INCLINATION'
if len(c.beam_inclinations) == 0: # pylint: disable=g-explicit-length-test
data_dict[beam_inclination_key] = range_image_utils.compute_inclination(
tf.constant([c.beam_inclination_min, c.beam_inclination_max]),
height=range_images[c.name][0].shape.dims[0]).numpy()
else:
data_dict[beam_inclination_key] = np.array(
c.beam_inclinations, np.float32)
data_dict[f'{laser_name_str}_LIDAR_EXTRINSIC'] = np.reshape(
np.array(c.extrinsic.transform, np.float32), [4, 4])
data_dict[f'{laser_name_str}_RANGE_IMAGE_FIRST_RETURN'] = (
first_return_cartesian_range_images[c.name].numpy())
data_dict[f'{laser_name_str}_RANGE_IMAGE_SECOND_RETURN'] = (
second_return_cartesian_range_images[c.name].numpy())
first_return_cp = camera_projection_protos[c.name][0]
data_dict[f'{laser_name_str}_CAM_PROJ_FIRST_RETURN'] = np.reshape(
np.array(first_return_cp.data), first_return_cp.shape.dims)
second_return_cp = camera_projection_protos[c.name][1]
data_dict[f'{laser_name_str}_CAM_PROJ_SECOND_RETURN'] = np.reshape(
np.array(second_return_cp.data), second_return_cp.shape.dims)
# Save the H x W x 3 RGB image for each camera, extracted from JPEG.
for im in frame.images:
cam_name_str = dataset_pb2.CameraName.Name.Name(im.name)
data_dict[f'{cam_name_str}_IMAGE'] = tf.io.decode_jpeg(
im.image).numpy()
# Save the intrinsics, 4x4 extrinsic matrix, width, and height of each camera.
for c in frame.context.camera_calibrations:
cam_name_str = dataset_pb2.CameraName.Name.Name(c.name)
data_dict[f'{cam_name_str}_INTRINSIC'] = np.array(
c.intrinsic, np.float32)
data_dict[f'{cam_name_str}_EXTRINSIC'] = np.reshape(
np.array(c.extrinsic.transform, np.float32), [4, 4])
data_dict[f'{cam_name_str}_WIDTH'] = np.array(c.width)
data_dict[f'{cam_name_str}_HEIGHT'] = np.array(c.height)
# Save the range image pixel pose for the top lidar.
data_dict['TOP_RANGE_IMAGE_POSE'] = np.reshape(
np.array(range_image_top_pose.data, np.float32),
range_image_top_pose.shape.dims)
data_dict['POSE'] = np.reshape(
np.array(frame.pose.transform, np.float32), (4, 4))
return data_dict
#from waymo_open_dataset import dataset_pb2 as open_dataset
def extract_onesegment_toframe(fileidx, tfrecord_pathnames, step):
segment_path = tfrecord_pathnames[fileidx]
c_start = time.time()
print(
f'extracting {fileidx}, path: {segment_path}, currenttime: {c_start}')
dataset = tf.data.TFRecordDataset(str(segment_path), compression_type='')
framesdict = {} # []
for i, data in enumerate(dataset):
if i % step != 0: # Downsample
continue
# print('.', end='', flush=True) #progress bar
frame = dataset_pb2.Frame()
frame.ParseFromString(bytearray(data.numpy()))
# get one frame
# A unique name that identifies the frame sequence
context_name = frame.context.name
# print('context_name:', context_name)#14824622621331930560_2395_420_2415_420, same to the tfrecord file name
frame_timestamp_micros = str(frame.timestamp_micros)
# print(frame_timestamp_micros)
# frames.append(frame)
framesdict[frame_timestamp_micros] = frame
return framesdict
def saveonedictfile(data_files, fileidx, step, out_dir):
    c_start = time.time()  # local start time so the timing below does not depend on a global
    framesdict = extract_onesegment_toframe(fileidx, data_files, step)
    num_frames = len(framesdict)
    print(num_frames)
Final_array=[]
for key, frame in framesdict.items():
print(key)
context_name = frame.context.name
print('context_name:', context_name)
framedict=convert_frame_to_dict(frame)
#print(framedict)
# print(type(framedict['TOP_RANGE_IMAGE_FIRST_RETURN']))#FRONT_IMAGE: <class 'numpy.ndarray'>, (1280, 1920, 3)
print(framedict['TOP_RANGE_IMAGE_FIRST_RETURN'].shape) #<class 'numpy.ndarray'> (64, 2650, 6)
print(framedict['FRONT_IMAGE'].shape)
convertedframesdict = {'key':key, 'context_name':context_name, 'framedict':framedict}
Final_array.append(convertedframesdict)
second_time = time.time()
print(f"Finished conversion, Execution time: { second_time - c_start }") #Execution time: 555.8904404640198
filename=str(fileidx)+'_'+context_name+'.npy'
out_dir=Path(out_dir)
out_dir.mkdir(parents=True, exist_ok=True)
#np.save(out_dir / filename, Final_array)#Execution time: 54.30716061592102, 25G
filename=str(fileidx)+'_'+'step'+str(step)+'_'+context_name+'.npz'
np.savez_compressed(out_dir / filename, Final_array)#Execution time: 579.5185077190399, 7G
print(f"Finished np save, Execution time: { time.time() - second_time }")
if __name__ == "__main__":
#test the above functions: convert a Frame proto into a dictionary
#convert_frame_to_dict
#folders = ["training_0000","training_0001", "training_0002","training_0003","training_0004","training_0005","training_0006","training_0007","training_0008","training_0009", "training_0010", "training_0015", "training_0016", "training_0017","training_0018", "training_0019", "training_0020", "training_0021","training_0022","training_0023","training_0024","training_0025","training_0026","training_0027","training_0028","training_0029","training_0030","training_0031","validation_0000","validation_0001","validation_0002","validation_0003","validation_0004","validation_0005","validation_0006","validation_0007"]#["training_0001"]# ["training_0000", "training_0001"]
folders = ["training_0000"]
root_path="/data/cmpe249-f20/Waymo"
out_dir="/data/cmpe249-f20/WaymoKittitMulti/dict_train0"
data_files = [path for x in folders for path in glob(os.path.join(root_path, x, "*.tfrecord"))]
print("totoal number of files:", len(data_files))#886
c_start = time.time()
print(c_start)
fileidx = 1
step = 10
#save a single dict file
#saveonedictfile(data_files, fileidx, step, out_dir)
#save validation folders to dict files
folders = ["validation_0000","validation_0001","validation_0002","validation_0003","validation_0004","validation_0005","validation_0006","validation_0007"]
root_path="/data/cmpe249-f20/Waymo"
out_dir="/data/cmpe249-f20/WaymoKittitMulti/validationalldicts"
data_files = [path for x in folders for path in glob(os.path.join(root_path, x, "*.tfrecord"))]
print("totoal number of files:", len(data_files))#886
step=1
for fileidx in range(len(data_files)):
saveonedictfile(data_files, fileidx, step, out_dir)
print("finished")
# for key, value in framedict.items():
# print(key)
# FRONT_BEAM_INCLINATION
# FRONT_LIDAR_EXTRINSIC
# FRONT_RANGE_IMAGE_FIRST_RETURN
# FRONT_RANGE_IMAGE_SECOND_RETURN
# FRONT_CAM_PROJ_FIRST_RETURN
# FRONT_CAM_PROJ_SECOND_RETURN
# REAR_BEAM_INCLINATION
# REAR_LIDAR_EXTRINSIC
# REAR_RANGE_IMAGE_FIRST_RETURN
# REAR_RANGE_IMAGE_SECOND_RETURN
# REAR_CAM_PROJ_FIRST_RETURN
# REAR_CAM_PROJ_SECOND_RETURN
# SIDE_LEFT_BEAM_INCLINATION
# SIDE_LEFT_LIDAR_EXTRINSIC
# SIDE_LEFT_RANGE_IMAGE_FIRST_RETURN
# SIDE_LEFT_RANGE_IMAGE_SECOND_RETURN
# SIDE_LEFT_CAM_PROJ_FIRST_RETURN
# SIDE_LEFT_CAM_PROJ_SECOND_RETURN
# SIDE_RIGHT_BEAM_INCLINATION
# SIDE_RIGHT_LIDAR_EXTRINSIC
# SIDE_RIGHT_RANGE_IMAGE_FIRST_RETURN
# SIDE_RIGHT_RANGE_IMAGE_SECOND_RETURN
# SIDE_RIGHT_CAM_PROJ_FIRST_RETURN
# SIDE_RIGHT_CAM_PROJ_SECOND_RETURN
# TOP_BEAM_INCLINATION
# TOP_LIDAR_EXTRINSIC
# TOP_RANGE_IMAGE_FIRST_RETURN #HxWx6 float32 array with the range image of the first return for this lidar. The six channels are range, intensity, elongation, x, y, and z. The x, y, and z values are in vehicle frame.
# TOP_RANGE_IMAGE_SECOND_RETURN
# TOP_CAM_PROJ_FIRST_RETURN
# TOP_CAM_PROJ_SECOND_RETURN
# FRONT_IMAGE
# FRONT_LEFT_IMAGE
# SIDE_LEFT_IMAGE
# FRONT_RIGHT_IMAGE
# SIDE_RIGHT_IMAGE
# FRONT_INTRINSIC
# FRONT_EXTRINSIC
# FRONT_WIDTH
# FRONT_HEIGHT
# FRONT_LEFT_INTRINSIC
# FRONT_LEFT_EXTRINSIC
# FRONT_LEFT_WIDTH
# FRONT_LEFT_HEIGHT
# FRONT_RIGHT_INTRINSIC
# FRONT_RIGHT_EXTRINSIC
# FRONT_RIGHT_WIDTH
# FRONT_RIGHT_HEIGHT
# SIDE_LEFT_INTRINSIC
# SIDE_LEFT_EXTRINSIC
# SIDE_LEFT_WIDTH
# SIDE_LEFT_HEIGHT
# SIDE_RIGHT_INTRINSIC
# SIDE_RIGHT_EXTRINSIC
# SIDE_RIGHT_WIDTH
# SIDE_RIGHT_HEIGHT
# TOP_RANGE_IMAGE_POSE
# POSE
|
# -*-coding:utf-8-*-
"""Main application script"""
import os
import click
from flask_migrate import Migrate
from app import create_app, db
from app.models import User
app = create_app(os.getenv('APP_CONFIG') or 'default')
migrate = Migrate(app, db)
# set up code coverage
COV = None
if os.environ.get('APP_COVERAGE'):
import coverage
COV = coverage.coverage(branch=True, include='app/*')
COV.start()
@app.shell_context_processor
def make_shell_context():
return dict(app=app, db=db, User=User)
@app.cli.command()
def initdb():
"""Initialize the database."""
click.echo('Init the db')
from flask_migrate import upgrade
# migrate database to latest revision
upgrade()
@app.cli.command()
@click.option('--coverage/--no-coverage', default=False,
              help='Run tests under code coverage.')
def test(coverage):
    """Run the unit tests."""
    if coverage and not os.environ.get('APP_COVERAGE'):
        import sys
        os.environ['APP_COVERAGE'] = '1'
        os.execvp(sys.executable, [sys.executable] + sys.argv)
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if COV:
COV.stop()
COV.save()
print("Coverage Summary:")
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'temp/coverage')
COV.html_report(directory=covdir)
        print('HTML version: file://{covdir}/index.html'.format(covdir=covdir))
COV.erase()
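# Typical invocations (assuming the FLASK_APP environment variable points at this module):
#   flask initdb
#   flask test --coverage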
|
"""
08-function-calls.py - Using custom algorithms with python function calls.
**EventCall** ::
EventCall(function, *args, occurrences=inf, stopEventsWhenDone=True)
EventCall calls a function with any number of arguments (\*args) and uses
its return value for the given parameter. The example below uses a function
from the random module, *randrange*, called with arguments, and a user-defined
function, called without arguments, to create a rising, then falling, amplitude curve.
"""
import random
from pyo import *
s = Server().boot()
db = -30
dir = 1
def riseFallAmp():
"Rises and falls amplitude between -30 and -3 dB, 1 db at the time."
global db, dir
db += dir
if db >= -3:
dir = -1
elif db < -30:
dir = 1
return db
# Midi notes are chosen randomly with a function from the random module,
# while the amplitude changes according to the riseFallAmp function's output.
e = Events(
midinote=EventCall(random.randrange, 48, 72, 3),
beat=1 / 4.0,
db=EventCall(riseFallAmp),
attack=0.001,
decay=0.05,
sustain=0.5,
release=0.005,
).play()
s.gui(locals())
|
""" Read/write JSON formatted data """
import os
import json
def read_test(file_path):
"""
Read data from a file having JSON formatted data.
"""
if os.path.exists(file_path):
with open(file_path, "r") as file:
json_data = json.load(file)
print('Value of key_1: ', json_data['key_1'])
print('Value of key_2: ', json_data['key_2'])
print('Value of key_3: ', json_data['key_3'])
def write_test(file_path):
"""
Write data into a file in JSON format.
"""
data = {
"key_1": "key_1_value",
"key_2": "key_2_value",
"key_3" : {
"abc" : "abc_value",
"xyz" : "xyv_value"
}
}
with open(file_path, "w") as file:
json.dump(data, file, indent=4, sort_keys=False)
# Execute tests
write_test("json_test.json")
read_test("json_test.json")
|
from enum import Enum
from typing import Any, Dict, Final, List, Optional, Sequence, Set, Union
import torch
from numpy import typing as nptyping
from embeddings.evaluator.metrics_evaluator import MetricsEvaluator
from embeddings.metric.hugging_face_metric import HuggingFaceMetric
from embeddings.metric.metric import Metric
from embeddings.metric.unit_seqeval_metric import UnitSeqevalMetric
class SequenceLabelingEvaluator(MetricsEvaluator):
class EvaluationMode(str, Enum):
UNIT = "unit"
CONLL = "conll"
STRICT = "strict"
class TaggingScheme(str, Enum):
IOB1 = "IOB1"
IOB2 = "IOB2"
IOE1 = "IOE1"
IOE2 = "IOE2"
IOBES = "IOBES"
BILOU = "BILOU"
SEQEVAL_EVALUATION_MODES: Final[Set[str]] = {EvaluationMode.CONLL, EvaluationMode.STRICT}
def __init__(
self,
evaluation_mode: EvaluationMode = EvaluationMode.CONLL,
tagging_scheme: Optional[TaggingScheme] = None,
) -> None:
super().__init__()
self.metric = self._get_metric(evaluation_mode, tagging_scheme)
def _get_metric(
self,
evaluation_mode: EvaluationMode,
tagging_scheme: Optional[TaggingScheme] = None,
) -> Union[HuggingFaceMetric, UnitSeqevalMetric]:
if evaluation_mode in SequenceLabelingEvaluator.SEQEVAL_EVALUATION_MODES:
if evaluation_mode == "strict" and not tagging_scheme:
raise ValueError("Tagging scheme must be set, when using strict evaluation mode!")
elif evaluation_mode == "conll" and tagging_scheme:
raise ValueError("Tagging scheme can be set only in strict mode!")
return HuggingFaceMetric(
name="seqeval",
compute_kwargs={
"mode": evaluation_mode if evaluation_mode == "strict" else None,
"scheme": tagging_scheme,
},
)
elif evaluation_mode == "unit":
return UnitSeqevalMetric()
else:
raise ValueError(
f"Evaluation mode {evaluation_mode} not supported. Must be one of "
f"[unit, conll, strict]."
)
@property
def metrics(
self,
) -> Sequence[Metric[Union[List[Any], nptyping.NDArray[Any], torch.Tensor], Dict[Any, Any]]]:
return [self.metric]
EvaluationMode = SequenceLabelingEvaluator.EvaluationMode
TaggingScheme = SequenceLabelingEvaluator.TaggingScheme
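# A minimal usage sketch (illustrative, not executed here): strict evaluation
# requires a tagging scheme, while the default CoNLL mode must be used without one.
#   strict_evaluator = SequenceLabelingEvaluator(
#       evaluation_mode=EvaluationMode.STRICT,
#       tagging_scheme=TaggingScheme.IOB2,
#   )
#   conll_evaluator = SequenceLabelingEvaluator()  # defaults to EvaluationMode.CONLL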
|
# -*- coding: utf-8 -*-
"""Validation machine file IPMSM
TOYOTA Prius 2004 interior magnet (V shape) with distributed winding
50 kW peak, 400 Nm peak at 1500 rpm from publication
Z. Yang, M. Krishnamurthy and I. P. Brown,
"Electromagnetic and vibrational characteristic of IPM over full torque-speed range,"
Electric Machines & Drives Conference (IEMDC), 2013 IEEE International, Chicago, IL, 2013, pp. 295-302.
"""
from numpy import pi
from ....Classes.MachineIPMSM import MachineIPMSM
from ....Classes.LamSlotWind import LamSlotWind
from ....Classes.SlotW11 import SlotW11
from ....Classes.WindingDW1L import WindingDW1L
from ....Classes.CondType11 import CondType11
from ....Classes.LamHole import LamHole
from ....Classes.HoleM50 import HoleM50
from ....Classes.Frame import Frame
from ....Classes.Shaft import Shaft
from ....Classes.ImportMatrixXls import ImportMatrixXls
from ....Classes.Material import Material
from ....Tests.Validation.Material.M400_50A import M400_50A
from ....Tests.Validation.Material.Magnet_prius import Magnet_prius
from ....Tests.Validation.Material.Copper1 import Copper1
# Stator setup
stator = LamSlotWind(
Rint=80.95e-3,
Rext=134.62e-3,
Nrvd=0,
L1=0.08382,
Kf1=0.95,
is_internal=False,
is_stator=True,
)
stator.slot = None
stator.winding = None
# Rotor setup
rotor = LamHole(
Rext=80.2e-3,
Rint=55.32e-3,
L1=0.08382,
Kf1=0.95,
is_internal=True,
is_stator=False,
Nrvd=0,
)
rotor.hole = [
HoleM50(
Zh=8,
H0=0.01096,
H1=0.0015,
H2=0.001,
H3=0.0065,
H4=0,
W0=0.042,
W1=0,
W2=0,
W3=0.014,
W4=0.0189,
)
]
rotor.hole[0].magnet_0.type_magnetization = 1
rotor.hole[0].magnet_1.type_magnetization = 1
shaft = Shaft(Lshaft=0.1, Drsh=0.11064)
frame = None
# Set Materials
stator.mat_type = M400_50A
rotor.mat_type = M400_50A
# stator.winding.conductor.cond_mat = Copper1
rotor.hole[0].magnet_0.mat_type = Magnet_prius
rotor.hole[0].magnet_1.mat_type = Magnet_prius
CEFC_Lam = MachineIPMSM(
name="CEFC_Lam",
desc="Slotless machine from CEFC publication",
stator=stator,
rotor=rotor,
shaft=shaft,
frame=frame,
)
|
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from rlgraph import get_backend
from rlgraph.components.component import Component
from rlgraph.utils.decorators import rlgraph_api
from rlgraph.utils.util import SMALL_NUMBER
if get_backend() == "tf":
import tensorflow as tf
class SoftMax(Component):
"""
A simple softmax component that translates logits into probabilities (and log-probabilities).
API:
call(logits) -> returns probabilities (softmaxed) and log-probabilities.
"""
def __init__(self, scope="softmax", **kwargs):
super(SoftMax, self).__init__(scope=scope, **kwargs)
@rlgraph_api(must_be_complete=False)
def _graph_fn_get_probabilities_and_log_probs(self, logits):
"""
        Computes probabilities and log-probabilities from the (already reshaped) logits.
Args:
logits (SingleDataOp): The (already reshaped) logits.
Returns:
tuple (2x SingleDataOp):
probabilities (DataOp): The probabilities after softmaxing the logits.
log_probs (DataOp): Simply the log(probabilities).
"""
if get_backend() == "tf":
            # Translate logits into probabilities in a safe way (SMALL_NUMBER trick).
probabilities = tf.maximum(x=tf.nn.softmax(logits=logits, axis=-1), y=SMALL_NUMBER)
# Log probs.
log_probs = tf.log(x=probabilities)
return probabilities, log_probs
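# Illustrative sketch (plain NumPy, not part of the RLgraph API): the same
# SMALL_NUMBER clipping trick as above, written outside the component so the
# numerics are easy to inspect.
def _numpy_softmax_with_log(logits):
    import numpy as np
    exps = np.exp(logits - np.max(logits, axis=-1, keepdims=True))
    probabilities = np.maximum(exps / np.sum(exps, axis=-1, keepdims=True), SMALL_NUMBER)
    return probabilities, np.log(probabilities)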
|
pri=[{'Bachelor of Science in Mathematics<br>\n(General Mathematics Option)': [{'': []}, {'Required Subjects': [{'Departmental Program': '18.03', 'type': 'required'}]}, {'Restricted Electives': [{'Departmental Program': 'Select eight 12-unit subjects of essentially different content, including at least six advanced subjects (first decimal digit one or higher) that are distributed over at least three distinct areas (at least three distinct first decimal digits). One of these eight subjects must be one of the following:', 'type': 'required'}, {'Departmental Program': '18.06', 'type': 'required'}, {'Departmental Program': '18.700', 'type': 'required'}, {'Departmental Program': '18.701', 'type': 'required'}]}, {'Units in Major': [{'Departmental Program': 'Unrestricted Electives', 'type': 'required'}, {'Departmental Program': 'Units in Major That Also Satisfy the GIRs', 'type': 'required'}, {'Departmental Program': 'Total Units Beyond the GIRs Required for SB Degree', 'type': 'required'}]}, {'Units in Major': [{'Departmental Program': 'To satisfy the requirement that students take two CI-M subjects, students must select one of the following options:', 'type': 'required'}]}, {'Option A': [{'Departmental Program': 'Select two of the following:', 'type': 'selection'}, {'Departmental Program': '18.104', 'type': 'required'}, {'Departmental Program': '18.204', 'type': 'required'}, {'Departmental Program': '18.384', 'type': 'required'}, {'Departmental Program': '18.424', 'type': 'required'}, {'Departmental Program': '18.434', 'type': 'required'}, {'Departmental Program': '18.504', 'type': 'required'}, {'Departmental Program': '18.704', 'type': 'required'}, {'Departmental Program': '18.784', 'type': 'required'}, {'Departmental Program': '18.821', 'type': 'required'}, {'Departmental Program': '18.904', 'type': 'required'}, {'Departmental Program': '18.994', 'type': 'required'}]}, {'Option B': [{'Departmental Program': 'Select one subject from Option A and one of the following:', 'type': 'selection'}, {'Departmental Program': '8.06', 'type': 'required'}, {'Departmental Program': '14.33', 'type': 'required'}, {'Departmental Program': '18.100P', 'type': 'required'}, {'Departmental Program': '18.100Q', 'type': 'required'}, {'Departmental Program': '18.200', 'type': 'required'}]}]}, {'Bachelor of Science in Mathematics<br>\n(Applied Mathematics Option)': [{'': []}, {'Required Subjects': [{'Departmental Program': '18.03', 'type': 'required'}, {'Departmental Program': '18.04', 'type': 'required'}, {'Departmental Program': 'or\xa018.112', 'type': 'required'}, {'Departmental Program': '18.06', 'type': 'required'}, {'Departmental Program': '18.300', 'type': 'required'}, {'Departmental Program': 'Select one of the following:', 'type': 'selection'}, {'Departmental Program': '18.200', 'type': 'required'}, {'Departmental Program': '18.200A', 'type': 'required'}]}, {'Restricted Electives': [{'Departmental Program': 'Select four additional 12-unit Course 18 subjects from the following two groups with at least one subject from each group: 3', 'type': 'required'}, {'Departmental Program': 'Group I—Probability and statistics, combinatorics, computer science', 'type': 'required'}, {'Departmental Program': 'Group II—Numerical analysis, physical mathematics, nonlinear dynamics', 'type': 'required'}]}, {'Units in Major': [{'Departmental Program': 'Unrestricted Electives', 'type': 'required'}, {'Departmental Program': 'Units in Major That Also Satisfy the GIRs', 'type': 'required'}, {'Departmental Program': 'Total Units Beyond 
the GIRs Required for SB Degree', 'type': 'required'}]}, {'Units in Major': [{'Departmental Program': 'To satisfy the requirement that students take two CI-M subjects, students must select one of the following options:', 'type': 'required'}]}, {'Option A': [{'Departmental Program': 'Select two of the following:', 'type': 'selection'}, {'Departmental Program': '18.104', 'type': 'required'}, {'Departmental Program': '18.204', 'type': 'required'}, {'Departmental Program': '18.384', 'type': 'required'}, {'Departmental Program': '18.424', 'type': 'required'}, {'Departmental Program': '18.434', 'type': 'required'}, {'Departmental Program': '18.504', 'type': 'required'}, {'Departmental Program': '18.704', 'type': 'required'}, {'Departmental Program': '18.784', 'type': 'required'}, {'Departmental Program': '18.821', 'type': 'required'}, {'Departmental Program': '18.904', 'type': 'required'}, {'Departmental Program': '18.994', 'type': 'required'}]}, {'Option B': [{'Departmental Program': 'Select one subject from Option A and one of the following:', 'type': 'selection'}, {'Departmental Program': '8.06', 'type': 'required'}, {'Departmental Program': '14.33', 'type': 'required'}, {'Departmental Program': '18.100P', 'type': 'required'}, {'Departmental Program': '18.100Q', 'type': 'required'}, {'Departmental Program': '18.200', 'type': 'required'}]}]}, {'Bachelor of Science in Mathematics<br>\n(Pure Mathematics Option)': [{'': []}, {'Required Subjects': [{'Departmental Program': '18.03', 'type': 'required'}, {'Departmental Program': '18.100B', 'type': 'required'}, {'Departmental Program': '18.701', 'type': 'required'}, {'Departmental Program': '18.702', 'type': 'required'}, {'Departmental Program': '18.901', 'type': 'required'}]}, {'Restricted Electives': [{'Departmental Program': 'Select one of the following:', 'type': 'selection'}, {'Departmental Program': '18.101', 'type': 'required'}, {'Departmental Program': '18.102', 'type': 'required'}, {'Departmental Program': '18.103', 'type': 'required'}, {'Departmental Program': 'Select one undergraduate seminar from the following:', 'type': 'selection'}, {'Departmental Program': '18.104', 'type': 'required'}, {'Departmental Program': '18.504', 'type': 'required'}, {'Departmental Program': '18.704', 'type': 'required'}, {'Departmental Program': '18.784', 'type': 'required'}, {'Departmental Program': '18.904', 'type': 'required'}, {'Departmental Program': '18.994', 'type': 'required'}, {'Departmental Program': 'Select two additional 12-unit Course 18 subjects of essentially different content, with the first decimal digit one or higher', 'type': 'required'}]}, {'Units in Major': [{'Departmental Program': 'Unrestricted Electives', 'type': 'required'}, {'Departmental Program': 'Units in Major That Also Satisfy the GIRs', 'type': 'required'}, {'Departmental Program': 'Total Units Beyond the GIRs Required for SB Degree', 'type': 'required'}]}, {'Units in Major': [{'Departmental Program': 'To satisfy the requirement that students take two CI-M subjects, students must select one of the following options:', 'type': 'required'}]}, {'Option A': [{'Departmental Program': 'Select two of the following:', 'type': 'selection'}, {'Departmental Program': '18.104', 'type': 'required'}, {'Departmental Program': '18.204', 'type': 'required'}, {'Departmental Program': '18.384', 'type': 'required'}, {'Departmental Program': '18.424', 'type': 'required'}, {'Departmental Program': '18.434', 'type': 'required'}, {'Departmental Program': '18.504', 'type': 'required'}, {'Departmental 
Program': '18.704', 'type': 'required'}, {'Departmental Program': '18.784', 'type': 'required'}, {'Departmental Program': '18.821', 'type': 'required'}, {'Departmental Program': '18.904', 'type': 'required'}, {'Departmental Program': '18.994', 'type': 'required'}]}, {'Option B': [{'Departmental Program': 'Select one subject from Option A and one of the following:', 'type': 'selection'}, {'Departmental Program': '8.06', 'type': 'required'}, {'Departmental Program': '14.33', 'type': 'required'}, {'Departmental Program': '18.100P', 'type': 'required'}, {'Departmental Program': '18.100Q', 'type': 'required'}, {'Departmental Program': '18.200', 'type': 'required'}]}]}]
print(pri[0]["""Bachelor of Science in Mathematics<br>\n(General Mathematics Option)"""][3])
|