# -*- coding: utf-8 -*-
"""
A simple example, have fun!
"""
__title__ = 'pgrsearch'
__author__ = 'Ex_treme'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018, Ex_treme'
from TEDT import TEDT
urls = [
'http://www.cankaoxiaoxi.com/china/20170630/2158196.shtml', # Cankao Xiaoxi (Reference News)
'http://news.ifeng.com/a/20180121/55332303_0.shtml', # ifeng News
'http://china.huanqiu.com/article/2018-01/11541273.html', # Huanqiu (Global Times Online)
'http://news.china.com/socialgd/10000169/20180122/31990621.html', # China.com
'http://www.thepaper.cn/newsDetail_forward_1962275', # The Paper (thepaper.cn)
# 'http://news.szu.edu.cn/info/1003/4989.htm', # Shenzhen University news site
'http://www16.zzu.edu.cn/msgs/vmsgisapi.dll/onemsg?msgid=1712291126498126051', # Zhengzhou University news site
'http://news.ruc.edu.cn/archives/194824', # Renmin University of China news site
'http://xinwen.ouc.edu.cn/Article/Class3/xwlb/2018/01/22/82384.html', # Ocean University of China news site
'http://news.sjtu.edu.cn/info/1002/1645201.htm', # Shanghai Jiao Tong University news site
]
for url in urls:
t = TEDT(url, LOG_LEVEL='INFO',)
t.ie()
|
try:
from mushroom_rl.environments.dm_control_env import DMControl
import numpy as np
def test_dm_control():
np.random.seed(1)
mdp = DMControl('hopper', 'hop', 1000, .99, task_kwargs={'random': 1})
mdp.reset()
for i in range(10):
ns, r, ab, _ = mdp.step(
np.random.rand(mdp.info.action_space.shape[0]))
ns_test = np.array([-0.26244546, -2.33917271, 0.50130095, -0.50937527,
0.55561752, -0.21111919, -0.55516933, -2.03929087,
-18.22893801, 5.89523326, 22.07483625, -2.21756007,
3.95695223, 0., 0.])
assert np.allclose(ns, ns_test)
except ImportError:
pass
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from rally.plugins.openstack.context.designate import zones
from tests.unit import test
CTX = "rally.plugins.openstack.context"
SCN = "rally.plugins.openstack.scenarios"
class ZoneGeneratorTestCase(test.ScenarioTestCase):
def _gen_tenants(self, count):
tenants = {}
for id_ in range(count):
tenants[str(id_)] = {"name": str(id_)}
return tenants
def test_init(self):
self.context.update({
"config": {
"zones": {
"zones_per_tenant": 5,
}
}
})
inst = zones.ZoneGenerator(self.context)
self.assertEqual(inst.config, self.context["config"]["zones"])
@mock.patch("%s.designate.utils.DesignateScenario._create_zone" % SCN,
return_value={"id": "uuid"})
def test_setup(self, mock_designate_scenario__create_zone):
tenants_count = 2
users_per_tenant = 5
zones_per_tenant = 5
tenants = self._gen_tenants(tenants_count)
users = []
for id_ in tenants.keys():
for i in range(users_per_tenant):
users.append({"id": i, "tenant_id": id_,
"credential": mock.MagicMock()})
self.context.update({
"config": {
"users": {
"tenants": 2,
"users_per_tenant": 5,
"concurrent": 10,
},
"zones": {
"zones_per_tenant": zones_per_tenant,
}
},
"admin": {
"credential": mock.MagicMock()
},
"users": users,
"tenants": tenants
})
new_context = copy.deepcopy(self.context)
for id_ in tenants.keys():
new_context["tenants"][id_].setdefault("zones", [])
for i in range(zones_per_tenant):
new_context["tenants"][id_]["zones"].append({"id": "uuid"})
zones_ctx = zones.ZoneGenerator(self.context)
zones_ctx.setup()
self.assertEqual(new_context, self.context)
@mock.patch("%s.designate.zones.resource_manager.cleanup" % CTX)
def test_cleanup(self, mock_cleanup):
tenants_count = 2
users_per_tenant = 5
zones_per_tenant = 5
tenants = self._gen_tenants(tenants_count)
users = []
for id_ in tenants.keys():
for i in range(users_per_tenant):
users.append({"id": i, "tenant_id": id_,
"endpoint": "endpoint"})
tenants[id_].setdefault("zones", [])
for j in range(zones_per_tenant):
tenants[id_]["zones"].append({"id": "uuid"})
self.context.update({
"config": {
"users": {
"tenants": 2,
"users_per_tenant": 5,
"concurrent": 10,
},
"zones": {
"zones_per_tenant": 5,
}
},
"admin": {
"endpoint": mock.MagicMock()
},
"users": users,
"tenants": tenants
})
zones_ctx = zones.ZoneGenerator(self.context)
zones_ctx.cleanup()
mock_cleanup.assert_called_once_with(names=["designate.zones"],
users=self.context["users"])
|
from django.db import models
from home.info_holder import get_release_type_id
from home.models import WhatTorrent
RELEASE_PRIORITIES = [
(get_release_type_id('Album'), 1000),
(get_release_type_id('EP'), 990),
(get_release_type_id('Soundtrack'), 986),
# (get_release_type_id('Single'), 985),
(get_release_type_id('Live album'), 980),
]
def get_priority(release_type):
for priority in RELEASE_PRIORITIES:
if priority[0] == int(release_type):
return priority[1]
return None
def filter_group(artist_name, group):
if get_priority(group['releaseType']) is None:
return False
if not group['artists']:
return False
if not any(a['name'] == artist_name for a in group['artists']):
return False
return True
def filter_torrent(group, torrent):
if torrent['format'].lower() != 'flac':
return False
if torrent['media'].lower() not in ['cd', 'web']:
return False
return get_priority(group['releaseType'])
def is_existing(what_id):
try:
QueueItem.objects.get(what_id=what_id)
return True
except QueueItem.DoesNotExist:
return WhatTorrent.is_downloaded(None, what_id=what_id)
class QueueItem(models.Model):
class Meta:
permissions = (
('view_queueitem', 'Can view the queue.'),
)
datetime_added = models.DateTimeField(auto_now_add=True)
what_id = models.IntegerField()
priority = models.IntegerField()
artist = models.TextField()
title = models.TextField()
release_type = models.IntegerField()
format = models.TextField()
encoding = models.TextField()
torrent_size = models.BigIntegerField()
@classmethod
def get_front(cls):
items = QueueItem.objects.order_by('-priority', 'datetime_added')[:1]
if len(items):
return items[0]
return None
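# Usage sketch (assumes the queue app's models are migrated and importable):
# QueueItem.get_front() returns the pending item with the highest priority,
# breaking ties by the earliest datetime_added, or None when the queue is empty.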
|
from .Bible import Book, Verse
from .Bible_Parser_Base import BibleParserBase
import xml.etree.ElementTree as ET
import os.path
class BibleParserXML(BibleParserBase):
name = "XML"
fileEndings = ["xml"]
def __init__(self, file_name):
BibleParserBase.__init__(self, file_name)
def loadInfo(self):
self.bible.translationAbbreviation = os.path.basename(self.file_name)
self.bible.translationName = os.path.basename(self.file_name)
def loadAll(self):
tree = ET.ElementTree(file=self.file_name)
books = tree.getroot()
for book in books:
b_number = int(book.attrib["bnumber"])
self.bible.append(Book(book.attrib["bname"], b_number))
for chapter in book:
ch_number = int(chapter.attrib["cnumber"])
for verse in chapter:
v_number = int(verse.attrib["vnumber"])
self.bible.addVerse(
Verse(b_number, ch_number, v_number, verse.text))
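# Minimal sketch of the XML layout loadAll() walks. The attribute names come from the
# code above; the element tag names shown here are illustrative only (the parser just
# iterates children and reads attributes, so tag names are never checked):
# <XMLBIBLE>
#   <BIBLEBOOK bnumber="1" bname="Genesis">
#     <CHAPTER cnumber="1">
#       <VERS vnumber="1">In the beginning ...</VERS>
#     </CHAPTER>
#   </BIBLEBOOK>
# </XMLBIBLE>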
|
num=float(input("Enter a number:"))
if num<0: print("\nNumber entered is negative.")
elif num>0: print("\nNumber entered is positive.")
else: print("\nNumber entered is zero.")
|
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
import os, random
from jenkinsflow import jobload
from .framework import api_select
here = os.path.abspath(os.path.dirname(__file__))
_context = dict(
exec_time=1,
params=(),
script=None,
securitytoken='abc',
print_env=False,
create_job=None,
num_builds_to_keep=4,
final_result_use_cli=False,
set_build_descriptions=()
)
def _random_job_name(api, short_name=None):
# If short_name is not specified, use a random name to make sure the job doesn't exist
short_name = short_name or str(random.random()).replace('.', '')
return api.job_name_prefix + short_name, short_name
def _assert_job(api, job_name, cleanup=False):
job = api.get_job(job_name)
assert job is not None
assert job.name == job_name
assert job.public_uri is not None and job_name in job.public_uri
if cleanup:
api.delete_job(job_name)
return None
return job
def test_job_load_new_no_pre_delete(api_type):
api = api_select.api(__file__, api_type, login=True)
full_name, short_name = _random_job_name(api)
api.job(short_name, 1, 1, 1, exec_time=1, non_existing=True)
jobload.update_job_from_template(api, full_name, api.job_xml_template, pre_delete=False, context=_context)
_assert_job(api, full_name, cleanup=True)
def test_job_load_new_pre_delete(api_type):
api = api_select.api(__file__, api_type, login=True)
full_name, short_name = _random_job_name(api)
api.job(short_name, 1, 1, 1, exec_time=1, non_existing=True)
jobload.update_job_from_template(api, full_name, api.job_xml_template, pre_delete=True, context=_context)
_assert_job(api, full_name, cleanup=True)
def test_job_load_existing_pre_delete(api_type):
api = api_select.api(__file__, api_type, login=True)
full_name, short_name = _random_job_name(api)
api.job(short_name, 1, 1, 1, exec_time=1)
jobload.update_job_from_template(api, full_name, api.job_xml_template, pre_delete=True, context=_context)
_assert_job(api, full_name, cleanup=False)
jobload.update_job_from_template(api, full_name, api.job_xml_template, pre_delete=True, context=_context)
_assert_job(api, full_name, cleanup=True)
def test_job_load__existing_update(api_type):
api = api_select.api(__file__, api_type, login=True)
full_name, short_name = _random_job_name(api)
api.job(short_name, 1, 1, 1, exec_time=1)
jobload.update_job_from_template(api, full_name, api.job_xml_template, pre_delete=True, context=_context)
_assert_job(api, full_name, cleanup=False)
jobload.update_job_from_template(api, full_name, api.job_xml_template, pre_delete=False, context=_context)
_assert_job(api, full_name, cleanup=True)
def test_job_load_non_existing_pre_delete(api_type):
api = api_select.api(__file__, api_type, login=True)
full_name, short_name = _random_job_name(api)
api.job(short_name, 1, 1, 1, exec_time=1, non_existing=True)
jobload.update_job_from_template(api, full_name, api.job_xml_template, pre_delete=True, context=_context)
|
from datetime import datetime
from app import database as db
class ChatroomMessages(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
chatroomID = db.Column(db.Integer)
message = db.Column(db.String(10050), nullable = False)
sentUserID = db.Column(db.Integer, nullable=False)
timestamp = db.Column(db.DateTime, nullable=False)
def addChatroomMessage(cchatroomID, cmessage, csentUserID, ctimestamp):
cr_message = ChatroomMessages(chatroomID = cchatroomID, message = cmessage, sentUserID = csentUserID, timestamp = ctimestamp)
db.session.add(cr_message)
db.session.commit()
return True
def getChatroomMessages(cid):
return ChatroomMessages.query.filter_by(chatroomID=cid)
db.create_all() #Call this before doing any database stuff
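# Usage sketch (hypothetical values; assumes a Flask application context and a
# configured `database` instance from `app`):
# addChatroomMessage(1, "hello", 42, datetime.utcnow())
# for m in getChatroomMessages(1).order_by(ChatroomMessages.timestamp):
#     print(m.sentUserID, m.message)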
|
from pathlib import Path
import nibabel as nib
from skimage.transform import resize
from PIL import ImageOps
import tensorflow as tf
import numpy as np
from scipy import ndimage
class TensorflowDatasetLoader:
def __init__(self, idxs, config):
depth = config['depth']
height = config['height']
width = config['width']
batch_size = config['batch_size']
lowest = config['lowest']
last = config['last']
use_label = config['use_atlas']
self.label_classes = config['label_classes']
self.use_affine = config['use_affine']
self.use_def = config['use_def']
self.idxs = idxs
self.crop_img = False
self.input_dim = (depth,height,width)
output_shape = []
output_types = []
for i in range(1, lowest - last + 2):
l = lowest + 1 - i
x_dim = depth // (2**l)
y_dim = height // (2**l)
z_dim = width // (2**l)
if self.use_affine:
output_shape.append((x_dim, y_dim, z_dim,3)) # affine
output_types.append(tf.float32)
if self.use_def:
output_shape.append((x_dim, y_dim, z_dim,3)) # deformable
output_types.append(tf.float32)
output_shape.append((x_dim, y_dim, z_dim,2)) # image sim placeholder
output_types.append(tf.float32)
if self.use_affine:
output_shape.append((*self.input_dim,3)) # affine
output_types.append(tf.float32)
if self.use_def:
output_shape.append((*self.input_dim,3)) # deformable
output_types.append(tf.float32)
output_shape.append((*self.input_dim,1)) # image sim
output_types.append(tf.float32)
input_types = [tf.float32, tf.float32]
input_shape = [(*self.input_dim,1),(*self.input_dim,1)]
if use_label:
output_shape.append((*self.input_dim, self.label_classes)) # seg
output_types.append(tf.float32)
input_types.append(tf.float32)
input_shape.append((*self.input_dim, self.label_classes))
input_data = tf.data.Dataset.from_generator(
self.generator(self.input_dim, lowest, last, idxs, True),
tuple(input_types),
tuple(input_shape))
output_data = tf.data.Dataset.from_generator(
self.generator(self.input_dim, lowest, last, idxs, False),
tuple(output_types),
output_shapes = tuple(output_shape))
dataset = tf.data.Dataset.zip((input_data, output_data))
dataset = dataset.batch(batch_size)
dataset = dataset.shuffle(buffer_size=50, reshuffle_each_iteration=True)
dataset = dataset.repeat()
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
self.dataset = dataset
def _get_org_data(self, idx):
f_f, fl_f, f_r, f_r_ax, m_f, ml_f, m_r, m_r_ax, flip_axes, crop_img, task = TensorflowDatasetLoader.parse_idx(idx)
fixed = nib.load(f_f).get_fdata()
moving = nib.load(m_f).get_fdata()
if fl_f is not None and ml_f is not None:
fixed_label = nib.load(fl_f).get_fdata().astype('int')
#fixed_label = tf.one_hot(fixed_label, np.max(fixed_label))
moving_label = nib.load(ml_f).get_fdata().astype('int')
#moving_label = tf.one_hot(moving_label, np.max(moving_label))
else:
fixed_label, moving_label = None, None
return [fixed, moving], [fixed_label, moving_label]
def _get_train_samples(self, idx):
use_atlas = True
f_f, fl_f, f_r, f_r_ax, m_f, ml_f, m_r, m_r_ax, flip_axes, crop_img, task = TensorflowDatasetLoader.parse_idx(idx)
fixed = nib.load(f_f).get_fdata()
moving = nib.load(m_f).get_fdata()
if crop_img is not None:
fixed = TensorflowDatasetLoader.crop(fixed, crop_img)
moving = TensorflowDatasetLoader.crop(moving, crop_img)
fixed = TensorflowDatasetLoader.img_augmentation(fixed, f_r, f_r_ax, flip_axes)
moving = TensorflowDatasetLoader.img_augmentation(moving, m_r, m_r_ax, flip_axes)
if task in [2,3]:
fixed, moving = TensorflowDatasetLoader.normalize(fixed, moving)
else:
fixed = TensorflowDatasetLoader.normalize(fixed)
moving = TensorflowDatasetLoader.normalize(moving)
fixed = resize(fixed, self.input_dim, preserve_range=True, mode='constant')[...,None].astype('float32')
moving = resize(moving, self.input_dim, preserve_range=True, mode='constant')[...,None].astype('float32')
assert not np.any(np.isnan(fixed)), f_f
assert not np.any(np.isnan(moving)), m_f
if fl_f is not None and ml_f is not None:
fixed_label = self._get_label(fl_f, crop_img, f_r, f_r_ax, flip_axes)
moving_label = self._get_label(ml_f, crop_img, m_r, m_r_ax, flip_axes)
else:
use_atlas = False
fixed_label, moving_label = None, None
images = [fixed, moving]
if use_atlas:
labels = [fixed_label, moving_label]
out = images, labels
else:
out = images, None
return out
@staticmethod
def get_one_hot(targets, nb_classes):
res = np.eye(nb_classes)[np.array(targets).reshape(-1)]
return res.reshape(list(targets.shape)+[nb_classes])
def _get_label(self, file, crop_img, rot_angle, rot_ax, flip_axes):
label = nib.load(file).get_fdata().astype('int')
if crop_img is not None:
label = TensorflowDatasetLoader.crop(label, crop_img)
label = TensorflowDatasetLoader.img_augmentation(label, rot_angle, rot_ax, flip_axes)
assert not np.any(np.isnan(label)), file
label = TensorflowDatasetLoader.get_one_hot(label, self.label_classes)
label = np.round(resize(label, self.input_dim, preserve_range=True, mode='constant')).astype('float32')
return label
def _get_input_samples(self, idx):
use_atlas = True
f_f, _, f_r, f_r_ax, m_f, ml_f, m_r, m_r_ax, flip_axes, crop_img, task = TensorflowDatasetLoader.parse_idx(idx)
fixed = nib.load(f_f).get_fdata()
moving = nib.load(m_f).get_fdata()
if crop_img is not None:
fixed = TensorflowDatasetLoader.crop(fixed, crop_img)
moving = TensorflowDatasetLoader.crop(moving, crop_img)
fixed = TensorflowDatasetLoader.img_augmentation(fixed, f_r, f_r_ax, flip_axes)
moving = TensorflowDatasetLoader.img_augmentation(moving, m_r, m_r_ax, flip_axes)
if task in [2,3]:
fixed, moving = TensorflowDatasetLoader.normalize(fixed, moving)
else:
fixed = TensorflowDatasetLoader.normalize(fixed)
moving = TensorflowDatasetLoader.normalize(moving)
fixed = resize(fixed, self.input_dim, preserve_range=True, mode='constant')[...,None].astype('float32')
moving = resize(moving, self.input_dim, preserve_range=True, mode='constant')[...,None].astype('float32')
assert not np.any(np.isnan(fixed)), f_f
assert not np.any(np.isnan(moving)), m_f
if ml_f is not None:
moving_label = self._get_label(ml_f, crop_img, m_r, m_r_ax, flip_axes)
else:
use_atlas = False
moving_label = None
images = [fixed, moving]
if use_atlas:
out = images, moving_label
else:
out = images, None
return out
def _get_output_samples(self, idx):
use_atlas = True
f_f, fl_f, f_r, f_r_ax, _, _, _, _, flip_axes, crop_img, task = TensorflowDatasetLoader.parse_idx(idx)
fixed = nib.load(f_f).get_fdata()
if crop_img is not None:
fixed = TensorflowDatasetLoader.crop(fixed, crop_img)
fixed = TensorflowDatasetLoader.img_augmentation(fixed, f_r, f_r_ax, flip_axes)
fixed = TensorflowDatasetLoader.normalize(fixed)
fixed = resize(fixed, self.input_dim, preserve_range=True, mode='constant')[...,None].astype('float32')
assert not np.any(np.isnan(fixed)), f_f
if fl_f is not None:
fixed_label = self._get_label(fl_f, crop_img, f_r, f_r_ax, flip_axes)
else:
use_atlas = False
fixed_label = None
if use_atlas:
out = fixed, fixed_label
else:
out = fixed, None
return out
def generator(self, input_dim, lowest, last, idxs, use_input = True):
def gen():
for i in range(len(idxs)):
if use_input:
images, moving_label = self._get_input_samples(idxs[i])
inputs = [images[0], images[1]]
if moving_label is not None:
inputs.append(moving_label)
yield tuple(inputs)
else:
fixed, fixed_label = self._get_output_samples(idxs[i])
out = []
zeros = np.zeros((*input_dim, 3), dtype='float32')
l = 0
for i in range(1, lowest - last + 2):
l = lowest + 1 - i
out_flow = resize(zeros[...,0], tuple([x//(2**l) for x in input_dim]), mode='constant')[...,None]
out_flow1 = np.repeat(out_flow, 3, axis=-1)
out_flow2 = np.repeat(out_flow, 2, axis=-1)
if self.use_affine:
out.append(out_flow1) # Affine
if self.use_def:
out.append(out_flow1) # Deformable
#DUMMY
out.append(out_flow2) # Placeholder for images sim
if self.use_affine:
out.append(zeros) # Affine
if self.use_def:
out.append(zeros) # Deformable
out.append(fixed) # Image sim
if fixed_label is not None:
out.append(fixed_label) # Seg
yield tuple(out)
return gen
@staticmethod
def parse_idx(idx):
fixed = idx[0][0]
fixed_label = idx[0][1]
fixed_rot = idx[0][2][0]
fixed_rot_ax = idx[0][2][1]
moving = idx[1][0]
moving_label = idx[1][1]
moving_rot = idx[1][2][0]
moving_rot_ax = idx[1][2][1]
flip_axes = idx[2]
crop_img = idx[3]
task = idx[4]
return fixed, fixed_label, fixed_rot, fixed_rot_ax, moving, moving_label, moving_rot, moving_rot_ax, flip_axes, crop_img, task
@staticmethod
def img_augmentation(img, rot_angle, rot_ax, flip_axes):
if flip_axes > -1:
img = np.flip(img, flip_axes)
if rot_angle != 0:
img = ndimage.rotate(img, rot_angle, axes=(rot_ax[0], rot_ax[1]), mode='nearest', reshape=False)
return img
@staticmethod
def normalize(img1, img2=None):
img1_max = np.max(img1)
img1_min = np.min(img1)
out_1 = (img1-img1_min)/(img1_max-img1_min)
if img2 is not None:
out_2 = (img2-img1_min)/(img1_max-img1_min)
return out_1, out_2
return out_1
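# crop() below keeps the central (c - 2)/c of every spatial dimension,
# e.g. c=4 keeps the middle half of the volume along each axis.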
@staticmethod
def crop(img, c):
img = img[img.shape[0]//c:(c-1)*img.shape[0]//c,
img.shape[1]//c:(c-1)*img.shape[1]//c,
img.shape[2]//c:(c-1)*img.shape[2]//c]
return img
|
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the relay pyxir frontend"""
import unittest
import numpy as np
# ! To import tvm
import pyxir
try:
import tvm
from tvm import relay
from tvm.relay import testing
skip = False
except Exception as e:
# Skip TVM tests
skip = True
if not skip:
from pyxir.frontend.tvm import relay as xf_relay
class TestRelayFrontend(unittest.TestCase):
@unittest.skipIf(skip, "Could not import TVM and/or TVM frontend")
def test_simple_network(self):
data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
weight = relay.var("weight")
bn_gamma = relay.var("bn_gamma")
bn_beta = relay.var("bn_beta")
bn_mmean = relay.var("bn_mean")
bn_mvar = relay.var("bn_var")
simple_net = relay.nn.pad(data, ((0, 0), (0, 0), (1, 1), (1, 1)))
simple_net = relay.nn.conv2d(
data=simple_net,
weight=weight,
kernel_size=(3, 3),
channels=16,
padding=(0, 0),
)
simple_net = relay.nn.batch_norm(
simple_net, bn_gamma, bn_beta, bn_mmean, bn_mvar
)[0]
simple_net = relay.nn.relu(simple_net)
simple_net = relay.op.reduce.mean(simple_net, axis=(2, 3))
simple_net = relay.op.transform.reshape(simple_net, newshape=(1, 16))
dense_weight = relay.var("dense_weight")
dense_bias = relay.var("dense_bias")
simple_net = relay.nn.dense(simple_net, weight=dense_weight, units=10)
simple_net = relay.nn.bias_add(simple_net, dense_bias, axis=1)
simple_net = relay.nn.softmax(simple_net, axis=1)
simple_net = relay.op.transform.reshape(simple_net, newshape=(1, 10))
func = relay.Function(relay.analysis.free_vars(simple_net), simple_net)
mod, params = testing.create_workload(func)
xgraph = xf_relay.from_relay(mod, params)
layers = xgraph.get_layers()
assert layers[0].type[0] == "Input"
assert layers[1].type[0] == "Pad"
assert layers[2].type[0] == "Convolution"
assert layers[3].type[0] == "BatchNorm"
assert layers[4].type[0] == "ReLU"
assert layers[5].type[0] == "Mean"
assert layers[6].type[0] == "Reshape"
assert layers[7].type[0] == "Dense"
assert layers[8].type[0] == "BiasAdd"
assert layers[9].type[0] == "Softmax"
assert layers[10].type[0] == "Reshape"
@unittest.skipIf(skip, "Could not import TVM and/or TVM frontend")
def test_simple_network_cvx(self):
data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
weight = relay.var("weight")
bn_gamma = relay.var("bn_gamma")
bn_beta = relay.var("bn_beta")
bn_mmean = relay.var("bn_mean")
bn_mvar = relay.var("bn_var")
simple_net = relay.nn.pad(data, ((0, 0), (0, 0), (1, 1), (1, 1)))
simple_net = relay.nn.conv2d(
data=simple_net,
weight=weight,
kernel_size=(3, 3),
channels=16,
padding=(0, 0),
)
simple_net = relay.nn.relu(simple_net)
simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net)
mod, params = testing.create_workload(simple_net)
xgraph = xf_relay.from_relay(
mod, params, cvx_preprocessing={"data": "scale-0.5__transpose-2,0,1"}
)
assert len(xgraph.get_input_names()) == 1
layers = xgraph.get_layers()
# assert layers[0].type[0] == "Constant"
assert layers[0].type[0] == "StrInput"
assert layers[0].shapes == [-1]
assert layers[1].type[0] == "Cvx"
assert layers[1].shapes == [-1, 3, 224, 224]
assert layers[2].type[0] == "Pad"
assert layers[3].type[0] == "Convolution"
assert layers[4].type[0] == "ReLU"
assert layers[0].tops == ["data_cvx"]
assert layers[1].bottoms == ["data"]
assert layers[1].tops[0][:7] == "nn.pad-"
@unittest.skipIf(skip, "Could not import TVM and/or TVM frontend")
def test_conv2d_transpose(self):
data = relay.var("data", relay.TensorType((-1, 1, 3, 3), "float32"))
weight = relay.var("weight")
simple_net = relay.nn.conv2d_transpose(
data=data,
weight=weight,
kernel_size=(2, 2),
channels=1,
padding=(0, 0),
strides=(2, 2),
data_layout="NCHW",
kernel_layout="IOHW",
)
simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net)
mod, params = testing.create_workload(simple_net)
xgraph = xf_relay.from_relay(mod, params)
layers = xgraph.get_layers()
assert layers[0].type[0] == "Input"
assert layers[0].shapes == [-1, 1, 3, 3]
assert layers[1].type[0] == "Conv2DTranspose"
assert layers[1].shapes == [-1, 1, 6, 6]
assert layers[1].sizes == [36]
assert layers[1].attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]]
assert layers[1].attrs["strides"] == [2, 2]
assert layers[1].attrs["dilation"] == [1, 1]
@unittest.skipIf(skip, "Could not import TVM and/or TVM frontend")
def test_resnet_block(self):
data = relay.var("data", relay.TensorType((-1, 3, 224, 224), "float32"))
weight = relay.var("weight")
bn_gamma = relay.var("bn_gamma")
bn_beta = relay.var("bn_beta")
bn_mmean = relay.var("bn_mean")
bn_mvar = relay.var("bn_var")
conv2d0_expr = relay.nn.conv2d(
data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
)
bn0_expr = relay.nn.batch_norm(
conv2d0_expr, bn_gamma, bn_beta, bn_mmean, bn_mvar
)[0]
relu0_expr = relay.nn.relu(bn0_expr)
max_pool0_expr = relay.nn.max_pool2d(
relu0_expr, pool_size=(2, 2), strides=(2, 2)
)
conv2d1_weight = relay.var("conv2d1_weight")
conv2d1_bias = relay.var("conv2d1_bias")
conv2d1_expr = relay.nn.conv2d(
data=max_pool0_expr,
weight=conv2d1_weight,
kernel_size=(3, 3),
channels=16,
padding=(1, 1),
)
bias_add0_expr = relay.nn.bias_add(conv2d1_expr, conv2d1_bias, axis=1)
relu1_expr = relay.nn.relu(bias_add0_expr)
add0_expr = relay.op.tensor.add(max_pool0_expr, relu1_expr)
avg_pool0_expr = relay.nn.avg_pool2d(
add0_expr, pool_size=(2, 2), strides=(2, 2)
)
global_avg_pool0_expr = relay.op.nn.global_avg_pool2d(avg_pool0_expr)
bf_expr = relay.nn.batch_flatten(global_avg_pool0_expr)
net = avg_pool0_expr
net = relay.Function(relay.analysis.free_vars(net), net)
mod, params = testing.create_workload(net)
xgraph = xf_relay.from_relay(mod, params)
layers = xgraph.get_layers()
assert layers[0].type[0] == "Input"
assert layers[1].type[0] == "Convolution"
assert layers[2].type[0] == "BatchNorm"
assert layers[3].type[0] == "ReLU"
assert layers[4].type[0] == "Pooling"
assert layers[5].type[0] == "Convolution"
assert layers[6].type[0] == "BiasAdd"
assert layers[7].type[0] == "ReLU"
assert layers[8].type[0] == "Eltwise"
assert layers[9].type[0] == "Pooling"
assert layers[9].shapes == [-1, 16, 56, 56]
@unittest.skipIf(skip, "Could not import TVM and/or TVM frontend")
def test_inception_block(self):
data = relay.var("data", relay.TensorType((-1, 3, 224, 224), "float32"))
weight = relay.var("weight")
bn_gamma = relay.var("bn_gamma")
bn_beta = relay.var("bn_beta")
bn_mmean = relay.var("bn_mean")
bn_mvar = relay.var("bn_var")
conv2d0_expr = relay.nn.conv2d(
data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
)
bn0_expr = relay.nn.batch_norm(
conv2d0_expr, bn_gamma, bn_beta, bn_mmean, bn_mvar
)[0]
relu0_expr = relay.nn.relu(bn0_expr)
max_pool0_expr = relay.nn.max_pool2d(
relu0_expr, pool_size=(2, 2), strides=(2, 2)
)
conv2d1_weight = relay.var("conv2d1_weight")
conv2d1_bias = relay.var("conv2d1_bias")
conv2d1_expr = relay.nn.conv2d(
data=max_pool0_expr,
weight=conv2d1_weight,
kernel_size=(3, 3),
channels=16,
padding=(1, 1),
strides=(2, 2),
)
bias_add1_expr = relay.nn.bias_add(conv2d1_expr, conv2d1_bias, axis=1)
relu1_expr = relay.nn.relu(bias_add1_expr)
conv2d2_weight = relay.var("conv2d2_weight")
conv2d2_bias = relay.var("conv2d2_bias")
conv2d2_expr = relay.nn.conv2d(
data=max_pool0_expr,
weight=conv2d2_weight,
kernel_size=(3, 3),
channels=16,
padding=(1, 1),
strides=(2, 2),
)
bias_add2_expr = relay.nn.bias_add(conv2d2_expr, conv2d2_bias, axis=1)
relu2_expr = relay.nn.relu(bias_add2_expr)
concat0_expr = relay.op.tensor.concatenate([relu1_expr, relu2_expr], axis=1)
global_max_pool0_expr = relay.op.nn.global_max_pool2d(concat0_expr)
net = global_max_pool0_expr
net = relay.Function(relay.analysis.free_vars(net), net)
mod, params = testing.create_workload(net)
xgraph = xf_relay.from_relay(mod, params)
layers = xgraph.get_layers()
assert layers[0].type[0] == "Input"
assert layers[1].type[0] == "Convolution"
assert layers[2].type[0] == "BatchNorm"
assert layers[3].type[0] == "ReLU"
assert layers[4].type[0] == "Pooling"
assert layers[5].type[0] == "Convolution"
assert layers[6].type[0] == "BiasAdd"
assert layers[7].type[0] == "ReLU"
assert layers[8].type[0] == "Convolution"
assert layers[9].type[0] == "BiasAdd"
assert layers[10].type[0] == "ReLU"
assert layers[11].type[0] == "Concat"
assert layers[12].type[0] == "Pooling"
assert layers[12].shapes == [-1, 32, 1, 1]
|
from tartiflette import Directive
from tartiflette.types.exceptions.tartiflette import SkipExecution
class Skip:
async def on_field_execution(
self, directive_args, next_resolver, parent_result, args, ctx, info
):
if directive_args["if"]:
raise SkipExecution()
return await next_resolver(parent_result, args, ctx, info)
def bake(schema_name, _config):
sdl = "directive @skip(if: Boolean!) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT"
Directive(name="skip", schema_name=schema_name)(Skip())
return sdl
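# Usage sketch: once this directive is baked into a schema, a query such as
#   { me { name @skip(if: true) } }
# never resolves `name`, because on_field_execution raises SkipExecution before
# calling the next resolver; with `if: false` the field resolves normally.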
|
"""
Expect error directives
"""
from __future__ import (
absolute_import,
unicode_literals,
)
from pyparsing import (
Literal,
Optional,
Word,
alphanums,
restOfLine,
)
import six
from pysoa.common.types import Error
from pysoa.test.plan.grammar.assertions import (
assert_actual_list_not_subset,
assert_expected_list_subset_of_actual,
assert_lists_match_any_order,
)
from pysoa.test.plan.grammar.data_types import AnyValue
from pysoa.test.plan.grammar.directive import (
ActionDirective,
register_directive,
)
from pysoa.test.plan.grammar.tools import (
path_get,
path_put,
)
__test_plan_prune_traceback = True # ensure code in this file is not included in failure stack traces
class ActionExpectsNoErrorsDirective(ActionDirective):
"""
Expect that no errors are reported back in the service call response. Any error in either the job response or the
action response will cause this expectation to fail.
"""
@classmethod
def name(cls):
return 'expect_no_errors'
@classmethod
def get_full_grammar(cls):
return (
super(ActionExpectsNoErrorsDirective, cls).get_full_grammar() +
Literal('expect') +
':' +
Literal('no errors')
)
def ingest_from_parsed_test_fixture(self, action_case, test_case, parse_results, file_name, line_number):
action_case['expects_no_errors'] = True
def assert_test_case_action_results(
self,
action_name,
action_case,
test_case,
test_fixture,
action_response,
job_response,
msg=None,
**kwargs
):
if not action_case.get('expects_no_errors', False):
return
if job_response.errors:
raise AssertionError('{}: Expected no job errors, but got: {}'.format(msg or '', job_response.errors))
if action_response.errors:
raise AssertionError('{}: Expected no action errors, but got: {}'.format(msg or '', action_response.errors))
class ActionExpectsErrorsDirective(ActionDirective):
"""
Set expectations that specific errors will (or will not) be in the service response. Any error that matches
this code, whether or not it has a field or message, will fulfill this expectation.
If ``not`` is used, the absence of the error will be asserted (it negates the expectation exactly). As long as no
error has this code, this expectation will pass.
If ``exact`` is used, then all of the errors you define must match all of the errors in your response, and your
response cannot have any non-matching extra errors. ``exact`` and non-``exact`` are mutually-exclusive
expectations: an action case that has a mixture of ``exact`` and non-``exact`` error expectations will fail. For
each error case, you must use one or the other.
If ``job`` is used, then the job response will be examined for the error instead of the action response.
"""
@classmethod
def name(cls):
return 'expect_error'
@classmethod
def get_full_grammar(cls):
return (
super(ActionExpectsErrorsDirective, cls).get_full_grammar() +
Literal('expect') +
':' +
Optional('not').setResultsName('not') +
Optional('exact')('exact') +
Optional('job')('job') +
Literal('error') +
':' +
Literal('code') +
'=' +
Word(alphanums + '-_')('error_code')
)
def ingest_from_parsed_test_fixture(self, action_case, test_case, parse_results, file_name, line_number):
path = 'expects_{not_q}{job_q}{exact_q}error'.format(
not_q='not_' if getattr(parse_results, 'not', None) else '',
job_q='job_' if parse_results.job else '',
exact_q='exact_' if parse_results.exact else '',
)
try:
errors = path_get(action_case, path)
except (KeyError, IndexError):
errors = []
path_put(action_case, path, errors)
errors.append(Error(
code=parse_results.error_code,
message=getattr(parse_results, 'error_message', None) or AnyValue('str'),
field=getattr(parse_results, 'field_name', None) or AnyValue('str', permit_none=True),
traceback=AnyValue('str', permit_none=True),
variables=AnyValue('list', permit_none=True),
))
def assert_test_case_action_results(
self,
action_name,
action_case,
test_case,
test_fixture,
action_response,
job_response,
msg=None,
**kwargs
):
for instruction, expected in six.iteritems(action_case):
if instruction.startswith('expects_') and instruction.endswith('_error'):
target = action_response
if '_job_' in instruction:
target = job_response
errors = target.errors if target else []
try:
if '_not_' in instruction:
assert_actual_list_not_subset(expected, errors, 'NOT EXPECTED ERRORS: {}'.format(msg or ''))
elif '_exact_' in instruction:
assert_lists_match_any_order(expected, errors, 'EXPECTED EXACT ERRORS: {}'.format(msg or ''))
else:
assert_expected_list_subset_of_actual(expected, errors, 'EXPECTED ERRORS: {}'.format(msg or ''))
except AssertionError as e:
for error in errors:
if error.code == 'SERVER_ERROR':
raise type(e)(
'{message}\n\nSERVER_ERROR: {detail}'.format(
message=e.args[0],
detail=error.message,
),
)
raise
class ActionExpectsFieldErrorsDirective(ActionExpectsErrorsDirective):
"""
Set expectations that specific errors will (or will not) be in the service response. Any error that matches
this code *and* field, whether or not it has a message value, will fulfill this expectation.
If ``not`` is used, the absence of the error will be asserted (it negates the expectation exactly). As long as no
error has this code *and* field (even if some errors have this code and other errors have this field), this
expectation will pass.
If ``exact`` is used, then all of the errors you define must match all of the errors in your response, and your
response cannot have any non-matching extra errors. ``exact`` and non-``exact`` are mutually-exclusive
expectations: an action case that has a mixture of ``exact`` and non-``exact`` error expectations will fail. For
each error case, you must use one or the other.
If ``job`` is used, then the job response will be examined for the error instead of the action response.
"""
@classmethod
def name(cls):
return 'expect_error_field'
@classmethod
def get_full_grammar(cls):
return (
super(ActionExpectsFieldErrorsDirective, cls).get_full_grammar() +
',' +
Literal('field') +
'=' +
Word(alphanums + '-_.{}[]')('field_name')
)
class ActionExpectsMessageErrorsDirective(ActionExpectsErrorsDirective):
"""
Set expectations that specific errors will (or will not) be in the service response. Any error that matches
this code *and* message, whether or not it has a field value, will fulfill this expectation.
If ``not`` is used, the absence of the error will be asserted (it negates the expectation exactly). As long as no
error has this code *and* message (even if some errors have this code and other errors have this message), this
expectation will pass.
If ``exact`` is used, then all of the errors you define must match all of the errors in your response, and your
response cannot have any non-matching extra errors. ``exact`` and non-``exact`` are mutually-exclusive
expectations: an action case that has a mixture of ``exact`` and non-``exact`` error expectations will fail. For
each error case, you must use one or the other.
If ``job`` is used, then the job response will be examined for the error instead of the action response.
"""
@classmethod
def name(cls):
return 'expect_error_message'
@classmethod
def get_full_grammar(cls):
return (
super(ActionExpectsMessageErrorsDirective, cls).get_full_grammar() +
',' +
Literal('message') +
'=' +
restOfLine('error_message').setParseAction(lambda s, l, t: t[0].strip(' \t'))
)
class ActionExpectsFieldMessageErrorsDirective(ActionExpectsFieldErrorsDirective):
"""
Set expectations that specific errors will (or will not) be in the service response. Any error that matches
this code, field, *and* message will fulfill this expectation.
If ``not`` is used, the absence of the error will be asserted (it negates the expectation exactly). As long as no
error has this code, field, *and* message, this expectation will pass.
If ``exact`` is used, then all of the errors you define must match all of the errors in your response, and your
response cannot have any non-matching extra errors. ``exact`` and non-``exact`` are mutually-exclusive
expectations: an action case that has a mixture of ``exact`` and non-``exact`` error expectations will fail. For
each error case, you must use one or the other.
If ``job`` is used, then the job response will be examined for the error instead of the action response.
"""
@classmethod
def name(cls):
return 'expect_error_field_message'
@classmethod
def get_full_grammar(cls):
return (
super(ActionExpectsFieldMessageErrorsDirective, cls).get_full_grammar() +
',' +
Literal('message') +
'=' +
restOfLine('error_message').setParseAction(lambda s, l, t: t[0].strip(' \t'))
)
# This order is very important; do not disturb
register_directive(ActionExpectsFieldMessageErrorsDirective)
register_directive(ActionExpectsMessageErrorsDirective)
register_directive(ActionExpectsFieldErrorsDirective)
register_directive(ActionExpectsErrorsDirective)
register_directive(ActionExpectsNoErrorsDirective)
|
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
# Copyright (c) 2018 Juniper Networks, Inc.
# All rights reserved.
# Use is subject to license terms.
#
# Author: cklewar
import argparse
from lib.processor import TaskProcessor
from lib.processor import ServiceProcessor
from lib.pluginfactory import EmitterPlgFact
from lib.pluginfactory import SpacePluginFactory
import lib.constants as c
from lib.tools import Tools
class YaptSvc(object):
if __name__ == '__main__':
Tools.create_config_view(c.CONFIG_TYPE_MAIN)
EmitterPlgFact()
parser = argparse.ArgumentParser()
parser.add_argument("amqpIp", help="provide amqp bus ip")
args = parser.parse_args()
c.conf.AMQP.Host = args.amqpIp
if c.conf.JUNOSSPACE.Enabled:
spf = SpacePluginFactory(c.conf.JUNOSSPACE.Version)
c.SRC = spf.init_plugin()
for item in range(c.conf.YAPT.WorkerThreads):
taskprocessor = TaskProcessor(target=TaskProcessor, name=c.AMQP_PROCESSOR_TASK + str(item),
args=(c.conf.AMQP.Exchange, c.conf.AMQP.Type, c.AMQP_PROCESSOR_TASK,
c.AMQP_PROCESSOR_TASK,))
taskprocessor.start()
serviceprocessor = ServiceProcessor(target=ServiceProcessor, name=c.AMQP_PROCESSOR_SVC,
args=(c.conf.AMQP.Exchange, c.conf.AMQP.Type,
c.AMQP_PROCESSOR_SVC,
c.AMQP_PROCESSOR_SVC,))
serviceprocessor.start()
|
# Smallest Multiple -- Solved
# Solution = 232792560
import math
from Timer import Timer
Stopwatch = Timer()
def LCM(n,p):
return (n*p) // math.gcd(n,p)
Solution = 1
for i in range(2, 21):
Solution = LCM(Solution, i)
print('The least common multiple of all of the numbers from 1 to 20 is: ', Solution)
Stopwatch.stop()
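# A compact equivalent (sketch, standard library only): folding LCM over 2..20 with
# functools.reduce yields the same 232792560; on Python 3.9+ math.lcm could replace LCM.
from functools import reduce
assert reduce(LCM, range(2, 21), 1) == 232792560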
|
import imghdr
import string
import random
from flask import abort
import os
from werkzeug.utils import secure_filename
from app.config import Config
def validate_image(stream):
header = stream.read(512)
stream.seek(0)
format = imghdr.what(None, header)
if not format:
return None
return '.' + (format if format != 'jpeg' else 'jpg')
def generate_filename(ext):
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=20)) + ext
def save_image(photo):
filename = secure_filename(photo.filename)
photo_filename = ''
if filename != '':
file_ext = os.path.splitext(filename)[1].lower()
photo_filename = generate_filename(file_ext)
if file_ext not in Config.UPLOAD_IMAGE_EXTENSIONS or \
file_ext != validate_image(photo.stream):
return False
if not os.path.exists(Config.UPLOAD_IMAGE_PATH):
os.mkdir(os.path.join(Config.UPLOAD_IMAGE_DIR, 'uploads'))
photo.save(os.path.join(Config.UPLOAD_IMAGE_PATH, photo_filename))
return photo_filename
def delete_image(filename):
os.remove(os.path.join(Config.UPLOAD_IMAGE_PATH, filename))
def save_doc(doc):
filename = secure_filename(doc.filename)
doc_filename = ''
if filename != '':
file_ext = os.path.splitext(filename)[1].lower()
doc_filename = generate_filename(file_ext)
if file_ext not in Config.UPLOAD_DOC_EXTENSIONS:
return False
if not os.path.exists(Config.UPLOAD_DOC_PATH):
os.mkdir(os.path.join(Config.UPLOAD_DOC_DIR, 'uploads'))
doc.save(os.path.join(Config.UPLOAD_DOC_PATH, doc_filename))
return doc_filename
def delete_doc(filename):
os.remove(os.path.join(Config.UPLOAD_DOC_PATH, filename))
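# Usage sketch inside a Flask view (hypothetical request field name 'photo'):
# photo = request.files.get('photo')
# saved = save_image(photo)
# if saved is False:
#     abort(400)  # rejected extension or content/extension mismatch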
|
from lib.action import BitBucketAction
class UpdateServiceAction(BitBucketAction):
def run(self, repo, id, url):
"""
Update a service/hook
"""
bb = self._get_client(repo=repo)
success, result = bb.service.update(
service_id=id,
URL=url
)
return result
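# Invocation sketch, assuming this runs as a StackStorm action in a BitBucket pack
# (the pack/action ref and parameter values below are hypothetical):
#   st2 run bitbucket.update_service repo=my_repo id=42 url=https://example.com/hook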
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
import os
import sys
import numpy as np
import argparse
from copy import deepcopy
from torchvision import transforms
from models.vae import ConditionalVAE, ConditionalVAE_conv
from utils import *
import time
def main_LGLvKR_Fine(args):
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
print(75 * '=' + '\n' + '|| \t model dir:\t%s\t ||\n' % args.modeldir + 75 * '=')
permutation = np.array(list(range(10)))
target_transform = transforms.Lambda(lambda y, p=permutation: int(p[y]))
train_data = get_dataset(args.dataset, type="train", dir=args.data_dir, target_transform=target_transform,
verbose=False)
test_data = get_dataset(args.dataset, type="test", dir=args.data_dir, target_transform=target_transform,
verbose=False)
classes_per_task = int(np.floor(10 / args.tasks))
labels_per_task = [list(np.array(range(classes_per_task)) + classes_per_task * task_id) for task_id in
range(args.tasks)]
labels_per_task_test = [list(np.array(range(classes_per_task + classes_per_task * task_id))) for task_id in
range(args.tasks)]
sys.stdout = Logger(os.path.join(args.modeldir, '{}_log_{}.txt'.format(args.name,args.op)))
# training
time_cost = 0
for task_i in range(args.tasks):
print(40 * '=' + ' Task %1d ' % (task_i+1) + 40 * '=')
if not os.path.exists(os.path.join(args.modeldir, args.name + '_%1d.pth' % task_i)):
train_loader = get_data_loader(
SubDataset(train_data, labels_per_task[task_i], target_transform=None),
args.batch_size,
cuda=True,
drop_last=True)
print("training(n=%5d)..." % len(train_loader.dataset))
if task_i == 0:
if args.dataset == 'mnist' or args.dataset == 'fashion':
cvae = ConditionalVAE(args)
elif args.dataset == 'svhn' or args.dataset == 'cifar10':
cvae = ConditionalVAE_conv(args)
print("there are {} params with {} elems in the cvae".format(
len(list(cvae.parameters())), sum(p.numel() for p in cvae.parameters() if p.requires_grad))
)
else:
cvae = torch.load(os.path.join(args.modeldir, args.name + '_%1d.pth' % (task_i - 1)))
print("there are {} params with {} elems in the cvae".format(
len(list(cvae.parameters())), sum(p.numel() for p in cvae.parameters() if p.requires_grad))
)
cvae = cvae.cuda()
optimizer_CVAE = torch.optim.Adam(cvae.parameters(), lr=args.lr, betas=(0.9, 0.999))
start = time.time()
for epoch in range(args.epochs):
loss_log = {'V/loss': 0.0, 'V/loss_rec': 0.0, 'V/loss_var': 0.0}
for x, y in train_loader:
if args.dataset == 'mnist' or args.dataset == 'fashion':
x = x.cuda().view(x.size(0), -1)
elif args.dataset == 'svhn' or args.dataset == 'cifar10':
x = x.cuda()
y = torch.eye(args.class_dim)[y].cuda()
x_rec, mu, logvar = cvae(x, y)
loss_rec = torch.nn.MSELoss(reduction='sum')(x, x_rec) / x.size(0)
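# loss_var below is the closed-form KL divergence KL(N(mu, exp(logvar)) || N(0, I)),
# averaged over the batch; alpha_var weights it against the reconstruction term.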
loss_var = (-0.5 * torch.sum(1 + logvar - mu ** 2 - logvar.exp())) / x.size(0)
loss_cvae = loss_rec + args.alpha_var * loss_var
optimizer_CVAE.zero_grad()
loss_cvae.backward()
optimizer_CVAE.step()
loss_log['V/loss'] += loss_cvae.item()
loss_log['V/loss_rec'] += loss_rec.item()
loss_log['V/loss_var'] += loss_var.item() * args.alpha_var
print('[VAE Epoch%2d]\t V/loss: %.3f\t V/loss_rec: %.3f\t V/loss_var: %.3f'
% (epoch + 1,
loss_log['V/loss'],
loss_log['V/loss_rec'],
loss_log['V/loss_var']))
time_cost += time.time() - start
torch.save(cvae, os.path.join(args.modeldir, args.name + '_%1d.pth' % task_i))
else:
print(40 * '=' + ' Task %1d ' % (task_i+1) + 40 * '=')
cvae = torch.load(os.path.join(args.modeldir, args.name + '_%1d.pth' % task_i))
cvae.eval()
##################### Test with LPIPS #########################################################################
if args.LPIPS:
caculate_LPIPS(args,task_i,test_data,labels_per_task_test[task_i],cvae.decode)
##################### Test with FID #########################################################################
if args.fid:
caculate_fid(args,task_i,test_data,labels_per_task_test[task_i],cvae.decode)
####################### Test with Acc and rAcc ####################################
if args.ACC:
caculate_ACC(args,task_i,train_data,test_data,labels_per_task_test[task_i],cvae.decode)
# ####################### Test as generated pictures ####################################
if args.generate:
generat_img(args,task_i,labels_per_task[task_i],cvae.decode)
print(100 * '-')
print('Total training time is: %.3f seconds'%time_cost)
def main_LGLvKR_joint(args):
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
print(75 * '=' + '\n' + '|| \t model dir:\t%s\t ||\n' % args.modeldir + 75 * '=')
permutation = np.array(list(range(10)))
target_transform = transforms.Lambda(lambda y, p=permutation: int(p[y]))
train_data = get_dataset(args.dataset, type="train", dir=args.data_dir, target_transform=target_transform,
verbose=False)
test_data = get_dataset(args.dataset, type="test", dir=args.data_dir, target_transform=target_transform,
verbose=False)
classes_per_task = int(np.floor(10 / args.tasks))
labels_per_task = [list(np.array(range(classes_per_task+ classes_per_task * task_id))) for task_id in range(args.tasks)]
labels_per_task_test = [list(np.array(range(classes_per_task + classes_per_task * task_id))) for task_id in range(args.tasks)]
sys.stdout = Logger(os.path.join(args.modeldir, '{}_log_{}.txt'.format(args.name,args.op)))
# training
time_cost = 0
for task_i in range(args.tasks):
print(40 * '=' + ' Task %1d ' % (task_i+1) + 40 * '=')
if not os.path.exists(os.path.join(args.modeldir, args.name + '_%1d.pth' % task_i)):
train_loader = get_data_loader(
SubDataset(train_data, labels_per_task[task_i], target_transform=None),
args.batch_size,
cuda=True,
drop_last=True)
print("training(n=%5d)..." % len(train_loader.dataset))
if args.dataset == 'mnist' or args.dataset == 'fashion':
cvae = ConditionalVAE(args)
elif args.dataset == 'svhn' or args.dataset == 'cifar10':
cvae = ConditionalVAE_conv(args)
print("there are {} params with {} elems in the cvae".format(
len(list(cvae.parameters())), sum(p.numel() for p in cvae.parameters() if p.requires_grad))
)
cvae = cvae.cuda()
optimizer_CVAE = torch.optim.Adam(cvae.parameters(), lr=args.lr, betas=(0.9, 0.999))
start = time.time()
for epoch in range(args.epochs):
loss_log = {'V/loss': 0.0, 'V/loss_rec': 0.0, 'V/loss_var': 0.0}
for x, y in train_loader:
if args.dataset == 'mnist' or args.dataset == 'fashion':
x = x.cuda().view(x.size(0), -1)
elif args.dataset == 'svhn' or args.dataset == 'cifar10':
x = x.cuda()
y = torch.eye(args.class_dim)[y].cuda()
x_rec, mu, logvar = cvae(x, y)
loss_rec = torch.nn.MSELoss(reduction='sum')(x, x_rec) / x.size(0)
loss_var = (-0.5 * torch.sum(1 + logvar - mu ** 2 - logvar.exp())) / x.size(0)
loss_cvae = loss_rec + args.alpha_var * loss_var
optimizer_CVAE.zero_grad()
loss_cvae.backward()
optimizer_CVAE.step()
loss_log['V/loss'] += loss_cvae.item()
loss_log['V/loss_rec'] += loss_rec.item()
loss_log['V/loss_var'] += loss_var.item() * args.alpha_var
print('[VAE Epoch%2d]\t V/loss: %.3f\t V/loss_rec: %.3f\t V/loss_var: %.3f'
% (epoch + 1,
loss_log['V/loss'],
loss_log['V/loss_rec'],
loss_log['V/loss_var']))
time_cost += time.time() - start
torch.save(cvae, os.path.join(args.modeldir, args.name + '_%1d.pth' % task_i))
else:
cvae = torch.load(os.path.join(args.modeldir, args.name + '_%1d.pth' % task_i))
cvae.eval()
##################### Test with LPIPS #########################################################################
if args.LPIPS:
caculate_LPIPS(args,task_i,test_data,labels_per_task_test[task_i],cvae.decode)
##################### Test with FID #########################################################################
if args.fid:
caculate_fid(args,task_i,test_data,labels_per_task_test[task_i],cvae.decode)
####################### Test with Acc and rAcc ####################################
if args.ACC:
caculate_ACC(args,task_i,train_data,test_data,labels_per_task_test[task_i],cvae.decode)
# ####################### Test as generated pictures ####################################
if args.generate:
generat_img(args,task_i,labels_per_task[task_i],cvae.decode)
print(100 * '-')
print('Total training time is: %.3f seconds'%time_cost)
def main_LGLvKR(args):
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
print(75 * '=' + '\n' + '|| \t model dir:\t%s\t ||\n' % args.modeldir + 75 * '=')
permutation = np.array(list(range(10)))
target_transform = transforms.Lambda(lambda y, p=permutation: int(p[y]))
train_data = get_dataset(args.dataset, type="train", dir=args.data_dir, target_transform=target_transform, verbose=False)
test_data = get_dataset(args.dataset, type="test", dir=args.data_dir, target_transform=target_transform, verbose=False)
classes_per_task = int(np.floor(10 / args.tasks))
labels_per_task = [list(np.array(range(classes_per_task)) + classes_per_task * task_id) for task_id in range(args.tasks)]
labels_per_task_test = [list(np.array(range(classes_per_task + classes_per_task * task_id))) for task_id in range(args.tasks)]
sys.stdout = Logger(os.path.join(args.modeldir, 'log_{}_{}.txt'.format(args.name,args.op)))
# training
time_cost = 0
for task_i in range(args.tasks):
print(40 * '=' + ' Task %1d ' % (task_i + 1) + 40 * '=')
if not os.path.exists(os.path.join(args.modeldir, args.name + '_%1d.pth' % task_i)):
train_loader = get_data_loader(
SubDataset(train_data, labels_per_task[task_i], target_transform=None),
args.batch_size,
cuda=True,
drop_last=True)
print("training(n=%5d)..." % len(train_loader.dataset))
if task_i == 0:
if args.dataset == 'mnist' or args.dataset == 'fashion':
cvae = ConditionalVAE(args)
elif args.dataset == 'svhn' or args.dataset == 'cifar10':
cvae = ConditionalVAE_conv(args)
print("there are {} params with {} elems in the cvae".format(
len(list(cvae.parameters())), sum(p.numel() for p in cvae.parameters() if p.requires_grad))
)
else:
cvae = torch.load(os.path.join(args.modeldir, args.name + '_%1d.pth' % (task_i - 1)))
cvae_old = deepcopy(cvae)
cvae_old.eval()
cvae_old = freeze_model(cvae_old)
print("there are {} params with {} elems in the cvae, {} params with {} elems in the cvae_old".format(
len(list(cvae.parameters())), sum(p.numel() for p in cvae.parameters() if p.requires_grad),
len(list(cvae_old.parameters())), sum(p.numel() for p in cvae_old.parameters() if p.requires_grad))
)
cvae = cvae.cuda()
optimizer_CVAE = torch.optim.Adam(cvae.parameters(), lr=args.lr, betas=(0.9, 0.999))
start = time.time()
for epoch in range(args.epochs):
loss_log = {'V/loss': 0.0, 'V/loss_rec': 0.0, 'V/loss_var': 0.0, 'V/loss_var_hat': 0.0, 'V/loss_aug': 0.0}
for x, y in train_loader:
if args.dataset == 'mnist' or args.dataset == 'fashion':
x = x.cuda().view(x.size(0), -1)
elif args.dataset == 'svhn' or args.dataset == 'cifar10':
x = x.cuda()
y = torch.eye(args.class_dim)[y].cuda()
if task_i == 0:
x_rec, mu, logvar = cvae(x, y)
mu_hat, logvar_hat = cvae.encode(x_rec, y)
loss_rec = torch.nn.MSELoss(reduction='sum')(x, x_rec) / x.size(0)
loss_var = (-0.5 * torch.sum(1 + logvar - mu ** 2 - logvar.exp())) / x.size(0)
loss_var_hat = (-0.5 * torch.sum(1 + logvar_hat - mu_hat ** 2 - logvar_hat.exp())) / x_rec.size(0)
loss_cvae = loss_rec + args.alpha_var * loss_var + args.alpha_var_hat * loss_var_hat
else:
mu, logvar = cvae.encode(x, y)
z = cvae.reparameterize(mu, logvar)
y_pre = torch.randint(0, labels_per_task[task_i][0], (args.batch_size,))
y_pre = torch.eye(args.class_dim)[y_pre].cuda()
z_pre = torch.empty((args.batch_size, args.latent_dim)).normal_(mean=0, std=1).cuda()
xPre_old = cvae_old.decode(z_pre, y_pre)
z_merged = torch.cat((z_pre, z))
y_merged = torch.cat((y_pre, y))
xRec_merged = cvae.decode(z_merged, y_merged)
mu_hat, logvar_hat = cvae.encode(xRec_merged[:args.batch_size], y)
loss_rec = torch.nn.MSELoss(reduction='sum')(x, xRec_merged[args.batch_size:]) / x.size(0)
loss_var = (-0.5 * torch.sum(1 + logvar - mu ** 2 - logvar.exp())) / x.size(0)
loss_var_hat = (-0.5 * torch.sum(1 + logvar_hat - mu_hat ** 2 - logvar_hat.exp())) / x.size(0)
loss_aug = torch.dist(xRec_merged[:args.batch_size], xPre_old, 2) / xPre_old.size(0)
loss_aug = loss_aug * task_i
loss_cvae = loss_rec + args.alpha_var * loss_var + args.alpha_var_hat * loss_var_hat + args.alpha_aug * loss_aug
optimizer_CVAE.zero_grad()
loss_cvae.backward()
optimizer_CVAE.step()
loss_log['V/loss'] += loss_cvae.item()
loss_log['V/loss_rec'] += loss_rec.item()
loss_log['V/loss_var'] += loss_var.item() * args.alpha_var
loss_log['V/loss_var_hat'] += loss_var_hat.item() * args.alpha_var_hat
loss_log['V/loss_aug'] += loss_aug.item() * args.alpha_aug if task_i != 0 else 0
print('[VAE Epoch%2d]\t V/loss: %.3f\t V/loss_rec: %.3f\t V/loss_var: %.3f\t V/loss_var_hat: %.3f\t V/loss_aug: %.3f'
% (epoch + 1,
loss_log['V/loss'],
loss_log['V/loss_rec'],
loss_log['V/loss_var'],
loss_log['V/loss_var_hat'],
loss_log['V/loss_aug']))
time_cost += time.time() - start
torch.save(cvae, os.path.join(args.modeldir, args.name + '_%1d.pth' % task_i))
else:
cvae = torch.load(os.path.join(args.modeldir, args.name + '_%1d.pth' % task_i))
cvae.eval()
##################### Test with LPIPS #########################################################################
if args.LPIPS:
caculate_LPIPS(args,task_i,test_data,labels_per_task_test[task_i],cvae.decode)
##################### Test with FID #########################################################################
if args.fid:
caculate_fid(args,task_i,test_data,labels_per_task_test[task_i],cvae.decode)
####################### Test with Acc and rAcc ####################################
if args.ACC:
caculate_ACC(args,task_i,train_data,test_data,labels_per_task_test[task_i],cvae.decode)
# ####################### Test as generated pictures ####################################
if args.generate:
generat_img(args,task_i,labels_per_task[task_i],cvae.decode)
print(100 * '-')
print('Total training time is: %.3f seconds'%time_cost)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
common = parser.add_argument_group("common parameters group")
network = parser.add_argument_group("network parameters group")
train = parser.add_argument_group("training parameters group")
common.add_argument('-modeldir', default='./checkpoints', help='check point directory')
common.add_argument('-dataset', type=str, default='mnist', help='dataset name: mnist/fashion/svhn')
common.add_argument('-data_dir', type=str, default='./dataset/mnist', help='data directory')
common.add_argument('-tasks', type=int, default=10, help='number of tasks')
common.add_argument('-batch_size', type=int, default=128, help='batch size')
common.add_argument('-op', type=str, default='train', choices=['train', 'eval_'])
common.add_argument('-LPIPS', default=False, action='store_true', help='Whether LPIPS needs to be calculated')
common.add_argument('-fid', default=False, action='store_true', help='Whether fid needs to be calculated')
common.add_argument('-ACC', default=False, action='store_true', help='Whether ACC needs to be calculated')
common.add_argument('-generate', default=False, action='store_true', help='Whether imgs need to be generated')
network.add_argument('-feat_dim', type=int, default=32 * 32, help='input features dimension')
network.add_argument('-latent_dim', type=int, default=2, help='latent variable dimension')
network.add_argument('-class_dim', type=int, default=10, help='class or one-hot label dimension')
network.add_argument('-hidden_dim', type=int, default=256, help='hidden dimension')
train.add_argument('-lr', type=float, default=0.0002, help='learning rate')
train.add_argument('-alpha_var', type=float, default=1., help='alpha parameter for variational loss')
train.add_argument('-alpha_var_hat', type=float, default=1., help="alpha parameter for variational loss of reconstructed data")
train.add_argument('-alpha_aug', type=float, default=1., help="alpha parameter for the augmented loss")
train.add_argument('-epochs', default=10, type=int, metavar='N', help='number of epochs')
train.add_argument("-gpu", type=str, default='0', help='which gpu to use')
train.add_argument("-name", type=str, default='10tasks', help='the name of the temporary saved model')
train.add_argument('-method',type=str,default='LGLvKR',choices=['LGLvKR_Fine','LGLvKR_joint','LGLvKR','LGLvKR_noFC','LGLvKR_noKR'])
args = parser.parse_args()
if args.dataset == 'mnist' or args.dataset =='mnist28':
args.feat_dim = 32*32*1
        args.model_dir = args.modeldir + '/SplitMNIST/'
elif args.dataset == 'fashion':
args.feat_dim = 32*32*1
        args.model_dir = args.modeldir + '/fashion/'
elif args.dataset == 'svhn':
args.feat_dim = 32*32*3
args.data_dir = './dataset/svhn'
        args.model_dir = args.modeldir + '/svhn/'
    if args.method == 'LGLvKR_Fine':
args.modeldir = args.model_dir + 'LGLvKR_finetuning/' + '{}epoch'.format(args.epochs)
main_LGLvKR_Fine(args)
elif args.method == 'LGLvKR_joint':
args.modeldir = args.model_dir + 'LGLvKR_jointTraining/' + '{}epoch'.format(args.epochs)
main_LGLvKR_joint(args)
elif args.method == 'LGLvKR':
args.modeldir = args.model_dir + 'LGLvKR/' + '{}epoch'.format(args.epochs)
main_LGLvKR(args)
elif args.method == 'LGLvKR_noFC':
args.modeldir = args.model_dir + 'LGLvKR_noFC/' + '{}epoch'.format(args.epochs)
args.alpha_var_hat = 0.
main_LGLvKR(args)
elif args.method == 'LGLvKR_noKR':
args.modeldir = args.model_dir + 'LGLvKR_noKR/' + '{}epoch'.format(args.epochs)
args.alpha_aug = 0.
main_LGLvKR(args)
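# Illustrative invocation sketch (the script filename "main.py" is an assumption; the flags are
# the ones defined by the argparse groups above):
#   python main.py -dataset mnist -data_dir ./dataset/mnist -method LGLvKR -tasks 10 -epochs 10 -gpu 0
# Add -LPIPS / -fid / -ACC / -generate to run the corresponding evaluations after each task.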
|
#!/usr/bin/python3
from brownie.test import given, strategy
# Does the RewardAdded event fire?
@given(_amt=strategy("uint256", max_value=(10 ** 18), exclude=0))
def test_reward_added_fires(multi, reward_token, alice, _amt):
multi.stake(10 ** 18, {"from": alice})
reward_token.approve(multi, _amt, {"from": alice})
multi.setRewardsDistributor(reward_token, alice, {"from": alice})
tx = multi.notifyRewardAmount(reward_token, _amt, {"from": alice})
assert tx.events["RewardAdded"].values() == [_amt]
# Does the Staked event fire?
@given(_amt=strategy("uint256", max_value=(10 ** 18), exclude=0))
def test_staked_fires(multi, alice, _amt):
tx = multi.stake(_amt, {"from": alice})
assert tx.events["Staked"].values()[0] == alice
assert tx.events["Staked"].values()[1] == _amt
# Does the Withdrawn event fire?
@given(amount=strategy("uint256", max_value=(10 ** 18), min_value=(10 ** 1), exclude=0))
def test_withdrawn_event_fires(multi, alice, amount):
multi.stake(amount, {"from": alice})
tx = multi.withdraw(amount // 2, {"from": alice})
assert tx.events["Withdrawn"].values()[0] == alice
assert tx.events["Withdrawn"].values()[1] == amount // 2
# Does the RewardPaid event fire?
@given(amount=strategy("uint256", max_value=(10 ** 18), min_value=(10 ** 2)))
def test_reward_paid_event_fires(
multi, accounts, base_token, reward_token, chain, alice, bob, amount
):
tx = multi.getReward()
reward_token.approve(multi, amount, {"from": bob})
multi.setRewardsDistributor(reward_token, bob, {"from": alice})
multi.notifyRewardAmount(reward_token, amount, {"from": bob})
base_token.approve(multi, amount, {"from": bob})
multi.stake(amount, {"from": bob})
chain.mine(timedelta=60)
value_earned = multi.earned(bob, reward_token)
tx = multi.getReward({"from": bob})
assert tx.events["Transfer"].values()[0] == multi
assert tx.events["Transfer"].values()[1] == bob
assert tx.events["RewardPaid"].values()[0] == bob
assert tx.events["RewardPaid"].values()[1] == reward_token
assert tx.events["RewardPaid"].values()[2] == value_earned
# Does the RewardsDurationUpdated event fire?
@given(duration=strategy("uint256", max_value=(10 ** 5), exclude=0))
def test_rewards_duration_fires(multi, alice, reward_token, duration):
multi.setRewardsDistributor(reward_token, alice, {"from": alice})
tx = multi.setRewardsDuration(reward_token, duration, {"from": alice})
assert tx.events["RewardsDurationUpdated"].values()[0] == reward_token
assert tx.events["RewardsDurationUpdated"].values()[1] == duration
# Does the Recovered event fire?
@given(amount=strategy("uint256", max_value=(10 ** 10), exclude=0))
def test_recovered_fires(multi, alice, err_token, amount):
tx = multi.recoverERC20(err_token, amount, {"from": alice})
assert tx.events["Recovered"].values()[0] == err_token
assert tx.events["Recovered"].values()[1] == amount
|
import json
import statistics
import time
import urllib.request
from multiprocessing import Pool
with open("payload_performance.json", "r") as json_file:
json_list = list(json_file)
data_points = []
for json_str in json_list:
result = json.loads(json_str)
if not result:
continue
body = {"data": [result["text"]]}
data_points.append(body)
data_points = data_points[:200]
myurl = (
"http://localhost:8080/invocations"
) # https://pbgvppvv48.execute-api.us-east-1.amazonaws.com/test/alchemy
def send_request(body):
req = urllib.request.Request(myurl)
req.add_header("Content-Type", "application/json")
jsondata = json.dumps(body)
jsondataasbytes = jsondata.encode("utf-8") # needs to be bytes
req.add_header("Content-Length", len(jsondataasbytes))
print(jsondataasbytes)
try:
start = time.time()
response = urllib.request.urlopen(req, jsondataasbytes)
end = time.time()
return end - start
except urllib.error.HTTPError as e:
return 0
p = Pool(processes=2)
intervals = p.map(send_request, data_points)
p.close()
# for i, body in enumerate(data_points):
# req = urllib.request.Request(myurl)
# req.add_header('Content-Type', 'application/json')
# jsondata = json.dumps(body)
# jsondataasbytes = jsondata.encode('utf-8') # needs to be bytes
# req.add_header('Content-Length', len(jsondataasbytes))
# print (jsondataasbytes)
# try:
# start = time.time()
# response = urllib.request.urlopen(req, jsondataasbytes)
# end = time.time()
# intervals.append(end - start)
# print(response)
# except urllib.error.HTTPError as e:
# print(e)
avg_time = statistics.mean(intervals)
print("Avg time: " + str(avg_time))
median_time = statistics.median(intervals)
print("median time: " + str(median_time))
print("Min time: " + str(min(intervals)))
print("Max time: " + str(max(intervals)))
|
from __future__ import absolute_import, unicode_literals
import pytest
from six import string_types
from c8.fabric import TransactionFabric
from c8.exceptions import (
TransactionStateError,
TransactionExecuteError,
TransactionJobResultError
)
from c8.job import TransactionJob
from tests.helpers import clean_doc, extract, generate_string
# noinspection PyUnresolvedReferences
def test_transaction_wrapper_attributes(db, col, username):
txn_db = db.begin_transaction(timeout=100, sync=True)
assert txn_db._executor._sync is True
assert txn_db._executor._timeout == 100
assert isinstance(txn_db, TransactionFabric)
assert txn_db.username == username
assert txn_db.context == 'transaction'
assert txn_db.db_name == db.name
assert txn_db.name == db.name
assert repr(txn_db) == '<TransactionFabric {}>'.format(db.name)
txn_col = txn_db.collection(col.name)
assert txn_col.username == username
assert txn_col.context == 'transaction'
assert txn_col.db_name == db.name
assert txn_col.name == col.name
txn_c8ql = txn_db.c8ql
assert txn_c8ql.username == username
assert txn_c8ql.context == 'transaction'
assert txn_c8ql.db_name == db.name
job = txn_col.get(generate_string())
assert isinstance(job, TransactionJob)
assert isinstance(job.id, string_types)
assert repr(job) == '<TransactionJob {}>'.format(job.id)
def test_transaction_execute_without_result(db, col, docs):
with db.begin_transaction(return_result=False) as txn_db:
txn_col = txn_db.collection(col.name)
# Ensure that no jobs are returned
assert txn_col.insert(docs[0]) is None
assert txn_col.delete(docs[0]) is None
assert txn_col.insert(docs[1]) is None
assert txn_col.delete(docs[1]) is None
assert txn_col.insert(docs[2]) is None
assert txn_col.get(docs[2]) is None
assert txn_db.queued_jobs() is None
# Ensure that the operations went through
assert txn_db.queued_jobs() is None
assert extract('_key', col.all()) == [docs[2]['_key']]
def test_transaction_execute_with_result(db, col, docs):
with db.begin_transaction(return_result=True) as txn_db:
txn_col = txn_db.collection(col.name)
job1 = txn_col.insert(docs[0])
job2 = txn_col.insert(docs[1])
job3 = txn_col.get(docs[1])
jobs = txn_db.queued_jobs()
assert jobs == [job1, job2, job3]
assert all(job.status() == 'pending' for job in jobs)
assert txn_db.queued_jobs() == [job1, job2, job3]
assert all(job.status() == 'done' for job in txn_db.queued_jobs())
assert extract('_key', col.all()) == extract('_key', docs[:2])
# Test successful results
assert job1.result()['_key'] == docs[0]['_key']
assert job2.result()['_key'] == docs[1]['_key']
assert job3.result()['_key'] == docs[1]['_key']
def test_transaction_execute_error_in_result(db, col, docs):
txn_db = db.begin_transaction(timeout=100, sync=True)
txn_col = txn_db.collection(col.name)
job1 = txn_col.insert(docs[0])
job2 = txn_col.insert(docs[1])
job3 = txn_col.insert(docs[1]) # duplicate
with pytest.raises(TransactionExecuteError) as err:
txn_db.commit()
assert err.value.error_code == 1210
jobs = [job1, job2, job3]
assert txn_db.queued_jobs() == jobs
assert all(job.status() == 'pending' for job in jobs)
def test_transaction_empty_commit(db):
txn_db = db.begin_transaction(return_result=True)
assert list(txn_db.commit()) == []
txn_db = db.begin_transaction(return_result=False)
assert txn_db.commit() is None
def test_transaction_double_commit(db, col, docs):
txn_db = db.begin_transaction()
job = txn_db.collection(col.name).insert(docs[0])
# Test first commit
assert txn_db.commit() == [job]
assert job.status() == 'done'
assert len(col) == 1
assert clean_doc(col.random()) == docs[0]
# Test second commit which should fail
with pytest.raises(TransactionStateError) as err:
txn_db.commit()
assert 'already committed' in str(err.value)
assert job.status() == 'done'
assert len(col) == 1
assert clean_doc(col.random()) == docs[0]
def test_transaction_action_after_commit(db, col):
with db.begin_transaction() as txn_db:
txn_db.collection(col.name).insert({})
# Test insert after the transaction has been committed
with pytest.raises(TransactionStateError) as err:
txn_db.collection(col.name).insert({})
assert 'already committed' in str(err.value)
assert len(col) == 1
def test_transaction_method_not_allowed(db):
with pytest.raises(TransactionStateError) as err:
txn_db = db.begin_transaction()
txn_db.c8ql.functions()
assert str(err.value) == 'action not allowed in transaction'
with pytest.raises(TransactionStateError) as err:
with db.begin_transaction() as txn_db:
txn_db.c8ql.functions()
assert str(err.value) == 'action not allowed in transaction'
def test_transaction_execute_error(bad_db, col, docs):
txn_db = bad_db.begin_transaction(return_result=True)
job = txn_db.collection(col.name).insert_many(docs)
# Test transaction execute with bad fabric
with pytest.raises(TransactionExecuteError):
txn_db.commit()
assert len(col) == 0
assert job.status() == 'pending'
def test_transaction_job_result_not_ready(db, col, docs):
txn_db = db.begin_transaction(return_result=True)
job = txn_db.collection(col.name).insert_many(docs)
# Test get job result before commit
with pytest.raises(TransactionJobResultError) as err:
job.result()
assert str(err.value) == 'result not available yet'
# Test commit to make sure it still works after the errors
assert list(txn_db.commit()) == [job]
assert len(job.result()) == len(docs)
assert extract('_key', col.all()) == extract('_key', docs)
def test_transaction_execute_raw(db, col, docs):
# Test execute raw transaction
doc = docs[0]
key = doc['_key']
result = db.execute_transaction(
command='''
function (params) {{
var db = require('internal').db;
db.{col}.save({{'_key': params.key, 'val': 1}});
return true;
}}
'''.format(col=col.name),
params={'key': key},
write=[col.name],
read=[col.name],
sync=False,
timeout=1000,
max_size=100000,
allow_implicit=True,
intermediate_commit_count=10,
intermediate_commit_size=10000
)
assert result is True
assert doc in col and col[key]['val'] == 1
# Test execute invalid transaction
with pytest.raises(TransactionExecuteError) as err:
db.execute_transaction(command='INVALID COMMAND')
assert err.value.error_code == 10
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'AdministratorType',
'CatalogCollationType',
'IdentityType',
'InstancePoolLicenseType',
'ManagedDatabaseCreateMode',
'ManagedInstanceLicenseType',
'ManagedInstanceProxyOverride',
'ManagedServerCreateMode',
'SecurityAlertPolicyState',
'SensitivityLabelRank',
]
class AdministratorType(str, Enum):
"""
    Type of the server administrator.
"""
ACTIVE_DIRECTORY = "ActiveDirectory"
class CatalogCollationType(str, Enum):
"""
Collation of the metadata catalog.
"""
DATABAS_E_DEFAULT = "DATABASE_DEFAULT"
SQ_L_LATIN1_GENERAL_CP1_C_I_AS = "SQL_Latin1_General_CP1_CI_AS"
class IdentityType(str, Enum):
"""
The identity type. Set this to 'SystemAssigned' in order to automatically create and assign an Azure Active Directory principal for the resource.
"""
NONE = "None"
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
class InstancePoolLicenseType(str, Enum):
"""
The license type. Possible values are 'LicenseIncluded' (price for SQL license is included) and 'BasePrice' (without SQL license price).
"""
LICENSE_INCLUDED = "LicenseIncluded"
BASE_PRICE = "BasePrice"
class ManagedDatabaseCreateMode(str, Enum):
"""
Managed database create mode. PointInTimeRestore: Create a database by restoring a point in time backup of an existing database. SourceDatabaseName, SourceManagedInstanceName and PointInTime must be specified. RestoreExternalBackup: Create a database by restoring from external backup files. Collation, StorageContainerUri and StorageContainerSasToken must be specified. Recovery: Creates a database by restoring a geo-replicated backup. RecoverableDatabaseId must be specified as the recoverable database resource ID to restore.
"""
DEFAULT = "Default"
RESTORE_EXTERNAL_BACKUP = "RestoreExternalBackup"
POINT_IN_TIME_RESTORE = "PointInTimeRestore"
RECOVERY = "Recovery"
RESTORE_LONG_TERM_RETENTION_BACKUP = "RestoreLongTermRetentionBackup"
class ManagedInstanceLicenseType(str, Enum):
"""
The license type. Possible values are 'LicenseIncluded' (regular price inclusive of a new SQL license) and 'BasePrice' (discounted AHB price for bringing your own SQL licenses).
"""
LICENSE_INCLUDED = "LicenseIncluded"
BASE_PRICE = "BasePrice"
class ManagedInstanceProxyOverride(str, Enum):
"""
Connection type used for connecting to the instance.
"""
PROXY = "Proxy"
REDIRECT = "Redirect"
DEFAULT = "Default"
class ManagedServerCreateMode(str, Enum):
"""
Specifies the mode of database creation.
Default: Regular instance creation.
Restore: Creates an instance by restoring a set of backups to specific point in time. RestorePointInTime and SourceManagedInstanceId must be specified.
"""
DEFAULT = "Default"
POINT_IN_TIME_RESTORE = "PointInTimeRestore"
class SecurityAlertPolicyState(str, Enum):
"""
Specifies the state of the policy, whether it is enabled or disabled or a policy has not been applied yet on the specific database.
"""
NEW = "New"
ENABLED = "Enabled"
DISABLED = "Disabled"
class SensitivityLabelRank(str, Enum):
NONE = "None"
LOW = "Low"
MEDIUM = "Medium"
HIGH = "High"
CRITICAL = "Critical"
|
import os
base_path = "/workspace/soft-Q-learning-for-text-generation/data/yelp-gpt2"
max_source_length = 512
max_decoding_length = 5
source_vocab_file = os.path.join(base_path, "vocab.source")
target_vocab_file = os.path.join(base_path, "vocab.target")
train = {
"batch_size": 12,
"allow_smaller_final_batch": False,
"source_dataset": {
"files": os.path.join(base_path, "train.source.all_sentences"),
"vocab_file": source_vocab_file,
},
"target_dataset": {
"files": os.path.join(base_path, "train.target.all_sentences"),
"vocab_file": target_vocab_file,
}
}
# No Validation and Test
val = {
"batch_size": 7,
"shuffle": False,
"source_dataset": {
"files": os.path.join(base_path, "validation.source.balanced_10_samples"),
"vocab_file": source_vocab_file,
},
"target_dataset": {
"files": os.path.join(base_path, "validation.target.balanced_10_samples"),
"vocab_file": target_vocab_file,
}
}
# No Validation and Test
test = {
"batch_size": 7,
"shuffle": False,
"source_dataset": {
"files": os.path.join(base_path, "validation.source.balanced_10_samples"),
"vocab_file": source_vocab_file,
},
"target_dataset": {
"files": os.path.join(base_path, "validation.target.balanced_10_samples"),
"vocab_file": target_vocab_file,
}
}
|
import argparse
import os
import time
import logging
from logging import getLogger
import urllib
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from utils.options import parse_semisup_args
from utils.utils import (
initialize_exp,
restart_from_checkpoint,
fix_random_seeds,
AverageMeter,
init_distributed_mode,
accuracy,
)
logger = getLogger()
def main():
global args, best_acc
args = parse_semisup_args()
init_distributed_mode(args)
fix_random_seeds(args.seed)
if args.rank==0:
if not os.path.exists(args.exp_dir):
os.makedirs(args.exp_dir)
logger, training_stats = initialize_exp(
args, "epoch", "loss", "prec1", "prec5", "loss_val", "prec1_val", "prec5_val"
)
# build data
train_data_path = os.path.join(args.data_path, "train")
train_dataset = datasets.ImageFolder(train_data_path)
# take either 1% or 10% of images
subset_file = "{}percent.txt".format(args.labels_perc)
with open(subset_file, "r") as f:
list_imgs = f.readlines()
list_imgs = [x.split("\n")[0] for x in list_imgs]
train_dataset.samples = [(
os.path.join(train_data_path, li.split('_')[0], li),
train_dataset.class_to_idx[li.split('_')[0]]
) for li in list_imgs]
val_dataset = datasets.ImageFolder(os.path.join(args.data_path, "val"))
tr_normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.228, 0.224, 0.225]
)
train_dataset.transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
tr_normalize,
])
val_dataset.transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
tr_normalize,
])
sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(
train_dataset,
sampler=sampler,
batch_size=args.batch_size,
num_workers=args.workers,
pin_memory=True,
)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size,
num_workers=args.workers,
pin_memory=True,
)
logger.info("Building data done with {} images loaded.".format(len(train_dataset)))
# build model
model = models.__dict__[args.arch](num_classes=1000)
# convert batch norm layers
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
# load weights
if os.path.isfile(args.pretrained):
state_dict = torch.load(args.pretrained, map_location="cuda:" + str(args.gpu))
if "state_dict" in state_dict:
state_dict = state_dict["state_dict"]
# remove prefixe "module."
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
state_dict = {k.replace("encoder_q.", ""): v for k, v in state_dict.items()}
for k, v in model.state_dict().items():
if k not in list(state_dict):
logger.info('key "{}" could not be found in provided state dict'.format(k))
elif state_dict[k].shape != v.shape:
logger.info('key "{}" is of different shape in model and provided state dict'.format(k))
state_dict[k] = v
msg = model.load_state_dict(state_dict, strict=False)
logger.info("Load pretrained model with msg: {}".format(msg))
else:
logger.info("No pretrained weights found => training from random weights")
# model to gpu
model = model.cuda()
model = nn.parallel.DistributedDataParallel(
model,
device_ids=[args.gpu],
find_unused_parameters=True,
)
# set optimizer
trunk_parameters = []
head_parameters = []
for name, param in model.named_parameters():
if 'fc' in name:
head_parameters.append(param)
else:
trunk_parameters.append(param)
optimizer = torch.optim.SGD(
[{'params': trunk_parameters},
{'params': head_parameters, 'lr': args.lr_last_layer}],
lr=args.lr,
momentum=0.9,
weight_decay=0,
)
# set scheduler
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, args.decay_epochs, gamma=args.gamma
)
# Optionally resume from a checkpoint
to_restore = {"epoch": 0, "best_acc": (0., 0.)}
restart_from_checkpoint(
os.path.join(args.exp_dir, "checkpoint.pth.tar"),
run_variables=to_restore,
state_dict=model,
optimizer=optimizer,
scheduler=scheduler,
)
start_epoch = to_restore["epoch"]
best_acc = to_restore["best_acc"]
cudnn.benchmark = True
for epoch in range(start_epoch, args.epochs):
# train the network for one epoch
logger.info("============ Starting epoch %i ... ============" % epoch)
# set samplers
train_loader.sampler.set_epoch(epoch)
scores = train(model, optimizer, train_loader, epoch)
scores_val = validate_network(val_loader, model)
training_stats.update(scores + scores_val)
scheduler.step()
# save checkpoint
if args.rank == 0:
save_dict = {
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"best_acc": best_acc,
}
torch.save(save_dict, os.path.join(args.exp_dir, "checkpoint.pth.tar"))
logger.info("Fine-tuning with {}% of labels completed.\n"
"Test accuracies: top-1 {acc1:.1f}, top-5 {acc5:.1f}".format(
args.labels_perc, acc1=best_acc[0], acc5=best_acc[1]))
def train(model, optimizer, loader, epoch):
"""
Train the models on the dataset.
"""
# running statistics
batch_time = AverageMeter("time", ":.2f")
data_time = AverageMeter("data time", ":.2f")
# training statistics
top1 = AverageMeter("top1", ":.3f")
top5 = AverageMeter("top5", ":.3f")
losses = AverageMeter("loss", ":.3e")
end = time.perf_counter()
model.train()
criterion = nn.CrossEntropyLoss().cuda()
for iter_epoch, (inp, target) in enumerate(loader):
# measure data loading time
data_time.update(time.perf_counter() - end)
# move to gpu
inp = inp.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# forward
output = model(inp)
# compute cross entropy loss
loss = criterion(output, target)
# compute the gradients
optimizer.zero_grad()
loss.backward()
# step
optimizer.step()
# update stats
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), inp.size(0))
top1.update(acc1[0], inp.size(0))
top5.update(acc5[0], inp.size(0))
batch_time.update(time.perf_counter() - end)
end = time.perf_counter()
# verbose
if args.rank == 0 and iter_epoch % 50 == 0:
logger.info(
"Epoch[{0}] - Iter: [{1}/{2}]\t"
"Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
"Data {data_time.val:.3f} ({data_time.avg:.3f})\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t"
"Prec {top1.val:.3f} ({top1.avg:.3f})\t"
"LR trunk {lr}\t"
"LR head {lr_W}".format(
epoch,
iter_epoch,
len(loader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
top1=top1,
lr=optimizer.param_groups[0]["lr"],
lr_W=optimizer.param_groups[1]["lr"],
)
)
return epoch, losses.avg, top1.avg.item(), top5.avg.item()
def validate_network(val_loader, model):
batch_time = AverageMeter("time", ":.2f")
losses = AverageMeter("loss", ":.3e")
top1 = AverageMeter("top1", ":.2f")
top5 = AverageMeter("top5", ":.2f")
global best_acc
# switch to evaluate mode
model.eval()
criterion = nn.CrossEntropyLoss().cuda()
with torch.no_grad():
end = time.perf_counter()
for i, (inp, target) in enumerate(val_loader):
# move to gpu
inp = inp.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
output = model(inp)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), inp.size(0))
top1.update(acc1[0], inp.size(0))
top5.update(acc5[0], inp.size(0))
# measure elapsed time
batch_time.update(time.perf_counter() - end)
end = time.perf_counter()
if top1.avg.item() > best_acc[0]:
best_acc = (top1.avg.item(), top5.avg.item())
if args.rank == 0:
logger.info(
"Test:\t"
"Time {batch_time.avg:.3f}\t"
"Loss {loss.avg:.4f}\t"
"Acc@1 {top1.avg:.3f}\t"
"Best Acc@1 so far {acc:.1f}".format(
batch_time=batch_time, loss=losses, top1=top1, acc=best_acc[0]))
return losses.avg, top1.avg.item(), top5.avg.item()
if __name__ == "__main__":
main()
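# Illustrative launch sketch. The script filename and the exact flag spellings below are
# assumptions (they are defined in utils.options.parse_semisup_args, which is not shown here);
# the attribute names data_path, pretrained, labels_perc, exp_dir and arch are the ones this
# script actually reads:
#   python -m torch.distributed.launch --nproc_per_node=4 eval_semisup.py \
#       --data_path /path/to/imagenet --pretrained /path/to/checkpoint.pth \
#       --labels_perc 10 --exp_dir ./experiments/semisup --arch resnet50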
|
from django.shortcuts import render,redirect
from django.contrib.auth import authenticate, login,logout
from django.contrib.auth.forms import AuthenticationForm
def login_request(request):
if request.method=='POST':
username=request.POST['username']
password=request.POST['password']
print(username,password)
user=authenticate(request,username=username,password=password)
if user:
login(request, user)
print(user)
return redirect('courses')
else:
print('no user')
form=AuthenticationForm()
return render(request=request,template_name='login.html',context={'form':form})
def logout_request(request):
logout(request)
return redirect("login")
|
from topology import topology
from json import load,dump
def convert(geojson,topojson,object_name=False, *args, **kwargs):
if isinstance(geojson,dict):
input_dict = geojson
elif isinstance(geojson,str) or isinstance(geojson,unicode):
inFile = open(geojson)
input_dict = load(inFile)
if not object_name and 'type' in input_dict and hasattr(inFile,'name') and inFile.name.lower().endswith('.geojson'):
input_dict = {inFile.name[:-8].split('/')[-1]:input_dict}
elif isinstance(geojson,file):
input_dict=load(geojson)
if 'type' in input_dict:
if object_name:
input_dict = {object_name:input_dict}
else:
input_dict = {'name':input_dict}
output_dict = topology(input_dict, *args, **kwargs)
if isinstance(topojson,str) or isinstance(topojson,unicode):
with open(topojson,'w') as f:
dump(output_dict,f)
elif isinstance(topojson,file):
dump(output_dict,topojson)
else:
return output_dict
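# Usage sketch (illustrative; the file names are placeholders, not shipped with this module):
#   convert('counties.geojson', 'counties.topo.json')  # read a GeoJSON file, write a TopoJSON file
#   topo = convert(geojson_dict, None)                 # pass a dict in, get the topology dict back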
|
# -*- coding: utf-8 -*-
import re
import lxml
import lxml.etree
from lxml.html.clean import Cleaner
import parsel
_clean_html = Cleaner(
scripts=True,
javascript=False, # onclick attributes are fine
comments=True,
style=True,
links=True,
meta=True,
page_structure=False, # <title> may be nice to have
processing_instructions=True,
embedded=True,
frames=True,
forms=False, # keep forms
annoying_tags=False,
remove_unknown_tags=False,
safe_attrs_only=False,
).clean_html
def _cleaned_html_tree(html):
if isinstance(html, lxml.html.HtmlElement):
tree = html
else:
parser = lxml.html.HTMLParser(encoding='utf8')
tree = lxml.html.fromstring(html.encode('utf8'), parser=parser)
return _clean_html(tree)
def parse_html(html):
""" Create an lxml.html.HtmlElement from a string with html.
"""
parser = lxml.html.HTMLParser(encoding='utf8')
return lxml.html.fromstring(html.encode('utf8'), parser=parser)
_whitespace = re.compile(r'\s+')
_has_trailing_whitespace = re.compile(r'\s$').search
_has_punct_after = re.compile(r'^[,:;.!?"\)]').search
_has_punct_before = re.compile(r'\($').search
def selector_to_text(sel, guess_punct_space=True):
""" Convert a cleaned selector to text.
See html_text.extract_text docstring for description of the approach and options.
"""
if guess_punct_space:
def fragments():
prev = None
for text in sel.xpath('.//text()').extract():
if prev is not None and (_has_trailing_whitespace(prev)
or (not _has_punct_after(text) and
not _has_punct_before(prev))):
yield ' '
yield text
prev = text
return _whitespace.sub(' ', ''.join(fragments()).strip())
else:
fragments = (x.strip() for x in sel.xpath('.//text()').extract())
return _whitespace.sub(' ', ' '.join(x for x in fragments if x))
def cleaned_selector(html):
""" Clean selector.
"""
try:
tree = _cleaned_html_tree(html)
sel = parsel.Selector(root=tree, type='html')
except (lxml.etree.XMLSyntaxError,
lxml.etree.ParseError,
lxml.etree.ParserError,
UnicodeEncodeError):
# likely plain text
sel = parsel.Selector(html)
return sel
def extract_text(html, guess_punct_space=True):
"""
Convert html to text, cleaning invisible content such as styles.
Almost the same as normalize-space xpath, but this also
adds spaces between inline elements (like <span>) which are
often used as block elements in html markup.
When guess_punct_space is True (default), no extra whitespace is added
for punctuation. This has a slight (around 10%) performance overhead
and is just a heuristic.
html should be a unicode string or an already parsed lxml.html element.
"""
sel = cleaned_selector(html)
return selector_to_text(sel, guess_punct_space=guess_punct_space)
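# Minimal usage sketch (illustrative; expected result worked out by hand from the rules above):
#   extract_text(u'<div><span>Hello</span> <b>world</b>!</div>')  # -> 'Hello world!'
# Note how a space is guessed between the inline <span> and <b>, but not before the '!'.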
|
class Endpoint:
index = 0
latency = 0
caches = {}
requests = {}
def __init__(self, index, latency):
self.index = index
self.latency = latency
self.caches = {}
self.requests = {}
def add_cache(self, index, latency):
self.caches[index] = latency
def add_request(self, video_id, number_of_request):
if video_id in self.requests:
self.requests[video_id] += number_of_request
else:
self.requests[video_id] = number_of_request
# Video Ids
def get_rank_requests(self):
        return sorted(self.requests, key=self.requests.get, reverse=True)
# Cache Ids
def get_rank_caches(self):
        return sorted(self.caches, key=self.caches.get)
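# Illustrative sketch (hypothetical values): rank caches by latency (ascending) and
# videos by request count (descending) for one endpoint.
#   ep = Endpoint(index=0, latency=1000)
#   ep.add_cache(0, 100)
#   ep.add_cache(1, 300)
#   ep.add_request(video_id=3, number_of_request=1500)
#   ep.get_rank_caches()    # -> [0, 1]
#   ep.get_rank_requests()  # -> [3]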
|
from collections import Sequence
from tornado.web import RequestHandler
from hybrid.util import imports
from hybrid.metaclass import CatchExceptionMeta
class CastException(Exception):
pass
class BaseHandler(RequestHandler):
__metaclass__ = CatchExceptionMeta
def getviewfunc(self, view, module):
if not view and not module:
from ..views import json_view
return json_view
elif not view:
raise RuntimeError("missing view function name")
if not module:
from .. import views
m = views
else:
m = imports(module)
if not m or not hasattr(m, view):
raise RuntimeError('can\'t find %s:%s' % (module, view))
return getattr(m, view)
def render_func(self, data, view=None, module=None, *a, **kw):
self.write(self.getviewfunc(view, module)(data, *a, **kw))
self.finish()
def cast(self, v, t):
try:
return t(v)
        except Exception:
return CastException()
def param_check(self, args, howto=None):
howto = howto or (lambda a: self.get_argument(a, None))
for arg in args:
arg = list(arg)
if len(arg) == 1:
arg += [(type, )]
if len(arg) == 2:
arg += [lambda *a, **kw: True]
if not isinstance(arg[1], Sequence):
arg[1] = (arg[1], )
else:
arg[1] = tuple(arg[1])
value = howto(arg[0])
if value is None and None in arg[1]:
continue
elif value is None and None not in arg[1]:
return False
def __check(t):
value2 = self.cast(value, t)
if not isinstance(value2, CastException) and \
isinstance(value2, arg[1]) and \
arg[2](value2):
return True
if not any([__check(t) for t in arg[1]]):
return False
return True
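# Illustrative sketch of param_check in a handler (hypothetical argument names): each entry is
# (argument_name, expected_type_or_types[, extra_predicate]); the value is fetched with
# get_argument and must cast to one of the listed types and satisfy the predicate.
#   def get(self):
#       ok = self.param_check([
#           ('page', int, lambda v: v > 0),  # required, must cast to a positive int
#           ('q', str),                      # required plain string, no extra predicate
#       ])
#       if not ok:
#           raise RuntimeError('bad parameters')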
|
from app.automata_learning.black_box.pac_learning.smart_teacher.equivalence import equivalence
def model_compare(hypothesis_pre, hypothesis_now, upper_guard, system):
# Do not compare for the first time
if hypothesis_pre is None:
return True, []
eq_flag, ctx = equivalence(hypothesis_now, hypothesis_pre, upper_guard) # ctx is DTWs
if eq_flag:
raise Exception('eq_flag must be false!')
flag = True
DRTWs_real, value_real = system.test_DTWs(ctx)
# 这个测试可以认为是mq上,不记录在test数量上
system.test_num -= 1
system.mq_num += 1
DRTWs_now, value_now = hypothesis_now.test_DTWs(ctx)
if value_real != value_now:
flag = False
return flag, ctx
|
from .user_config import load_data, save_data
|
import pandas as pd
import numpy as np
import random
import plotly.express as px
import pickle
class ShapeEstimator:
def __init__(self, connections_file_name, duplicate_data=False, point_details_file_name=None, color_definitions_file=None, optimized_points_file=None):
self.data = self.load_data(connections_file_name)
self.duplicate_data = duplicate_data
if self.duplicate_data:
self.data = self.data_duplicator()
self.point_details_data = None
self.color_dictionary = {}
self.color_definition_column = None
if point_details_file_name is not None:
self.point_details_data = self.load_data(point_details_file_name)
if color_definitions_file is not None:
self.color_definitions_data = self.load_data(color_definitions_file)
self.color_definition_column = self.color_definitions_data.columns[0]
self.color_dictionary = self.create_color_dictionary()
if optimized_points_file is not None:
self.load_data_from_optimization_file(optimized_points_file)
self.unique_points = self.get_all_unique_points_codes()
self.connections_count = self.points_connections_counter()
self.best_point_hub_name = self.connections_count.head(1)['point_name'].item()
if optimized_points_file is None:
self.points_dictionary = self.create_points_dictionary()
self.optimize()
self.values, self.labels = self.split_points_dictionary()
self.normalize_values()
self.data_frame = self.create_dataframe_from_points_dictionary()
self.calculate_average_points_distance_from_the_center()
def load_data_from_optimization_file(self, optimized_points_file):
        with open(optimized_points_file, 'rb') as pickle_in:
            self.points_dictionary, cumulative_errors, max_errors, self.duplicate_data = pickle.load(pickle_in)
if self.duplicate_data:
self.data = self.data_duplicator()
self.show_optimization_stats(cumulative_errors, max_errors)
def data_duplicator(self):
        return pd.concat([self.data, self.data], ignore_index=True)
@staticmethod
def load_data(file_name):
return pd.read_csv(file_name, sep=',')
def get_all_unique_points_codes(self):
return pd.unique(np.array(self.data[['departure_point', 'arrival_point']]).flatten())
def create_points_dictionary(self):
points_dictionary = {}
for point_name in self.unique_points:
points_dictionary[point_name] = [(random.random() - 0.5) * 2, (random.random() - 0.5) * 2, (random.random() - 0.5) * 2]
return points_dictionary
def create_color_dictionary(self):
def get_values_from_columns(index):
return [self.color_definitions_data.iloc[i, index] for i in range(len(self.color_definitions_data.iloc[:, index]))]
keys = get_values_from_columns(0)
values = get_values_from_columns(1)
return dict(zip(keys, values))
def calculate_errors(self):
cumulative_error = 0
max_error = 0
for index, row in self.data.iterrows():
error = abs(self.calculate_euclidean_distance_between_two_points(self.points_dictionary[row['departure_point']], self.points_dictionary[row['arrival_point']]) - row['measurement_value'])
cumulative_error += error
if max_error < error:
max_error = error
return cumulative_error, max_error
def points_connections_counter(self):
connections_dictionary = dict(zip(self.unique_points, [0] * len(self.unique_points)))
for index, row in self.data.iterrows():
points = [row['departure_point'], row['arrival_point']]
for point in points:
connections_dictionary[point] += 1
connections_count = pd.DataFrame(columns=['point_name', 'count'])
for point_name in connections_dictionary.keys():
row = pd.DataFrame({'point_name': [point_name], 'count': [connections_dictionary[point_name]]})
            connections_count = pd.concat([connections_count, row], ignore_index=True)
return connections_count.sort_values('count', ascending=False)
def get_point_connections_count(self, point_name):
return self.connections_count.loc[self.connections_count['point_name'] == point_name]['count'].item()
def optimize(self, mod=0.5, iterations=250, tol=0.001, optimized_points_file_name='optimized_points.pickle'):
cumulative_errors, max_errors = [], []
cumulative_error, max_error = self.calculate_errors()
cumulative_errors.append(cumulative_error)
max_errors.append(max_error)
for i in range(iterations):
data = self.data.sample(frac=1)
previous_points_dictionary = dict(self.points_dictionary.copy())
for index, row in data.iterrows():
distance = self.calculate_euclidean_distance_between_two_points(self.points_dictionary[row['departure_point']], self.points_dictionary[row['arrival_point']])
vector = self.calculate_vector_between_two_points(self.points_dictionary[row['departure_point']], self.points_dictionary[row['arrival_point']])
if row['departure_point'] != self.best_point_hub_name and row['arrival_point'] != self.best_point_hub_name:
point_to_move = random.choice([0, 1])
elif row['departure_point'] == self.best_point_hub_name:
point_to_move = 1
else:
point_to_move = 0
if distance > row['measurement_value']:
if point_to_move == 0:
for j in range(3):
self.points_dictionary[row['departure_point']][j] += mod * vector[j]
else:
for j in range(3):
self.points_dictionary[row['arrival_point']][j] -= mod * vector[j]
elif distance < row['measurement_value']:
if point_to_move == 0:
for j in range(3):
self.points_dictionary[row['departure_point']][j] -= mod * vector[j]
else:
for j in range(3):
self.points_dictionary[row['arrival_point']][j] += mod * vector[j]
cumulative_error, max_error = self.calculate_errors()
if cumulative_error > cumulative_errors[-1]:
self.points_dictionary = previous_points_dictionary
mod /= 1.05
if mod < tol:
break
else:
cumulative_errors.append(cumulative_error)
max_errors.append(max_error)
with open(optimized_points_file_name, 'wb') as f:
pickle.dump((self.points_dictionary, cumulative_errors, max_errors, self.duplicate_data), f)
self.show_optimization_stats(cumulative_errors, max_errors)
def show_optimization_stats(self, cumulative_errors, max_errors):
print(f'Cumulative error: {cumulative_errors[-1]}')
print(f'Average error: {cumulative_errors[-1] / len(self.data)}')
print(f'Max error: {max_errors[-1]}')
print(f'Data duplicated: {self.duplicate_data}')
def split_points_dictionary(self):
values, labels = [], []
for point_name in self.points_dictionary:
values.append(self.points_dictionary[point_name])
labels.append(point_name)
return np.array(values), labels
def create_dataframe_from_points_dictionary(self):
data = pd.DataFrame(columns=['point', 'x', 'y', 'z'])
for i in range(len(self.labels)):
point_coords = self.values[i]
row = pd.DataFrame({'point': [self.labels[i]],
'x': [point_coords[0]],
'y': [point_coords[1]],
'z': [point_coords[2]]})
            data = pd.concat([data, row], ignore_index=True)
return data
@staticmethod
def calculate_euclidean_distance_between_two_points(point1, point2):
value = 0
for i in range(len(point1)):
value += (point1[i] - point2[i]) ** 2
return value ** (1 / 2)
@staticmethod
def calculate_vector_between_two_points(point1, point2):
vector = []
for i in range(len(point1)):
vector.append(point2[i] - point1[i])
return vector
def normalize_values(self): # <-1; 1>
for i in range(3):
a = min(self.values[:, i])
b = max(self.values[:, i])
for j in range(len(self.values)):
self.values[j, i] = ((2 * (self.values[j, i] - a)) / (b - a)) - 1
def calculate_average_points_distance_from_the_center(self):
sum_distances = 0
for point in self.values:
sum_distances += sum([point[i] ** 2 for i in range(len(point))])
print(f'Average points distance from the center: {sum_distances / len(self.values)}')
def draw_plot(self):
def merge_data():
if self.point_details_data is not None:
data = self.data_frame.join(self.point_details_data.set_index('point'), on='point')
else:
data = self.data_frame
return data
def get_basic_hover_data_dict():
return {'x': False, 'y': False, 'z': False, 'point': True}
def extend_hover_dict(basic_dict):
for column in self.point_details_data.columns:
if column != 'point':
basic_dict[column] = True
return basic_dict
plot_data = merge_data()
hover_data_dict = get_basic_hover_data_dict()
if self.point_details_data is not None:
fig = px.scatter_3d(plot_data, x='x', y='y', z='z', color=self.color_definition_column, hover_data=extend_hover_dict(hover_data_dict), color_discrete_map=self.color_dictionary)
else:
fig = px.scatter_3d(plot_data, x='x', y='y', z='z', color=self.color_definition_column, hover_data=hover_data_dict, color_discrete_map=self.color_dictionary)
fig.update_layout(
scene=dict(
xaxis={'showgrid': False, 'zeroline': False, 'showline': False, 'showticklabels': False, 'backgroundcolor': 'rgba(255, 255, 255, 0)', 'visible': False},
yaxis={'showgrid': False, 'zeroline': False, 'showline': False, 'showticklabels': False, 'backgroundcolor': 'rgba(255, 255, 255, 0)', 'visible': False},
zaxis={'showgrid': False, 'zeroline': False, 'showline': False, 'showticklabels': False, 'backgroundcolor': 'rgba(255, 255, 255, 0)', 'visible': False}
)
)
fig.show()
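# Usage sketch (illustrative; the CSV file names are placeholders, but the required columns
# departure_point, arrival_point and measurement_value are the ones read by this class):
#   estimator = ShapeEstimator('connections.csv')
#   estimator.draw_plot()
# Re-using a previously optimized layout instead of running optimize() again:
#   estimator = ShapeEstimator('connections.csv', optimized_points_file='optimized_points.pickle')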
|
# -*- coding: utf-8 -*-
"""
Editor: Zhao Xinlu
School: BUPT
Date: 2018-02-13
Function: count how many times the digit 1 appears in the integers from 1 to n
"""
def numOf1Between1AndN(n):
"""
    Brute force: count the 1s in each integer from 1 to n and accumulate; time efficiency is low.
:param n:
:return:
"""
if n == 0:
return 0
if n < 0:
return None
count = 0
for i in range(1, n+1):
count += numOf1(i)
return count
def numOf1(num):
"""
    Count how many times the digit 1 appears in a single integer num.
:param num:
:return:
"""
times = 0
while num:
if num % 10 == 1:
times += 1
num /= 10
return times
##########################################
"""
A faster approach (from "Jian Zhi Offer" / Coding Interviews)
"""
##########################################
def get_digits(n):
"""
    Get the number of digits of an integer, e.g. 23456 is a 5-digit integer.
:param n:
:return:
"""
ret = 0
while n:
ret += 1
n /= 10
return ret
def getNumOf1EachDigit(digit):
"""
    For 1-digit numbers (1-9), the digit 1 appears exactly once.
    For d-digit numbers (10**(d-1) to 10**d - 1): the leading place contributes 10**(d-1) ones
    (the block 10**(d-1) .. 2*10**(d-1) - 1), and each of the 9 leading-digit blocks repeats the
    1s of its lower d-1 places, i.e. another 9 * f(d-1), where f(d-1) is the count of 1s in
    1 .. 10**(d-1) - 1. Adding the 1s of the numbers below 10**(d-1) as well, this function
    returns f(d), the total number of 1s in 1 .. 10**d - 1, which satisfies:
    f(1) = 1
    f(d) = 10 * f(d-1) + 10 ** (d-1)    (equivalently, f(d) = d * 10 ** (d-1))
:param digit:
:return:
"""
if digit <= 0:
return 0
if digit == 1:
return 1
number = 9 * getNumOf1EachDigit(digit-1) + 10 ** (digit-1)
return number + getNumOf1EachDigit(digit-1)
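# Sanity check of the recurrence above (values worked out by hand):
#   f(1) = 1, f(2) = 10*1 + 10 = 20, f(3) = 10*20 + 100 = 300
#   i.e. there are 20 ones in 1..99 and 300 ones in 1..999.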
def numOf1Between1AndN_2(n):
"""
    Digit-by-digit approach based on the leading digit of n; much more time-efficient.
:param n:
:return:
"""
if n <= 0:
return 0
if n >= 1 and n < 10:
return 1
digit = get_digits(n)
low_nums = getNumOf1EachDigit(digit-1)
high = int(str(n)[0])
low = n - high * 10 ** (digit-1)
if high == 1:
high_nums = low + 1
all_nums = high_nums
else:
high_nums = 10 ** (digit-1)
all_nums = high_nums + low_nums * (high - 1)
return low_nums + all_nums + numOf1Between1AndN_2(low)
if __name__ == '__main__':
# print numOf1Between1AndN(9923446)
# print get_digits(23456)
# print getNumOf1EachDigit(5)
print numOf1Between1AndN_2(9923446)
|
#
# Marco Panato
# PyMusicServer
#
import logging
import globals
from time import sleep
from database.sqlite3.dbManager import DbManager
from database.DataManager import DataManager
from music.manager import PyMusicManager
from settings.settingsprovider import SettingsProvider
from utils.threadingutils import runinanotherthread
from frontend.httpfrontend.httphandler import createHTTPServer
from frontend.httpsfrontend.httpshandler import createHTTPSServer
from parameters.parameterparser import parse_arguments
from music.download.youtubedlupdater import YoutubeDlUpdater
def main():
parse_arguments()
logging.info('PyMusicServer3 %s Marco Panato - %s' % (globals.REVISION, globals.DATE))
logging.info('[MAIN] Loading settings')
SettingsProvider.get_instance()
logging.info('[MAIN] Initializing DbManager')
DbManager.get_instance()
logging.info('[MAIN] Initializing DataManager')
DataManager.get_instance()
logging.info('[MAIN] Initializing MusicManager')
PyMusicManager.get_instance()
logging.info('[MAIN] Initializing youtube-dl updater')
ydlupdater = YoutubeDlUpdater()
ydlupdater.start()
logging.info('[MAIN] Creating HTTP frontend')
httpfrontend = createHTTPServer()
logging.info('[MAIN] Creating HTTPS frontend')
httpsfrontend = createHTTPSServer()
logging.info('[MAIN] Waiting for clients on port %s and %s...' % (
SettingsProvider.get_instance().readsetting('listenporthttp'),
SettingsProvider.get_instance().readsetting('listenporthttps')))
threadhttp = runinanotherthread(httpfrontend.serve_forever)
threadhttps = runinanotherthread(httpsfrontend.serve_forever)
try:
while True:
sleep(500)
except KeyboardInterrupt:
logging.info("[MAIN] CTRL-C catched! Closing...")
finally:
logging.info("[MAIN] Closing server")
httpfrontend.shutdown()
httpsfrontend.shutdown()
threadhttp.join(2)
threadhttps.join(2)
del httpfrontend
del httpsfrontend
logging.info('[MAIN] Closing youtube-dl updater')
ydlupdater.stop()
logging.info("[MAIN] Closing settings manager")
SettingsProvider.del_instance()
logging.info("[MAIN] Closing MusicManager")
PyMusicManager.del_instance()
logging.info("[MAIN] Closing DataManager")
DataManager.del_instance()
logging.info("[MAIN] Closing DbManager")
DbManager.del_instance()
logging.info("[MAIN] Bye")
if __name__ == "__main__":
main()
|
import logging
from aiogram import Bot, Dispatcher, executor
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.utils.executor import start_webhook
from app import handlers
from app.commands import set_commands
from app.settings import (WEBHOOK_IS_ACTIVE, WEBHOOK_URL, WEBHOOK_PATH,
WEBAPP_HOST, WEBAPP_PORT, TOKEN)
async def on_startup(dp: Dispatcher):
logging.warning('Setting handlers...')
await handlers.setup_all(dp)
logging.warning('Setting commands...')
await set_commands(dp)
if WEBHOOK_IS_ACTIVE:
logging.warning('Setting webhook...')
await bot.set_webhook(WEBHOOK_URL)
async def on_shutdown(dp: Dispatcher):
logging.warning('Shutting down..')
await dp.storage.close()
await dp.storage.wait_closed()
await bot.delete_webhook()
logging.warning('Webhook down')
if __name__ == '__main__':
bot = Bot(token=TOKEN, parse_mode='HTML')
dp = Dispatcher(bot, storage=MemoryStorage())
try:
if WEBHOOK_IS_ACTIVE:
start_webhook(
dispatcher=dp,
webhook_path=WEBHOOK_PATH,
on_startup=on_startup,
on_shutdown=on_shutdown,
skip_updates=True,
host=WEBAPP_HOST,
port=WEBAPP_PORT
)
else:
executor.start_polling(dp, on_startup=on_startup,
skip_updates=True)
except Exception as E:
logging.error(f'An error occurred while launching the bot - {E}')
|
#!/usr/bin/env python
# Copyright (c) 2014 Miguel Sarabia
# Imperial College London
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#
import rospy
import sys, time, subprocess  # sys is needed for sys.exit() in NaoSpeech.__init__
from speech.msg import SpcCmd # ros function, import 'Speech_String' type
from speech.msg import SpcNLP
from naoqi import ALModule, ALProxy # microphone and speaker
import alsaaudio # signal processing, a python module
#from speech.msg import NLPRes
#from real_time import real_time_recog
'''
def reg(mic, IP, wordlist):
#global mic
mic.start()
asr = ALProxy('ALSpeechRecognition', IP, 9559)
mem = ALProxy('ALMemory', IP, 9559)
asr.setLanguage('English')
asr.setVocabulary(wordlist, True)
asr.subscribe('hello')
mem.subscribeToEvent('e','WordRecognized', 'WordRecognized')
time.sleep(10)
rospy.loginfo(mem.getData('WordRecognized'))
pub = rospy.Publisher('/NLP_2_CNC', NLPRes, queue_size=1000)
rospy.sleep(1)
pub.publish(str(mem.getData('WordRecognized')[0]), 0)
rospy.loginfo('finished---------------')
#rospy.loginfo('-------\n'+str(type(['mem.WordRecognized']))+'\n\n')
# rospy.loginfo(['h', 'e'])
#rospy.loginfo(mem['WordRecognized'])
#njm.sayString(mem.WordRecognized)
mem.removeData('WordRecognized')
mem.unsubscribeToEvent('e', 'WordRecognized')
asr.unsubscribe('hello')
mic.stop()
'''
def runProcess(cmd):
rospy.logwarn('.....Start waiting........')
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
process.wait()
rospy.logwarn('.....Finished waiting........')
s = process.stdout.read()
a = s.split('\n')
rospy.loginfo(a)
out = ''
    for i in a:
rospy.loginfo(i)
if i.find('::::')>=0:
out = i.split('::::')[1]
if i.find('????')>=0:
out = 'Google refused to recognise this speech or Google cannot understand what you said.'
if i.find(';;;;')>=0:
out = 'Could not request results from Google Speech Recognition service'
return out
def reg():
pub = rospy.Publisher('/SPC_2_NLP', SpcNLP, queue_size=1000)
# rospy.sleep(1)
#s = runProcess('')
#s = runProcess('sudo python /home/human/catkin_ws/src/speech/src/nao_speech_lib/real_time.py')
s = runProcess(['sudo', 'python', '/home/human/catkin_ws/src/speech/src/nao_speech_lib/real_time.py'])
pub.publish(str(s))
class NaoMic(ALModule):
'''
This is the module that connects remotely to the NAO microphones
'''
def __init__(self, name, ip, port):
#Init parent
super(NaoMic, self).__init__(name)
self.__ip = ip
self.__port = port
self.__pcm = alsaaudio.pcm = alsaaudio.PCM(
alsaaudio.PCM_PLAYBACK,
alsaaudio.PCM_NONBLOCK)
self.__pcm.setrate( 16000 )
self.__pcm.setchannels( 1 )
self.__pcm.setformat( alsaaudio.PCM_FORMAT_S16_LE )
self.__pcm.setperiodsize(1365)
def __del__(self):
self.stop()
def __get_proxy(self):
return ALProxy("ALAudioDevice", self.__ip, self.__port)
def start(self):
proxy = self.__get_proxy()
proxy.setClientPreferences( self.getName() , 16000, 3, 0 )
#This will call processRemote with new data from NAO
proxy.subscribe(self.getName())
def stop(self):
#Unsubscribe from microphone
proxy = self.__get_proxy()
proxy.unsubscribe( self.getName() )
def processRemote(self, channels, samples_by_channel, timestamp, raw_data ):
self.__pcm.write(raw_data)
class NaoSpeech:
def __init__(self, ip, port, mic, wordlist):
rospy.loginfo('test: hello')
self.__proxy = ALProxy("ALAnimatedSpeech", ip, port)
self.__subs = rospy.Subscriber("/CNC_2_SPC", SpcCmd, self.say)
self.ip = ip
#self.wordlist = wordlist
#self.mic = mic
try:
self.ledproxy = ALProxy("ALLeds", ip, port)
except Exception,e:
print "Could not create proxy to ALLeds"
print "Error was: ",e
sys.exit(1)
def say(self, msg):
rospy.loginfo(msg.question+ '0')
# set the local configuration
sayconfig = {"bodyLanguageMode":"contextual"}
ledname = 'EarLeds'
self.ledproxy.off(ledname)
self.__proxy.say( msg.question, sayconfig)
#reg(self.mic,self.ip, self.wordlist)
self.ledproxy.on(ledname)
if msg.spc_state != 100:
reg()
def sayString(self, msg):
rospy.loginfo(msg+ ' start')
self.__proxy.say( msg)
rospy.loginfo(msg+ ' end')
|
from deco.sources import Dataset
import numpy as np
def squeeze_rec(item):
if isinstance(item, list):
new_list = []
for subitem in item:
new_list.append(squeeze_rec(subitem))
if len(new_list) == 1:
new_list = new_list[0]
return new_list
else:
return item
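# Examples of the recursive squeeze (illustrative):
#   squeeze_rec([[1], [2, 3]])  -> [1, [2, 3]]   # singleton inner lists are unwrapped
#   squeeze_rec([[[5]]])        -> 5             # nested singletons collapse all the way down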
class Squeeze(Dataset):
def __init__(self, parent):
self.parent = parent
def __iter__(self):
for item in self.parent:
yield squeeze_rec(item)
#yield np.squeeze(item)
def squeeze(self):
return Squeeze(self)
Dataset.squeeze = squeeze
|
"""
Index
- all bib files for use with `glottolog refsearch`
- all languoid info files for use with `glottolog langsearch`
This will take
- about 15 minutes to create an index of about 450 MB for references and
- a couple of minutes to create an index of about 60 MB for languoids.
"""
from pyglottolog import fts
def run(args):
fts.build_index(args.repos, args.log)
fts.build_langs_index(args.repos, args.log)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0094-Binary-Tree-Inorder-Traversal.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-03-06
=================================================================="""
import sys
import time
from typing import List, Optional
# import collections
"""
LeetCode - 0094 - (Easy) - Binary Tree Inorder Traversal
https://leetcode.com/problems/binary-tree-inorder-traversal/
Description & Requirement:
Given the root of a binary tree,
return the inorder traversal of its nodes' values.
Example 1:
Input: root = [1,null,2,null,null,3]
Output: [1,3,2]
Example 2:
Input: root = []
Output: []
Example 3:
Input: root = [1]
Output: [1]
Constraints:
The number of nodes in the tree is in the range [0, 100].
-100 <= Node.val <= 100
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right # the left and right of leaf_node are both None
@staticmethod
def build_binary_tree_layer(val_list: List[int]):
if not isinstance(val_list, list) or len(val_list) <= 0:
return None
node_list = []
for v in val_list:
if v is None:
node_list.append(None)
else:
node_list.append(TreeNode(val=v))
len_node_list = len(node_list)
for idx, cur_node in enumerate(node_list):
if cur_node is not None:
cur_node_right_index = (idx + 1) << 1
cur_node_left_index = cur_node_right_index - 1
if cur_node_left_index < len_node_list:
cur_node.left = node_list[cur_node_left_index]
if cur_node_right_index < len_node_list:
cur_node.right = node_list[cur_node_right_index]
return node_list[0] # return root_node
@staticmethod
def show_binary_tree_pre_order(root_node) -> List[int]:
val_list = []
def __dfs(cur_node):
if isinstance(cur_node, TreeNode):
val_list.append(cur_node.val)
__dfs(cur_node.left)
__dfs(cur_node.right)
__dfs(root_node)
return val_list
@staticmethod
def show_binary_tree_mid_order(root_node) -> List[int]:
val_list = []
def __dfs(cur_node):
if isinstance(cur_node, TreeNode):
__dfs(cur_node.left)
val_list.append(cur_node.val)
__dfs(cur_node.right)
__dfs(root_node)
return val_list
@staticmethod
def show_binary_tree_post_order(root_node) -> List[int]:
val_list = []
def __dfs(cur_node):
if isinstance(cur_node, TreeNode):
__dfs(cur_node.left)
__dfs(cur_node.right)
val_list.append(cur_node.val)
__dfs(root_node)
return val_list
class Solution:
def inorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
# exception case
if not isinstance(root, TreeNode):
return [] # no tree, just null
# main method: (DFS inorder Traversal)
return self._inorderTraversal(root)
def _inorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
assert isinstance(root, TreeNode)
res = []
def __dfs(cur_node):
if isinstance(cur_node, TreeNode):
__dfs(cur_node.left)
res.append(cur_node.val)
__dfs(cur_node.right)
__dfs(root)
return res
def main():
# Example 1: Output: [1,2,3]
root = [1, None, 2, None, None, 3]
# Example 2: Output: []
# root = []
# Example 3: Output: [1]
# root = [1]
root_node = TreeNode.build_binary_tree_layer(root)
# init instance
solution = Solution()
# run & time
start = time.process_time()
ans = solution.inorderTraversal(root_node)
end = time.process_time()
# show answer
print('\nAnswer:')
print(ans)
# print(TreeNode.show_binary_tree_mid_order(ans))
# show time consumption
print('Running Time: %.5f ms' % ((end - start) * 1000))
if __name__ == "__main__":
sys.exit(main())
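# Expected console output for Example 1 above (sketch; the timing figure will vary):
#   Answer:
#   [1, 3, 2]
#   Running Time: ... ms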
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example DAG demonstrating Kubernetes Pod Operator."""
# [START composer_kubernetespodoperator]
import datetime
from airflow import models
# [END composer_kubernetespodoperator]
from airflow.contrib.kubernetes import pod
from airflow.contrib.kubernetes import secret
# [START composer_kubernetespodoperator]
from airflow.contrib.operators import kubernetes_pod_operator
# [END composer_kubernetespodoperator]
# A Secret is an object that contains a small amount of sensitive data such as
# a password, a token, or a key. Such information might otherwise be put in a
# Pod specification or in an image; putting it in a Secret object allows for
# more control over how it is used, and reduces the risk of accidental
# exposure.
secret_file = secret.Secret(
# Mounts the secret as a file in RAM-backed tmpfs.
deploy_type='volume',
# File path of where to deploy the target, since deploy_type is 'volume'
# rather than 'env'.
deploy_target='/etc/sql_conn',
# Name of secret in Kubernetes, if the secret is not already defined in
# Kubernetes using kubectl the Pod will fail to find the secret, and in
# turn, fail to launch.
secret='airflow-secrets',
# Key of the secret within Kubernetes.
key='sql_alchemy_conn')
secret_env = secret.Secret(
# Expose the secret as environment variable.
deploy_type='env',
# The name of the environment variable, since deploy_type is `env` rather
# than `volume`.
deploy_target='SQL_CONN',
secret='airflow-secrets',
key='sql_alchemy_conn')
# [START composer_kubernetespodoperator]
YESTERDAY = datetime.datetime.now() - datetime.timedelta(days=1)
# If a Pod fails to launch, or has an error occur in the container, Airflow
# will show the task as failed, and the task logs will contain the
# information required to debug the failure.
with models.DAG(
dag_id='composer_sample_kubernetes_pod',
schedule_interval=datetime.timedelta(days=1),
start_date=YESTERDAY) as dag:
# Only name, namespace, image, and task_id are required to create a
# KubernetesPodOperator. In Cloud Composer, currently the operator defaults
    # to using the config file found at `/home/airflow/composer_kube_config` if
# no `config_file` parameter is specified. By default it will contain the
# credentials for Cloud Composer's Google Kubernetes Engine cluster that is
# created upon environment creation.
kubernetes_min_pod = kubernetes_pod_operator.KubernetesPodOperator(
# The ID specified for the task.
task_id='pod-ex-minimum',
# Name of task you want to run, used to generate Pod ID.
name='pod-ex-minimum',
        # Entrypoint of the container; if not specified, the Docker
        # container's entrypoint is used. The cmds parameter is templated.
cmds=['echo'],
        # The namespace to run within Kubernetes; the default namespace is
        # `default`. Pods launched here compete with the Airflow workers and
        # scheduler for resources within the Cloud Composer environment; the
        # recommended solution is to increase the number of nodes to satisfy
        # the computing requirements. Alternatively, launching pods into a
        # custom namespace stops them from fighting over resources.
namespace='default',
# Docker image specified. Defaults to hub.docker.com, but any fully
# qualified URLs will point to a custom repository. Supports private
# gcr.io images if the Composer Environment is under the same
# project-id as the gcr.io images.
image='gcr.io/gcp-runtimes/ubuntu_16_0_4')
# [END composer_kubernetespodoperator]
kubenetes_template_ex = kubernetes_pod_operator.KubernetesPodOperator(
task_id='ex-kube-templates',
name='ex-kube-templates',
namespace='default',
image='bash',
# All parameters below are able to be templated with jinja -- cmds,
# arguments, env_vars, and config_file. For more information visit:
# https://airflow.apache.org/code.html#default-variables
        # Entrypoint of the container; if not specified, the Docker
        # container's entrypoint is used. The cmds parameter is templated.
cmds=['echo'],
        # The ds jinja template variable is the execution date as YYYY-MM-DD,
        # so this docker image will echo the execution date. These are the
        # arguments to the entrypoint; the docker image's CMD is used if this
        # is not provided. The arguments parameter is templated.
arguments=['{{ ds }}'],
# The var template variable allows you to access variables defined in
# Airflow UI. In this case we are getting the value of my_value and
# setting the environment variable `MY_VALUE`. The pod will fail if
# `my_value` is not set in the Airflow UI.
env_vars={'MY_VALUE': '{{ var.value.my_value }}'},
        # Sets the config file to a path under the airflow home configured in
        # airflow.cfg. If the configuration file does not exist or does not
        # provide valid credentials, the pod will fail to launch.
config_file="{{ conf.get('core', 'airflow_home') }}/config")
kubernetes_secret_vars_ex = kubernetes_pod_operator.KubernetesPodOperator(
task_id='ex-kube-secrets',
name='ex-kube-secrets',
namespace='default',
image='ubuntu',
        # The secrets to pass to the Pod; the Pod will fail to create if the
        # secrets you specify in a Secret object do not exist in Kubernetes.
secrets=[secret_env, secret_file],
# env_vars allows you to specify environment variables for your
# container to use. env_vars is templated.
env_vars={'EXAMPLE_VAR': '/example/value'})
# [START composer_kubernetespodaffinity]
kubernetes_affinity_ex = kubernetes_pod_operator.KubernetesPodOperator(
task_id='ex-pod-affinity',
name='ex-pod-affinity',
namespace='default',
image='perl',
cmds=['perl'],
arguments=['-Mbignum=bpi', '-wle', 'print bpi(2000)'],
# affinity allows you to constrain which nodes your pod is eligible to
# be scheduled on, based on labels on the node. In this case, if the
# label 'cloud.google.com/gke-nodepool' with value
# 'nodepool-label-value' or 'nodepool-label-value2' is not found on any
# nodes, it will fail to schedule.
affinity={
'nodeAffinity': {
# requiredDuringSchedulingIgnoredDuringExecution means in order
# for a pod to be scheduled on a node, the node must have the
# specified labels. However, if labels on a node change at
# runtime such that the affinity rules on a pod are no longer
# met, the pod will still continue to run on the node.
'requiredDuringSchedulingIgnoredDuringExecution': {
'nodeSelectorTerms': [{
'matchExpressions': [{
# When nodepools are created in Google Kubernetes
# Engine, the nodes inside of that nodepool are
# automatically assigned the label
# 'cloud.google.com/gke-nodepool' with the value of
# the nodepool's name.
'key': 'cloud.google.com/gke-nodepool',
'operator': 'In',
# The label key's value that pods can be scheduled
# on.
'values': [
'node-pool-name-1',
'node-pool-name-2',
]
}]
}]
}
}
})
# [END composer_kubernetespodaffinity]
kubernetes_full_pod = kubernetes_pod_operator.KubernetesPodOperator(
task_id='ex-all-configs',
name='pi',
namespace='default',
image='perl',
        # Entrypoint of the container; if not specified, the Docker
        # container's entrypoint is used. The cmds parameter is templated.
cmds=['perl'],
# Arguments to the entrypoint. The docker image's CMD is used if this
# is not provided. The arguments parameter is templated.
arguments=['-Mbignum=bpi', '-wle', 'print bpi(2000)'],
        # The secrets to pass to the Pod; the Pod will fail to create if the
        # secrets you specify in a Secret object do not exist in Kubernetes.
secrets=[],
# Labels to apply to the Pod.
labels={'pod-label': 'label-name'},
# Timeout to start up the Pod, default is 120.
startup_timeout_seconds=120,
# The environment variables to be initialized in the container
# env_vars are templated.
env_vars={'EXAMPLE_VAR': '/example/value'},
# If true, logs stdout output of container. Defaults to True.
get_logs=True,
        # Determines when to pull a fresh image: 'IfNotPresent' causes the
        # Kubelet to skip pulling an image that already exists on the node.
        # If you want to always pull a new image, set it to 'Always'.
image_pull_policy='Always',
# Annotations are non-identifying metadata you can attach to the Pod.
# Can be a large range of data, and can include characters that are not
# permitted by labels.
annotations={'key1': 'value1'},
# Resource specifications for Pod, this will allow you to set both cpu
# and memory limits and requirements.
resources=pod.Resources(),
# Specifies path to kubernetes config. If no config is specified will
# default to '~/.kube/config'. The config_file is templated.
config_file='/home/airflow/composer_kube_config',
# If true, the content of /airflow/xcom/return.json from container will
# also be pushed to an XCom when the container ends.
xcom_push=False,
# List of Volume objects to pass to the Pod.
volumes=[],
# List of VolumeMount objects to pass to the Pod.
volume_mounts=[],
# Affinity determines which nodes the Pod can run on based on the
# config. For more information see:
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
affinity={})
|
#!/usr/bin/env python
"""
Run the Matplotlib test suite, using the mplcairo backend to patch out
Matplotlib's agg backend.
.. PYTEST_DONT_REWRITE
"""
from argparse import ArgumentParser
import os
from pathlib import Path
import sys
import warnings
os.environ["MPLBACKEND"] = "agg" # Avoid irrelevant framework issues on OSX.
import mplcairo.base # Need to come before matplotlib import on OSX.
import matplotlib as mpl
import matplotlib.backends.backend_agg
import matplotlib.testing.decorators
import pytest
_IGNORED_FAILURES = {}
def main(argv=None):
parser = ArgumentParser(
description="""\
Run the Matplotlib test suite, using the mplcairo backend to patch out
Matplotlib's agg backend.
To specify a single test module, use ``--pyargs matplotlib.tests.test_foo``.
""",
epilog="Other arguments are forwarded to pytest.")
parser.add_argument("--tolerance", type=float,
help="Set image comparison tolerance.")
args, rest = parser.parse_known_args(argv)
if "--pyargs" not in rest:
rest.extend(["--pyargs", "matplotlib"])
if args.tolerance is not None:
def _raise_on_image_difference(expected, actual, tol):
cmp = mpl.testing.compare.compare_images(
expected, actual, tol, in_decorator=True)
if cmp:
if cmp["rms"] < args.tolerance:
expected = Path(expected)
expected = expected.relative_to(expected.parent.parent)
_IGNORED_FAILURES[expected] = cmp["rms"]
else:
__orig_raise_on_image_tolerance(expected, actual, tol)
__orig_raise_on_image_tolerance = \
mpl.testing.decorators._raise_on_image_difference
mpl.testing.decorators._raise_on_image_difference = \
_raise_on_image_difference
mplcairo.base.get_hinting_flag = mpl.backends.backend_agg.get_hinting_flag
mplcairo.base.FigureCanvasAgg = \
mplcairo.base.FigureCanvasCairo
mplcairo.base.RendererAgg = \
mplcairo.base.GraphicsContextRendererCairo
mpl.backends.backend_agg = \
sys.modules["matplotlib.backends.backend_agg"] = mplcairo.base
mpl.use("agg", warn=False, force=True)
from matplotlib import pyplot as plt
__orig_switch_backend = plt.switch_backend
def switch_backend(backend):
__orig_switch_backend({
"gtk3agg": "module://mplcairo.gtk",
"qt5agg": "module://mplcairo.qt",
"tkagg": "module://mplcairo.tk",
"wxagg": "module://mplcairo.wx",
}.get(backend.lower(), backend))
plt.switch_backend = switch_backend
plt.switch_backend("agg")
return pytest.main(
["--rootdir", str(Path(mpl.__file__).parents[1]), "-p", "__main__"]
+ rest) # Py3.4 compat.
def pytest_collection_modifyitems(session, config, items):
if len(items) == 0:
pytest.exit("No tests found; Matplotlib was likely installed without "
"test data.")
knownfail_message = "Test known to fail with mplcairo."
irrelevant_message = "Test irrelevant for mplcairo."
textfail_message = ("Test failure with large diff due to different text "
"rendering by mplcairo.")
xfail_modules = {
"matplotlib.tests.test_compare_images": irrelevant_message,
"matplotlib.tests.test_mathtext": textfail_message,
"matplotlib.tests.test_constrainedlayout": textfail_message,
"matplotlib.tests.test_tightlayout": textfail_message,
}
xfail_nodeids = {
"matplotlib/tests/" + nodeid: message
for message, nodeids in [
(knownfail_message, [
"test_image.py::test_jpeg_alpha",
"test_image.py::test_figimage0[pdf]",
"test_image.py::test_figimage1[pdf]",
]),
(irrelevant_message, [
"test_agg.py::test_repeated_save_with_alpha",
"test_artist.py::test_cull_markers",
"test_axes.py::test_log_scales[png]",
"test_backend_bases.py::test_non_gui_warning",
"test_backend_pdf.py::test_composite_image",
"test_backend_pdf.py::test_multipage_keep_empty",
"test_backend_pdf.py::test_multipage_pagecount",
"test_backend_pdf.py::test_multipage_properfinalize",
"test_backend_ps.py::test_savefig_to_stringio[eps afm]",
"test_backend_ps.py::test_savefig_to_stringio[eps with usetex]",
"test_backend_ps.py::test_savefig_to_stringio[eps]",
"test_backend_ps.py::test_savefig_to_stringio[ps with distiller]",
"test_backend_ps.py::test_savefig_to_stringio[ps with usetex]",
"test_backend_ps.py::test_savefig_to_stringio[ps]",
"test_backend_ps.py::test_source_date_epoch",
"test_backend_svg.py::test_text_urls",
"test_bbox_tight.py::test_bbox_inches_tight_suptile_legend[pdf]",
"test_bbox_tight.py::test_bbox_inches_tight_suptile_legend[png]",
"test_bbox_tight.py::test_bbox_inches_tight_suptile_legend[svg]",
"test_image.py::test_composite[True-1-ps- colorimage]",
"test_image.py::test_composite[False-2-ps- colorimage]",
"test_scale.py::test_logscale_mask[png]",
"test_simplification.py::test_throw_rendering_complexity_exceeded",
]),
(textfail_message, [
"test_figure.py::test_align_labels[pdf]",
"test_figure.py::test_align_labels[png]",
"test_figure.py::test_align_labels[svg]",
"test_figure.py::test_tightbbox",
])
]
for nodeid in nodeids
}
xfails = []
for item in items:
reason = (xfail_modules.get(item.module.__name__)
or xfail_nodeids.get(item.nodeid))
if reason:
xfails.append(item)
item.add_marker(pytest.mark.xfail(reason=reason))
invalid_xfails = ( # Py3.4 compat.
(set(xfail_modules) - {item.module.__name__ for item in xfails})
| (set(xfail_nodeids) - {item.nodeid for item in xfails}))
if invalid_xfails:
warnings.warn("Unused xfails:\n {}"
.format("\n ".join(sorted(invalid_xfails))))
def pytest_terminal_summary(terminalreporter, exitstatus):
write = terminalreporter.write
if _IGNORED_FAILURES:
write("\n"
"Ignored the following image comparison failures:\n"
"RMS\texpected\n")
for rms, expected in sorted(
((v, k) for k, v in _IGNORED_FAILURES.items()), reverse=True):
write("{:#.2f}\t{}\n".format(rms, expected))
if __name__ == "__main__":
sys.exit(main())
|
import os
import numpy as np
import pandas as pd
data=pd.read_csv("C:/Users/Dell/OneDrive/Desktop/spotify_dataset.csv")
print(data.Index)
data=pd.read_csv("C:/Users/Dell/OneDrive/Desktop/spotify_dataset.csv", index_col=0)
data.head()
data.shape
data.info()
data.isnull()
data.isnull().sum() # We don't have any null values
data.index
data.columns
data.size # 1556x22=34232
data.memory_usage()
data.ndim
data.head(7)
data.tail(3)
data.at[12,'Song Name'] # when we are sure about the column name
data.at[11,'Song Name']
data.iat[0,3] #When we are sure about row and column index
data.iat[4,3]
data.iat[0,2]
data.loc[0:15,'Song Name'] #To access a group of rows and columns
data.loc[0:10,['Song Name','Artist']]
data.dtypes
#data.get_dtype_counts()
data.select_dtypes(include= [object], exclude=None)
#np.unique(array) returns the unique elements of a column
print(np.unique(data['Chord']))
# To treat '?', '??', etc. as missing values, reload with: data=pd.read_csv("C:/Users/Dell/OneDrive/Desktop/spotify_dataset.csv", index_col=0, na_values=['?','??'])
data['Number of Times Charted']=data['Number of Times Charted'].astype('object')
data.info() # Data type changed
print(max(data['Duration (ms)'])/60000)
|
# AGC028a
def main():
n, m = map(int, input().split())
s = input()
t = input()
from fractions import gcd
def lcm_base(a, b):
return a * b // gcd(a, b)
    g = gcd(n, m)
    l = lcm_base(n, m)
    # The merged string has length l. Characters of s land at multiples of
    # l // n and characters of t at multiples of l // m; those positions only
    # coincide g times, at s[k * n // g] and t[k * m // g] for k in range(g).
    # The answer is l if these characters all agree, and -1 otherwise.
    for k in range(g):
        if s[k * n // g] != t[k * m // g]:
            print(-1)
            return
    print(l)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#written by Joshua Shaffer (jfshaffe@ucsc.edu)
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
import math
def main():
data0 = input("experimental group technical replicate 1 experimental gene Cq/Ct value: ")
data1 = input("experimental group technical replicate 2 experimental gene Cq/Ct value: ")
data2 = input("experimental group technical replicate 3 experimental gene Cq/Ct value: ")
data3 = input("experimental group technical replicate 1 housekeeping gene Cq/Ct value: ")
data4 = input("experimental group technical replicate 2 housekeeping gene Cq/Ct value: ")
data5 = input("experimental group technical replicate 3 housekeeping gene Cq/Ct value: ")
data6 = input("control group technical replicate 1 experimental gene Cq/Ct value: ")
data7 = input("control group technical replicate 2 experimental gene Cq/Ct value: ")
data8 = input("control group technical replicate 3 experimental gene Cq/Ct value: ")
data9 = input("control group technical replicate 1 housekeeping gene Cq/Ct value: ")
data10 = input("control group technical replicate 2 housekeeping gene Cq/Ct value: ")
data11 = input("control group technical replicate 3 housekeeping gene Cq/Ct value: ")
dataA = float(data0)
dataB = float(data1)
dataC = float(data2)
dataD = float(data3)
dataE = float(data4)
dataF = float(data5)
dataG = float(data6)
dataH = float(data7)
dataI = float(data8)
dataJ = float(data9)
dataK = float(data10)
dataL = float(data11)
    # Mean Cq of the experimental group: target gene (te) and housekeeping gene (he).
    te = (dataA + dataB + dataC) / 3
    he = (dataD + dataE + dataF) / 3
    # Mean Cq of the control group: target gene (tc) and housekeeping gene (hc).
    tc = (dataG + dataH + dataI) / 3
    hc = (dataJ + dataK + dataL) / 3
    # Delta Ct normalizes the target gene to the housekeeping gene within each group.
    deltaCtE = te - he
    deltaCtC = tc - hc
    # Delta-delta Ct compares the experimental group to the control group; the
    # fold change assumes a doubling of product per PCR cycle.
    deltadeltaCt = deltaCtE - deltaCtC
    fc = 2**(-deltadeltaCt)
log2fc = math.log(fc,2)
print("fold change = ", fc)
print("log2 fold change = ", log2fc)
main()
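# A minimal worked example of the arithmetic above, using made-up Cq values
# purely for illustration (not data from any real run):
#   te = 20, he = 15  ->  deltaCtE = 5
#   tc = 22, hc = 15  ->  deltaCtC = 7
#   deltadeltaCt = 5 - 7 = -2, so fc = 2**2 = 4 and log2fc = 2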
# In[ ]:
|
#!/usr/bin/env python
"""Parsers for the Sprinzl tRNA databases.
"""
from cogent.util.misc import InverseDict
from string import strip, maketrans
from cogent.core.sequence import RnaSequence
from cogent.core.info import Info as InfoClass
__author__ = "Rob Knight"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Rob Knight", "Jeremy Widmann", "Sandra Smit"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Rob Knight"
__email__ = "rob@spot.colorado.edu"
__status__ = "Development"
def Rna(x, Info=None):
if isinstance(x, list):
x = ''.join(x)
if Info is None:
Info = {}
return RnaSequence(x.upper().replace('T','U'), Info=InfoClass(Info))
SprinzlFields =['Accession', 'AA', 'Anticodon', 'Species', 'Strain']
def OneLineSprinzlParser(infile):
"""Returns successive records from the tRNA database. First line labels.
This was the first attempt at the parser, and requires quite a lot of
preprocessing. Use SprinzlParser for something more general.
Works on a file obtained by the following method:
1. Do the default search.
2. Show all the columns and autofit them.
3. Delete the first column of numbers and all blank columns.
4. Name the first 5 columns "Accession, AA, Anticodon, Species, Strain".
5. Save the worksheet as plain text.
"""
first = True
for l in infile:
line = l.strip()
if not line:
continue
fields = line.split('\t')
if first: #label line
label_fields = fields[5:]
labels = InverseDict(enumerate(label_fields))
first = False
else:
info = dict(zip(SprinzlFields, map(strip, fields[0:5])))
info['Labels'] = labels
yield Rna(map(strip, fields[5:]), Info=info)
GenomicFields = ['', 'Accession', 'AA', '', 'Anticodon', '', 'Species', \
'', '', '', '', '', '', '', '', '', 'Strain', '', '', '', 'Taxonomy']
def _fix_structure(fields, seq):
"""Returns a string with correct # chars from db struct line.
fields should be the result of line.split('\t')
Implementation notes:
Pairing line uses strange format: = is pair, * is GU pair, and
nothing is unpaired. Cells are not padded out to the start or end of
the sequence length, presumably to infuriate the unwary.
I don't _think_ it's possible to convert these into ViennaStructures
since we don't know where each helix starts and ends, and the lengths
of each piece can vary. I'd be happy to be proven wrong on this...
For some reason, _sometimes_ spaces are inserted, and _sometimes_
the cells are left entirely blank. Also, when there's a noncanonical
pair in the helix, the helix is broken into two pieces, so counting
pieces isn't going to work for figuring out the ViennaStructure.
Expects as input the sequence and the raw structure line.
"""
num_blanks = 4
pieces = fields[num_blanks:]
result = ['.'] * len(seq)
for i, p in enumerate(pieces):
if p and (p != ' '):
result[i] = p
return ''.join(result)
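# Toy illustration of _fix_structure (made-up values, not from the database):
# with the four leading blank cells followed by ['=', '', '=', '', ''] and a
# five-base sequence, the pieces line up per position and the result is '=.=..'.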
def _fix_sequence(seq):
"""Returns string where terminal gaps are replaced with terminal CCA.
Some of the sequence in the Genomic tRNA Database have gaps where the
acceptor stem (terminal CCA) should be. This function checks the
number of terminal gaps and replaces with appropriate part of terminal
CCA.
"""
if seq.endswith('---'):
seq = seq[:-3]+'CCA'
elif seq.endswith('--'):
seq = seq[:-2]+'CA'
elif seq.endswith('-'):
seq = seq[:-1]+'A'
return seq
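# For example (hypothetical input), _fix_sequence('GGGCUAUA---') returns
# 'GGGCUAUACCA', while a single trailing '-' is replaced by 'A'.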
def GenomicSprinzlParser(infile,fix_sequence=False):
"""Parser for the Genomic tRNA Database.
Assumes the file has been prepared by the following method:
1. Set all search fields to empty.
2. Check all the results fields.
3. Perform the search (this takes a while).
4. Save the results worksheet as tab-delimited text.
Note that the alignment length is supposed to be 99 bases, but not all the
sequences have been padded out with the correct number of hyphens.
"""
num_blanks = 4
first = True
for l in infile:
#skip blank lines
line = l.rstrip()
if not line:
continue
fields = line.split('\t')
if first: #label line
#for unknown reasons, some of the field headers have '.' instead
#of '0', e.g. '7.' instead of '70'.
line = line.replace('.', '0')
fields = line.split('\t')
labels = InverseDict(enumerate(fields[num_blanks:]))
first = False
offset = 0
else: #expect 3 record lines at a time
if offset == 0: #label line
info = dict(zip(GenomicFields, map(strip, fields)))
#add in the labels
info['Labels'] = labels
#convert the taxonomy from a string to a list
info['Taxonomy'] = map(strip, info['Taxonomy'].split(';'))
#convert the anticodon into RNA
info['Anticodon'] = Rna(info['Anticodon'])
#get rid of the empty fields
del info['']
elif offset == 1: #sequence line
raw_seq = ''.join(map(strip, fields))
#for some reason, there are underscores in some sequences
raw_seq = raw_seq.replace('_', '-')
if fix_sequence:
raw_seq = _fix_sequence(raw_seq)
seq = Rna(raw_seq, Info=info)
elif offset == 2: #structure line
seq.Pairing = _fix_structure(fields, seq)
yield seq
#figure out which type of line we're expecting next
offset += 1
if offset > 2:
offset = 0
def get_pieces(struct, splits):
"""Breaks up the structure at fixed positions, returns the pieces.
struct: structure string in sprinzl format
splits: list or tuple of positions to split on
This is a helper function for the sprinzl_to_vienna function.
struct = '...===...===.'
splits = [0,3,7,-1,13]
pieces -> ['...','===.','..===','.']
"""
pieces = []
for x in range(len(splits)-1):
pieces.append(struct[splits[x]:splits[x+1]])
return pieces
def get_counts(struct_piece):
"""Returns a list of the lengths or the paired regions in the structure.
struct_pieces: string, piece of structure in sprinzl format
This is a helper function for the sprinzl_to_vienna function
struct_piece = '.===.=..'
returns [3,1]
"""
return map(len, filter(None, [i.strip('.') for i in \
struct_piece.split('.')]))
def sprinzl_to_vienna(sprinzl_struct):
"""Constructs vienna structure from sprinzl sec. structure format
sprinzl_struct: structure string in sprinzl format
Many things are hardcoded in here, so if the format or the alignment
changes, these values have to be adjusted!!!
The correctness of the splits has been tested on the GenomicDB
database from Jan 2006, containing 8163 sequences.
"""
assert len(sprinzl_struct) == 99
gu='*'
wc='='
splits = [0,8,19,29,38,55,79,-11,len(sprinzl_struct)]
direction = ['(','(',')','(',')','(',')',')']
#get structural pieces
s = sprinzl_struct.replace(gu,wc)
pieces = get_pieces(s, splits)
assert len(pieces) == len(splits)-1
#get counts of structured regions in each piece, check validity
counts = map(get_counts,pieces)
pairs = [(0,-1),(1,2),(3,4),(5,6)]
for i,j in pairs:
assert sum(counts[i]) == sum(counts[j])
#check counts matches directions
assert len(counts) == len(direction)
    #construct string of brackets
brackets = []
for lengths, br in zip(counts,direction):
for l in lengths:
brackets.append(l*br)
brackets = ''.join(brackets)
#build vienna structure
vienna = []
x=0
for sym in s:
if sym == '.':
vienna.append(sym)
else:
vienna.append(brackets[x])
x += 1
return ''.join(vienna)
|
import os
from pathlib import Path
DEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv("CHIA_ROOT", "~/.chia/testnet4"))).resolve()
|
from __future__ import absolute_import
from .accuracy import accuracy
from .classification import evaluate_classification, show_confusion_matrix
from .distance import compute_distance_matrix
from .lfw import evaluate_lfw
from .rank import evaluate_rank
|
#
# PySNMP MIB module JUNIPER-V1-TRAPS-MPLS (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/JUNIPER-V1-TRAPS-MPLS
# Produced by pysmi-0.3.4 at Wed May 1 14:01:25 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter64, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, iso, IpAddress, Bits, ModuleIdentity, Counter32, NotificationType, Gauge32, ObjectIdentity, NotificationType, Unsigned32, Integer32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "enterprises", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "iso", "IpAddress", "Bits", "ModuleIdentity", "Counter32", "NotificationType", "Gauge32", "ObjectIdentity", "NotificationType", "Unsigned32", "Integer32", "TimeTicks")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
juniperMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 2636))
jnxMibs = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3))
mpls = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 2))
mplsLspList = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 2, 3))
mplsLspEntry = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 2, 3, 1))
mplsLspName = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 2, 3, 1, 1))
mplsPathName = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 2, 3, 1, 17))
mplsLspUpV1 = NotificationType((1, 3, 6, 1, 4, 1, 2636) + (0,1)).setObjects(("JUNIPER-V1-TRAPS-MPLS", "mplsLspName"), ("JUNIPER-V1-TRAPS-MPLS", "mplsPathName"))
if mibBuilder.loadTexts: mplsLspUpV1.setDescription('An mplsLspUp trap signifies that the specified LSP is up. The current active path for the LSP is mplsPathName.')
mplsLspDownV1 = NotificationType((1, 3, 6, 1, 4, 1, 2636) + (0,2)).setObjects(("JUNIPER-V1-TRAPS-MPLS", "mplsLspName"), ("JUNIPER-V1-TRAPS-MPLS", "mplsPathName"))
if mibBuilder.loadTexts: mplsLspDownV1.setDescription('An mplsLspDown trap signifies that the specified LSP is down, because the current active path mplsPathName went down.')
mplsLspChangeV1 = NotificationType((1, 3, 6, 1, 4, 1, 2636) + (0,3)).setObjects(("JUNIPER-V1-TRAPS-MPLS", "mplsLspName"), ("JUNIPER-V1-TRAPS-MPLS", "mplsPathName"))
if mibBuilder.loadTexts: mplsLspChangeV1.setDescription("An mplsLspChange trap signifies that the specified LSP has switched traffic to the new active path 'toLspPath'. The LSP maintains up state before and after the switch over")
mibBuilder.exportSymbols("JUNIPER-V1-TRAPS-MPLS", mpls=mpls, mplsLspChangeV1=mplsLspChangeV1, mplsLspName=mplsLspName, mplsPathName=mplsPathName, mplsLspUpV1=mplsLspUpV1, mplsLspEntry=mplsLspEntry, jnxMibs=jnxMibs, mplsLspDownV1=mplsLspDownV1, juniperMIB=juniperMIB, mplsLspList=mplsLspList)
|
import json
import ssl
import asyncio
import logging
import websockets
log = logging.getLogger('mattermostdriver.websocket')
log.setLevel(logging.INFO)
class Websocket:
def __init__(self, options, token):
self.options = options
if options['debug']:
log.setLevel(logging.DEBUG)
self._token = token
async def connect(self, event_handler):
"""
Connect to the websocket and authenticate it.
When the authentication has finished, start the loop listening for messages,
sending a ping to the server to keep the connection alive.
:param event_handler: Every websocket event will be passed there. Takes one argument.
:type event_handler: Function(message)
:return:
"""
context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
if not self.options['verify']:
context.verify_mode = ssl.CERT_NONE
scheme = 'wss://'
if self.options['scheme'] != 'https':
scheme = 'ws://'
context = None
url = '{scheme:s}{url:s}{basepath:s}/websocket'.format(
scheme=scheme,
url=self.options['url'],
basepath=self.options['basepath']
)
websocket = await websockets.connect(
url,
ssl=context,
)
await self._authenticate_websocket(websocket, event_handler)
await self._start_loop(websocket, event_handler)
async def _start_loop(self, websocket, event_handler):
"""
        We will listen for websocket events, sending a heartbeat/pong every time
        we catch a TimeoutError. If we didn't, the webserver would close the idle
        connection, forcing us to reconnect.
"""
log.debug('Starting websocket loop')
while True:
try:
await asyncio.wait_for(
self._wait_for_message(websocket, event_handler),
timeout=self.options['timeout']
)
except asyncio.TimeoutError:
await websocket.pong()
log.debug("Sending heartbeat...")
continue
async def _authenticate_websocket(self, websocket, event_handler):
"""
        Sends an authentication challenge over the websocket.
        This is not needed when we simply send the cookie we got on login
        while connecting to the websocket.
"""
log.debug('Authenticating websocket')
json_data = json.dumps({
"seq": 1,
"action": "authentication_challenge",
"data": {
"token": self._token
}
}).encode('utf8')
await websocket.send(json_data)
while True:
message = await websocket.recv()
status = json.loads(message)
log.debug(status)
# We want to pass the events to the event_handler already
# because the hello event could arrive before the authentication ok response
await event_handler(message)
if ('event' in status and status['event'] == 'hello') and \
('seq' in status and status['seq'] == 0):
                log.info('Websocket authentication OK')
                return True
            log.error('Websocket authentication failed')
async def _wait_for_message(self, websocket, event_handler):
log.debug('Waiting for messages on websocket')
while True:
message = await websocket.recv()
await event_handler(message)
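# Minimal usage sketch (assumed option values and token, not part of the
# driver itself), showing how the class above might be driven:
#
#     async def handler(message):
#         print(message)
#
#     ws = Websocket({'url': 'chat.example.com', 'basepath': '/api/v4',
#                     'scheme': 'https', 'verify': True, 'timeout': 30,
#                     'debug': False}, token='my-token')
#     asyncio.get_event_loop().run_until_complete(ws.connect(handler))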
|
USE_MMDET = True
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/mot_challenge_det.py', '../_base_/default_runtime.py'
]
model = dict(
detector=dict(
rpn_head=dict(bbox_coder=dict(clip_border=False)),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(clip_border=False), num_classes=1)),
init_cfg=dict(
type='Pretrained',
checkpoint= # noqa: E251
'http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth' # noqa: E501
)))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=100,
warmup_ratio=1.0 / 100,
step=[3])
# runtime settings
total_epochs = 4
|
class ContainerModuleNotFound(Exception):
    """Raised when the Container module is not found."""
|
import pytest
from ..bootstrap import check_dependencies
def test_check_dependencies():
check_dependencies([])
check_dependencies(["which", "cd"])
with pytest.raises(AssertionError, match="this-does-not-exist is not installed"):
check_dependencies(["this-does-not-exist"])
|
class Solution:
# @param {string} s
# @param {string} p
# @return {boolean}
    def isMatch(self, s, p):
        # dp[i][j] is True when s[:i] matches p[:j].
        dp=[[False for i in range(len(p)+1)] for j in range(len(s)+1)]
        dp[0][0]=True
        # An empty string still matches patterns such as a*, a*b*, ... where
        # every starred element matches zero characters.
        for i in range(1,len(p)+1):
            if p[i-1]=='*':
                if i>=2:
                    dp[0][i]=dp[0][i-2]
        for i in range(1,len(s)+1):
            for j in range(1,len(p)+1):
                if p[j-1]=='.':
                    # '.' matches any single character.
                    dp[i][j]=dp[i-1][j-1]
                elif p[j-1]=='*':
                    # 'x*' can match exactly one x (dp[i][j-1]), zero x
                    # (dp[i][j-2]), or extend a shorter match by one more x
                    # (dp[i-1][j] with a matching character).
                    dp[i][j]=dp[i][j-1] or dp[i][j-2] or (dp[i-1][j] and (s[i-1]==p[j-2] or p[j-2]=='.'))
                else:
                    # Literal character: it must equal the current character of s.
                    dp[i][j]=dp[i-1][j-1] and s[i-1]==p[j-1]
        return dp[len(s)][len(p)]
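# Quick sanity check (not part of the original solution):
# Solution().isMatch("aab", "c*a*b") -> True
# Solution().isMatch("mississippi", "mis*is*p*.") -> False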
|
uci_moves = []
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
for n1 in range(1, 9):
for l1 in letters:
for n2 in range(1, 9):
for l2 in letters:
uci = l1 + str(n1) + l2 + str(n2)
uci_moves.append(uci)
promotions_white = ['a7a8', 'b7b8', 'c7c8', 'd7d8', 'e7e8', 'f7f8', 'g7g8', 'h7h8',
'a7b8', 'b7a8', 'b7c8', 'c7b8', 'c7d8', 'd7c8', 'd7e8', 'e7d8', 'e7f8', 'f7e8', 'f7g8', 'g7f8',
'g7h8', 'h7g8']
promotions_black = ['a2a1', 'b2b1', 'c2c1', 'd2d1', 'e2e1', 'f2f1', 'g2g1', 'h2h1',
'a2b1', 'b2a1', 'b2c1', 'c2b1', 'c2d1', 'd2c1', 'd2e1', 'e2d1', 'e2f1', 'f2e1', 'f2g1', 'g2f1',
'g2h1', 'h2g1']
promotions = promotions_white + promotions_black
for piece in ['r', 'n', 'b', 'q']:
for promotion in promotions:
uci_moves.append(promotion + piece)
uci_to_index = {}
for i, uci in enumerate(uci_moves):
uci_to_index[uci] = i
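# Sanity notes (not in the original script): the table holds 64 * 64 = 4096
# plain moves plus 44 promotion squares * 4 pieces = 176 promotion moves,
# 4272 entries in total, e.g. uci_to_index['a1a1'] == 0.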
|
import os
import json
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("FMT")
src_path = "data/en/manifest_{}_en_cmon_clean.json"
tgt_path = "data/en/spk_{}/manifest_{}_en_cmon_clean.json"
tgt_txt_path = "data/en/spk_{}/manifest_{}_en_cmon_clean.txt"
for env in ["valid", "train"]:
with open(src_path.format(env), encoding="utf-8") as src_json:
for jn in src_json:
try:
jd = json.loads(jn)
audio_filepath = jd["audio_filepath"]
text = jd["text"]
duration = jd["duration"]
speaker = jd["speaker"]
text = str(text).strip("\n").strip()
text = text.replace("£", " euro ")
text = text.replace("€", " euro ")
text = text.replace(" ", " ")
manifest = {"audio_filepath": audio_filepath,
"text": text,
"duration": float(duration),
"speaker": int(speaker)}
os.makedirs("data/en/spk_{}".format(speaker), exist_ok=True)
with open(tgt_path.format(speaker, env), encoding="utf-8", mode="a") as tgt_json:
json.dump(manifest, tgt_json)
tgt_json.write("\n")
with open(tgt_txt_path.format(speaker, env), encoding="utf-8", mode="a") as tgt_txt:
tgt_txt.write("{}|{}|{}\n".format(audio_filepath, text, int(speaker)))
except Exception as e:
logger.exception(e)
|
##########################################################################
# Author: Samuca
#
# brief: plays an mp3 song
#
# this is a list exercise available on youtube:
# https://www.youtube.com/playlist?list=PLHz_AreHm4dm6wYOIW20Nyg12TAjmMGT-
##########################################################################
#the pygame module has a lot of tools to work with games, such as:
# load images
# play songs
import pygame
#initialize the module
pygame.init()
#load the song
pygame.mixer.music.load("ex021.mp3")
#play the song
pygame.mixer.music.play()
#block on the event queue so the script keeps running while the song plays
pygame.event.wait()
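# A common alternative (a sketch, not part of the original exercise) is to
# poll the mixer until playback actually finishes:
# while pygame.mixer.music.get_busy():
#     pygame.time.Clock().tick(10)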
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the SYSTEMTIME structure implementation."""
from __future__ import unicode_literals
import decimal
import unittest
from dfdatetime import systemtime
class SystemtimeTest(unittest.TestCase):
"""Tests for the SYSTEMTIME structure."""
# pylint: disable=protected-access
def testInitialize(self):
"""Tests the initialization function."""
systemtime_object = systemtime.Systemtime()
self.assertIsNotNone(systemtime_object)
systemtime_object = systemtime.Systemtime(
system_time_tuple=(2010, 8, 4, 12, 20, 6, 31, 142))
self.assertIsNotNone(systemtime_object)
self.assertEqual(systemtime_object.year, 2010)
self.assertEqual(systemtime_object.month, 8)
self.assertEqual(systemtime_object.day_of_month, 12)
self.assertEqual(systemtime_object.hours, 20)
self.assertEqual(systemtime_object.minutes, 6)
self.assertEqual(systemtime_object.seconds, 31)
self.assertEqual(systemtime_object.milliseconds, 142)
with self.assertRaises(ValueError):
systemtime.Systemtime(
system_time_tuple=(2010, 8, 4, 12, 20, 6, 31))
with self.assertRaises(ValueError):
systemtime.Systemtime(
system_time_tuple=(1500, 8, 4, 12, 20, 6, 31, 142))
with self.assertRaises(ValueError):
systemtime.Systemtime(
system_time_tuple=(2010, 13, 4, 12, 20, 6, 31, 142))
with self.assertRaises(ValueError):
systemtime.Systemtime(
system_time_tuple=(2010, 8, 7, 12, 20, 6, 31, 142))
with self.assertRaises(ValueError):
systemtime.Systemtime(
system_time_tuple=(2010, 8, 4, 32, 20, 6, 31, 142))
with self.assertRaises(ValueError):
systemtime.Systemtime(
system_time_tuple=(2010, 8, 4, 12, 24, 6, 31, 142))
with self.assertRaises(ValueError):
systemtime.Systemtime(
system_time_tuple=(2010, 8, 4, 12, 20, 61, 31, 142))
with self.assertRaises(ValueError):
systemtime.Systemtime(
system_time_tuple=(2010, 8, 4, 12, 20, 6, 61, 142))
with self.assertRaises(ValueError):
systemtime.Systemtime(
system_time_tuple=(2010, 8, 4, 12, 20, 6, 31, 1001))
def testGetNormalizedTimestamp(self):
"""Tests the _GetNormalizedTimestamp function."""
systemtime_object = systemtime.Systemtime(
system_time_tuple=(2010, 8, 4, 12, 20, 6, 31, 142))
normalized_timestamp = systemtime_object._GetNormalizedTimestamp()
self.assertEqual(normalized_timestamp, decimal.Decimal('1281643591.142'))
systemtime_object = systemtime.Systemtime()
normalized_timestamp = systemtime_object._GetNormalizedTimestamp()
self.assertIsNone(normalized_timestamp)
def testCopyFromDateTimeString(self):
"""Tests the CopyFromDateTimeString function."""
systemtime_object = systemtime.Systemtime()
expected_number_of_seconds = 1281571200
systemtime_object.CopyFromDateTimeString('2010-08-12')
self.assertEqual(
systemtime_object._number_of_seconds, expected_number_of_seconds)
self.assertEqual(systemtime_object.year, 2010)
self.assertEqual(systemtime_object.month, 8)
self.assertEqual(systemtime_object.day_of_month, 12)
self.assertEqual(systemtime_object.hours, 0)
self.assertEqual(systemtime_object.minutes, 0)
self.assertEqual(systemtime_object.seconds, 0)
self.assertEqual(systemtime_object.milliseconds, 0)
expected_number_of_seconds = 1281647191
systemtime_object.CopyFromDateTimeString('2010-08-12 21:06:31')
self.assertEqual(
systemtime_object._number_of_seconds, expected_number_of_seconds)
self.assertEqual(systemtime_object.year, 2010)
self.assertEqual(systemtime_object.month, 8)
self.assertEqual(systemtime_object.day_of_month, 12)
self.assertEqual(systemtime_object.hours, 21)
self.assertEqual(systemtime_object.minutes, 6)
self.assertEqual(systemtime_object.seconds, 31)
self.assertEqual(systemtime_object.milliseconds, 0)
expected_number_of_seconds = 1281647191
systemtime_object.CopyFromDateTimeString('2010-08-12 21:06:31.546875')
self.assertEqual(
systemtime_object._number_of_seconds, expected_number_of_seconds)
self.assertEqual(systemtime_object.year, 2010)
self.assertEqual(systemtime_object.month, 8)
self.assertEqual(systemtime_object.day_of_month, 12)
self.assertEqual(systemtime_object.hours, 21)
self.assertEqual(systemtime_object.minutes, 6)
self.assertEqual(systemtime_object.seconds, 31)
self.assertEqual(systemtime_object.milliseconds, 546)
expected_number_of_seconds = 1281650791
systemtime_object.CopyFromDateTimeString('2010-08-12 21:06:31.546875-01:00')
self.assertEqual(
systemtime_object._number_of_seconds, expected_number_of_seconds)
self.assertEqual(systemtime_object.year, 2010)
self.assertEqual(systemtime_object.month, 8)
self.assertEqual(systemtime_object.day_of_month, 12)
self.assertEqual(systemtime_object.hours, 22)
self.assertEqual(systemtime_object.minutes, 6)
self.assertEqual(systemtime_object.seconds, 31)
self.assertEqual(systemtime_object.milliseconds, 546)
expected_number_of_seconds = 1281643591
systemtime_object.CopyFromDateTimeString('2010-08-12 21:06:31.546875+01:00')
self.assertEqual(
systemtime_object._number_of_seconds, expected_number_of_seconds)
self.assertEqual(systemtime_object.year, 2010)
self.assertEqual(systemtime_object.month, 8)
self.assertEqual(systemtime_object.day_of_month, 12)
self.assertEqual(systemtime_object.hours, 20)
self.assertEqual(systemtime_object.minutes, 6)
self.assertEqual(systemtime_object.seconds, 31)
self.assertEqual(systemtime_object.milliseconds, 546)
expected_number_of_seconds = -11644387200
systemtime_object.CopyFromDateTimeString('1601-01-02 00:00:00')
self.assertEqual(
systemtime_object._number_of_seconds, expected_number_of_seconds)
self.assertEqual(systemtime_object.year, 1601)
self.assertEqual(systemtime_object.month, 1)
self.assertEqual(systemtime_object.day_of_month, 2)
self.assertEqual(systemtime_object.hours, 0)
self.assertEqual(systemtime_object.minutes, 0)
self.assertEqual(systemtime_object.seconds, 0)
self.assertEqual(systemtime_object.milliseconds, 0)
with self.assertRaises(ValueError):
systemtime_object.CopyFromDateTimeString('1600-01-02 00:00:00')
def testCopyToDateTimeString(self):
"""Tests the CopyToDateTimeString function."""
systemtime_object = systemtime.Systemtime(
system_time_tuple=(2010, 8, 4, 12, 20, 6, 31, 142))
date_time_string = systemtime_object.CopyToDateTimeString()
self.assertEqual(date_time_string, '2010-08-12 20:06:31.142')
systemtime_object = systemtime.Systemtime()
date_time_string = systemtime_object.CopyToDateTimeString()
self.assertIsNone(date_time_string)
def testGetDate(self):
"""Tests the GetDate function."""
systemtime_object = systemtime.Systemtime(
system_time_tuple=(2010, 8, 4, 12, 20, 6, 31, 142))
date_tuple = systemtime_object.GetDate()
self.assertEqual(date_tuple, (2010, 8, 12))
systemtime_object = systemtime.Systemtime()
date_tuple = systemtime_object.GetDate()
self.assertEqual(date_tuple, (None, None, None))
if __name__ == '__main__':
unittest.main()
|
"""
REST APIs that are only used in v1 (the legacy API).
"""
|
import sqlite3
connection = sqlite3.connect('categories.db')
with open('categories.sql') as f:
connection.executescript(f.read())
cur = connection.cursor()
cur.execute("INSERT INTO categories (name) VALUES (?)",
('Food',)
)
connection.commit()
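# Optional sanity check (not in the original script): read the rows back.
for row in cur.execute("SELECT * FROM categories"):
    print(row)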
connection.close()
|
import unittest
import os
from web3 import Web3
from solcx import install_solc
# install_solc(version='latest')
install_solc(version='0.7.0')
from solcx import compile_source
EXTRA_GAS = int(os.environ.get("EXTRA_GAS", "0"))
proxy_url = os.environ.get('PROXY_URL', 'http://localhost:9090/solana')
proxy = Web3(Web3.HTTPProvider(proxy_url))
eth_account = proxy.eth.account.create('https://github.com/neonlabsorg/proxy-model.py/issues/147')
proxy.eth.default_account = eth_account.address
SUBSTRING_LOG_ERR_147 = 'Invalid Ethereum transaction nonce:'
STORAGE_SOLIDITY_SOURCE_147 = '''
pragma solidity >=0.7.0 <0.9.0;
/**
* @title Storage
* @dev Store & retrieve value in a variable
*/
contract Storage {
uint256 number;
/**
* @dev Store value in variable
* @param num value to store
*/
function store(uint256 num) public {
number = num;
}
/**
* @dev Return value
* @return value of 'number'
*/
function retrieve() public view returns (uint256){
return number;
}
}
'''
SOLIDITY_SOURCE_185 = '''
pragma solidity >=0.7.0 <0.9.0;
contract test_185 {
bytes public emprty_string = "";
function getKeccakOfEmptyString() public view returns (bytes32 variant) {
variant = keccak256(emprty_string);
}
bytes32 constant neonlabsHash = keccak256("neonlabs");
function endlessCycle() public view returns (bytes32 variant) {
variant = keccak256(emprty_string);
for(;neonlabsHash != variant;) {
variant = keccak256(abi.encodePacked(variant));
}
return variant;
}
bytes32 public value = "";
function initValue(string memory s) public {
value = keccak256(bytes(s));
}
function calculateKeccakAndStore(uint256 times) public {
for(;times > 0; --times) {
value = keccak256(abi.encodePacked(value));
}
}
function getValue() public view returns (bytes32) {
return value;
}
}
'''
class Test_eth_sendRawTransaction(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("\n\nhttps://github.com/neonlabsorg/proxy-model.py/issues/147")
print('eth_account.address:', eth_account.address)
print('eth_account.key:', eth_account.key.hex())
cls.deploy_storage_147_solidity_contract(cls)
cls.deploy_test_185_solidity_contract(cls)
def deploy_storage_147_solidity_contract(self):
compiled_sol = compile_source(STORAGE_SOLIDITY_SOURCE_147)
contract_id, contract_interface = compiled_sol.popitem()
storage = proxy.eth.contract(abi=contract_interface['abi'], bytecode=contract_interface['bin'])
trx_deploy = proxy.eth.account.sign_transaction(dict(
nonce=proxy.eth.get_transaction_count(proxy.eth.default_account),
chainId=proxy.eth.chain_id,
gas=987654321,
gasPrice=0,
to='',
value=0,
data=storage.bytecode),
eth_account.key
)
print('trx_deploy:', trx_deploy)
trx_deploy_hash = proxy.eth.send_raw_transaction(trx_deploy.rawTransaction)
print('trx_deploy_hash:', trx_deploy_hash.hex())
trx_deploy_receipt = proxy.eth.wait_for_transaction_receipt(trx_deploy_hash)
print('trx_deploy_receipt:', trx_deploy_receipt)
self.deploy_block_hash = trx_deploy_receipt['blockHash']
self.deploy_block_num = trx_deploy_receipt['blockNumber']
print('deploy_block_hash:', self.deploy_block_hash)
print('deploy_block_num:', self.deploy_block_num)
self.storage_contract = proxy.eth.contract(
address=trx_deploy_receipt.contractAddress,
abi=storage.abi
)
def deploy_test_185_solidity_contract(self):
compiled_sol = compile_source(SOLIDITY_SOURCE_185)
contract_id, contract_interface = compiled_sol.popitem()
test_185_solidity_contract = proxy.eth.contract(abi=contract_interface['abi'], bytecode=contract_interface['bin'])
trx_deploy = proxy.eth.account.sign_transaction(dict(
nonce=proxy.eth.get_transaction_count(proxy.eth.default_account),
chainId=proxy.eth.chain_id,
gas=987654321,
gasPrice=0,
to='',
value=0,
data=test_185_solidity_contract.bytecode),
eth_account.key
)
print('trx_deploy:', trx_deploy)
trx_deploy_hash = proxy.eth.send_raw_transaction(trx_deploy.rawTransaction)
print('trx_deploy_hash:', trx_deploy_hash.hex())
trx_deploy_receipt = proxy.eth.wait_for_transaction_receipt(trx_deploy_hash)
print('trx_deploy_receipt:', trx_deploy_receipt)
self.test_185_solidity_contract = proxy.eth.contract(
address=trx_deploy_receipt.contractAddress,
abi=test_185_solidity_contract.abi
)
# @unittest.skip("a.i.")
def test_check_get_block_by_hash(self):
print("\ntest_check_get_block_by_hash")
block = proxy.eth.get_block(self.deploy_block_hash, full_transactions=True)
print('block:', block)
self.assertEqual(len(block['transactions']), 1)
self.assertEqual(block['transactions'][0]['blockHash'], self.deploy_block_hash)
# @unittest.skip("a.i.")
def test_check_get_block_by_number(self):
print("\ntest_check_get_block_by_number")
block = proxy.eth.get_block(int(self.deploy_block_num))
print('block:', block)
self.assertEqual(len(block['transactions']), 1)
# @unittest.skip("a.i.")
def test_01_call_retrieve_right_after_deploy(self):
print("\ntest_01_call_retrieve_right_after_deploy")
number = self.storage_contract.functions.retrieve().call()
print('number:', number)
self.assertEqual(number, 0)
# @unittest.skip("a.i.")
def test_02_execute_with_right_nonce(self):
print("\ntest_02_execute_with_right_nonce")
right_nonce = proxy.eth.get_transaction_count(proxy.eth.default_account)
trx_store = self.storage_contract.functions.store(147).buildTransaction({'nonce': right_nonce})
print('trx_store:', trx_store)
trx_store_signed = proxy.eth.account.sign_transaction(trx_store, eth_account.key)
print('trx_store_signed:', trx_store_signed)
trx_store_hash = proxy.eth.send_raw_transaction(trx_store_signed.rawTransaction)
print('trx_store_hash:', trx_store_hash.hex())
trx_store_receipt = proxy.eth.wait_for_transaction_receipt(trx_store_hash)
print('trx_store_receipt:', trx_store_receipt)
number = self.storage_contract.functions.retrieve().call()
print('number:', number)
self.assertEqual(number, 147)
# @unittest.skip("a.i.")
def test_03_execute_with_low_gas(self):
print("\ntest_03_execute_with_low_gas")
right_nonce = proxy.eth.get_transaction_count(proxy.eth.default_account)
trx_store = self.storage_contract.functions.store(148).buildTransaction({'nonce': right_nonce, 'gasPrice': 1})
print('trx_store:', trx_store)
trx_store['gas'] = trx_store['gas'] - 2 - EXTRA_GAS # less than estimated
print('trx_store:', trx_store)
trx_store_signed = proxy.eth.account.sign_transaction(trx_store, eth_account.key)
print('trx_store_signed:', trx_store_signed)
trx_store_hash = proxy.eth.send_raw_transaction(trx_store_signed.rawTransaction)
print('trx_store_hash:', trx_store_hash.hex())
trx_store_receipt = proxy.eth.wait_for_transaction_receipt(trx_store_hash)
print('trx_store_receipt:', trx_store_receipt)
self.assertEqual(trx_store_receipt['status'], 0) # false Transaction mined but execution failed
# @unittest.skip("a.i.")
def test_04_execute_with_bad_nonce(self):
test_nonce_map = {
'grade_up_one': 1,
'grade_down_one': -1,
}
for name, offset in test_nonce_map.items():
with self.subTest(name=name):
print("\ntest_04_execute_with_bad_nonce {} offsets".format(offset))
bad_nonce = offset + proxy.eth.get_transaction_count(proxy.eth.default_account)
trx_store = self.storage_contract.functions.store(147).buildTransaction({'nonce': bad_nonce})
print('trx_store:', trx_store)
trx_store_signed = proxy.eth.account.sign_transaction(trx_store, eth_account.key)
print('trx_store_signed:', trx_store_signed)
try:
trx_store_hash = proxy.eth.send_raw_transaction(trx_store_signed.rawTransaction)
print('trx_store_hash:', trx_store_hash)
self.assertTrue(False)
except Exception as e:
print('type(e):', type(e))
print('e:', e)
import json
response = json.loads(str(e).replace('\'', '\"').replace('None', 'null'))
print('response:', response)
print('code:', response['code'])
self.assertEqual(response['code'], -32002)
print('substring_err_147:', SUBSTRING_LOG_ERR_147)
logs = response['data']['logs']
print('logs:', logs)
log = [s for s in logs if SUBSTRING_LOG_ERR_147 in s][0]
print(log)
self.assertGreater(len(log), len(SUBSTRING_LOG_ERR_147))
file_name = 'src/entrypoint.rs'
self.assertTrue(file_name in log)
# @unittest.skip("a.i.")
def test_05_transfer_one_gwei(self):
print("\ntest_05_transfer_one_gwei")
one_gwei = 1_000_000_000
eth_account_alice = proxy.eth.account.create('alice')
eth_account_bob = proxy.eth.account.create('bob')
print('eth_account_alice.address:', eth_account_alice.address)
print('eth_account_bob.address:', eth_account_bob.address)
if True:
print("add funds to alice and bob")
print("alice")
trx_transfer = proxy.eth.account.sign_transaction(dict(
nonce=proxy.eth.get_transaction_count(proxy.eth.default_account),
chainId=proxy.eth.chain_id,
gas=987654321,
gasPrice=0,
to=eth_account_alice.address,
value=one_gwei),
eth_account.key
)
print('trx_transfer:', trx_transfer)
trx_transfer_hash = proxy.eth.send_raw_transaction(trx_transfer.rawTransaction)
print('trx_transfer_hash:', trx_transfer_hash.hex())
trx_transfer_receipt = proxy.eth.wait_for_transaction_receipt(trx_transfer_hash)
print('trx_transfer_receipt:', trx_transfer_receipt)
print("bob")
trx_transfer = proxy.eth.account.sign_transaction(dict(
nonce=proxy.eth.get_transaction_count(proxy.eth.default_account),
chainId=proxy.eth.chain_id,
gas=987654321,
gasPrice=0,
to=eth_account_bob.address,
value=one_gwei),
eth_account.key
)
print('trx_transfer:', trx_transfer)
trx_transfer_hash = proxy.eth.send_raw_transaction(trx_transfer.rawTransaction)
print('trx_transfer_hash:', trx_transfer_hash.hex())
trx_transfer_receipt = proxy.eth.wait_for_transaction_receipt(trx_transfer_hash)
print('trx_transfer_receipt:', trx_transfer_receipt)
alice_balance_before_transfer = proxy.eth.get_balance(eth_account_alice.address)
bob_balance_before_transfer = proxy.eth.get_balance(eth_account_bob.address)
print('alice_balance_before_transfer:', alice_balance_before_transfer)
print('bob_balance_before_transfer:', bob_balance_before_transfer)
print('one_gwei:', one_gwei)
trx_transfer = proxy.eth.account.sign_transaction(dict(
nonce=proxy.eth.get_transaction_count(eth_account_alice.address),
chainId=proxy.eth.chain_id,
gas=987654321,
gasPrice=0,
to=eth_account_bob.address,
value=one_gwei),
eth_account_alice.key
)
print('trx_transfer:', trx_transfer)
trx_transfer_hash = proxy.eth.send_raw_transaction(trx_transfer.rawTransaction)
print('trx_transfer_hash:', trx_transfer_hash.hex())
trx_transfer_receipt = proxy.eth.wait_for_transaction_receipt(trx_transfer_hash)
print('trx_transfer_receipt:', trx_transfer_receipt)
alice_balance_after_transfer = proxy.eth.get_balance(eth_account_alice.address)
bob_balance_after_transfer = proxy.eth.get_balance(eth_account_bob.address)
print('alice_balance_after_transfer:', alice_balance_after_transfer)
print('bob_balance_after_transfer:', bob_balance_after_transfer)
self.assertEqual(alice_balance_after_transfer, alice_balance_before_transfer - one_gwei)
self.assertEqual(bob_balance_after_transfer, bob_balance_before_transfer + one_gwei)
# @unittest.skip("a.i.")
def test_06_transfer_one_and_a_half_gweis(self):
print("\ntest_06_transfer_one_and_a_half_gweis")
eth_account_alice = proxy.eth.account.create('alice')
eth_account_bob = proxy.eth.account.create('bob')
print('eth_account_alice.address:', eth_account_alice.address)
print('eth_account_bob.address:', eth_account_bob.address)
one_gwei = 1_000_000_000
if True:
print("add funds to alice and bob")
print("alice")
trx_transfer = proxy.eth.account.sign_transaction(dict(
nonce=proxy.eth.get_transaction_count(proxy.eth.default_account),
chainId=proxy.eth.chain_id,
gas=987654321,
gasPrice=0,
to=eth_account_alice.address,
value=one_gwei),
eth_account.key
)
print('trx_transfer:', trx_transfer)
trx_transfer_hash = proxy.eth.send_raw_transaction(trx_transfer.rawTransaction)
print('trx_transfer_hash:', trx_transfer_hash.hex())
trx_transfer_receipt = proxy.eth.wait_for_transaction_receipt(trx_transfer_hash)
print('trx_transfer_receipt:', trx_transfer_receipt)
print("bob")
trx_transfer = proxy.eth.account.sign_transaction(dict(
nonce=proxy.eth.get_transaction_count(proxy.eth.default_account),
chainId=proxy.eth.chain_id,
gas=987654321,
gasPrice=0,
to=eth_account_bob.address,
value=one_gwei),
eth_account.key
)
print('trx_transfer:', trx_transfer)
trx_transfer_hash = proxy.eth.send_raw_transaction(trx_transfer.rawTransaction)
print('trx_transfer_hash:', trx_transfer_hash.hex())
trx_transfer_receipt = proxy.eth.wait_for_transaction_receipt(trx_transfer_hash)
print('trx_transfer_receipt:', trx_transfer_receipt)
alice_balance_before_transfer = proxy.eth.get_balance(eth_account_alice.address)
bob_balance_before_transfer = proxy.eth.get_balance(eth_account_bob.address)
print('alice_balance_before_transfer:', alice_balance_before_transfer)
print('bob_balance_before_transfer:', bob_balance_before_transfer)
one_and_a_half_gweis = 1_500_000_000
print('one_and_a_half_gweis:', one_and_a_half_gweis)
trx_transfer = proxy.eth.account.sign_transaction(dict(
nonce=proxy.eth.get_transaction_count(eth_account_alice.address),
chainId=proxy.eth.chain_id,
gas=987654321,
gasPrice=0,
to=eth_account_bob.address,
value=one_and_a_half_gweis),
eth_account_alice.key
)
print('trx_transfer:', trx_transfer)
trx_transfer_hash = proxy.eth.send_raw_transaction(trx_transfer.rawTransaction)
print('trx_transfer_hash:', trx_transfer_hash.hex())
trx_transfer_receipt = proxy.eth.wait_for_transaction_receipt(trx_transfer_hash)
print('trx_transfer_receipt:', trx_transfer_receipt)
alice_balance_after_transfer = proxy.eth.get_balance(eth_account_alice.address)
bob_balance_after_transfer = proxy.eth.get_balance(eth_account_bob.address)
print('alice_balance_after_transfer:', alice_balance_after_transfer)
print('bob_balance_after_transfer:', bob_balance_after_transfer)
print('check https://github.com/neonlabsorg/neon-evm/issues/210')
one_gwei = 1_000_000_000
print('one_gwei:', one_gwei)
self.assertEqual(alice_balance_after_transfer, alice_balance_before_transfer - one_gwei)
self.assertEqual(bob_balance_after_transfer, bob_balance_before_transfer + one_gwei)
@unittest.skip("a.i.")
def test_07_execute_long_transaction(self):
print("\ntest_07_execute_long_transaction")
trx_initValue = self.test_185_solidity_contract.functions.initValue('185 init value').buildTransaction({'nonce': proxy.eth.get_transaction_count(proxy.eth.default_account)})
print('trx_initValue:', trx_initValue)
trx_initValue_signed = proxy.eth.account.sign_transaction(trx_initValue, eth_account.key)
print('trx_initValue_signed:', trx_initValue_signed)
trx_initValue_hash = proxy.eth.send_raw_transaction(trx_initValue_signed.rawTransaction)
print('trx_initValue_hash:', trx_initValue_hash.hex())
trx_initValue_receipt = proxy.eth.wait_for_transaction_receipt(trx_initValue_hash)
print('trx_initValue_hash_receipt:', trx_initValue_receipt)
value = self.test_185_solidity_contract.functions.getValue().call()
print('value:', value.hex())
self.assertEqual(value.hex(), '36fb9ea61aba18555110881836366c8d7701685174abe4926673754580ee26c5')
from datetime import datetime
start = datetime.now()
times_to_calculate = 10
trx_calculate = self.test_185_solidity_contract.functions.calculateKeccakAndStore(times_to_calculate).buildTransaction({'nonce': proxy.eth.get_transaction_count(proxy.eth.default_account)})
print('trx_calculate:', trx_calculate)
trx_calculate_signed = proxy.eth.account.sign_transaction(trx_calculate, eth_account.key)
print('trx_calculate_signed:', trx_calculate_signed)
trx_calculate_hash = proxy.eth.send_raw_transaction(trx_calculate_signed.rawTransaction)
print('trx_calculate_hash:', trx_calculate_hash.hex())
trx_calculate_receipt = proxy.eth.wait_for_transaction_receipt(trx_calculate_hash)
print('trx_calculate_hash_receipt:', trx_calculate_receipt)
time_duration = datetime.now() - start
value = self.test_185_solidity_contract.functions.getValue().call()
print('value:', value.hex())
self.assertEqual(value.hex(), 'e6d201b1e3aab3b3cc100ea7a0b76fcbb3c2fef88fc4e540f9866d8d2e6e2131')
print('times_to_calculate:', times_to_calculate)
print('time_duration:', time_duration)
if __name__ == '__main__':
unittest.main()
|
import sys, os, time, serial, logging, threading
DEFAULT_TTY = '/dev/ttyUSB0'
DEFAULT_BAUD = 9600
TIMEOUT = 3.0
log = logging.getLogger(__name__)
class ArduinoSerial(object):
def __init__(self,tty=DEFAULT_TTY,baud=DEFAULT_BAUD):
self._exit = threading.Event()
self.tty = tty
self.baud = baud
self.serial = None # created @ beginning of `run`
self.callbacks = []
self._thread = None
def start(self):
'''
Calls `self.run()` in a daemon thread.
'''
if self._thread is not None:
raise Exception('ArduinoSerial is already started!')
self._exit.clear()
self._thread = threading.Thread(
target=self.run,
name='ArduinoSerial @ %s' % self.tty)
self._thread.daemon = True
self._thread.start()
def stop(self):
'''
Tell the thread to exit, and wait a moment to
give the thread a chance to terminate
'''
self._exit.set()
if self._thread: self._thread.join(3)
self._exit.clear()
self._thread = None
def run(self):
'''
Listen on the serial channel and pass any valid data to the callbacks
'''
while not self._exit.is_set():
line = None
try:
if self.serial is None:
# allows the code to recover/ initialize if the USB device
# is unplugged after this code is running:
if not os.path.exists( self.tty ):
log.debug('TTY %s does not exist; Arduino is probably not plugged in...', self.tty)
self._exit.wait(10) # wait before retry
continue
self.serial = serial.Serial(
port=self.tty,
baudrate=self.baud,
timeout=TIMEOUT )
log.info("Opened serial port at %s", self.tty)
line = self.serial.readline()
                if line: log.debug("Arduino >> %s", line)
message = _parse( line )
if not message: continue
log.debug( "Parsed message to %s",message )
for cb in self.callbacks:
try: cb( message )
                    except Exception:
log.exception(
"Callback threw exception for message: %s",
message )
except serial.SerialException as ex:
log.exception("Serial error: %s",ex)
if self.serial is not None:
# allow the serial to re-initialize
try: self.serial.close()
                    except Exception: pass
self.serial = None
self._exit.wait(10) # sleep to avoid tight loop
            except Exception as msg:
log.exception("Unexpected read error: %s for line: %s",msg,line)
self._exit.wait(1)
def add_callback(self,cb):
'''
Add a callback which will be fired for each valid message
received from the serial channel
'''
self.callbacks.append(cb)
def send(self,cmd):
'''
Send a command to the Arduino
'''
logging.debug("Arduino << %s", cmd)
if not self.serial:
logging.warn("Serial not open! Dropped message %r", cmd)
return
self.serial.write(cmd+ "\n")
def _parse(line):
'''
Parse the command into its parts
'''
if not line: return
return line.split()
if __name__ == '__main__':
# This is just test code to verify we can read data from the USB device.
logging.basicConfig(level=logging.DEBUG)
from optparse import OptionParser
opts = OptionParser()
opts.add_option("-t", "--tty", dest="tty",
help="TTY port", metavar="TTY")
(options, args) = opts.parse_args()
log.info("Starting serial on: %s", options.tty)
channel = ArduinoSerial(options.tty)
def callback(msg):
print "Got message: %s" % (msg,)
channel.add_callback(callback)
channel.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        channel.stop()
|
from pathlib import Path
import torch
from models.models import TfIdfModel
from data.data_preprocessing import TfIdfPreprocessor
def main():
preprocessor = TfIdfPreprocessor.load_from_checkpoint(
list(Path("test_checkpoints").glob("*.pkl"))[0]
)
model_checkpoint = torch.load(list(Path("test_checkpoints").glob("*.pth"))[0])
model = TfIdfModel(input_dim=5000, device="cpu")
model.load_state_dict(model_checkpoint)
model.eval()
while True:
review = input(">> ")
vectorized = preprocessor.transform([review])
print(
f"The review is {torch.sigmoid(model(vectorized)).item() * 100}% positive."
)
if __name__ == "__main__":
main()
|
from dataclasses import dataclass
from typing import Any
import torch
@dataclass
class EpochData:
epoch_id: int
duration_train: int
duration_test: int
loss_train: float
accuracy: float
loss: float
class_precision: Any
class_recall: Any
client_id: str = None
def to_csv_line(self):
        delimiter = ','
        values = [str(x) for x in self.__dict__.values()]
        return delimiter.join(values)
@dataclass
class GANEpochData:
epoch_id: int
duration_train: int
duration_test: int
disc: Any
client_id: str = None
def to_csv_line(self):
        delimiter = ','
        # exclude the discriminator object itself; only scalar fields go to the CSV line
        values = [str(v) for k, v in self.__dict__.items() if k != 'disc']
        return delimiter.join(values)
@dataclass
class FeGANEpochData:
epoch_id: int
duration_train: int
duration_test: int
net: Any
client_id: str = None
def to_csv_line(self):
        delimiter = ','
        # exclude the network object itself; only scalar fields go to the CSV line
        values = [str(v) for k, v in self.__dict__.items() if k != 'net']
        return delimiter.join(values)
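
# --- Illustrative usage (values are placeholders, not from the original module) ---
# EpochData(1, 120, 15, 0.42, 0.87, 0.39, [0.9, 0.8], [0.85, 0.82], "client_0").to_csv_line()
# -> "1,120,15,0.42,0.87,0.39,[0.9, 0.8],[0.85, 0.82],client_0"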
|
def twoNumberSum(array, targetSum):
    # Brute force: check every pair of numbers for the target sum (O(n^2)).
    for i in range(len(array) - 1):
        firstNum = array[i]
        for j in range(i + 1, len(array)):
            secondNum = array[j]
            if firstNum + secondNum == targetSum:
                return [firstNum, secondNum]
    return []
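
# A linear-time alternative using a set, shown for comparison (illustrative,
# not part of the original solution; assumes any one matching pair may be returned):
def twoNumberSumLinear(array, targetSum):
    seen = set()
    for num in array:
        complement = targetSum - num
        if complement in seen:
            return [complement, num]
        seen.add(num)
    return []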
|
"""
paper: Memory Fusion Network for Multi-View Sequential Learning
From: https://github.com/pliang279/MFN
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['MFN']
class MFN(nn.Module):
def __init__(self, args):
super(MFN, self).__init__()
self.d_l,self.d_a,self.d_v = args.feature_dims
self.dh_l,self.dh_a,self.dh_v = args.hidden_dims
total_h_dim = self.dh_l+self.dh_a+self.dh_v
self.mem_dim = args.memsize
window_dim = args.windowsize
output_dim = args.num_classes if args.train_mode == "classification" else 1
attInShape = total_h_dim*window_dim
gammaInShape = attInShape+self.mem_dim
final_out = total_h_dim+self.mem_dim
h_att1 = args.NN1Config["shapes"]
h_att2 = args.NN2Config["shapes"]
h_gamma1 = args.gamma1Config["shapes"]
h_gamma2 = args.gamma2Config["shapes"]
h_out = args.outConfig["shapes"]
att1_dropout = args.NN1Config["drop"]
att2_dropout = args.NN2Config["drop"]
gamma1_dropout = args.gamma1Config["drop"]
gamma2_dropout = args.gamma2Config["drop"]
out_dropout = args.outConfig["drop"]
self.lstm_l = nn.LSTMCell(self.d_l, self.dh_l)
self.lstm_a = nn.LSTMCell(self.d_a, self.dh_a)
self.lstm_v = nn.LSTMCell(self.d_v, self.dh_v)
self.att1_fc1 = nn.Linear(attInShape, h_att1)
self.att1_fc2 = nn.Linear(h_att1, attInShape)
self.att1_dropout = nn.Dropout(att1_dropout)
self.att2_fc1 = nn.Linear(attInShape, h_att2)
self.att2_fc2 = nn.Linear(h_att2, self.mem_dim)
self.att2_dropout = nn.Dropout(att2_dropout)
self.gamma1_fc1 = nn.Linear(gammaInShape, h_gamma1)
self.gamma1_fc2 = nn.Linear(h_gamma1, self.mem_dim)
self.gamma1_dropout = nn.Dropout(gamma1_dropout)
self.gamma2_fc1 = nn.Linear(gammaInShape, h_gamma2)
self.gamma2_fc2 = nn.Linear(h_gamma2, self.mem_dim)
self.gamma2_dropout = nn.Dropout(gamma2_dropout)
self.out_fc1 = nn.Linear(final_out, h_out)
self.out_fc2 = nn.Linear(h_out, output_dim)
self.out_dropout = nn.Dropout(out_dropout)
def forward(self, text_x, audio_x, video_x):
'''
Args:
audio_x: tensor of shape (batch_size, sequence_len, audio_in)
video_x: tensor of shape (batch_size, sequence_len, video_in)
text_x: tensor of shape (batch_size, sequence_len, text_in)
'''
text_x = text_x.permute(1,0,2)
audio_x = audio_x.permute(1,0,2)
video_x = video_x.permute(1,0,2)
# x is t x n x d
n = text_x.size()[1]
t = text_x.size()[0]
self.h_l = torch.zeros(n, self.dh_l).to(text_x.device)
self.h_a = torch.zeros(n, self.dh_a).to(text_x.device)
self.h_v = torch.zeros(n, self.dh_v).to(text_x.device)
self.c_l = torch.zeros(n, self.dh_l).to(text_x.device)
self.c_a = torch.zeros(n, self.dh_a).to(text_x.device)
self.c_v = torch.zeros(n, self.dh_v).to(text_x.device)
self.mem = torch.zeros(n, self.mem_dim).to(text_x.device)
all_h_ls = []
all_h_as = []
all_h_vs = []
all_c_ls = []
all_c_as = []
all_c_vs = []
all_mems = []
for i in range(t):
# prev time step
prev_c_l = self.c_l
prev_c_a = self.c_a
prev_c_v = self.c_v
# curr time step
new_h_l, new_c_l = self.lstm_l(text_x[i], (self.h_l, self.c_l))
new_h_a, new_c_a = self.lstm_a(audio_x[i], (self.h_a, self.c_a))
new_h_v, new_c_v = self.lstm_v(video_x[i], (self.h_v, self.c_v))
# concatenate
prev_cs = torch.cat([prev_c_l,prev_c_a,prev_c_v], dim=1)
new_cs = torch.cat([new_c_l,new_c_a,new_c_v], dim=1)
cStar = torch.cat([prev_cs,new_cs], dim=1)
attention = F.softmax(self.att1_fc2(self.att1_dropout(F.relu(self.att1_fc1(cStar)))),dim=1)
attended = attention*cStar
cHat = torch.tanh(self.att2_fc2(self.att2_dropout(F.relu(self.att2_fc1(attended)))))
both = torch.cat([attended,self.mem], dim=1)
gamma1 = torch.sigmoid(self.gamma1_fc2(self.gamma1_dropout(F.relu(self.gamma1_fc1(both)))))
gamma2 = torch.sigmoid(self.gamma2_fc2(self.gamma2_dropout(F.relu(self.gamma2_fc1(both)))))
self.mem = gamma1*self.mem + gamma2*cHat
all_mems.append(self.mem)
# update
self.h_l, self.c_l = new_h_l, new_c_l
self.h_a, self.c_a = new_h_a, new_c_a
self.h_v, self.c_v = new_h_v, new_c_v
all_h_ls.append(self.h_l)
all_h_as.append(self.h_a)
all_h_vs.append(self.h_v)
all_c_ls.append(self.c_l)
all_c_as.append(self.c_a)
all_c_vs.append(self.c_v)
# last hidden layer last_hs is n x h
last_h_l = all_h_ls[-1]
last_h_a = all_h_as[-1]
last_h_v = all_h_vs[-1]
last_mem = all_mems[-1]
last_hs = torch.cat([last_h_l,last_h_a,last_h_v,last_mem], dim=1)
output = self.out_fc2(self.out_dropout(F.relu(self.out_fc1(last_hs))))
res = {
'M': output,
'L': last_hs
}
return res
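

# --- Minimal usage sketch (not part of the original module; hyper-parameter values
# are illustrative). The attention input is the concatenation [prev_cs, new_cs], so
# `windowsize` must be 2 for the shapes to line up. ---
if __name__ == '__main__':
    from types import SimpleNamespace
    args = SimpleNamespace(
        feature_dims=(300, 5, 20),       # text, audio, video input sizes
        hidden_dims=(64, 8, 16),         # per-modality LSTM hidden sizes
        memsize=64, windowsize=2,
        num_classes=1, train_mode="regression",
        NN1Config={"shapes": 32, "drop": 0.5},
        NN2Config={"shapes": 32, "drop": 0.5},
        gamma1Config={"shapes": 32, "drop": 0.5},
        gamma2Config={"shapes": 32, "drop": 0.5},
        outConfig={"shapes": 32, "drop": 0.5},
    )
    model = MFN(args)
    text = torch.randn(4, 10, 300)       # (batch, seq_len, text_in)
    audio = torch.randn(4, 10, 5)
    video = torch.randn(4, 10, 20)
    out = model(text, audio, video)
    print(out['M'].shape)                # torch.Size([4, 1])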
|
"""
Handles incoming motion commands and translates them to actions.
Copyright (c) 2013 Sean Watson
Licensed under the MIT license
"""
import threading
import logging
import time
class MotionHandler(threading.Thread):
"""Translates motion commands to actions.
When a new motion command is received over the wireless
connection it is consumed by the MotionHandler. The
MotionHandler then scans through the motions it has been
set up to detect trying to match the command to the motions.
If a match is found and that motion is completed the corresponding
action is executed.
Attributes:
motion_queue: A Queue for incoming motion commands.
motions: The motions that should be detected
kill: A boolean flag for stopping the thread
timeout: The amount of time in seconds a composite motion is
allowed to take
"""
def __init__(self, motion_queue):
"""Initializes a new MotionHandler.
Args:
motion_queue: A queue where the incoming commands will be placed
"""
threading.Thread.__init__(self)
self.motion_queue = motion_queue
self.motions = []
self.kill = False
self.timeout = 1
logging.debug("Created MotionHandler %s . Reader %s",
self, self.motion_queue)
def run(self):
"""The main thread process.
Waits for commands to be received, processes them and calls the
appropriate actions when necessary.
"""
logging.debug("Starting MotionHandler thread")
start = time.time()
while not self.kill:
code = int(self.motion_queue.get(True)) # Blocking get call
logging.debug("Got code: %s", code)
# Reset all motions if stationary for too long
if(time.time() - start > self.timeout):
for mot in self.motions:
mot.position = 0
# Check the code against the handled motions
for mot in self.motions:
# Check if it matches the next code in the motion
if code == mot.motions[mot.position].code:
                    # If the motion is done, perform the movement
if mot.position == len(mot.motions) - 1:
mot.position = 0
mot.move()
logging.debug("Motion %s fully matched", mot)
# Otherwise start looking for the next motion
else:
mot.position += 1
logging.debug("Motion %s partially matched", mot)
# Reset the position for a wrong motion
else:
mot.position = 0
self.motion_queue.task_done()
start = time.time()
logging.info("MotionHandler thread stopped")
def stop(self):
"""Stops the thread."""
self.kill = True
self.motion_queue.put(0) # Needed to get out of blocking call
logging.debug("Stopping MotionHandler")
def add_motion(self, mot):
"""Adds a motion to detect.
Once added the handler will try to detect this motion.
Duplicate motions are allowed, but it will cause the action
to get executed multiple times for each detection.
Args:
The new motion to be detected.
"""
self.motions.append(mot)
logging.debug("Added Motion %s to MotionHandler %s",
mot, self)
|
import numpy as np
"""
scan_options ={
"l" : length,
'x' : exclude,
'c' : column,
'b' : partitions,
'D' : corrlength,
'V' : verbosity,
'o' : file_out
}
"""
def cond_entropy(bins, t, partitions):
"""
    :param bins: The discretized version of the time series
:type bins: array of ints
:param t: the time delay
:type t: int
:param partitions: The number of bins used for discretization
:type partitions: int
:return: conditional entropy (mutual)
:rtype: float
"""
count = 0
cond_ent = 0.0
h1 = np.zeros(partitions)
h11 = h1.copy()
h2 = np.zeros((partitions, partitions))
for i in range(t, len(bins)):
hii = bins[i]
hi = bins[i - t]
h1[hi] += 1
h11[hii] += 1
h2[hi][hii] += 1
count += 1
norm = 1.0 / count
cond_ent = 0.0
for i in range(partitions):
hpi = h1[i] * norm
if (hpi > 0.0):
for j in range(partitions):
hpj = h11[j] * norm
if (hpj > 0.0):
pij = h2[i][j] * norm
if (pij > 0.0):
cond_ent += pij * np.log(pij / hpj / hpi)
return cond_ent
def mutual(series, corrlength, partitions):
"""
:param series: array of the time series for times : 0,delta, 2 delta....
:type series: numpy array
:param corrlength: maximal time delay
:type corrlength: int
:param partitions: number of bins to discretize the data for MI
:type partitions: int
    :return: array of size corrlength + 1 (index 0: Shannon entropy)
:rtype: numpy array of floats
"""
length = series.shape[0]
# Rescaling Data
mn = series.min()
interval = series.max() - mn
if interval == 0:
raise "Constant data"
series = (series - mn) / interval
    bins = np.clip((series * partitions).astype(int), 0, partitions - 1)
if (corrlength >= length):
corrlength = length - 1
ent = []
for tau in range(corrlength + 1):
ent.append(cond_entropy(bins, tau, partitions))
return np.array(ent)
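

# --- Illustrative usage (not part of the original module) ---
# Time-delayed mutual information of a sampled sine wave; the first local
# minimum of this curve is a common choice of embedding delay.
if __name__ == "__main__":
    series = np.sin(np.linspace(0, 20 * np.pi, 2000))
    mi = mutual(series, corrlength=50, partitions=16)
    for tau, value in enumerate(mi[:10]):
        print(tau, value)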
|
"""
This module contains implementation of REST API views for materials app.
"""
import json
import logging
from django.conf import settings
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.db.models import Count, F, Q, QuerySet
from django.shortcuts import get_object_or_404, render, Http404
from rest_framework.decorators import action
from rest_framework.exceptions import AuthenticationFailed, MethodNotAllowed
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.generics import CreateAPIView, ListAPIView, RetrieveAPIView
from rest_framework.viewsets import (
ModelViewSet
)
from rest_framework.permissions import AllowAny
from surf.apps.communities.models import Team, Community
from surf.apps.filters.models import MpttFilterItem
from surf.apps.filters.serializers import MpttFilterItemSerializer
from surf.apps.materials.filters import (
CollectionFilter
)
from surf.apps.materials.models import (
Collection,
Material,
CollectionMaterial,
SharedResourceCounter,
RESOURCE_TYPE_MATERIAL,
RESOURCE_TYPE_COLLECTION
)
from surf.apps.materials.serializers import (
SearchSerializer,
KeywordsRequestSerializer,
SimilaritySerializer,
AuthorSuggestionSerializer,
MaterialsRequestSerializer,
CollectionSerializer,
CollectionMaterialsRequestSerializer,
MaterialShortSerializer,
CollectionMaterialPositionSerializer,
SharedResourceCounterSerializer
)
from surf.apps.materials.utils import (
add_extra_parameters_to_materials,
get_material_details_by_id,
add_material_themes,
add_material_disciplines,
add_search_query_to_elastic_index
)
from surf.apps.locale.models import Locale
from surf.apps.core.schema import SearchSchema
from surf.vendor.elasticsearch.api import ElasticSearchApiClient
logger = logging.getLogger(__name__)
def portal_material(request, *args, **kwargs):
material = _get_material_by_external_id(request, kwargs["external_id"])
if not material:
raise Http404(f"Material not found: {kwargs['external_id']}")
return render(request, "portal/index.html", {
'meta_title': f"{material[0]['title']} | Edusources",
'meta_description': material[0]["description"],
'matomo_id': settings.MATOMO_ID
})
def portal_single_page_application(request, *args):
site_description_translation = Locale.objects.filter(asset="meta-site-description").last()
site_description = getattr(site_description_translation, request.LANGUAGE_CODE, "Edusources")
return render(request, "portal/index.html", {
'meta_title': "Edusources",
'meta_description': site_description,
'matomo_id': settings.MATOMO_ID
})
def portal_page_not_found(request, exception, template_name=None):
site_description_translation = Locale.objects.filter(asset="meta-site-description").last()
site_description = getattr(site_description_translation, request.LANGUAGE_CODE, "Edusources")
return render(
request,
"portal/index.html",
context={
'meta_title': "Edusources",
'meta_description': site_description,
'matomo_id': settings.MATOMO_ID
},
status=404
)
class MaterialSearchAPIView(CreateAPIView):
"""
The main search endpoint.
Specify the search query in the ``search_text`` property of the body to do a simple search.
All other properties are optional and are described below
## Request body
Apart from search_text you can specify the following properties in the body of the request:
**page_size**: Number of results to return per page.
**page**: A page number within the paginated result set.
**ordering**: The external_id of a filter category to order results by (for instance: "publisher_date").
This will ignore relevance of results and order by the specified property.
By default ordering is ascending.
If you specify the minus sign (for instance: "-publisher_date") the ordering will be descending.
    **filters**: Filters consist of objects that specify an external_id and an items property.
The external_id should be the external_id of a root filter category (for instance: "technical_type").
See the filter categories endpoint described below for more details on filter categories.
Next to the external_id you should specify an array under the items property.
Elements in this array should only consist of external_id values.
These external_ids are also filter category external_ids (for instance: "video"),
but the referenced filter categories should be a descendant of the root filter category specified earlier
("technical_type" in our example).
Filters under the same root filter category will function as an OR filter.
While multiple filter category items across root filter categories function as AND filters.
## Response body
**results**: An array containing the search results.
**filter_categories**: An array with all filter categories.
The count values of the filter categories will indicate how many results match the filter category.
**records_total**: Count of all available results
**page_size**: Number of results to return per page.
**page**: The current page number.
"""
serializer_class = SearchSerializer
permission_classes = (AllowAny,)
schema = SearchSchema()
def post(self, request, *args, **kwargs):
# validate request parameters
serializer = SearchSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
data["drilldown_names"] = [
mptt_filter.external_id for mptt_filter in MpttFilterItem.objects.filter(parent=None)
]
elastic = ElasticSearchApiClient()
res = elastic.search(**data)
records = res["records"]
if settings.PROJECT == "edusources":
records = add_extra_parameters_to_materials(request.user, records)
drill_down_dict = {item['external_id']: item for item in res["drilldowns"]}
drill_down_flat = {}
for external_id, drilldown in drill_down_dict.items():
if drilldown.get('count', None):
drill_down_flat.update({external_id: drilldown})
if drilldown['items']:
for el in drilldown['items']:
drill_down_flat.update({el['external_id']: el})
filter_category_tree = MpttFilterItem.objects.select_related("title_translations").get_cached_trees()
filter_categories = MpttFilterItemSerializer(
filter_category_tree,
many=True,
context={'search_counts': drill_down_flat}
)
if data['page'] == 1 and data["search_text"]:
add_search_query_to_elastic_index(res["recordcount"], data["search_text"], data["filters"])
rv = dict(records=records,
records_total=res["recordcount"],
filter_categories=filter_categories.data,
page=data["page"],
page_size=data["page_size"],
did_you_mean=res["did_you_mean"])
return Response(rv)
class KeywordsAPIView(ListAPIView):
"""
This endpoint returns suggestions about what a user may be typing.
Call this endpoint when a user is typing a search and display the results (for instance below the search bar).
This endpoint only completes queries that are at least 4 characters.
"""
serializer_class = KeywordsRequestSerializer
permission_classes = (AllowAny,)
schema = SearchSchema()
queryset = QuerySet()
pagination_class = None
filter_backends = []
def get(self, request, *args, **kwargs):
# validate request parameters
serializer = KeywordsRequestSerializer(data=request.GET)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
elastic = ElasticSearchApiClient()
res = elastic.autocomplete(**data)
return Response(res)
class SimilarityAPIView(RetrieveAPIView):
"""
This endpoint returns similar documents as the input document.
These similar documents can be offered as suggestions to look at for the user.
"""
serializer_class = SimilaritySerializer
permission_classes = (AllowAny,)
schema = SearchSchema()
pagination_class = None
filter_backends = []
def get_object(self):
serializer = self.get_serializer(data=self.request.GET)
serializer.is_valid(raise_exception=True)
external_id = serializer.validated_data["external_id"]
language = serializer.validated_data["language"]
elastic = ElasticSearchApiClient()
result = elastic.more_like_this(external_id, language)
if settings.PROJECT == "edusources":
result["results"] = add_extra_parameters_to_materials(self.request.user, result["results"])
return result
class AuthorSuggestionsAPIView(RetrieveAPIView):
"""
This endpoint returns documents where the name of the author appears in the text or metadata,
but is not set as author in the authors field.
These documents can be offered to authors as suggestions for more content from their hand.
"""
serializer_class = AuthorSuggestionSerializer
permission_classes = (AllowAny,)
schema = SearchSchema()
pagination_class = None
filter_backends = []
def get_object(self):
serializer = self.get_serializer(data=self.request.GET)
serializer.is_valid(raise_exception=True)
author_name = serializer.validated_data["author_name"]
elastic = ElasticSearchApiClient()
result = elastic.author_suggestions(author_name)
if settings.PROJECT == "edusources":
result["results"] = add_extra_parameters_to_materials(self.request.user, result["results"])
return result
_MATERIALS_COUNT_IN_OVERVIEW = 4
class MaterialAPIView(APIView):
"""
    View class that retrieves a Material by its edurep id (external_id)
    or returns an overview of materials.
    If an external_id is present in the request data then the `get()` method returns
    the material with that external_id, otherwise it returns an overview of materials.
"""
permission_classes = []
def get(self, request, *args, **kwargs):
# validate request parameters
serializer = MaterialsRequestSerializer(data=request.GET)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
# default is false in the serializer
count_view = data["count_view"]
if "external_id" in kwargs:
return self.get_material(request,
kwargs["external_id"],
count_view=count_view,
shared=data.get("shared"))
if "external_id" in data:
res = _get_material_by_external_id(request,
data["external_id"],
shared=data.get("shared"))
else:
# return overview of newest Materials
elastic = ElasticSearchApiClient()
res = elastic.search('',
# sort by newest items first
ordering="-lom.lifecycle.contribute.publisherdate",
filters=[{
"external_id": "harvest_source",
"items": ["anatomy_tool"]
}],
page_size=_MATERIALS_COUNT_IN_OVERVIEW)
res = add_extra_parameters_to_materials(request.user,
res["records"])
return Response(res)
@staticmethod
def get_material(request, external_id, count_view, shared=None):
"""
Returns the list of materials by external id
:param request: request instance
:param external_id: external id of material
:param shared: share type of material
:param count_view: should the view be counted in the statistics?
:return:
"""
res = _get_material_by_external_id(request, external_id, shared=shared, count_view=count_view)
if not res:
raise Http404()
return Response(res[0])
def _get_material_by_external_id(request, external_id, shared=None, count_view=False):
"""
    Get materials by edurep id and register a unique view of the material
    :param request: request instance
    :param external_id: edurep id of the material
    :param shared: share type of the material
    :param count_view: whether the view should be counted in the statistics
:return: list of materials
"""
material, created = Material.objects.get_or_create(external_id=external_id)
if created:
material.sync_info()
# increase unique view counter
if count_view:
material.view_count = F('view_count') + 1
material.save()
if shared:
# increase share counter
counter_key = SharedResourceCounter.create_counter_key(
RESOURCE_TYPE_MATERIAL,
external_id,
share_type=shared)
SharedResourceCounter.increase_counter(counter_key, extra=shared)
rv = get_material_details_by_id(external_id)
rv = add_extra_parameters_to_materials(request.user, rv)
rv = add_share_counters_to_materials(rv)
return rv
class MaterialRatingAPIView(APIView):
def post(self, request, *args, **kwargs):
params = request.data.get('params')
external_id = params['external_id']
star_rating = params['star_rating']
material_object = Material.objects.get(external_id=external_id, deleted_at=None)
if star_rating == 1:
material_object.star_1 = F('star_1') + 1
if star_rating == 2:
material_object.star_2 = F('star_2') + 1
if star_rating == 3:
material_object.star_3 = F('star_3') + 1
if star_rating == 4:
material_object.star_4 = F('star_4') + 1
if star_rating == 5:
material_object.star_5 = F('star_5') + 1
material_object.save()
material_object.refresh_from_db()
return Response(material_object.get_avg_star_rating())
class MaterialApplaudAPIView(APIView):
def post(self, request, *args, **kwargs):
params = request.data.get('params')
external_id = params['external_id']
material_object = Material.objects.get(external_id=external_id, deleted_at=None)
material_object.applaud_count = F('applaud_count') + 1
material_object.save()
material_object.refresh_from_db()
return Response(material_object.applaud_count)
class CollectionMaterialPromotionAPIView(APIView):
def post(self, request, *args, **kwargs):
# only active and authorized users can promote materials in the collection
collection_instance = Collection.objects.get(id=kwargs['collection_id'])
# check whether the material is actually in the collection
external_id = kwargs['external_id']
collection_materials = CollectionMaterial.objects.filter(
collection=collection_instance, material__external_id=external_id)
if not collection_materials:
raise Http404()
# The material should only be in the collection once
assert len(collection_materials) == 1, f"Material with id {external_id} is in collection " \
f"{collection_instance} multiple times."
collection_material = collection_materials[0]
# promote or demote the material
collection_material.featured = not collection_material.featured
collection_material.save()
return Response(serializers.serialize('json', [collection_material]))
class CollectionViewSet(ModelViewSet):
"""
View class that provides CRUD methods for Collection and `get`, `add`
and `delete` methods for its materials.
"""
queryset = Collection.objects \
.filter(deleted_at=None) \
.annotate(community_cnt=Count('communities', filter=Q(deleted_at=None)))
serializer_class = CollectionSerializer
filter_class = CollectionFilter
permission_classes = []
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
shared = request.GET.get("shared")
if shared:
# increase sharing counter
counter_key = SharedResourceCounter.create_counter_key(
RESOURCE_TYPE_COLLECTION,
str(instance.id),
share_type=shared)
SharedResourceCounter.increase_counter(counter_key, extra=shared)
serializer = self.get_serializer(instance)
return Response(serializer.data)
def get_object(self):
obj = get_object_or_404(self.get_queryset(), pk=self.kwargs["pk"])
self.check_object_permissions(self.request, obj)
if self.request.method != 'GET':
check_access_to_collection(self.request.user, obj)
return obj
@action(methods=['get', 'post', 'delete'], detail=True)
def materials(self, request, pk=None, **kwargs):
instance = self.get_object()
if request.method == "GET":
# validate request parameters
serializer = CollectionMaterialsRequestSerializer(data=request.GET)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
ids = [m.external_id for m in instance.materials.order_by("id").filter()]
rv = dict(records=[],
records_total=0,
filters=[],
page=data["page"],
page_size=data["page_size"])
if ids:
elastic = ElasticSearchApiClient()
res = elastic.get_materials_by_id(ids, 1, len(ids))
records = res.get("records", [])
records = add_extra_parameters_to_materials(request.user, records)
collection_materials = CollectionMaterial.objects.filter(
collection=instance, material__external_id__in=[r['external_id'] for r in records]
)
for collection_material in collection_materials:
record = next(r for r in records if r['external_id'] == collection_material.material.external_id)
record['featured'] = collection_material.featured
record['position'] = collection_material.position
rv["records"] = records
rv["records_total"] = res["recordcount"]
return Response(rv)
data = []
for d in request.data:
# validate request parameters
if request.method == "POST":
serializer = CollectionMaterialPositionSerializer(data=d)
elif request.method == "DELETE":
serializer = MaterialShortSerializer(data=d)
else:
raise MethodNotAllowed(request.method, detail="Method not supported")
serializer.is_valid(raise_exception=True)
data.append(serializer.validated_data)
if request.method == "POST":
self._add_materials(instance, data)
elif request.method == "DELETE":
self._delete_materials(instance, data)
res = MaterialShortSerializer(many=True).to_representation(
instance.materials.filter(deleted_at=None)
)
return Response(res)
@staticmethod
def _add_materials(instance, materials):
"""
Add materials to collection
:param instance: collection instance
:param materials: added materials
:return:
"""
for material in materials:
m_external_id = material["external_id"]
m_position = material["position"]
details = get_material_details_by_id(m_external_id)
if not details:
continue
keywords = details[0].get("keywords")
if keywords:
keywords = json.dumps(keywords)
m, _ = Material.objects.update_or_create(external_id=m_external_id)
add_material_themes(m, details[0].get("themes", []))
add_material_disciplines(m, details[0].get("disciplines", []))
CollectionMaterial.objects.create(collection=instance, material=m, position=m_position)
@staticmethod
def _delete_materials(instance, materials):
"""
Delete materials from collection
:param instance: collection instance
:param materials: materials that should be removed from collection
:return:
"""
materials = [m["external_id"] for m in materials]
materials = Material.objects.filter(external_id__in=materials).all()
instance.materials.remove(*materials)
def check_access_to_collection(user, instance=None):
"""
    Check that the user is authenticated and a member of a community team
    that owns the given collection (if the collection is not None)
:param user: user
:param instance: collection instance
:return:
"""
if not user or not user.is_authenticated:
raise AuthenticationFailed()
try:
community = Community.objects.get(collections__in=[instance])
Team.objects.get(community=community, user=user)
except ObjectDoesNotExist as exc:
raise AuthenticationFailed(f"User {user} is not a member of a community that has collection {instance}. "
f"Error: \"{exc}\"")
except MultipleObjectsReturned as exc:
logger.warning(f"The collection {instance} is in multiple communities. Error:\"{exc}\"")
communities = Community.objects.filter(collections__in=[instance])
teams = Team.objects.filter(community__in=communities, user=user)
if len(teams) > 0:
logger.debug("At least one team satisfies the requirement of be able to delete this collection.")
else:
raise AuthenticationFailed(f"User {user} is not a member of any community with collection {instance}. "
f"Error: \"{exc}\"")
def add_share_counters_to_materials(materials):
"""
Add share counter values for materials.
:param materials: array of materials
:return: updated array of materials
"""
for m in materials:
key = SharedResourceCounter.create_counter_key(RESOURCE_TYPE_MATERIAL, m["external_id"])
qs = SharedResourceCounter.objects.filter(counter_key__contains=key)
m["sharing_counters"] = SharedResourceCounterSerializer(many=True).to_representation(qs.all())
return materials
class MaterialSetAPIView(APIView):
def get(self, request, *args, **kwargs):
serializer = MaterialShortSerializer(data=request.GET)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
results = _get_material_by_external_id(request, data['external_id'])
parts = results[0]['has_parts']
api_client = ElasticSearchApiClient()
api_response = api_client.get_materials_by_id(parts, page_size=100)
return Response(api_response)
|
from conans import ConanFile
class TestConan(ConanFile):
name = "Test"
version = "0.1"
settings = "os", "compiler", "build_type", "arch"
description = "Package for Test"
url = "None"
license = "None"
def package(self):
self.copy("*", dst="lib", src="obj/libs")
self.copy("*.h", dst="include", src="code", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["ModuleA", "ModuleB"] # This would be the right names
|
import os
import shutil
import logging
import feedcache
import jinja2
import shelve
import staticrss.feed
def _update_feeds(feed_urls, storage):
"""Read urls from *feed_urls* and update the *storage*"""
cache_storage = shelve.open('.cache')
cache = feedcache.Cache(cache_storage)
for url in feed_urls:
logging.info("Fetching {0}".format(url))
feed = cache.fetch(url)
items = [staticrss.feed.Item(item, feed) for item in feed.entries]
storage.update(url, items)
storage.update_age()
cache_storage.close()
def _get_sorted_feed_entries(storage):
entries = [item for item in storage.items()]
return sorted(entries, key=lambda item: item.age)
def _fwalk(root, predicate):
for path, dirnames, filenames in os.walk(root):
dirnames[:] = [d for d in dirnames if predicate(d)]
yield path, dirnames, filenames
def _process_files(config, storage, env):
entries = _get_sorted_feed_entries(storage)
dest = config['destination']
predicate = lambda d: not d.startswith('_')
for path, dirs, files in _fwalk(config['source'], predicate):
dest_dir = os.path.join(dest, path)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for filename in files:
if not filename.startswith(('.', '_')):
src = os.path.join(path, filename)
dst = os.path.join(dest_dir, filename)
if filename.endswith('.html'):
template = env.get_template(src)
                    html = template.render(entries=entries)
                    with open(dst, 'w', encoding='utf-8') as f:
                        f.write(html)
else:
src = os.path.join(path, filename)
dst = os.path.join(dest_dir, filename)
shutil.copy(src, dst)
def build(config):
loader = jinja2.FileSystemLoader([config['layouts'], '.'])
env = jinja2.Environment(loader=loader)
storage_backend = shelve.open('.storage')
storage = staticrss.feed.Storage(storage_backend)
_update_feeds(config['feeds'], storage)
_process_files(config, storage, env)
storage_backend.close()
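

# --- Illustrative usage (config keys taken from the functions above; the paths
# and feed URL are placeholders) ---
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    build({
        'source': '.',
        'destination': '_site',
        'layouts': '_layouts',
        'feeds': ['https://example.com/feed.xml'],
    })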
|
import matplotlib.pyplot as plt
import numpy as np
def qm(x0, n):
x = np.empty(n+1)
x[0] = x0
for t in range(n):
x[t+1] = 4 * x[t] * (1 - x[t])
return x
x = qm(0.1, 250)
fig, ax = plt.subplots(figsize=(10, 6.5))
ax.plot(x, 'b-', lw=2, alpha=0.8)
ax.set_xlabel('time', fontsize=16)
plt.show()
|
from .version import __version__
from .bconfig import BConfig, Identity
from .binarize import *
|
from lan_lex import *
from lan_parser import *
from lan_ast import *
|
# fabfile
# Fabric command definitions for running lock tests.
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Tue Jun 13 12:47:15 2017 -0400
#
# Copyright (C) 2017 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: fabfile.py [] benjamin@bengfort.com $
"""
Fabric command definitions for running lock tests.
"""
##########################################################################
## Imports
##########################################################################
import os
import random
from fabric.api import env, run, cd, parallel, get
from fabric.api import roles, task, execute, settings
class KeyGen(object):
def __init__(self, n=3):
self.n = n
self.conson = "BCDFGHJKLMNPQRSTVWXZ"
self.vowels = "AEIOUY"
self.keys = set([])
def generate(self):
word = ""
for idx in range(self.n):
if idx % 2 == 0:
word += random.choice(self.conson)
else:
word += random.choice(self.vowels)
if word in self.keys:
return self.generate()
self.keys.add(word)
return word
def __call__(self):
return self.generate()
def strpbool(arg):
if arg is False:
return False
if arg is True:
return True
arg = arg.lower().strip()
if arg in {'y', 'yes', 't', 'true', 'on', '1'}:
return True
elif arg in {'n', 'no', 'f', 'false', 'off', '0'}:
return False
else:
raise ValueError("invalid boolean value {!r:}".format(arg))
##########################################################################
## Environment
##########################################################################
# Names
NEVIS = "nevis.cs.umd.edu"
HYPERION = "hyperion.cs.umd.edu"
LAGOON = "lagoon.cs.umd.edu"
# Paths
workspace = "/data/honu"
# Fabric Env
env.colorize_errors = True
env.hosts = [NEVIS, HYPERION, LAGOON]
env.roledefs = {
"client": {HYPERION, LAGOON},
"server": {NEVIS},
}
env.user = "benjamin"
env.client_keys = KeyGen()
def multiexecute(task, n, host, *args, **kwargs):
"""
Execute the task n times on the specified host. If the task is parallel
then this will be parallel as well. All other args are passed to execute.
"""
# Do nothing if n is zero or less
if n < 1: return
# Return one execution of the task with the given host
if n == 1:
return execute(task, host=host, *args, **kwargs)
# Otherwise create a lists of hosts, don't dedupe them, and execute
hosts = [host]*n
with settings(dedupe_hosts=False):
execute(task, hosts=hosts, *args, **kwargs)
##########################################################################
## Honu Commands
##########################################################################
def _serve(relax=False, uptime="45s"):
relax = strpbool(relax)
with cd(workspace):
cmd = "honu serve -s -u {} -w server.jsonl".format(uptime)
if relax:
cmd += " -r"
run(cmd)
@parallel
@roles('server')
def serve(relax=False, uptime="45s"):
_serve(relax, uptime)
def _workload(multikey=False, duration="30s", server=NEVIS):
multikey = strpbool(multikey)
# Add the default port to the server
if ":" not in server:
server = server + ":3264"
with cd(workspace):
ckey = env.client_keys() if multikey else "FOO"
cmd = "honu run -A -a {} -d {} -k {} -w client.jsonl".format(
server, duration, ckey
)
run(cmd)
@parallel
@roles('client')
def workload(multikey=False, duration="30s", server=NEVIS):
_workload(multikey, duration, server)
@parallel
def experiment(relax=False,multikey=False,procs=2):
procs = int(procs)
    cprocs = procs // 2
if procs % 2 == 1 and env.host == LAGOON:
cprocs += 1
if env.host in env.roledefs['client']:
multiexecute(_workload, cprocs, env.host, multikey=multikey)
elif env.host in env.roledefs['server']:
execute(_serve, host=env.host, relax=relax)
@parallel
def getmerge(localpath="."):
local = os.path.join(localpath, "%(host)s", "%(path)s")
remote = "client.jsonl" if env.host in env.roledefs['client'] else "server.jsonl"
remote = os.path.join("/data/honu", remote)
get(remote, local)
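

# --- Example invocations (Fabric 1.x "task:arg=value" syntax; values are illustrative) ---
# fab serve                                   # start the honu server with defaults
# fab workload:multikey=true,duration=60s     # run the client workload
# fab experiment:relax=true,procs=4           # run clients and server together
# fab getmerge:localpath=results              # collect the jsonl result files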
|
from output.models.sun_data.combined.xsd005.xsd005_xsd.xsd005 import (
Base,
Ext,
Root,
Rst,
)
__all__ = [
"Base",
"Ext",
"Root",
"Rst",
]
|
import numpy as np
from sklearn.tree import DecisionTreeClassifier as Tree
from sklearn.linear_model import LogisticRegression as LR
from sklearn.isotonic import IsotonicRegression as Iso
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
class CaliForest(ClassifierMixin, BaseEstimator):
def __init__(self,
n_estimators=300,
criterion="gini",
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
ctype="isotonic",
alpha0=100,
beta0=25):
self.n_estimators = n_estimators
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.ctype = ctype
self.alpha0 = alpha0
self.beta0 = beta0
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=False)
self.estimators = []
self.calibrator = None
for i in range(self.n_estimators):
self.estimators.append(Tree(criterion=self.criterion,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
max_features="auto"))
if self.ctype=="logistic":
self.calibrator = LR(penalty="none",
solver="saga",
max_iter=5000)
elif self.ctype=="isotonic":
self.calibrator = Iso(y_min=0,
y_max=1,
out_of_bounds="clip")
n, m = X.shape
Y_oob = np.full((n, self.n_estimators), np.nan)
n_oob = np.zeros(n)
IB = np.zeros((n, self.n_estimators), dtype=int)
OOB = np.full((n, self.n_estimators), True)
for eid in range(self.n_estimators):
IB[:,eid] = np.random.choice(n, n)
OOB[IB[:,eid],eid] = False
for eid, est in enumerate(self.estimators):
ib_idx = IB[:,eid]
oob_idx = OOB[:,eid]
est.fit(X[ib_idx,:], y[ib_idx])
Y_oob[oob_idx,eid] = est.predict_proba(X[oob_idx,:])[:,1]
n_oob[oob_idx] += 1
oob_idx = n_oob > 1
Y_oob_ = Y_oob[oob_idx,:]
n_oob_ = n_oob[oob_idx]
z_hat = np.nanmean(Y_oob_, axis=1)
z_true = y[oob_idx]
beta = self.beta0 + np.nanvar(Y_oob_, axis=1) * n_oob_ / 2
alpha = self.alpha0 + n_oob_/2
z_weight = alpha / beta
if self.ctype=="logistic":
self.calibrator.fit(z_hat[:,np.newaxis], z_true, z_weight)
elif self.ctype=="isotonic":
self.calibrator.fit(z_hat, z_true, z_weight)
self.is_fitted_ = True
return self
def predict_proba(self, X):
X = check_array(X)
check_is_fitted(self, 'is_fitted_')
n, m = X.shape
n_est = len(self.estimators)
z = np.zeros(n)
y_mat = np.zeros((n,2))
for eid, est in enumerate(self.estimators):
z += est.predict_proba(X)[:,1]
z /= n_est
if self.ctype=="logistic":
y_mat[:,1] = self.calibrator.predict_proba(z[:,np.newaxis])[:,1]
elif self.ctype=="isotonic":
y_mat[:,1] = self.calibrator.predict(z)
y_mat[:,0] = 1 - y_mat[:,1]
return y_mat
def predict(self, X):
proba = self.predict_proba(X)
return np.argmax(proba, axis=1)
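

# --- Minimal usage sketch (illustrative, not part of the original module) ---
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.randn(500, 10)
    y = (X[:, 0] + 0.5 * rng.randn(500) > 0).astype(int)
    clf = CaliForest(n_estimators=50, max_depth=4)
    clf.fit(X, y)
    print(clf.predict_proba(X[:5]))      # calibrated class probabilities
    print(clf.predict(X[:5]))            # hard labels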
|
from .file_transfer_direction import FileTransferDirection
from .transfer_properties import TransferProperties
from .file_transfer_status import FileTransferStatus
from .transfer_statistics import TransferStatistics
from .file_transfer_exception import FileTransferException
from .file_transfer_event_args import FileTransferEventArgs
from .file_transfer_error_event_args import FileTransferErrorEventArgs
from .client_error_event_args import ClientErrorEventArgs
from .batch_transfer_event_args import BatchTransferEventArgs
from .file_transfer_progress_event_args import FileTransferProgressEventArgs
__all__ = [
"FileTransferDirection",
"TransferProperties",
"FileTransferStatus",
"TransferStatistics",
"FileTransferException",
"FileTransferStatus",
"FileTransferEventArgs",
"FileTransferErrorEventArgs",
"ClientErrorEventArgs",
"BatchTransferEventArgs",
"FileTransferProgressEventArgs",
]
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import webnotes
from install_erpnext import exec_in_shell
from webnotes.model.doc import addchild
from webnotes.model.bean import getlist
import os
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
def show_sites(self):
self.doclist=self.doc.clear_table(self.doclist,'site_status_details')
for filename in os.listdir(self.doc.sites_path):
sites = addchild(self.doc, 'site_status_details',
'Site Status Details', self.doclist)
sites.site_name = filename
if filename[-1] != '1':
sites.status = '1'
def make_enable_dissable(self):
for site in getlist(self.doclist, 'site_status_details'):
            # disable the site
if site.status not in [1, '1'] and site.site_name[-1] != '1':
exec_in_shell("mv %(path)s/%(site_name)s/ %(path)s/%(site_name)s1"%{'path':self.doc.sites_path,'site_name':site.site_name})
self.update_site_details(site.site_name[-1], 0)
            # enable the site
if site.status == 1 and site.site_name[-1] == '1':
new_site_name = site.site_name[:-1]
exec_in_shell("mv %(path)s/%(site_name)s/ %(path)s/%(new_site_name)s"%{'path':self.doc.sites_path,'site_name':site.site_name, 'new_site_name':new_site_name})
self.update_site_details(site.site_name[-1], 1)
self.show_sites()
self.doc.save()
def update_site_details(self, site_name, status):
webnotes.conn.sql("update `tabSite Details` set is_active = '%s' where name = '%s'"%(status, site_name))
db_details = webnotes.conn.sql("select database_name, database_password from `tabSite Details` where name = '%s'"%(site_name),as_list=1)
self.test_db(db_details[0][0], db_details[0][1], status)
webnotes.conn.sql("commit")
    def test_db(self, user, pwd, status):
import MySQLdb
myDB = MySQLdb.connect(user="%s"%user,passwd="%s"%pwd,db="%s"%user)
cHandler = myDB.cursor()
cHandler.execute("update `tabSingles` set is_active = '%s' where doctype='Global Defaults'"%(status))
cHandler.execute("commit")
|
from numpy import array
import utils.readers as rd
from pyrr import matrix44 as mat4
# generic 3D object class
class Object3d:
    # constructor
def __init__(self, name, color, nVet, vao, vbo):
        self.name = name    # name
        self.color = color  # color
        self.model = mat4.create_identity()  # model matrix
        self.nVet = nVet    # number of vertices
self.vao = vao
self.vbo = vbo
    # translation
def translate(self, position):
translate = mat4.create_from_translation(position,dtype='f')
self.model = mat4.multiply(self.model, translate)
    # scale
def scale(self, scale):
scale = mat4.create_from_scale(scale,dtype='f')
self.model = mat4.multiply(self.model, scale)
    # rotation
def rotate(self, ang, vect):
rotate = mat4.create_from_axis_rotation(vect, ang)
self.model = mat4.multiply(self.model, rotate)
    # print the object's information
def printInfo(self):
print('Name:', self.name)
print('Nº vert:\n', self.nVet)
print('color:', self.color)
print('Model:\n', self.model)
print('VAO:\n', self.vao)
print('VBO:\n', self.vbo)
print('%%%%%%%%%%%%%%%%%%%%%%%%')
# cube
class Cube(Object3d):
def __init__(self, name, vao, vbo, color=[1.0,1.0,1.0]):
super().__init__(name, color, 42, vao, vbo)
# torus
class Torus(Object3d):
def __init__(self, name, vao, vbo, color=[1.0,1.0,1.0]):
super().__init__(name, color, 3462, vao, vbo)
# cone
class Cone(Object3d):
def __init__(self, name, vao, vbo, color=[1.0,1.0,1.0]):
super().__init__(name, color, 276, vao, vbo)
# ico
class Ico(Object3d):
def __init__(self, name, vao, vbo, color=[1.0,1.0,1.0]):
super().__init__(name, color, 15363, vao, vbo)
class Light:
def __init__(self, name, position):
self.name = name
self.position = position
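

# --- Illustrative usage (vao/vbo are opaque GL handles; 0 is a placeholder) ---
if __name__ == "__main__":
    cube = Cube("my_cube", vao=0, vbo=0, color=[0.2, 0.6, 1.0])
    cube.translate([1.0, 0.0, -5.0])
    cube.scale([2.0, 2.0, 2.0])
    cube.rotate(0.5, [0.0, 1.0, 0.0])
    cube.printInfo()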
|
import os
import logging
from flask import Flask
from flask_mail import Mail
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
__version__ = '0.1.dev'
app = Flask('databoard')
# Load default main config
app_stage = os.getenv('DATABOARD_STAGE', 'DEVELOPMENT').upper()
if app_stage in ['PROD', 'PRODUCTION']:
app.config.from_object('databoard.default_config.ProductionConfig')
elif app_stage in ['TEST', 'TESTING']:
app.config.from_object('databoard.default_config.TestingConfig')
elif app_stage in ['DEV', 'DEVELOP', 'DEVELOPMENT']:
app.config.from_object('databoard.default_config.DevelopmentConfig')
else:
msg = (
"Unknown databoard stage: {}"
"Please set the environment variable `DATABOARD_STAGE` to one of the "
"available stages : 'TESTING', 'DEVELOPMENT' or 'PRODUCTION'"
)
raise AttributeError(msg.format(app_stage))
# Load default database config
app.config.from_object('databoard.default_config.DBConfig')
# Load default internal config
app.config.from_object('databoard.default_config.RampConfig')
# Load user config
user_config = os.getenv('DATABOARD_CONFIG')
if user_config is not None:
app.config.from_json(user_config)
db = SQLAlchemy(app)
mail = Mail(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
login_manager.login_message = 'Please log in or sign up to access this page.'
logging.basicConfig(
level=logging.DEBUG,
format='%(levelname)s: %(message)s',
filename=app.config['LOG_FILENAME'])
# get rid of annoying PIL debug messages
logging.getLogger('PIL.PngImagePlugin').disabled = True
####################################################################
ramp_config = app.config.get_namespace('RAMP_')
deployment_path = app.config.get('DEPLOYMENT_PATH')
ramp_kits_path = os.path.join(deployment_path, ramp_config['kits_dir'])
ramp_data_path = os.path.join(deployment_path, ramp_config['data_dir'])
from . import views # noqa
from . import model # noqa
from . import db_tools # noqa
|
# Load an example mesh.
#
import pyvista
from pyvista import examples
mesh = pyvista.read(examples.antfile)
mesh.plot(cpos='xz')
#
# Load a vtk file.
#
mesh = pyvista.read('my_mesh.vtk') # doctest:+SKIP
#
# Load a meshio file.
#
mesh = pyvista.read("mesh.obj") # doctest:+SKIP
|
import numpy as np
import cv2
import math
class Image:
def __init__(self, name, image):
self.original = image
self.name = name
self.ysize = image.shape[0]
self.xsize = image.shape[1]
# Convert image to Grayscale
def toGrayscale(self, image):
return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Convert image to Gaussian Blur
def toGaussianBlur(self, image, params):
return cv2.GaussianBlur(image, (params.gaussianKernel, params.gaussianKernel), 0)
# Convert image to Canny Transform
def toCanny(self, image, params):
return cv2.Canny(image, params.cannyLowThreshold, params.cannyHighThreshold)
# Convert image to image of Region of Interest
def toRegion(self, image, params):
# Convert Percentage to Pixels
pixX1 = int(params.x1 * self.xsize / 100)
pixX2 = int(params.x2 * self.xsize / 100)
pixX3 = int(params.x3 * self.xsize / 100)
pixX4 = int(params.x4 * self.xsize / 100)
pixY1 = int(params.y1 * self.ysize / 100)
pixY2 = int(params.y2 * self.ysize / 100)
pixY3 = int(params.y3 * self.ysize / 100)
pixY4 = int(params.y4 * self.ysize / 100)
vertices = np.array([[(pixX1, pixY1),
(pixX2, pixY2),
(pixX3, pixY3),
(pixX4, pixY4)]],
dtype=np.int32)
mask = np.zeros_like(image)
# defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(image.shape) > 2:
channel_count = image.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
# returning the image only where mask pixels are nonzero
return cv2.bitwise_and(image, mask)
# Convert image to Hough Transform
def toHough(self, image, params):
# Convert Deg to Rad
theta = np.pi/params.houghTheta
lines = cv2.HoughLinesP(image,
params.houghRho,
theta,
params.houghThreshold,
np.array([]),
minLineLength=params.houghMinLineLen,
maxLineGap=params.houghMaxLineGap)
outputImage = np.zeros((image.shape[0], image.shape[1], 3), dtype=np.uint8)
if (lines is not None):
# Draw Lines
for x1, y1, x2, y2 in lines[:,0]:
# Calculate Line Angle
angle = math.atan2(y2 - y1, x2 - x1) * 180.0 / np.pi
# Ignore Horizontal Angles
if not (-params.houghAngleIgnore < angle < params.houghAngleIgnore):
cv2.line(outputImage, (x1, y1), (x2, y2), [255, 0, 0], 2)
return outputImage
# Convert image to Hough Transform Lines Array
def toHoughLines(self, image, params):
# Convert Deg to Rad
theta = np.pi/params.houghTheta
lines = cv2.HoughLinesP(image,
params.houghRho,
theta,
params.houghThreshold,
np.array([]),
minLineLength=params.houghMinLineLen,
maxLineGap=params.houghMaxLineGap)
filteredLines = []
if (lines is not None):
for x1, y1, x2, y2 in lines[:,0]:
# Calculate Line Angle
angle = math.atan2(y2 - y1, x2 - x1) * 180.0 / np.pi
# Ignore Horizontal Angles
if not (-params.houghAngleIgnore < angle < params.houghAngleIgnore):
filteredLines.append([x1,y1,x2,y2])
return filteredLines
# Join Two Images
def toWeighted(self, initialImage, image):
return cv2.addWeighted(initialImage, 0.8, image, 1., 0.)
# Process Hough Lines to Find Lane Lines
def toLaneLines(self, lines, params):
linesLeft = []
linesRight = []
for l in lines:
x1,y1,x2,y2 = l
# Calculate Slope
if x2 - x1 == 0.:
slope = 999.
else:
slope = (y2 - y1) / (x2 - x1)
# Separate Lines into Left and Right Lines
if slope > 0:
linesRight.append([x1,y1,x2,y2,slope])
elif slope < 0:
linesLeft.append([x1,y1,x2,y2,slope])
# Slope
slopeSumLeft = 0
slopeSumRight = 0
# Line MidPoint
xSumLeft = 0
ySumLeft = 0
xSumRight = 0
ySumRight = 0
# Verify found Lines
foundLeftLine = False
foundRightLine = False
if len(linesLeft) :
foundLeftLine = True
if len(linesRight) :
foundRightLine = True
        # Average the lines
for l in linesLeft:
xSumLeft += (l[2]+l[0])/2
ySumLeft += (l[3]+l[1])/2
slopeSumLeft += l[4]
for l in linesRight:
xSumRight += (l[2]+l[0])/2
ySumRight += (l[3]+l[1])/2
slopeSumRight += l[4]
outputLines = []
if(foundLeftLine):
slopeAvgLeft = slopeSumLeft / len(linesLeft)
xAvgLeft = xSumLeft / len(linesLeft)
yAvgLeft = ySumLeft / len(linesLeft)
# Calculate b in y = m*x + b
bLeft = yAvgLeft - (slopeAvgLeft*xAvgLeft)
# Calculate x1,y1,x2,y2 Coordinates
y1Left = self.ysize
x1Left = (self.ysize -bLeft)/slopeAvgLeft
# Define Upper Limit for Lines
yLimit = max(params.y1,params.y4)*self.ysize/100
y2Left = yLimit
x2Left = (y2Left -bLeft)/slopeAvgLeft
outputLines.append([x1Left, y1Left, x2Left, y2Left])
if(foundRightLine):
slopeAvgRight = slopeSumRight / len(linesRight)
xAvgRight = xSumRight / len(linesRight)
yAvgRight = ySumRight / len(linesRight)
# Calculate b in y = m*x + b
bRight = yAvgRight - (slopeAvgRight*xAvgRight)
# Calculate x1,y1,x2,y2 Coordinates
y1Right = self.ysize
x1Right = (self.ysize -bRight)/slopeAvgRight
# Define Upper Limit for Lines
yLimit = max(params.y1,params.y4)*self.ysize/100
y2Right = yLimit
x2Right = (y2Right -bRight)/slopeAvgRight
outputLines.append([x1Right, y1Right, x2Right, y2Right])
return outputLines
# Save Image to File
def save(self, outputPath, params):
cv2.imwrite(outputPath, self.getLaneLines(params))
print("Image " + self.name + " Saved!")
# Methods for Image Tuner GUI
# Step 1
# Return Original Image
def getOriginal(self):
return self.original
# Step 2
# Return Grayscale Image
def getGrayscale(self):
return self.toGrayscale(self.getOriginal())
# Step 3
# Return Canny Transform Image
def getCanny(self, params):
return self.toCanny(self.toGaussianBlur(self.getGrayscale(),params), params)
# Step 4
# Return Region of Interest Image
def getRegion(self, params):
return self.toRegion(self.getOriginal(), params)
# Step 5
# Return Hough Transform Image
def getHough(self, params):
return self.toHough(self.toRegion(self.getCanny(params), params),params)
# Step 6
# Return Final Image
def getDone(self, params):
return self.toWeighted(self.getOriginal(), self.getHough(params))
# Step 7
    # Return Original Image with Detected Lane Lines
def getLaneLines(self, params):
houghLines = self.toHoughLines(self.toRegion(self.getCanny(params), params), params)
lines = self.toLaneLines(houghLines,params)
linesImage = np.zeros((self.ysize, self.xsize, 3), dtype=np.uint8)
for l in lines:
x1, y1, x2, y2 = l
cv2.line(linesImage, (int(x1), int(y1)), (int(x2), int(y2)), [255, 0, 0], 20)
return self.toWeighted(self.getOriginal(),linesImage)
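

# --- Illustrative usage (parameter names are taken from the methods above; the values,
# the corner ordering of the region of interest and the file names are assumptions) ---
if __name__ == "__main__":
    from types import SimpleNamespace
    params = SimpleNamespace(
        gaussianKernel=5,
        cannyLowThreshold=50, cannyHighThreshold=150,
        # region corners in percent of the frame: top-left, bottom-left, bottom-right, top-right
        x1=45, y1=60, x2=10, y2=100, x3=90, y3=100, x4=55, y4=60,
        houghRho=2, houghTheta=180, houghThreshold=15,
        houghMinLineLen=40, houghMaxLineGap=20, houghAngleIgnore=20)
    frame = cv2.imread("road.jpg")
    if frame is not None:
        Image("road", frame).save("road_lanes.jpg", params)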
|
import pyfmodex, time, yaml
import numpy as np
import os
import random
import ctypes
from pyfmodex.constants import *
import logging, copy
from smooth import SmoothVal
from sound_global_state import new_channel, from_dB, VelocityFilter
import sound_global_state
from auto_sounds import AutomationGroup
system = None
# get the DSP clock directly from the DLL (pyfmodex's own accessor is incorrect)
def DSP_clock():
hi = ctypes.c_uint()
lo = ctypes.c_uint()
result = pyfmodex.fmodobject._dll.FMOD_System_GetDSPClock(system._ptr, ctypes.byref(hi), ctypes.byref(lo))
return (int(hi.value)<<32) | int(lo.value)
def set_channel_delay_start(channel, dsp_time):
pyfmodex.fmodobject._dll.FMOD_Channel_SetDelay(channel._ptr, FMOD_DELAYTYPE_DSPCLOCK_START, (dsp_time>>32), (dsp_time&0xffffffff))
def set_channel_delay_pause(channel, dsp_time):
pyfmodex.fmodobject._dll.FMOD_Channel_SetDelay(channel._ptr, FMOD_DELAYTYPE_DSPCLOCK_PAUSE, (dsp_time>>32), (dsp_time&0xffffffff))
class Sound(object):
"""Representation of a sound object. Each sound_descriptor is a single Sound object.
Transient sounds have a prototype Sound object which is configured on
start up. New sounds can be generated by calling spawn(), which copies
the object and assigns a channel from the channel group for that transient sound
Every transient sound must have a channel group.
update() must be called regularly to update the sound's position, gain and filtering. """
def __init__(self, sound_descriptor, channel_groups, base_path, dsp_jitter=0):
global system
system = sound_global_state.system # make sure FMOD object is initialised
self.name = sound_descriptor['name']
logging.debug("Creating sound %s, descriptor:\n%s" % (self.name, yaml.dump(sound_descriptor, indent=4)))
self.dsp_jitter = dsp_jitter
# create the sound
self.filename = sound_descriptor['file']
self.sound = system.create_sound(os.path.join(base_path, self.filename))
self.base_frequency = self.sound.default_frequency
logging.debug("Base frequency is %d Hz" % (self.base_frequency))
self.automation_blocks = sound_descriptor.get('automation', [])
logging.debug("Sound file %s loaded succesfully" % self.filename)
self.sound.min_distance = sound_descriptor.get('min_distance', 10.0)
self.position = SmoothVal(np.array(sound_descriptor.get('position', [0,0,0])), 0.01)
self.doppler = sound_descriptor.get('doppler', 0.0)
self.spread = sound_descriptor.get("stereo_spread", 0.0)
self.sound.position = self.position.state
self.velocity = VelocityFilter()
self.clear_velocity()
self.loop_points = sound_descriptor.get('loop_points', None)
self.looping = sound_descriptor.get('loop', False)
self.loop_count = sound_descriptor.get('loop_count', -1)
self.filter = sound_descriptor.get('filter', None)
self.gain = SmoothVal(sound_descriptor.get('gain', 0.0),0.05, linear=True)
self.frequency = SmoothVal(sound_descriptor.get('frequency', 1.0), 0.05)
self.filter_val = SmoothVal(0,0)
self.finished = False
self.filter = False
# add low pass filtering if needed
if 'filter' in sound_descriptor:
self.filter = True
self.filter_val = SmoothVal(sound_descriptor['filter'], 0.01)
self.channel_group = None
# set the channel group, if there is one
if 'channel_group' in sound_descriptor:
ch_group = channel_groups.get(sound_descriptor['channel_group'], None)
if ch_group is not None:
self.channel_group = ch_group
self.channel_group.register_sound(self)
else:
logging.debug("Tried to assign sound %s to non-existent channel group %s" % (self.name, ch_group))
self.channel_id = None
self.transient = sound_descriptor.get('transient', False)
if not self.transient:
# spawn immediately if layer
self.assign_channel(new_channel())
self.automations = AutomationGroup()
def set_doppler(self, doppler):
"""Set the doppler factor"""
self.doppler = doppler
channel = self.get_channel()
if channel:
            channel.doppler_level = self.doppler
def clear_velocity(self):
self.velocity.clear()
def set_loop(self, loop, loop_points):
"""Set the looping state for this sound."""
channel = self.get_channel()
self.looping = loop
self.loop_points = loop_points
if self.looping:
channel.mode = FMOD_LOOP_NORMAL
self.sound.loop_count = self.loop_count
else:
channel.mode = FMOD_LOOP_OFF
if self.loop_points is not None:
channel.loop_points = (int(self.loop_points[0]*self.base_frequency), FMOD_TIMEUNIT_PCM), (int(self.loop_points[1]*self.base_frequency), FMOD_TIMEUNIT_PCM)
def assign_channel(self, channel_id):
"""Assign a channel to this sound, and configure the channel properties.
Channels start in the paused state (i.e. are silent). Call start() to start playback
"""
# create the channel for this layer
system.play_sound(self.sound, channelid=channel_id, paused=True)
channel = system.get_channel(channel_id)
logging.debug("Assigning channel to sound %s" % self.name)
channel.volume = from_dB(self.gain.state)
channel.paused = True
channel.doppler_level = self.doppler
channel.threed_spread = self.spread
# adjust delay to avoid machine gunning
dsp_time = DSP_clock()
dsp_time = dsp_time+random.randint(0, self.dsp_jitter)
        # work around pyfmodex bug via the ctypes helper defined above
        set_channel_delay_start(channel, dsp_time)
channel.channel_group = self.channel_group.group
if self.filter:
lp_filter = system.create_dsp_by_type(FMOD_DSP_TYPE_LOWPASS)
lp_filter.set_param(0, self.filter_val.state)
channel.add_dsp(lp_filter)
self.lp_filter = lp_filter
self.channel_id = channel_id
self.set_loop(self.looping, self.loop_points)
system.update()
def test_channel(self, channel):
pl = ctypes.c_bool()
result = pyfmodex.fmodobject._dll.FMOD_Channel_IsPlaying(channel._ptr, ctypes.byref(pl))
return result==FMOD_OK
def get_channel(self):
if self.channel_id is None:
return None
channel = system.get_channel(self.channel_id)
if not self.test_channel(channel):
self.channel_id = None
self.finished = True
return None
return channel
def spawn(self):
"""Find a free channel in this sound's channel group, and then play
the sound on that channel. Returns a reference to a new Sound object
which has the active channel.
If no free channels are found, a random channel is overwritten.
"""
# find a channel in the assigned group, and play on that
if not self.channel_group:
logging.debug("Sound %s tried to spawn without a channel group" % self.name)
return
logging.debug("Sound %s spawning" % self.name)
chans = self.channel_group.sub_channels
potentials = []
for chan in chans:
channel = system.get_channel(chan)
if self.test_channel(channel):
if not channel.is_playing:
potentials.append(chan)
else:
potentials.append(chan)
# no free channels, choose a random one
if len(potentials)==0:
chan = random.choice(self.channel_group.sub_channels)
channel = system.get_channel(chan)
channel.stop()
system.update()
potentials.append(chan)
logging.debug("No free channel was available; randomly stopping one")
else:
logging.debug("Free channel found.")
channel = random.choice(potentials)
# now copy this sound and allocate it a channel
new_sound = copy.deepcopy(self)
new_sound.sound = self.sound # make sure reference is correct
new_sound.assign_channel(channel)
return new_sound
def mute(self):
if self.channel_id:
self.get_channel().mute = True
def unmute(self):
if self.channel_id:
self.get_channel().mute = False
def set_volume(self, gain, time=0.01):
self.gain.set_with_time(gain, time)
def set_filter(self, filter, time=0.01):
self.filter_val.set_with_time(filter, time)
def set_position(self, position, time=0.01):
self.position.set_with_time(position, time)
def start(self):
if self.channel_id:
self.update()
self.get_channel().paused = False
logging.debug("Sound %s started" % self.name)
def stop(self):
        if self.channel_id:
self.get_channel().paused = True
logging.debug("Sound %s stopped" % self.name)
def update(self, dt):
# update the position, gain and filtering of this channel
# must have an active channel to update the state
channel = self.get_channel()
self.automations.update(dt)
if not channel:
return
self.frequency.update(dt)
self.position.update(dt)
channel.frequency = self.frequency.state * self.base_frequency
position = self.position.state + self.automations.get('position')
channel.position = position
self.velocity.new_sample(position)
channel.velocity = list(self.velocity.velocity)
self.gain.update(dt)
channel.volume = from_dB(self.gain.state + self.automations.get('gain'))
if self.filter:
self.filter_val.update(dt)
cutoff = self.filter_val.state + self.automations.get('filter')
if cutoff>20000:
self.lp_filter.bypass = True
else:
self.lp_filter.bypass = False
self.lp_filter.set_param(0, cutoff)
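# --- Usage sketch (not part of the original file) ---------------------------
# Hedged example of the lifecycle described in the Sound docstring.  The
# descriptor fields, the `channel_groups` mapping and `base_path` are
# assumptions; sound_global_state.system must already be initialised before
# any Sound is constructed.
#
#   descriptor = {'name': 'ping', 'file': 'ping.wav', 'transient': True,
#                 'channel_group': 'fx', 'gain': -6.0}
#   proto = Sound(descriptor, channel_groups, base_path='sounds')
#   s = proto.spawn()          # copies the prototype and grabs a channel from the 'fx' group
#   s.start()                  # unpauses the channel
#   while not s.finished:
#       s.update(0.02)         # call regularly: smooths position, gain and filter cutoff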
|
import numpy as np
import math
import scipy.stats
import estimator
class GaussEstimator(estimator.Estimator):
def __init__(self,k):
super().__init__(k)
self.mu = 0
self.std = 1
def solve(self):
self.mu = self.moments[1]
self.std = math.sqrt(self.moments[2] - self.mu*self.mu)
# xs = np.linspace(0,1,1000)
# values = scipy.stats.norm.pdf(
# xs, loc=self.mu, scale=self.std
# )
# return values
def estimate(self, p: float):
xloc = scipy.stats.norm.ppf(
p, loc=self.mu, scale=self.std
)
return xloc*(self.a_max-self.a_min) + self.a_min
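# --- Usage sketch (not part of the original file) ---------------------------
# solve() is plain moment matching: mu = E[x] and std = sqrt(E[x^2] - mu^2),
# read from self.moments, which the Estimator base class is assumed to
# accumulate over samples normalised to [0, 1]; estimate(p) inverts the fitted
# normal CDF and rescales the quantile back to [a_min, a_max].
#
#   g = GaussEstimator(k)      # k is passed straight to the base class (meaning assumed)
#   # ... feed samples through the base-class interface (not shown here) ...
#   g.solve()
#   median = g.estimate(0.5)   # the fitted mean, rescaled to the original range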
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Spotify AB
import re
def deserialize(d, **kw):
names = re.sub("(?:(?:#|//).*?[\r\n])|/[*](?:.|\n)*?[*]/", " ", d).split()
if names[:1] != ['Start']:
names.insert(0, 'Start')
return (
[('v%d' % i, v_name)
for i, v_name in enumerate(names)],
[('e%d' % i, None, 'v%d' % i, 'v%d' % (i + 1))
for i in range(len(names) - 1)])
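# --- Usage sketch (not part of the original file) ---------------------------
# deserialize() strips '#', '//' and '/* ... */' comments, splits the text into
# whitespace-separated names, prepends 'Start' if it is missing, and returns a
# (vertices, edges) pair describing a simple chain.  Illustrative only:
#
#   vertices, edges = deserialize("Start fetch parse store  # trailing comment\n")
#   # vertices == [('v0', 'Start'), ('v1', 'fetch'), ('v2', 'parse'), ('v3', 'store')]
#   # edges    == [('e0', None, 'v0', 'v1'), ('e1', None, 'v1', 'v2'), ('e2', None, 'v2', 'v3')]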
|
from netapp.netapp_object import NetAppObject
class Nfsv4ClientStatsInfo(NetAppObject):
"""
structure containing statistics for NFSv4 operations
"""
_rename_ops = None
@property
def rename_ops(self):
"""
total 'rename' NFSv4 operations
Range : [0..2^64-1].
"""
return self._rename_ops
@rename_ops.setter
def rename_ops(self, val):
if val != None:
self.validate('rename_ops', val)
self._rename_ops = val
_setattr_ops = None
@property
def setattr_ops(self):
"""
total 'setattr' NFSv4 operations
Range : [0..2^64-1].
"""
return self._setattr_ops
@setattr_ops.setter
def setattr_ops(self, val):
if val != None:
self.validate('setattr_ops', val)
self._setattr_ops = val
_no_delegation_total = None
@property
def no_delegation_total(self):
"""
total calls where a delegation could not be granted
Range : [0..2^32-1].
"""
return self._no_delegation_total
@no_delegation_total.setter
def no_delegation_total(self, val):
if val != None:
self.validate('no_delegation_total', val)
self._no_delegation_total = val
_lookup_ops = None
@property
def lookup_ops(self):
"""
total 'lookup' NFSv4 operations
Range : [0..2^64-1].
"""
return self._lookup_ops
@lookup_ops.setter
def lookup_ops(self, val):
if val != None:
self.validate('lookup_ops', val)
self._lookup_ops = val
_lookupp_ops = None
@property
def lookupp_ops(self):
"""
total 'lookupp' NFSv4 operations
Range : [0..2^64-1].
"""
return self._lookupp_ops
@lookupp_ops.setter
def lookupp_ops(self, val):
if val != None:
self.validate('lookupp_ops', val)
self._lookupp_ops = val
_readlink_ops = None
@property
def readlink_ops(self):
"""
total 'readlink' NFSv4 operations
Range : [0..2^64-1].
"""
return self._readlink_ops
@readlink_ops.setter
def readlink_ops(self, val):
if val != None:
self.validate('readlink_ops', val)
self._readlink_ops = val
_open_downgrade_ops = None
@property
def open_downgrade_ops(self):
"""
total 'open_downgrade' NFSv4 operations
Range : [0..2^64-1].
"""
return self._open_downgrade_ops
@open_downgrade_ops.setter
def open_downgrade_ops(self, val):
if val != None:
self.validate('open_downgrade_ops', val)
self._open_downgrade_ops = val
_compound_ops = None
@property
def compound_ops(self):
"""
total 'compound' NFSv4 operations
Range : [0..2^64-1].
"""
return self._compound_ops
@compound_ops.setter
def compound_ops(self, val):
if val != None:
self.validate('compound_ops', val)
self._compound_ops = val
_access_ops = None
@property
def access_ops(self):
"""
total 'access' NFSv4 operations
Range : [0..2^64-1].
"""
return self._access_ops
@access_ops.setter
def access_ops(self, val):
if val != None:
self.validate('access_ops', val)
self._access_ops = val
_putrootfh_ops = None
@property
def putrootfh_ops(self):
"""
total 'putrootfh' NFSv4 operations
Range : [0..2^64-1].
"""
return self._putrootfh_ops
@putrootfh_ops.setter
def putrootfh_ops(self, val):
if val != None:
self.validate('putrootfh_ops', val)
self._putrootfh_ops = val
_lockt_ops = None
@property
def lockt_ops(self):
"""
total 'lockt' NFSv4 operations
Range : [0..2^64-1].
"""
return self._lockt_ops
@lockt_ops.setter
def lockt_ops(self, val):
if val != None:
self.validate('lockt_ops', val)
self._lockt_ops = val
_badproc2_ops = None
@property
def badproc2_ops(self):
"""
total 'badproc2' NFSv4 operations
Range : [0..2^64-1].
"""
return self._badproc2_ops
@badproc2_ops.setter
def badproc2_ops(self, val):
if val != None:
self.validate('badproc2_ops', val)
self._badproc2_ops = val
_open_ops = None
@property
def open_ops(self):
"""
total 'open' NFSv4 operations
Range : [0..2^64-1].
"""
return self._open_ops
@open_ops.setter
def open_ops(self, val):
if val != None:
self.validate('open_ops', val)
self._open_ops = val
_verify_ops = None
@property
def verify_ops(self):
"""
total 'verify' NFSv4 operations
Range : [0..2^64-1].
"""
return self._verify_ops
@verify_ops.setter
def verify_ops(self, val):
if val != None:
self.validate('verify_ops', val)
self._verify_ops = val
_restorefh_ops = None
@property
def restorefh_ops(self):
"""
total 'restorefh' NFSv4 operations
Range : [0..2^64-1].
"""
return self._restorefh_ops
@restorefh_ops.setter
def restorefh_ops(self, val):
if val != None:
self.validate('restorefh_ops', val)
self._restorefh_ops = val
_write_delegation_total = None
@property
def write_delegation_total(self):
"""
total calls where a write delegation was granted
Range : [0..2^32-1].
"""
return self._write_delegation_total
@write_delegation_total.setter
def write_delegation_total(self, val):
if val != None:
self.validate('write_delegation_total', val)
self._write_delegation_total = val
_delegpurge = None
@property
def delegpurge(self):
"""
total 'delegpurge' NFSv4 operations
Range : [0..2^64-1].
"""
return self._delegpurge
@delegpurge.setter
def delegpurge(self, val):
if val != None:
self.validate('delegpurge', val)
self._delegpurge = val
_read_delegation_total = None
@property
def read_delegation_total(self):
"""
total calls where a read delegation was granted
Range : [0..2^32-1].
"""
return self._read_delegation_total
@read_delegation_total.setter
def read_delegation_total(self, val):
if val != None:
self.validate('read_delegation_total', val)
self._read_delegation_total = val
_open_confirm_ops = None
@property
def open_confirm_ops(self):
"""
total 'open_confirm' NFSv4 operations
Range : [0..2^64-1].
"""
return self._open_confirm_ops
@open_confirm_ops.setter
def open_confirm_ops(self, val):
if val != None:
self.validate('open_confirm_ops', val)
self._open_confirm_ops = val
_remove_ops = None
@property
def remove_ops(self):
"""
total 'remove' NFSv4 operations
Range : [0..2^64-1].
"""
return self._remove_ops
@remove_ops.setter
def remove_ops(self, val):
if val != None:
self.validate('remove_ops', val)
self._remove_ops = val
_commit_ops = None
@property
def commit_ops(self):
"""
total 'commit' NFSv4 operations
Range : [0..2^64-1].
"""
return self._commit_ops
@commit_ops.setter
def commit_ops(self, val):
if val != None:
self.validate('commit_ops', val)
self._commit_ops = val
_acls_set_total = None
@property
def acls_set_total(self):
"""
number of ACLs set on files
Range : [0..2^32-1].
"""
return self._acls_set_total
@acls_set_total.setter
def acls_set_total(self, val):
if val != None:
self.validate('acls_set_total', val)
self._acls_set_total = val
_renew_ops = None
@property
def renew_ops(self):
"""
total 'renew' NFSv4 operations
Range : [0..2^64-1].
"""
return self._renew_ops
@renew_ops.setter
def renew_ops(self, val):
if val != None:
self.validate('renew_ops', val)
self._renew_ops = val
_setclntid_ops = None
@property
def setclntid_ops(self):
"""
total 'setclntid' NFSv4 operations
Range : [0..2^64-1].
"""
return self._setclntid_ops
@setclntid_ops.setter
def setclntid_ops(self, val):
if val != None:
self.validate('setclntid_ops', val)
self._setclntid_ops = val
_close_ops = None
@property
def close_ops(self):
"""
total 'close' NFSv4 operations
Range : [0..2^64-1].
"""
return self._close_ops
@close_ops.setter
def close_ops(self, val):
if val != None:
self.validate('close_ops', val)
self._close_ops = val
_lock_ops = None
@property
def lock_ops(self):
"""
total 'lock' NFSv4 operations
Range : [0..2^64-1].
"""
return self._lock_ops
@lock_ops.setter
def lock_ops(self, val):
if val != None:
self.validate('lock_ops', val)
self._lock_ops = val
_putfh_ops = None
@property
def putfh_ops(self):
"""
total 'putfh' NFSv4 operations
Range : [0..2^64-1].
"""
return self._putfh_ops
@putfh_ops.setter
def putfh_ops(self, val):
if val != None:
self.validate('putfh_ops', val)
self._putfh_ops = val
_nverify_ops = None
@property
def nverify_ops(self):
"""
total 'nverify' NFSv4 operations
Range : [0..2^64-1].
"""
return self._nverify_ops
@nverify_ops.setter
def nverify_ops(self, val):
if val != None:
self.validate('nverify_ops', val)
self._nverify_ops = val
_setclntid_cfm_ops = None
@property
def setclntid_cfm_ops(self):
"""
total 'setclntid_cfm' NFSv4 operations
Range : [0..2^64-1].
"""
return self._setclntid_cfm_ops
@setclntid_cfm_ops.setter
def setclntid_cfm_ops(self, val):
if val != None:
self.validate('setclntid_cfm_ops', val)
self._setclntid_cfm_ops = val
_null_ops = None
@property
def null_ops(self):
"""
total 'null' NFSv4 operations
Range : [0..2^64-1].
"""
return self._null_ops
@null_ops.setter
def null_ops(self, val):
if val != None:
self.validate('null_ops', val)
self._null_ops = val
_create_ops = None
@property
def create_ops(self):
"""
total 'create' NFSv4 operations
Range : [0..2^64-1].
"""
return self._create_ops
@create_ops.setter
def create_ops(self, val):
if val != None:
self.validate('create_ops', val)
self._create_ops = val
_delegret_ops = None
@property
def delegret_ops(self):
"""
total 'delegret' NFSv4 operations
Range : [0..2^64-1].
"""
return self._delegret_ops
@delegret_ops.setter
def delegret_ops(self, val):
if val != None:
self.validate('delegret_ops', val)
self._delegret_ops = val
_locku_ops = None
@property
def locku_ops(self):
"""
total 'locku' NFSv4 operations
Range : [0..2^64-1].
"""
return self._locku_ops
@locku_ops.setter
def locku_ops(self, val):
if val != None:
self.validate('locku_ops', val)
self._locku_ops = val
_getattr_ops = None
@property
def getattr_ops(self):
"""
total 'getattr' NFSv4 operations
Range : [0..2^64-1].
"""
return self._getattr_ops
@getattr_ops.setter
def getattr_ops(self, val):
if val != None:
self.validate('getattr_ops', val)
self._getattr_ops = val
_write_ops = None
@property
def write_ops(self):
"""
total 'write' NFSv4 operations
Range : [0..2^64-1].
"""
return self._write_ops
@write_ops.setter
def write_ops(self, val):
if val != None:
self.validate('write_ops', val)
self._write_ops = val
_savefh_ops = None
@property
def savefh_ops(self):
"""
total 'savefh' NFSv4 operations
Range : [0..2^64-1].
"""
return self._savefh_ops
@savefh_ops.setter
def savefh_ops(self, val):
if val != None:
self.validate('savefh_ops', val)
self._savefh_ops = val
_rlsowner_ops = None
@property
def rlsowner_ops(self):
"""
total 'rlsowner' NFSv4 operations
Range : [0..2^64-1].
"""
return self._rlsowner_ops
@rlsowner_ops.setter
def rlsowner_ops(self, val):
if val != None:
self.validate('rlsowner_ops', val)
self._rlsowner_ops = val
_readdir_ops = None
@property
def readdir_ops(self):
"""
total 'readdir' NFSv4 operations
Range : [0..2^64-1].
"""
return self._readdir_ops
@readdir_ops.setter
def readdir_ops(self, val):
if val != None:
self.validate('readdir_ops', val)
self._readdir_ops = val
_link_ops = None
@property
def link_ops(self):
"""
total 'link' NFSv4 operations
Range : [0..2^64-1].
"""
return self._link_ops
@link_ops.setter
def link_ops(self, val):
if val != None:
self.validate('link_ops', val)
self._link_ops = val
_secinfo_ops = None
@property
def secinfo_ops(self):
"""
total 'secinfo' NFSv4 operations
Range : [0..2^64-1].
"""
return self._secinfo_ops
@secinfo_ops.setter
def secinfo_ops(self, val):
if val != None:
self.validate('secinfo_ops', val)
self._secinfo_ops = val
_getfh_ops = None
@property
def getfh_ops(self):
"""
total 'getfh' NFSv4 operations
Range : [0..2^64-1].
"""
return self._getfh_ops
@getfh_ops.setter
def getfh_ops(self, val):
if val != None:
self.validate('getfh_ops', val)
self._getfh_ops = val
_putpubfh_ops = None
@property
def putpubfh_ops(self):
"""
total 'putpubfh' NFSv4 operations
Range : [0..2^64-1].
"""
return self._putpubfh_ops
@putpubfh_ops.setter
def putpubfh_ops(self, val):
if val != None:
self.validate('putpubfh_ops', val)
self._putpubfh_ops = val
_openattr_ops = None
@property
def openattr_ops(self):
"""
total 'openattr' NFSv4 operations
Range : [0..2^64-1].
"""
return self._openattr_ops
@openattr_ops.setter
def openattr_ops(self, val):
if val != None:
self.validate('openattr_ops', val)
self._openattr_ops = val
_read_ops = None
@property
def read_ops(self):
"""
total 'read' NFSv4 operations
Range : [0..2^64-1].
"""
return self._read_ops
@read_ops.setter
def read_ops(self, val):
if val != None:
self.validate('read_ops', val)
self._read_ops = val
@staticmethod
def get_api_name():
return "nfsv4-client-stats-info"
@staticmethod
def get_desired_attrs():
return [
'rename-ops',
'setattr-ops',
'no-delegation-total',
'lookup-ops',
'lookupp-ops',
'readlink-ops',
'open-downgrade-ops',
'compound-ops',
'access-ops',
'putrootfh-ops',
'lockt-ops',
'badproc2-ops',
'open-ops',
'verify-ops',
'restorefh-ops',
'write-delegation-total',
'delegpurge',
'read-delegation-total',
'open-confirm-ops',
'remove-ops',
'commit-ops',
'acls-set-total',
'renew-ops',
'setclntid-ops',
'close-ops',
'lock-ops',
'putfh-ops',
'nverify-ops',
'setclntid-cfm-ops',
'null-ops',
'create-ops',
'delegret-ops',
'locku-ops',
'getattr-ops',
'write-ops',
'savefh-ops',
'rlsowner-ops',
'readdir-ops',
'link-ops',
'secinfo-ops',
'getfh-ops',
'putpubfh-ops',
'openattr-ops',
'read-ops',
]
def describe_properties(self):
return {
'rename_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'setattr_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'no_delegation_total': { 'class': int, 'is_list': False, 'required': 'optional' },
'lookup_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'lookupp_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'readlink_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'open_downgrade_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'compound_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'access_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'putrootfh_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'lockt_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'badproc2_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'open_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'verify_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'restorefh_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'write_delegation_total': { 'class': int, 'is_list': False, 'required': 'optional' },
'delegpurge': { 'class': int, 'is_list': False, 'required': 'optional' },
'read_delegation_total': { 'class': int, 'is_list': False, 'required': 'optional' },
'open_confirm_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'remove_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'commit_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'acls_set_total': { 'class': int, 'is_list': False, 'required': 'optional' },
'renew_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'setclntid_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'close_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'lock_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'putfh_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'nverify_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'setclntid_cfm_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'null_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'create_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'delegret_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'locku_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'getattr_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'write_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'savefh_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'rlsowner_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'readdir_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'link_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'secinfo_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'getfh_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'putpubfh_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'openattr_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
'read_ops': { 'class': int, 'is_list': False, 'required': 'optional' },
}
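# --- Usage sketch (not part of the original file) ---------------------------
# Hedged example; how instances are created and populated depends on the
# surrounding NetAppObject client code, which is not shown here, so the bare
# constructor call below is an assumption.
#
#   stats = Nfsv4ClientStatsInfo()
#   stats.read_ops = 1024      # each setter runs self.validate() before storing
#   stats.write_ops = 512
#   Nfsv4ClientStatsInfo.get_api_name()   # -> 'nfsv4-client-stats-info'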
|
chislo = input('Think of a number from 0 to 99: ')
char_num = input('Is the result of floor division (//) by 10 different from 0? (yes/no): ')
if char_num == 'yes':
    odd_or_not = input('Is the number obtained by floor division (//) by 10 divisible by 2 without remainder? (yes/no): ')
    if odd_or_not == 'yes':
        char_num = input('Is the first digit of the number greater than 5? (yes/no): ')
        if char_num == 'yes':
            cratnost_3 = input('Is the first digit of the number a multiple of 3? (yes/no): ')
            if cratnost_3 == 'yes':
                num1 = 6
            else:
                num1 = 8
        else:
            cratnost_4 = input('Is the first digit of the number a multiple of 4? (yes/no): ')
            if cratnost_4 == 'yes':
                num1 = 4
            else:
                num1 = 2
    else:
        char_num = input('Is the first digit of the number greater than 5? (yes/no): ')
        if char_num == 'yes':
            cratnost_3 = input('Is the first digit of the number a multiple of 3? (yes/no): ')
            if cratnost_3 == 'yes':
                num1 = 9
            else:
                num1 = 7
        else:
            cratnost_3_ = input('Is the first digit of the number a multiple of 4? (yes/no): ')
            if cratnost_3_ == 'yes':
                num1 = 3
            else:
                num1 = 1
else:
    num1 = 0
char2_ne_zero = input('Is the result of the modulo (%) by 10 different from 0? (yes/no): ')
if char2_ne_zero == 'yes':
    odd_or_not2 = input('Is the number obtained by the modulo (%) by 10 divisible by 2 without remainder? (yes/no): ')
    if odd_or_not2 == 'yes':
        char_num2 = input('Is the second digit of the number greater than 5? (yes/no): ')
        if char_num2 == 'yes':
            cratnost_3_2 = input('Is the second digit of the number a multiple of 3? (yes/no): ')
            if cratnost_3_2 == 'yes':
                num2 = 6
            else:
                num2 = 8
        else:
            cratnost_4_2 = input('Is the second digit of the number a multiple of 4? (yes/no): ')
            if cratnost_4_2 == 'yes':
                num2 = 4
            else:
                num2 = 2
    else:
        char_num2 = input('Is the second digit of the number greater than 5? (yes/no): ')
        if char_num2 == 'yes':
            cratnost_3_2 = input('Is the second digit of the number a multiple of 3? (yes/no): ')
            if cratnost_3_2 == 'yes':
                num2 = 9
            else:
                num2 = 7
        else:
            cratnost_3_2_ = input('Is the second digit of the number a multiple of 4? (yes/no): ')
            if cratnost_3_2_ == 'yes':
                num2 = 3
            else:
                num2 = 1
else:
    num2 = 0
if num1 != 0:
    result = int(str(num1) + str(num2))
else:
    result = int(num2)
print(result)
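# --- Worked example (not part of the original script) ------------------------
# The script reconstructs the hidden number digit by digit from yes/no answers.
# Take 47: the tens digit is 47 // 10 = 4 (non-zero, even, not greater than 5,
# a multiple of 4), so num1 = 4; the ones digit is 47 % 10 = 7 (non-zero, odd,
# greater than 5, not a multiple of 3), so num2 = 7; the result printed is
# int('4' + '7') = 47.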
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Term based tool to view *colored*, *incremental* diff in a *Git/Mercurial/Svn*
workspace or from stdin, with *side by side* and *auto pager* support. Requires
python (>= 2.5.0) and ``less``.
"""
import sys
import os
import re
import signal
import subprocess
import select
import difflib
META_INFO = {
'version' : '0.9.8',
'license' : 'BSD-3',
'author' : 'Matthew Wang',
'email' : 'mattwyl(@)gmail(.)com',
'url' : 'https://github.com/ymattw/cdiff',
'keywords' : 'colored incremental side-by-side diff',
'description' : ('View colored, incremental diff in a workspace or from '
'stdin, with side by side and auto pager support')
}
if sys.hexversion < 0x02050000:
raise SystemExit("*** Requires python >= 2.5.0") # pragma: no cover
# Python < 2.6 does not have next()
try:
next
except NameError:
def next(obj):
return obj.next()
try:
unicode
except NameError:
unicode = str
COLORS = {
'reset' : '\x1b[0m',
'underline' : '\x1b[4m',
'reverse' : '\x1b[7m',
'red' : '\x1b[31m',
'green' : '\x1b[32m',
'yellow' : '\x1b[33m',
'blue' : '\x1b[34m',
'magenta' : '\x1b[35m',
'cyan' : '\x1b[36m',
'lightred' : '\x1b[1;31m',
'lightgreen' : '\x1b[1;32m',
'lightyellow' : '\x1b[1;33m',
'lightblue' : '\x1b[1;34m',
'lightmagenta' : '\x1b[1;35m',
'lightcyan' : '\x1b[1;36m',
}
# Keys for revision control probe, diff and log with diff
VCS_INFO = {
'Git': {
'probe': ['git', 'rev-parse'],
'diff': ['git', 'diff', '--no-ext-diff'],
'log': ['git', 'log', '--patch'],
},
'Mercurial': {
'probe': ['hg', 'summary'],
'diff': ['hg', 'diff'],
'log': ['hg', 'log', '--patch'],
},
'Svn': {
'probe': ['svn', 'info'],
'diff': ['svn', 'diff'],
'log': ['svn', 'log', '--diff', '--use-merge-history'],
},
}
def colorize(text, start_color, end_color='reset'):
return COLORS[start_color] + text + COLORS[end_color]
class Hunk(object):
def __init__(self, hunk_headers, hunk_meta, old_addr, new_addr):
self._hunk_headers = hunk_headers
self._hunk_meta = hunk_meta
self._old_addr = old_addr # tuple (start, offset)
self._new_addr = new_addr # tuple (start, offset)
self._hunk_list = [] # list of tuple (attr, line)
def append(self, hunk_line):
"""hunk_line is a 2-element tuple: (attr, text), where attr is:
'-': old, '+': new, ' ': common
"""
self._hunk_list.append(hunk_line)
def mdiff(self):
r"""The difflib._mdiff() function returns an interator which returns a
tuple: (from line tuple, to line tuple, boolean flag)
from/to line tuple -- (line num, line text)
line num -- integer or None (to indicate a context separation)
line text -- original line text with following markers inserted:
'\0+' -- marks start of added text
'\0-' -- marks start of deleted text
'\0^' -- marks start of changed text
'\1' -- marks end of added/deleted/changed text
boolean flag -- None indicates context separation, True indicates
either "from" or "to" line contains a change, otherwise False.
"""
return difflib._mdiff(self._get_old_text(), self._get_new_text())
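    # Illustrative reading of the markers above (not from the original source):
    # a to-side text such as 'total = \x00^42\x01\n' means the characters "42"
    # changed relative to the from-side line, while '\x00+'/'\x00-' ... '\x01'
    # bracket purely added or deleted fragments, and a flag of None marks a
    # context separation row.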
def _get_old_text(self):
out = []
for (attr, line) in self._hunk_list:
if attr != '+':
out.append(line)
return out
def _get_new_text(self):
out = []
for (attr, line) in self._hunk_list:
if attr != '-':
out.append(line)
return out
def is_completed(self):
old_completed = self._old_addr[1] == len(self._get_old_text())
new_completed = self._new_addr[1] == len(self._get_new_text())
return old_completed and new_completed
class UnifiedDiff(object):
def __init__(self, headers, old_path, new_path, hunks):
self._headers = headers
self._old_path = old_path
self._new_path = new_path
self._hunks = hunks
def is_old_path(self, line):
return line.startswith('--- ')
def is_new_path(self, line):
return line.startswith('+++ ')
def is_hunk_meta(self, line):
"""Minimal valid hunk meta is like '@@ -1 +1 @@', note extra chars
might occur after the ending @@, e.g. in git log. '## ' usually
indicates svn property changes in output from `svn log --diff`
"""
return (line.startswith('@@ -') and line.find(' @@') >= 8) or \
(line.startswith('## -') and line.find(' ##') >= 8)
def parse_hunk_meta(self, hunk_meta):
# @@ -3,7 +3,6 @@
a = hunk_meta.split()[1].split(',') # -3 7
if len(a) > 1:
old_addr = (int(a[0][1:]), int(a[1]))
else:
# @@ -1 +1,2 @@
old_addr = (int(a[0][1:]), 1)
b = hunk_meta.split()[2].split(',') # +3 6
if len(b) > 1:
new_addr = (int(b[0][1:]), int(b[1]))
else:
# @@ -0,0 +1 @@
new_addr = (int(b[0][1:]), 1)
return (old_addr, new_addr)
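    # Example (illustration only): parse_hunk_meta('@@ -3,7 +3,6 @@\n') returns
    # ((3, 7), (3, 6)), i.e. (start, offset) for the old and the new side; when
    # a count is omitted, as in '@@ -1 +1,2 @@', that side's offset defaults to 1.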
def parse_hunk_line(self, line):
return (line[0], line[1:])
def is_old(self, line):
"""Exclude old path and header line from svn log --diff output, allow
'----' likely to see in diff from yaml file
"""
return line.startswith('-') and not self.is_old_path(line) and \
not re.match(r'^-{72}$', line.rstrip())
def is_new(self, line):
return line.startswith('+') and not self.is_new_path(line)
def is_common(self, line):
return line.startswith(' ')
def is_eof(self, line):
# \ No newline at end of file
# \ No newline at end of property
return line.startswith(r'\ No newline at end of')
def is_only_in_dir(self, line):
return line.startswith('Only in ')
def is_binary_differ(self, line):
return re.match('^Binary files .* differ$', line.rstrip())
class PatchStream(object):
def __init__(self, diff_hdl):
self._diff_hdl = diff_hdl
self._stream_header_size = 0
self._stream_header = []
        # Test whether stream is empty by reading 1 line
line = self._diff_hdl.readline()
if not line:
self._is_empty = True
else:
self._stream_header.append(line)
self._stream_header_size += 1
self._is_empty = False
def is_empty(self):
return self._is_empty
def read_stream_header(self, stream_header_size):
"""Returns a small chunk for patch type detect, suppose to call once"""
for i in range(1, stream_header_size):
line = self._diff_hdl.readline()
if not line:
break
self._stream_header.append(line)
self._stream_header_size += 1
return self._stream_header
def __iter__(self):
for line in self._stream_header:
yield line
for line in self._diff_hdl:
yield line
class PatchStreamForwarder(object):
"""A blocking stream forwarder use `select` and line buffered mode. Feed
input stream to a diff format translator and read output stream from it.
Note input stream is non-seekable, and upstream has eaten some lines.
"""
def __init__(self, istream, translator):
assert isinstance(istream, PatchStream)
assert isinstance(translator, subprocess.Popen)
self._istream = iter(istream)
self._in = translator.stdin
self._out = translator.stdout
def _can_read(self, timeout=0):
return select.select([self._out.fileno()], [], [], timeout)[0]
def _forward_line(self):
try:
line = next(self._istream)
self._in.write(line)
except StopIteration:
self._in.close()
def __iter__(self):
while True:
if self._can_read():
line = self._out.readline()
if line:
yield line
else:
return
elif not self._in.closed:
self._forward_line()
class DiffParser(object):
def __init__(self, stream):
header = [decode(line) for line in stream.read_stream_header(100)]
size = len(header)
if size >= 4 and (header[0].startswith('*** ') and
header[1].startswith('--- ') and
header[2].rstrip() == '***************' and
header[3].startswith('*** ') and
header[3].rstrip().endswith(' ****')):
            # For context diff, try using `filterdiff` to translate it to unified
# format and provide a new stream
#
self._type = 'context'
try:
# Use line buffered mode so that to readline() in block mode
self._translator = subprocess.Popen(
['filterdiff', '--format=unified'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, bufsize=1)
except OSError:
raise SystemExit('*** Context diff support depends on '
'filterdiff')
self._stream = PatchStreamForwarder(stream, self._translator)
return
for n in range(size):
if (header[n].startswith('--- ') and (n < size - 1) and
header[n + 1].startswith('+++ ')):
self._type = 'unified'
self._stream = stream
break
else:
# `filterdiff` translates unknown diff to nothing, fall through to
            # unified diff to give cdiff a chance to show everything as headers
#
sys.stderr.write("*** unknown format, fall through to 'unified'\n")
self._type = 'unified'
self._stream = stream
def get_diff_generator(self):
"""parse all diff lines, construct a list of UnifiedDiff objects"""
diff = UnifiedDiff([], None, None, [])
headers = []
for line in self._stream:
line = decode(line)
if diff.is_old_path(line):
                # This is a new diff when the current hunk is not yet generated
                # or is completed. We yield the previous diff if it exists and
                # construct a new one for this case. Otherwise it's actually an
                # 'old' line that starts with '--- '.
#
if (not diff._hunks or diff._hunks[-1].is_completed()):
if diff._old_path and diff._new_path and diff._hunks:
yield diff
diff = UnifiedDiff(headers, line, None, [])
headers = []
else:
diff._hunks[-1].append(diff.parse_hunk_line(line))
elif diff.is_new_path(line) and diff._old_path:
if not diff._new_path:
diff._new_path = line
else:
diff._hunks[-1].append(diff.parse_hunk_line(line))
elif diff.is_hunk_meta(line):
hunk_meta = line
try:
old_addr, new_addr = diff.parse_hunk_meta(hunk_meta)
except (IndexError, ValueError):
raise RuntimeError('invalid hunk meta: %s' % hunk_meta)
hunk = Hunk(headers, hunk_meta, old_addr, new_addr)
headers = []
diff._hunks.append(hunk)
elif diff._hunks and not headers and (diff.is_old(line) or
diff.is_new(line) or
diff.is_common(line)):
diff._hunks[-1].append(diff.parse_hunk_line(line))
elif diff.is_eof(line):
# ignore
pass
elif diff.is_only_in_dir(line) or \
diff.is_binary_differ(line):
# 'Only in foo:' and 'Binary files ... differ' are considered
# as separate diffs, so yield current diff, then this line
#
if diff._old_path and diff._new_path and diff._hunks:
                    # Current diff is completely constructed
yield diff
headers.append(line)
yield UnifiedDiff(headers, '', '', [])
headers = []
diff = UnifiedDiff([], None, None, [])
else:
# All other non-recognized lines are considered as headers or
# hunk headers respectively
#
headers.append(line)
# Validate and yield the last patch set if it is not yielded yet
if diff._old_path:
assert diff._new_path is not None
if diff._hunks:
assert len(diff._hunks[-1]._hunk_meta) > 0
assert len(diff._hunks[-1]._hunk_list) > 0
yield diff
if headers:
# Tolerate dangling headers, just yield a UnifiedDiff object with
# only header lines
#
yield UnifiedDiff(headers, '', '', [])
class DiffMarker(object):
def markup(self, diffs, side_by_side=False, width=0):
"""Returns a generator"""
if side_by_side:
for diff in diffs:
for line in self._markup_side_by_side(diff, width):
yield line
else:
for diff in diffs:
for line in self._markup_traditional(diff):
yield line
def _markup_traditional(self, diff):
"""Returns a generator"""
for line in diff._headers:
yield self._markup_header(line)
yield self._markup_old_path(diff._old_path)
yield self._markup_new_path(diff._new_path)
for hunk in diff._hunks:
for hunk_header in hunk._hunk_headers:
yield self._markup_hunk_header(hunk_header)
yield self._markup_hunk_meta(hunk._hunk_meta)
for old, new, changed in hunk.mdiff():
if changed:
if not old[0]:
# The '+' char after \x00 is kept
# DEBUG: yield 'NEW: %s %s\n' % (old, new)
line = new[1].strip('\x00\x01')
yield self._markup_new(line)
elif not new[0]:
# The '-' char after \x00 is kept
# DEBUG: yield 'OLD: %s %s\n' % (old, new)
line = old[1].strip('\x00\x01')
yield self._markup_old(line)
else:
# DEBUG: yield 'CHG: %s %s\n' % (old, new)
yield self._markup_old('-') + \
self._markup_mix(old[1], 'red')
yield self._markup_new('+') + \
self._markup_mix(new[1], 'green')
else:
yield self._markup_common(' ' + old[1])
def _markup_side_by_side(self, diff, width):
"""Returns a generator"""
wrap_char = colorize('>', 'lightmagenta')
def _normalize(line):
return line.replace(
'\t', ' ' * 8).replace('\n', '').replace('\r', '')
def _fit_with_marker(text, markup_fn, width, pad=False):
"""Wrap or pad input pure text, then markup"""
if len(text) > width:
return markup_fn(text[:(width - 1)]) + wrap_char
elif pad:
pad_len = width - len(text)
return '%s%*s' % (markup_fn(text), pad_len, '')
else:
return markup_fn(text)
def _fit_with_marker_mix(text, base_color, width, pad=False):
"""Wrap or pad input text which contains mdiff tags, markup at the
meantime, note only left side need to set `pad`
"""
out = [COLORS[base_color]]
count = 0
tag_re = re.compile(r'\x00[+^-]|\x01')
while text and count < width:
if text.startswith('\x00-'): # del
out.append(COLORS['reverse'] + COLORS[base_color])
text = text[2:]
elif text.startswith('\x00+'): # add
out.append(COLORS['reverse'] + COLORS[base_color])
text = text[2:]
elif text.startswith('\x00^'): # change
out.append(COLORS['underline'] + COLORS[base_color])
text = text[2:]
elif text.startswith('\x01'): # reset
out.append(COLORS['reset'] + COLORS[base_color])
text = text[1:]
else:
# FIXME: utf-8 wchar might break the rule here, e.g.
# u'\u554a' takes double width of a single letter, also
# this depends on your terminal font. I guess audience of
# this tool never put that kind of symbol in their code :-)
#
out.append(text[0])
count += 1
text = text[1:]
if count == width and tag_re.sub('', text):
                # Truncated: output is full but normal chars remain in text
out[-1] = COLORS['reset'] + wrap_char
elif count < width and pad:
pad_len = width - count
out.append('%s%*s' % (COLORS['reset'], pad_len, ''))
else:
out.append(COLORS['reset'])
return ''.join(out)
# Set up number width, note last hunk might be empty
try:
(start, offset) = diff._hunks[-1]._old_addr
max1 = start + offset - 1
(start, offset) = diff._hunks[-1]._new_addr
max2 = start + offset - 1
except IndexError:
max1 = max2 = 0
num_width = max(len(str(max1)), len(str(max2)))
# Set up line width
if width <= 0:
# Autodetection of text width according to terminal size
try:
# Each line is like "nnn TEXT nnn TEXT\n", so width is half of
                # the terminal size minus the line number columns and 3
                # separating spaces
#
width = (terminal_size()[0] - num_width * 2 - 3) // 2
except Exception:
# If terminal detection failed, set back to default
width = 80
# Setup lineno and line format
left_num_fmt = colorize('%%(left_num)%ds' % num_width, 'yellow')
right_num_fmt = colorize('%%(right_num)%ds' % num_width, 'yellow')
line_fmt = left_num_fmt + ' %(left)s ' + COLORS['reset'] + \
right_num_fmt + ' %(right)s\n'
# yield header, old path and new path
for line in diff._headers:
yield self._markup_header(line)
yield self._markup_old_path(diff._old_path)
yield self._markup_new_path(diff._new_path)
# yield hunks
for hunk in diff._hunks:
for hunk_header in hunk._hunk_headers:
yield self._markup_hunk_header(hunk_header)
yield self._markup_hunk_meta(hunk._hunk_meta)
for old, new, changed in hunk.mdiff():
if old[0]:
left_num = str(hunk._old_addr[0] + int(old[0]) - 1)
else:
left_num = ' '
if new[0]:
right_num = str(hunk._new_addr[0] + int(new[0]) - 1)
else:
right_num = ' '
left = _normalize(old[1])
right = _normalize(new[1])
if changed:
if not old[0]:
left = '%*s' % (width, ' ')
right = right.rstrip('\x01')
if right.startswith('\x00+'):
right = right[2:]
right = _fit_with_marker(
right, self._markup_new, width)
elif not new[0]:
left = left.rstrip('\x01')
if left.startswith('\x00-'):
left = left[2:]
left = _fit_with_marker(left, self._markup_old, width)
right = ''
else:
left = _fit_with_marker_mix(left, 'red', width, 1)
right = _fit_with_marker_mix(right, 'green', width)
else:
left = _fit_with_marker(
left, self._markup_common, width, 1)
right = _fit_with_marker(right, self._markup_common, width)
yield line_fmt % {
'left_num': left_num,
'left': left,
'right_num': right_num,
'right': right
}
def _markup_header(self, line):
return colorize(line, 'cyan')
def _markup_old_path(self, line):
return colorize(line, 'yellow')
def _markup_new_path(self, line):
return colorize(line, 'yellow')
def _markup_hunk_header(self, line):
return colorize(line, 'lightcyan')
def _markup_hunk_meta(self, line):
return colorize(line, 'lightblue')
def _markup_common(self, line):
return colorize(line, 'reset')
def _markup_old(self, line):
return colorize(line, 'lightred')
def _markup_new(self, line):
return colorize(line, 'green')
def _markup_mix(self, line, base_color):
del_code = COLORS['reverse'] + COLORS[base_color]
add_code = COLORS['reverse'] + COLORS[base_color]
chg_code = COLORS['underline'] + COLORS[base_color]
rst_code = COLORS['reset'] + COLORS[base_color]
line = line.replace('\x00-', del_code)
line = line.replace('\x00+', add_code)
line = line.replace('\x00^', chg_code)
line = line.replace('\x01', rst_code)
return colorize(line, base_color)
def markup_to_pager(stream, opts):
"""Pipe unified diff stream to pager (less).
Note: have to create pager Popen object before the translator Popen object
in PatchStreamForwarder, otherwise the `stdin=subprocess.PIPE` would cause
trouble to the translator pipe (select() never see EOF after input stream
ended), most likely python bug 12607 (http://bugs.python.org/issue12607)
which was fixed in python 2.7.3.
See issue #30 (https://github.com/ymattw/cdiff/issues/30) for more
information.
"""
pager_cmd = ['less']
if not os.getenv('LESS'):
# Args stolen from git source: github.com/git/git/blob/master/pager.c
pager_cmd.extend(['-FRSX', '--shift 1'])
pager = subprocess.Popen(
pager_cmd, stdin=subprocess.PIPE, stdout=sys.stdout)
diffs = DiffParser(stream).get_diff_generator()
marker = DiffMarker()
color_diff = marker.markup(diffs, side_by_side=opts.side_by_side,
width=opts.width)
for line in color_diff:
pager.stdin.write(line.encode('utf-8'))
pager.stdin.close()
pager.wait()
def check_command_status(arguments):
"""Return True if command returns 0."""
try:
return subprocess.call(
arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
except OSError:
return False
def revision_control_diff(args):
"""Return diff from revision control system."""
for _, ops in VCS_INFO.items():
if check_command_status(ops['probe']):
return subprocess.Popen(
ops['diff'] + args, stdout=subprocess.PIPE).stdout
def revision_control_log(args):
"""Return log from revision control system."""
for _, ops in VCS_INFO.items():
if check_command_status(ops['probe']):
return subprocess.Popen(
ops['log'] + args, stdout=subprocess.PIPE).stdout
def decode(line):
"""Decode UTF-8 if necessary."""
if isinstance(line, unicode):
return line
for encoding in ['utf-8', 'latin1']:
try:
return line.decode(encoding)
except UnicodeDecodeError:
pass
return '*** cdiff: undecodable bytes ***\n'
def terminal_size():
"""Returns terminal size. Taken from https://gist.github.com/marsam/7268750
but removed win32 support which depends on 3rd party extension.
"""
width, height = None, None
try:
import struct
import fcntl
import termios
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(1, termios.TIOCGWINSZ, s)
height, width = struct.unpack('HHHH', x)[0:2]
except (IOError, AttributeError):
pass
return width, height
def main():
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
from optparse import (OptionParser, BadOptionError, AmbiguousOptionError,
OptionGroup)
class PassThroughOptionParser(OptionParser):
"""Stop parsing on first unknown option (e.g. --cached, -U10) and pass
        them down. Note the `opt_str` in the exception object does not give us
        a chance to take the full option back, e.g. for '-U10' it will only
        contain '-U' and the '10' part will be lost. Ref: http://goo.gl/IqY4A
        (on stackoverflow). My hack is to try parsing, insert a '--' in place
        and parse again. Let me know if someone has a better solution.
"""
def _process_args(self, largs, rargs, values):
left = largs[:]
right = rargs[:]
try:
OptionParser._process_args(self, left, right, values)
except (BadOptionError, AmbiguousOptionError):
parsed_num = len(rargs) - len(right) - 1
rargs.insert(parsed_num, '--')
OptionParser._process_args(self, largs, rargs, values)
supported_vcs = sorted(VCS_INFO.keys())
usage = """%prog [options] [file|dir ...]"""
parser = PassThroughOptionParser(
usage=usage, description=META_INFO['description'],
version='%%prog %s' % META_INFO['version'])
parser.add_option(
'-s', '--side-by-side', action='store_true',
help='enable side-by-side mode')
parser.add_option(
'-w', '--width', type='int', default=80, metavar='N',
help='set text width for side-by-side mode, 0 for auto detection, '
'default is 80')
parser.add_option(
'-l', '--log', action='store_true',
help='show log with changes from revision control')
parser.add_option(
'-c', '--color', default='auto', metavar='M',
help="""colorize mode 'auto' (default), 'always', or 'never'""")
# Hack: use OptionGroup text for extra help message after option list
option_group = OptionGroup(
parser, "Note", ("Option parser will stop on first unknown option "
"and pass them down to underneath revision control"))
parser.add_option_group(option_group)
opts, args = parser.parse_args()
if opts.log:
diff_hdl = revision_control_log(args)
if not diff_hdl:
sys.stderr.write(('*** Not in a supported workspace, supported '
'are: %s\n') % ', '.join(supported_vcs))
return 1
elif sys.stdin.isatty():
diff_hdl = revision_control_diff(args)
if not diff_hdl:
sys.stderr.write(('*** Not in a supported workspace, supported '
'are: %s\n\n') % ', '.join(supported_vcs))
parser.print_help()
return 1
else:
diff_hdl = (sys.stdin.buffer if hasattr(sys.stdin, 'buffer')
else sys.stdin)
stream = PatchStream(diff_hdl)
# Don't let empty diff pass thru
if stream.is_empty():
return 0
if opts.color == 'always' or \
(opts.color == 'auto' and sys.stdout.isatty()):
markup_to_pager(stream, opts)
else:
# pipe out stream untouched to make sure it is still a patch
byte_output = (sys.stdout.buffer if hasattr(sys.stdout, 'buffer')
else sys.stdout)
for line in stream:
byte_output.write(line)
if diff_hdl is not sys.stdin:
diff_hdl.close()
return 0
if __name__ == '__main__':
sys.exit(main())
# vim:set et sts=4 sw=4 tw=79:
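# --- Usage examples (not part of the original file) --------------------------
# Typical invocations, based only on the options defined in main() above; the
# command name "cdiff" assumes the script is installed under that name.
#
#   cdiff                  # colored diff of the current Git/Mercurial/Svn workspace
#   cdiff -s -w 0          # side-by-side view with auto-detected width
#   cdiff -l               # log with changes from revision control
#   git diff | cdiff -s    # read a unified diff from stdin
#
# Unknown options (e.g. --cached, -U10) are passed through to the underlying
# revision control tool.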
|
# test_ptypes.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0103,C0111,W0108
# Standard library imports
import sys
import numpy
# Putil imports
import putil.ptypes
from putil.test import AE, AI
###
# Global variables
###
emsg = lambda msg: (
'[START CONTRACT MSG: {0}]Argument `*[argument_name]*` '
'is not valid[STOP CONTRACT MSG]'.format(msg)
)
###
# Helper functions
###
def check_contract(obj, name, value):
AE(obj, ValueError, emsg(name), obj=value)
###
# Test functions
###
def test_color_space_option_contract():
""" Tests for LineStyleOption pseudo-type """
obj = putil.ptypes.color_space_option
check_contract(obj, 'color_space_option', 5)
exmsg = (
"[START CONTRACT MSG: color_space_option]Argument "
"`*[argument_name]*` is not one of 'binary', 'Blues', 'BuGn', "
"'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu', "
"'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', "
"'YlOrBr' or 'YlOrRd' (case insensitive)[STOP CONTRACT MSG]"
)
AE(obj, ValueError, exmsg, obj='x')
for item in [
'binary', 'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens',
'Greys', 'Oranges', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd',
'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr',
'YlOrRd']:
putil.ptypes.color_space_option(item)
def test_csv_col_filter_contract():
""" Test CsvColFilter pseudo-type """
items = [True, 1.0, [], [1, True, 3], ['a', 'b', True]]
for item in items:
check_contract(putil.ptypes.csv_col_filter, 'csv_col_filter', item)
items = [None, 1, 'a', [1, 2], ['a']]
for item in items:
putil.ptypes.csv_col_filter(item)
def test_csv_col_sort_contract():
""" Test CsvColSort pseudo-type """
items = [
True, None, ['a', None], {(1, 2):'A'}, {'a':True}, {0:'hello'}, []
]
for item in items:
check_contract(putil.ptypes.csv_col_sort, 'csv_col_sort', item)
items = [
1,
'a',
{'a':'D'},
{0:'d'},
{1:'a'},
[1, 'a'],
[1, 'a', {'b':'d'}, {0:'A'}]
]
for item in items:
putil.ptypes.csv_col_sort(item)
def test_csv_data_filter_contract():
""" Test CsvDataFilter pseudo-type """
items = [
True,
(1, 2, 3),
(True, 'A'),
(True, ),
(None, True),
('A', 'A'),
({'B':1}, {'C':5}),
{2.0:5},
({2.0:5}, 'A'),
(['A', True], {'A':1}),
('A', {}),
([], {'A':1}),
({}, []),
{'dfilter':{'a':{'xx':2}}},
{'dfilter':{'a':[3, {'xx':2}]}}
]
for item in items:
check_contract(putil.ptypes.csv_data_filter, 'csv_data_filter', item)
items = [
None,
(None, ),
(None, None),
1,
'A',
['B', 1],
{'A':1},
{'A':1, 'B':2}
]
for item in items:
putil.ptypes.csv_data_filter(item)
def test_csv_filtered_contract():
""" Test CsvFiltered pseudo-type """
for item in [5, 'BC']:
check_contract(putil.ptypes.csv_filtered, 'csv_filtered', item)
for item in [True, False, 'B', 'b', 'C', 'c', 'R', 'r', 'N', 'n']:
putil.ptypes.csv_filtered(item)
def test_csv_row_filter_contract():
""" Test CsvRowFilter pseudo-type """
items = [
'a',
{5.0:10},
{'a':{'xx':2}},
{'a':[3, {'xx':2}]},
{'b':True}
]
for item in items:
check_contract(putil.ptypes.csv_row_filter, 'csv_row_filter', item)
exmsg = (
'[START CONTRACT MSG: csv_row_filter]Argument '
'`*[argument_name]*` is empty[STOP CONTRACT MSG]'
)
AE(putil.ptypes.csv_row_filter, ValueError, exmsg, obj={})
items = [None, {'x':5}]
for item in items:
putil.ptypes.csv_row_filter(item)
def test_engineering_notation_number():
""" Test EngineeringNotationNumber pseudo-type """
obj = putil.ptypes.engineering_notation_number
items = ['3.12b', 'f', 'a1b', ' + 123.45f ']
for item in items:
check_contract(obj, 'engineering_notation_number', item)
items = [' +123.45f ', ' -0 ']
for item in items:
obj(item)
def test_engineering_notation_suffix():
""" Test EngineeringNotationSuffix pseudo-type """
obj = putil.ptypes.engineering_notation_suffix
check_contract(obj, 'engineering_notation_suffix', 'b')
obj('u')
def test_file_name_contract():
""" Test for file_name custom contract """
@putil.pcontracts.contract(sfn='file_name')
def func(sfn):
""" Sample function to test file_name custom contract """
return sfn
items = [3, 'test\0']
for item in items:
AI(func, 'sfn', sfn=item)
func('some_file.txt')
# Test with Python executable (should be portable across systems), file
# should be valid although not having permissions to write it
func(sys.executable)
def test_file_name_exists_contract():
""" Test for file_name_exists custom contract """
@putil.pcontracts.contract(sfn='file_name_exists')
def func(sfn):
""" Sample function to test file_name_exists custom contract """
return sfn
items = [3, 'test\0']
for item in items:
AI(func, 'sfn', sfn=item)
exmsg = 'File _file_does_not_exist could not be found'
AE(func, OSError, exmsg, sfn='_file_does_not_exist')
# Test with Python executable (should be portable across systems)
func(sys.executable)
def test_function_contract():
""" Tests for Function pseudo-type """
def func1():
pass
check_contract(putil.ptypes.function, 'function', 'a')
items = (func1, None)
for item in items:
putil.ptypes.function(item)
def test_increasing_real_numpy_vector_contract():
""" Tests for IncreasingRealNumpyVector pseudo-type """
obj = putil.ptypes.increasing_real_numpy_vector
items = [
'a',
[1, 2, 3],
numpy.array([]),
numpy.array([[1, 2, 3], [4, 5, 6]]),
numpy.array(['a', 'b']),
numpy.array([1, 0, -3]),
numpy.array([10.0, 8.0, 2.0])
]
for item in items:
check_contract(obj, 'increasing_real_numpy_vector', item)
items = [
numpy.array([1, 2, 3]),
numpy.array([10.0, 12.1, 12.5]),
numpy.array([10.0])
]
for item in items:
obj(item)
def test_interpolation_option_contract():
""" Tests for InterpolationOption pseudo-type """
obj = putil.ptypes.interpolation_option
check_contract(obj, 'interpolation_option', 5)
exmsg = (
"[START CONTRACT MSG: interpolation_option]Argument "
"`*[argument_name]*` is not one of ['STRAIGHT', 'STEP', 'CUBIC', "
"'LINREG'] (case insensitive)[STOP CONTRACT MSG]"
)
AE(obj, ValueError, exmsg, obj='x')
obj(None)
for item in ['STRAIGHT', 'STEP', 'CUBIC', 'LINREG']:
obj(item)
obj(item.lower())
def test_line_style_option_contract():
""" Tests for LineStyleOption pseudo-type """
check_contract(putil.ptypes.line_style_option, 'line_style_option', 5)
exmsg = (
"[START CONTRACT MSG: line_style_option]Argument "
"`*[argument_name]*` is not one of ['-', '--', '-.', "
"':'][STOP CONTRACT MSG]"
)
AE(putil.ptypes.line_style_option, ValueError, exmsg, obj='x')
putil.ptypes.line_style_option(None)
for item in ['-', '--', '-.', ':']:
putil.ptypes.line_style_option(item)
def test_non_negative_integer():
""" Test PosInteger pseudo-type """
obj = putil.ptypes.non_negative_integer
items = ['b', True, -3, 5.2]
for item in items:
check_contract(obj, 'non_negative_integer', item)
items = [0, 2]
for item in items:
obj(item)
def test_offset_range_contract():
""" Tests for PositiveRealNumber pseudo-type """
items = ['a', [1, 2, 3], False, -0.1, -1.1]
for item in items:
check_contract(putil.ptypes.offset_range, 'offset_range', item)
items = [0, 0.5, 1]
for item in items:
putil.ptypes.offset_range(item)
def test_positive_real_num_contract():
""" Tests for PositiveRealNumber pseudo-type """
obj = putil.ptypes.positive_real_num
items = ['a', [1, 2, 3], False, -0.1, -2.0]
for item in items:
check_contract(obj, 'positive_real_num', item)
items = [1, 2.0]
for item in items:
obj(item)
def test_real_num_contract():
""" Tests for RealNumber pseudo-type """
items = ['a', [1, 2, 3], False]
for item in items:
check_contract(putil.ptypes.real_num, 'real_num', item)
items = [1, 2.0]
for item in items:
putil.ptypes.real_num(item)
def test_real_numpy_vector_contract():
""" Tests for RealNumpyVector pseudo-type """
obj = putil.ptypes.real_numpy_vector
items = [
'a',
[1, 2, 3],
numpy.array([]),
numpy.array([[1, 2, 3], [4, 5, 6]]),
numpy.array(['a', 'b']),
]
for item in items:
check_contract(obj, 'real_numpy_vector', item)
items = [
numpy.array([1, 2, 3]),
numpy.array([10.0, 8.0, 2.0]),
numpy.array([10.0])
]
for item in items:
obj(item)
|
#!/usr/bin/python
# parseacunetix.py
#
# By Adrien de Beaupre adriendb@gmail.com | adrien@intru-shun.ca
# Copyright 2011 Intru-Shun.ca Inc.
# v0.09
# 16 October 2011
#
# The current version of these scripts is at: http://dshield.handers.org/adebeaupre/ossams-parser.tgz
#
# Parses acunetix XML output
# http://www.acunetix.com
#
# This file is part of the ossams-parser.
#
# The ossams-parser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The ossams-parser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the ossams-parser. If not, see <http://www.gnu.org/licenses/>.
#
# parseacunetix function
def parseacunetix(time, os, root, filetoread, db, dbconnection, projectname, projectid, separator):
# Check to see if the document root is 'ScanGroup', exit if it is not
if root.tag != "ScanGroup":
        print filetoread, "is not an acunetix XML report file"
return
# Take the root attributes and assign it to a dictionary
if root.attrib:
rootattribs = root.attrib
acunetixfile = filetoread.split(separator)
file = acunetixfile[-1]
filetime = time.ctime(os.path.getmtime(filetoread))
timenow = time.ctime()
db.execute("""
INSERT INTO tooloutput (toolname, filename, OSSAMSVersion, filedate, tooldate, inputtimestamp, projectname, projectid)
VALUES
('acunetix', '%s', 0.09, '%s', '%s', '%s', '%s', '%s')
""" % (file, filetime, rootattribs['ExportedOn'], timenow, projectname, projectid)
)
tooloutputnumber = int(db.lastrowid)
print "Processed acunetix report number:", tooloutputnumber
elements = ["Name", "ModuleName", "Details", "Affects", "IsFalsePositive", "Severity", "Type", "Impact", "Description", "Recommendation", "DetailedInformation"]
scans = root.findall('Scan')
for scan in scans:
starturl = scan.find('StartURL')
if starturl.text:
starturlval = starturl.text
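            # The StartURL is expected to look like 'scheme://hostname:port' (no path);
            # removing the slashes and splitting on ':' recovers the hostname and port.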
noslashes = starturlval.replace('/','')
stripped = noslashes.split(':')
hostname = stripped[1]
port = stripped[-1]
else:
hostname = " "
port = "0"
banner = scan.find('Banner')
if banner is not None:
if banner.text:
bannerval = banner.text
else:
bannerval = " "
responsive = scan.find('Responsive')
if responsive is not None:
if responsive.text:
responsetext = responsive.text
if responsetext == "True":
recon = 1
portstate = 'open'
else:
recon = 0
portstate = 'closed'
else:
responsetext = " "
osguess = scan.find('Os')
if osguess is not None:
if osguess.text:
osvalue = osguess.text
else:
                osvalue = " "
db.execute("""
INSERT INTO hosts (tooloutputnumber, hostname, recon, hostcriticality, hostos)
VALUES
(%s, '%s', '%s', 0, '%s')
""" % (tooloutputnumber, hostname, recon, osvalue)
)
hostnumber = int(db.lastrowid)
print "Processed host:", hostnumber, "Name: ", hostname
db.execute("""
INSERT INTO ports (tooloutputnumber, hostnumber, protocol, portnumber, portstate, portbanner)
VALUES
('%s', '%s', 'TCP', '%s', '%s', '%s')
""" % (tooloutputnumber, hostnumber, port, portstate, dbconnection.escape_string(bannerval))
)
portnumber = int(db.lastrowid)
reportitems = scan.findall('ReportItems/ReportItem')
for reportitem in reportitems:
items = {}
for element in elements:
elementitem = reportitem.find(element)
if elementitem is not None:
if elementitem.text:
items[element] = elementitem.text
if items['IsFalsePositive'] == 'True':
falsepositive = 1
else:
falsepositive = 0
request = reportitem.find('TechnicalDetails/Request')
if request is not None:
if request.text:
httprequest = request.text
else:
httprequest = " "
response = reportitem.find('TechnicalDetails/Response')
if response is not None:
if response.text:
httpresponse = response.text
else:
httpresponse = " "
db.execute("""
INSERT INTO vulnerabilities (tooloutputnumber, hostnumber, vulnerabilityname, vulnerabilityrisk,
vulnerabilitydescription, vulnerabilitysolution, vulnerabilityextra, vulnerabilityvalidation,
vulnerabilityuri, portsnumber, falsepositive, httprequest, httpresponse)
VALUES
('%s', '%s', '%s', '%s', '%s', '%s', '%s', 0, '%s', '%s', '%s', '%s', '%s')
""" % (tooloutputnumber, hostnumber, items['Name'], items['Severity'], dbconnection.escape_string(items['Description']),
dbconnection.escape_string(items['Recommendation']), dbconnection.escape_string(items['Impact']), items['Affects'], portnumber, falsepositive,
dbconnection.escape_string(httprequest), dbconnection.escape_string(httpresponse))
)
vulnnumber = int(db.lastrowid)
references = reportitem.findall('References/Reference')
for reference in references:
database = reference.find('Database')
if database is not None:
if database.text:
referencetype = database.text
url = reference.find('URL')
if url is not None:
if url.text:
                        referencevalue = url.text
db.execute("""
INSERT INTO refs (tooloutputnumber, hostnumber, vulnerabilitynumber, referencetype, referencevalue )
VALUES
('%s', '%s', '%s', '%s', '%s')
""" % (tooloutputnumber, hostnumber, vulnnumber, referencetype, referencevalue)
)
return
|
import typing
from mcpython import shared
from mcpython.common.network.package.AbstractPackage import (
AbstractPackage,
DefaultPackage,
)
async def set_client_var(side, state):
shared.is_client = state
class NetworkManager:
PACKAGE_TYPES: typing.Dict[bytes, typing.Type[AbstractPackage]] = {}
def __init__(self):
self.side_manager = None
self.is_client = None
self.package_handlers: typing.Dict[bytes, typing.Callable] = {}
# An access point to the internal world or a wrapper of it
self.side_world = None
def spawn_client_network_instance(self):
import mcpython.common.network.NetworkBuilder
self.side_manager = (
mcpython.common.network.NetworkBuilder.RemoteNetworkConnector()
)
self.is_client = True
shared.async_side_instance.call_regular = self.handle_client
shared.async_side_instance.run_later.append(
shared.async_side_instance.sided_task_manager.invokeOnAll(
set_client_var, True
)
)
def spawn_server_network_instance(self):
import mcpython.common.network.NetworkBuilder
self.side_manager = mcpython.common.network.NetworkBuilder.NetworkBuilder()
self.is_client = False
shared.async_side_instance.call_regular = self.handle_server
shared.async_side_instance.run_later.append(
shared.async_side_instance.sided_task_manager.invokeOnAll(
set_client_var, False
)
)
def disconnect(self):
if self.side_manager is not None:
self.side_manager.disconnect()
self.is_client = None
shared.async_side_instance.call_regular = None
shared.async_side_instance.run_later.append(
shared.async_side_instance.sided_task_manager.invokeOnAll(
set_client_var, None
)
)
def send(self, package: AbstractPackage, target: int = -1):
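        # Client side: append the serialized package to the single outgoing queue.
        # Server side: send it directly to the connected client selected by `target`.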
if self.is_client is True:
self.side_manager.send_queue.append(package.serialize())
elif self.is_client is False:
self.side_manager.connected_clients[target].send(package.serialize())
else:
raise RuntimeError("network not set up correctly!")
async def handle_client(self):
self.side_manager.handle()
for head, size, body in self.side_manager.receive_queue:
package_type = self.PACKAGE_TYPES[head]
package = package_type.deserialize(body)
if head in self.package_handlers:
await self.package_handlers[head](package)
async def handle_server(self):
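        # Drain each connected client's receive queue and dispatch every decoded
        # package to the handler registered for its package type, passing the client.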
for client in self.side_manager.connected_clients:
while len(client.receive_queue):
head, size, body = client.receive_queue.pop()
package_type = self.PACKAGE_TYPES[head]
package = package_type.deserialize(body)
if head in self.package_handlers:
await self.package_handlers[head](client, package)
async def setup(side):
from mcpython.common.network.NetworkManager import NetworkManager
shared.network_manager = NetworkManager()
|
import os
from lib.fmd.workflow import FileManagementWorkflow
from lib.fmd.decorators import GetStage, ListStage
from lib.file.tmpfile import TemporaryFile
class GetAction(object):
def execute(self, context):
fadm = FileManagementWorkflow()
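        # When fid is "all", first run the ListStage to collect every file id and
        # then run the GetStage once per id; otherwise fetch the single requested fid.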
if context.fid == "all":
context.log.status('Gathering list of files to download')
record_list = fadm.execute(context, ListStage)
context.fidlist = [record['fid'] for record in record_list['records']]
context.log.status('Found %d entries to download' % len(context.fidlist))
counter = 0
for fid in context.fidlist:
context.fid = fid
counter += 1
                context.log.status('Processing fid [%s]' % os.path.basename(fid), counter, len(context.fidlist))
fadm.execute(context, GetStage)
TemporaryFile.instance().cleanup()
else:
context.fidlist = [context.fid]
output = fadm.execute(context, GetStage)
context.log.status(context.filename)
|
from tests.utils import W3CTestCase
class TestRtlIb(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'rtl-ib'))
|
import json
from datetime import datetime
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
PythonService = autoclass('org.kivy.android.PythonService')
TaskScheduler = autoclass('org.atq.atq.TaskScheduler')
def _to_millis(time: datetime):
return int(time.timestamp() * 1000)
def schedule_task(task_time: datetime):
context = PythonActivity.mActivity or PythonService.mService
task_time = _to_millis(task_time)
task_scheduler = TaskScheduler(context)
task_scheduler.scheduleTask(task_time)
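# Usage sketch (hypothetical time value; assumes the org.atq.atq.TaskScheduler
# Java class is available on the running Android device):
#
#   from datetime import timedelta
#   schedule_task(datetime.now() + timedelta(minutes=5))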
|
from typing import Dict, List, Tuple, Union, Any
import torch
import numpy as np
import os
import logging
from torch.nn.modules.rnn import LSTMCell
from torch.nn.modules.linear import Linear
import torch.nn.functional as F
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.nn.util import get_text_field_mask
from allennlp.nn import InitializerApplicator
from allennlp.training.metrics import Perplexity
from allennlp.modules.token_embedders import Embedding
from allennlp.nn import util
from allennlp_series.common.constants import *
from allennlp_series.training.metrics import CocovalsMeasures
from allennlp_series.training.metrics.diversity_evals import DiversityEvals
from allennlp_series.training.metrics.program_activation_analysis import (
ProgramActivationEvals,
)
import allennlp_series.model.utils as utils
os.environ["KMP_DUPLICATE_LIB_OK"] = "True"
logger = logging.getLogger(__name__)
#
# class _SoftmaxLoss(torch.nn.Module):
# """
# Given some embeddings and some targets, applies a linear layer
# to create logits over possible words and then returns the
# negative log likelihood.
# """
#
# def __init__(self, num_words: int, embedding_dim: int) -> None:
# super().__init__()
#
# self.tie_embeddings = False
#
# self.softmax_w = torch.nn.Parameter(
# torch.randn(embedding_dim, num_words) / np.sqrt(embedding_dim)
# )
# self.softmax_b = torch.nn.Parameter(torch.zeros(num_words))
#
# def forward(self, embeddings: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
#
# # embeddings is size (n, embedding_dim)
# # targets is (batch_size, ) with the correct class id
# # Does not do any count normalization / divide by batch size
# probs = torch.nn.functional.log_softmax(
# torch.matmul(embeddings, self.softmax_w) + self.softmax_b, dim=-1
# )
#
# return torch.nn.functional.nll_loss(probs, targets.long(), reduction="sum")
@Model.register("cond_language_model")
class ConditionalLanguageModel(Model):
"""
The `LanguageModel` applies a "contextualizing"
`Seq2SeqEncoder` to uncontextualized embeddings, using a `SoftmaxLoss`
module (defined above) to compute the language modeling loss.
If bidirectional is True, the language model is trained to predict the next and
previous tokens for each token in the input. In this case, the contextualizer must
be bidirectional. If bidirectional is False, the language model is trained to only
predict the next token for each token in the input; the contextualizer should also
be unidirectional.
If your language model is bidirectional, it is IMPORTANT that your bidirectional
`Seq2SeqEncoder` contextualizer does not do any "peeking ahead". That is, for its
forward direction it should only consider embeddings at previous timesteps, and for
its backward direction only embeddings at subsequent timesteps. Similarly, if your
language model is unidirectional, the unidirectional contextualizer should only
consider embeddings at previous timesteps. If this condition is not met, your
language model is cheating.
# Parameters
vocab : `Vocabulary`
text_field_embedder : `TextFieldEmbedder`
Used to embed the indexed tokens we get in `forward`.
contextualizer : `Seq2SeqEncoder`
Used to "contextualize" the embeddings. As described above,
this encoder must not cheat by peeking ahead.
dropout : `float`, optional (default: None)
If specified, dropout is applied to the contextualized embeddings before computation of
the softmax. The contextualized embeddings themselves are returned without dropout.
num_samples : `int`, optional (default: None)
If provided, the model will use `SampledSoftmaxLoss`
with the specified number of samples. Otherwise, it will use
the full `_SoftmaxLoss` defined above.
sparse_embeddings : `bool`, optional (default: False)
Passed on to `SampledSoftmaxLoss` if True.
bidirectional : `bool`, optional (default: False)
Train a bidirectional language model, where the contextualizer
is used to predict the next and previous token for each input token.
This must match the bidirectionality of the contextualizer.
"""
def __init__(
self,
vocab: Vocabulary,
program_emb_size: int = 5,
hidden_dim: int = 5,
embedding_dim: int = 5,
dropout: float = None,
target_namespace: str = "tokens",
initializer: InitializerApplicator = None,
eval_sanity_check_mode: bool = False,
use_activation_evals: bool = False,
model_programs=None,
max_decoding_steps_generate: int = 10,
model_name: str = None,
use_bertscore_evals: bool = False,
use_bow_decoder: bool = False,
decoding_method: str = "greedy",
sampling_top_p: float = 0.9,
sampling_top_k: int = None,
add_prog_emb_to_inp: bool = False,
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self.eval_sanity_check_mode = eval_sanity_check_mode
self._target_namespace = target_namespace
self._program_emb_size = program_emb_size
self._target_embedding_dim = embedding_dim # program_emb_size #5
self._decoder_output_dim = hidden_dim # program_emb_size #5
# self._softmax_loss = _SoftmaxLoss(
# num_words=vocab.get_vocab_size(), embedding_dim=self._target_embedding_dim
# ) # not used **
self.decoding_method = decoding_method
self.sampling_top_p = sampling_top_p
# This buffer is now unused and exists only for backwards compatibility reasons.
self.register_buffer("_last_average_loss", torch.zeros(1))
self._perplexity = Perplexity()
self._ngram_overlap_eval = CocovalsMeasures(
sanity_check_mode=eval_sanity_check_mode,
compute_bert_score=use_bertscore_evals,
)
self._diversity_eval = DiversityEvals(model_name=model_name)
self.use_activation_evals = use_activation_evals
if use_activation_evals:
self._program_activation_evals = ProgramActivationEvals(
programs=model_programs
)
if dropout:
self._dropout = torch.nn.Dropout(dropout)
else:
self._dropout = lambda x: x
self._start_index = self.vocab.get_token_index(
START_SYMBOL, self._target_namespace
)
self._end_index = self.vocab.get_token_index(END_SYMBOL, self._target_namespace)
num_classes = self.vocab.get_vocab_size(self._target_namespace)
self._target_embedder = Embedding(num_classes, self._target_embedding_dim)
self._add_prog_emb_to_inp = add_prog_emb_to_inp
self._decoder_input_dim = self._target_embedding_dim
self._decoder_cell = LSTMCell(self._decoder_input_dim, self._decoder_output_dim)
# self._output_projection_layer = Linear(self._decoder_output_dim, num_classes)
# self._output_projection_layer = Linear(self._decoder_output_dim+program_emb_size, num_classes)
self._output_projection_layer = Linear(self._decoder_output_dim * 2, num_classes)
# self._output_projection_layer = self._target_embedder.weight
# self._output_projection_layer_bias = torch.nn.Parameter(torch.zeros(num_classes))
self._program_to_hidden_projection = Linear(
program_emb_size, self._decoder_output_dim
)
if self._add_prog_emb_to_inp:
self._program_to_inp_projection = Linear(
program_emb_size, self._target_embedding_dim
)
# self._program_to_output_projection = Linear(program_emb_size, num_classes)
self._max_decoding_steps = max_decoding_steps_generate
self.use_bow_decoder = use_bow_decoder
if use_bow_decoder:
self.bow_decoder_matrix = Linear(program_emb_size, num_classes)
##
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
num_params = sum([np.prod(p.size()) for p in model_parameters])
print("[COND-LM] num params language model = ", num_params)
##
if initializer is not None:
initializer(self)
def process_batch(
self,
program_embedding=None,
num_decoding_steps=None,
targets=None,
generate_or_ppl="ppl",
decoding_method: str = "greedy",
sampling_top_p: float = 0.9,
sampling_top_k: int = None,
):
use_gold_targets = False
# print("num_decoding_steps = ", num_decoding_steps)
if self.training:
assert generate_or_ppl == "ppl"
use_gold_targets = True
else:
if generate_or_ppl == "ppl":
use_gold_targets = True
else:
use_gold_targets = False
last_predictions = None
if False: # True:
batch_size = program_embedding.size()[0]
decoder_hidden = self._program_to_hidden_projection(
program_embedding
) # bs, h
else: # lesser params to train
batch_size = targets.size()[0]
decoder_hidden = torch.zeros(batch_size, self._decoder_output_dim)
decoder_context = torch.zeros(batch_size, self._decoder_output_dim)
if torch.cuda.is_available():
decoder_hidden = decoder_hidden.cuda()
decoder_context = decoder_context.cuda()
step_logits = []
step_probabilities = []
step_predictions = []
for timestep in range(num_decoding_steps):
if use_gold_targets:
input_choices = targets[:, timestep]
else:
if timestep == 0:
input_choices = targets[:, timestep] # init with start symbols
else:
input_choices = last_predictions
decoder_input = self._prepare_decode_step_input(
input_choices, decoder_hidden, program_embedding
)
decoder_hidden, decoder_context = self._decoder_cell(
decoder_input, (decoder_hidden, decoder_context)
)
# (batch_size, num_classes)
if len(program_embedding.size()) == 1:
program_embedding = program_embedding.view(1, -1)
program_embedding_proj = self._program_to_hidden_projection(
program_embedding
)
output_projections = self._output_projection_layer(
torch.cat([decoder_hidden, program_embedding_proj], dim=1)
)
# output_projections = F.linear(input=decoder_hidden,
# weight=self._output_projection_layer,
# bias=self._output_projection_layer_bias)
# program_proj = self._program_to_output_projection(program_embedding)
# output_projections = output_projections + program_proj
if self.use_bow_decoder:
output_projections = self.bow_decoder_matrix(program_embedding)
step_logits.append(output_projections.unsqueeze(1)) # bs,1,vocab
class_probabilities = F.softmax(output_projections, dim=-1)
step_probabilities.append(class_probabilities.unsqueeze(1))
if self.use_bow_decoder:
_, predicted_classes = torch.kthvalue(
-class_probabilities, k=timestep + 1, dim=1
) ## generation through argmax
# does assume that number of steps is smaller than the vocab
# print("predicted_classes = ", predicted_classes)
else:
if decoding_method == "greedy":
_, predicted_classes = torch.max(
class_probabilities, 1
) ## generation through argmax
elif decoding_method == "sample":
# _, predicted_classes = torch.max(class_probabilities, 1)
# print(" *** predicted_classes : ", predicted_classes.size())
temperature = 1.0
if sampling_top_k is None:
top_k = 0
else:
top_k = sampling_top_k
top_p = sampling_top_p # 0.9
bs = output_projections.size()[0]
assert bs == 1, "found bs = " + str(bs) + " , but wanted bs = 1"
# print("output_projections : ", output_projections.size())
logits = output_projections.view(-1) / temperature
filtered_logits = utils.top_k_top_p_filtering(
logits, top_k=top_k, top_p=top_p
)
probabilities = F.softmax(filtered_logits, dim=-1)
predicted_classes = torch.multinomial(probabilities, 1)
# print("predicted_classes : ", predicted_classes.size())
else:
raise NotImplementedError
last_predictions = predicted_classes
# (batch_size, 1)
step_predictions.append(last_predictions.unsqueeze(1))
# for self.use_bow_decoder mode,
# for now picking the topk words
# note that due to output_projections = self.bow_decoder_matrix(program_embedding)
# projections are same at every step
return {
"step_logits": step_logits,
"step_probabilities": step_probabilities,
"step_predictions": step_predictions,
}
def forward( # type: ignore
self,
program_embedding: torch.FloatTensor,
        target_tokens: Dict[str, torch.LongTensor],
metadata: List[Dict[str, Any]] = None,
mode="ppl",
selected_program_id: List[int] = None,
) -> Dict[str, torch.Tensor]:
# print("[CondLM] program_embedding = ", program_embedding)
targets = None
if len(program_embedding.size()) == 1:
program_embedding = program_embedding.view(1, -1)
batch_size = program_embedding.size()[0]
# print("[CondLM] : target_tokens = ", target_tokens)
assert mode in ["ppl", "generate"]
if target_tokens:
targets = target_tokens["tokens"]
target_sequence_length = targets.size()[1]
# The last input from the target is either padding or the end symbol. Either way, we
# don't have to process it.
num_decoding_steps = (
target_sequence_length - 1
            ) # --> the start symbol is supplied here explicitly
# targets: bs, timesteps
# shape (batch_size, timesteps, embedding_size)
# embeddings = self._target_embedder.forward(targets)
else:
num_decoding_steps = self._max_decoding_steps
# print("program_embedding: ", program_embedding)
# print("targets: ", type(targets))
vals_loss = self.process_batch(
program_embedding, num_decoding_steps, targets, generate_or_ppl=mode
)
step_logits, step_probabilities, step_predictions = (
vals_loss["step_logits"],
vals_loss["step_probabilities"],
vals_loss["step_predictions"],
)
# step_logits is a list containing tensors of shape (batch_size, 1, num_classes)
# This is (batch_size, num_decoding_steps, num_classes)
logits = torch.cat(step_logits, 1)
class_probabilities = torch.cat(step_probabilities, 1)
all_predictions = torch.cat(step_predictions, 1)
output_dict = {
"logits": logits,
"class_probabilities": class_probabilities,
"predictions": all_predictions,
}
if target_tokens:
target_mask = get_text_field_mask(target_tokens)
# loss = self._get_loss(logits, targets, target_mask)
loss = self._get_loss(logits, targets, target_mask, average=None)
output_dict["logprob_ylabel_given_z"] = -loss
average_loss = torch.mean(loss) # loss #.data.cpu().numpy()
# if self.unconditional_lm:
# output_dict["loss"] = average_loss # torch.mean(loss)
if not self.training:
if mode == "generate":
all_generated = []
all_target = []
i2v = self.vocab.get_index_to_token_vocabulary("tokens")
num_decoding_steps = self._max_decoding_steps # ** new
vals_sample = self.process_batch(
program_embedding,
num_decoding_steps,
targets,
generate_or_ppl="generate",
decoding_method=self.decoding_method,
sampling_top_p=self.sampling_top_p,
)
step_predictions = vals_sample["step_predictions"]
# step_predictions: time_steps,batch_size,1
for b in range(batch_size):
# print("batch_size = ", batch_size)
step_predictions_b = [
pred_i[b].data.item() for pred_i in step_predictions
]
step_predictions_b = [p for p in step_predictions_b if p != 0]
if self.use_bow_decoder:
pass
else:
end_idx = (
step_predictions_b.index(self._end_index)
if self._end_index in step_predictions_b
else len(step_predictions_b)
)
if end_idx != -1:
step_predictions_b = step_predictions_b[:end_idx]
predicted_str = " ".join([i2v[pi] for pi in step_predictions_b])
targets_b = [pred_i.data.item() for pred_i in targets[b]]
targets_b = [p for p in targets_b if p != 0]
targets_b = targets_b[1:-1] # ** removing start and end index
target_str = " ".join([i2v[pi] for pi in targets_b])
# logger.debug(f" ************** [generate] target_str = {target_str}")
# logger.debug(f" ************** [generate] predicted_str = {predicted_str}")
print(" ************** [generate] target_str =", target_str)
print(" ************** [generate] predicted_str = ", predicted_str)
all_generated.append(predicted_str)
all_target.append(target_str)
id = metadata[b]["idx"]
self._ngram_overlap_eval(predicted_str, target_str, id)
self._diversity_eval(predicted_str, typ="generated")
self._diversity_eval(target_str, typ="gt")
if self.use_activation_evals:
self._program_activation_evals(
predicted_str,
typ="generated",
program_id=selected_program_id[b],
)
self._program_activation_evals(
target_str, typ="gt", program_id=selected_program_id[b]
)
output_dict.update(
{
"generate_all_generated": all_generated,
"generate_all_target": all_target,
}
)
else:
if mode == "generate":
raise NotImplementedError(
"generate mode not implemented for training mode"
)
return output_dict
def log_ppl(self, avg_loss):
self._perplexity(avg_loss)
def get_metrics(self, reset: bool = False):
ret = {"perplexity": self._perplexity.get_metric(reset=reset)}
if (not self.training) and reset: # and (not self.unconditional_lm):
ret.update(self._ngram_overlap_eval.get_metric(reset))
ret.update(self._diversity_eval.get_metric(reset))
if self.use_activation_evals:
ret.update(self._program_activation_evals.get_metric(reset))
return ret
def _prepare_decode_step_input(
self,
input_indices: torch.LongTensor,
decoder_hidden_state: torch.LongTensor = None,
program_embedding: torch.FloatTensor = None,
) -> torch.Tensor:
embedded_input = self._target_embedder.forward(input_indices)
# if not self.unconditional_lm:
if self._add_prog_emb_to_inp:
# program_emb_extended = program_embedding
program_emb_extended = self._program_to_inp_projection(
program_embedding
)
embedded_input = torch.cat([embedded_input, program_emb_extended], -1)
# print("embedded_input : ", embedded_input.size())
# (batch_size, encoder_output_dim + target_embedding_dim)
return embedded_input
@staticmethod
def _get_loss(
logits: torch.FloatTensor,
targets: torch.LongTensor,
target_mask: torch.LongTensor,
average: str = "batch",
) -> torch.FloatTensor:
"""
Takes logits (unnormalized outputs from the decoder) of size (batch_size,
num_decoding_steps, num_classes), target indices of size (batch_size, num_decoding_steps+1)
and corresponding masks of size (batch_size, num_decoding_steps+1) steps and computes cross
entropy loss while taking the mask into account.
The length of ``targets`` is expected to be greater than that of ``logits`` because the
decoder does not need to compute the output corresponding to the last timestep of
``targets``. This method aligns the inputs appropriately to compute the loss.
During training, we want the logit corresponding to timestep i to be similar to the target
token from timestep i + 1. That is, the targets should be shifted by one timestep for
appropriate comparison. Consider a single example where the target has 3 words, and
padding is to 7 tokens.
The complete sequence would correspond to <S> w1 w2 w3 <E> <P> <P>
and the mask would be 1 1 1 1 1 0 0
and let the logits be l1 l2 l3 l4 l5 l6
We actually need to compare:
the sequence w1 w2 w3 <E> <P> <P>
with masks 1 1 1 1 0 0
against l1 l2 l3 l4 l5 l6
(where the input was) <S> w1 w2 w3 <E> <P>
"""
# relevant_targets = targets.contiguous() # (batch_size, num_decoding_steps)
# relevant_mask = target_mask.contiguous() # (batch_size, num_decoding_steps)
relevant_targets = targets[
:, 1:
].contiguous() # (batch_size, num_decoding_steps)
relevant_mask = target_mask[
:, 1:
].contiguous() # (batch_size, num_decoding_steps)
loss = util.sequence_cross_entropy_with_logits(
logits, relevant_targets, relevant_mask, average=average
)
# print('_get loss : ', loss)
return loss
|
from rich import box
from ...helper import deprecate_by
class PlotMixin:
"""Provide helper functions for :class:`Document` to plot and visualize itself."""
def _ipython_display_(self):
"""Displays the object in IPython as a side effect"""
self.summary()
def __rich_console__(self, console, options):
yield f":page_facing_up: [b]Document[/b]: [cyan]{self.id}[cyan]"
from rich.table import Table
my_table = Table('Attribute', 'Value', width=80, box=box.ROUNDED)
for f in self.non_empty_fields:
if f in ('embedding', 'tensor'):
from .rich_embedding import ColorBoxEmbedding
my_table.add_row(f, ColorBoxEmbedding(getattr(self, f)))
elif f not in ('id', 'chunks', 'matches'):
my_table.add_row(f, str(getattr(self, f)))
if my_table.rows:
yield my_table
def summary(self) -> None:
"""Print non-empty fields and nested structure of this Document object."""
from rich import print
print(self._plot_recursion())
def _plot_recursion(self, tree=None):
if tree is None:
from rich.tree import Tree
tree = Tree(self)
else:
tree = tree.add(self)
for a in ('matches', 'chunks'):
if getattr(self, a):
if a == 'chunks':
_icon = ':diamond_with_a_dot:'
else:
_icon = ':large_orange_diamond:'
_match_tree = tree.add(f'{_icon} [b]{a.capitalize()}[/b]')
for d in getattr(self, a):
d._plot_recursion(_match_tree)
return tree
def display(self):
"""Plot image data from :attr:`.tensor` or :attr:`.uri`."""
from IPython.display import Image, display
if self.uri:
if self.mime_type.startswith('audio') or self.uri.startswith('data:audio/'):
uri = _convert_display_uri(self.uri, self.mime_type)
_html5_audio_player(uri)
elif self.mime_type.startswith('video') or self.uri.startswith(
'data:video/'
):
uri = _convert_display_uri(self.uri, self.mime_type)
_html5_video_player(uri)
elif self.uri.startswith('data:image/'):
_html5_image(self.uri)
else:
display(Image(self.uri))
elif self.tensor is not None:
try:
import PIL.Image
p = PIL.Image.fromarray(self.tensor)
if p.mode != 'RGB':
raise
display(p)
except:
import matplotlib.pyplot as plt
plt.matshow(self.tensor)
else:
self.summary()
plot = deprecate_by(display, removed_at='0.5')
def _convert_display_uri(uri, mime_type):
import urllib
from .helper import _to_datauri, _uri_to_blob
scheme = urllib.parse.urlparse(uri).scheme
if scheme not in ['data', 'http', 'https']:
blob = _uri_to_blob(uri)
return _to_datauri(mime_type, blob)
return uri
def _html5_image(uri):
from IPython.display import display
from IPython.core.display import HTML # noqa
src = f'''
<body>
<image src="{uri}" height="200px">
</body>
'''
display(HTML(src)) # noqa
def _html5_video_player(uri):
from IPython.display import display
from IPython.core.display import HTML # noqa
src = f'''
<body>
<video width="320" height="240" autoplay muted controls>
<source src="{uri}">
Your browser does not support the video tag.
</video>
</body>
'''
display(HTML(src)) # noqa
def _html5_audio_player(uri):
from IPython.display import display
from IPython.core.display import HTML # noqa
src = f'''
<body>
<audio controls="controls" style="width:320px" >
<source src="{uri}"/>
Your browser does not support the audio element.
</audio>
</body>
'''
display(HTML(src)) # noqa
|
import h5py
from whacc import utils
import matplotlib.pyplot as plt
import numpy as np
H5_list = utils.get_h5s('/Users/phil/Dropbox/Autocurator/testing_data/MP4s/')
H5_FILE = H5_list[0]
with h5py.File(H5_FILE, 'r') as hf:
for k in hf.keys():
print(k)
print(hf['trial_nums_and_frame_nums'][:])
# with h5py.File(H5_FILE, 'r') as hf:
# plt.plot(hf['in_range'][:])
# print(hf['in_range'][:])
with h5py.File(H5_FILE, 'r') as hf:
# for k in hf['trial_nums_and_frame_nums'][1, :]:
cumsum_frames = np.concatenate((np.asarray([0]), np.cumsum(hf['trial_nums_and_frame_nums'][1, :])))
tot_frames = np.sum(hf['trial_nums_and_frame_nums'][1, :])
start_pole = 2000
stop_pole = 3000
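    # For every trial, mark the frames between start_pole and stop_pole (clipped
    # to that trial's own frame range) with 1 in keep_mask; all other frames stay 0.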
b = np.vstack((start_pole+cumsum_frames[:-1], cumsum_frames[1:]-1)).astype('int')
b = np.min(b, axis = 0)
a = np.vstack((stop_pole+cumsum_frames[:-1], cumsum_frames[1:])).astype('int')
a = np.min(a, axis = 0)
keep_mask = np.zeros(tot_frames.astype('int'))
for k1, k2 in zip(b, a):
keep_mask[k1:k2] = 1
plt.plot(keep_mask)
|
# Natural Language Toolkit: Parser Utility Functions
#
# Author: Ewan Klein <ewan@inf.ed.ac.uk>
#
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Utility functions for parsers.
"""
######################################################################
#{ Test Suites
######################################################################
from featurechart import load_earley
class TestGrammar(object):
"""
Unit tests for CFG.
"""
def __init__(self, grammar, suite, accept=None, reject=None):
self.test_grammar = grammar
self.cp = load_earley(grammar, trace=0)
self.suite = suite
self._accept = accept
self._reject = reject
def run(self, show_trees=False):
"""
Sentences in the test suite are divided into two classes:
- grammatical (C{accept}) and
- ungrammatical (C{reject}).
        If a sentence should parse according to the grammar, the value of
C{trees} will be a non-empty list. If a sentence should be rejected
according to the grammar, then the value of C{trees} will be C{None}.
"""
for test in self.suite:
print test['doc'] + ":",
            accepted = rejected = False
            for key in ['accept', 'reject']:
for sent in test[key]:
tokens = sent.split()
trees = self.cp.parse(tokens)
if show_trees and trees:
print
print sent
for tree in trees:
print tree
if key=='accept':
if trees == []:
raise ValueError, "Sentence '%s' failed to parse'" % sent
else:
accepted = True
else:
if trees:
raise ValueError, "Sentence '%s' received a parse'" % sent
else:
rejected = True
if accepted and rejected:
print "All tests passed!"
|
"""
Labels display text to users
"""
from kivy.uix.label import Label
from kivy.properties import StringProperty, NumericProperty, BooleanProperty, ColorProperty
__all__ = [
'CupertinoLabel'
]
class CupertinoLabel(Label):
"""
iOS style Label
.. image:: ../_static/label/demo.png
"""
text = StringProperty(' ')
"""
A :class:`~kivy.properties.StringProperty` defining the text of :class:`CupertinoLabel`
.. image:: ../_static/label/text.png
**Python**
.. code-block:: python
CupertinoLabel(text='Hello World')
**KV**
.. code-block::
CupertinoLabel:
text: 'Hello World'
"""
font_name = StringProperty('San Francisco')
"""
Font of :class:`CupertinoLabel`. To comply with iOS standard, use `San Francisco` or `New York`
.. image:: ../_static/label/font_name.png
**Python**
.. code-block:: python
CupertinoLabel(font_name='New York')
**KV**
.. code-block::
CupertinoLabel:
font_name: 'New York'
"""
font_size = NumericProperty(15)
"""
Size of the font of :class:`CupertinoLabel`
.. image:: ../_static/label/font_size.png
**Python**
.. code-block:: python
CupertinoLabel(font_size=20)
**KV**
.. code-block::
CupertinoLabel:
font_size: 20
"""
bold = BooleanProperty(False)
"""
If :attr:`text` :class:`CupertinoLabel` is bold
.. image:: ../_static/label/bold.png
**Python**
.. code-block:: python
CupertinoLabel(bold=True)
**KV**
.. code-block::
CupertinoLabel:
bold: True
"""
italic = BooleanProperty(False)
"""
If :attr:`text` of :class:`CupertinoLabel` is italic
.. image:: ../_static/label/italic.png
**Python**
.. code-block:: python
CupertinoLabel(italic=True)
**KV**
.. code-block::
CupertinoLabel:
italic: True
"""
color = ColorProperty([0, 0, 0, 1])
"""
Color of :attr:`text` :class:`CupertinoLabel`
.. image:: ../_static/label/color.png
**Python**
.. code-block:: python
CupertinoLabel(color=(1, 0, 0, 1))
**KV**
.. code-block::
CupertinoLabel:
color: 1, 0, 0, 1
"""
|
# Assemble release folder
import os
import sys
import shutil
import glob
import pathlib
target = "ogm_release"
binext = ""
libext = ".so"
_from = "."
if len(sys.argv) >= 2:
_from = sys.argv[1]
if os.name == 'nt':
binext = ".exe"
libext = ".dll"
dlibext = "d" + libext
if os.path.exists(target):
print ("removing existing " + target)
shutil.rmtree(target)
os.mkdir(target)
def copytree(src, dst):
print("copytree " + src + " -> " + dst)
return shutil.copytree(src, dst)
def copy(src, dst):
print("copy " + src + " -> " + dst)
return shutil.copy(src, dst)
# etc/
os.mkdir(target + "/etc")
endings = ["*.png", "*.gif", "*.ico"]
for ending in endings:
for file in glob.glob('etc/' + ending):
copy(file, target + "/etc")
# demo/
copytree("demo", target + "/demo")
# binaries
copy(os.path.join(_from, "ogm" + binext), target)
os.chmod(target + "/ogm" + binext, 777)
copy(os.path.join(_from, "ogm-test" + binext), target)
os.chmod(target + "/ogm-test" + binext, 777)
for file in pathlib.Path(_from).rglob('*' + libext):
file = str(file)
if os.path.dirname(file) == target:
continue
print("found " + file)
copy(file, target)
# licenses
copy("LICENSE", target + "/LICENSE_opengml")
copy("external/xbr/xbrjs.license", target + "/LICENSE_xbrjs")
copy("external/pugixml/LICENCE.md", target + "/LICENSE_pugixml")
copy("external/include/nlohmann/LICENCE.MIT", target + "/LICENSE_nlohmann")
copy("external/include/rectpack2D/LICENSE.md", target + "/LICENSE_rectpack2d")
copy("external/include/simpleini/LICENCE.txt", target + "/LICENSE_simpleini")
copy("external/include/ThreadPool_zlib_license.txt", target + "/LICENCE_ThreadPool")
copy("external/include/rapidcsv.license", target + "/LICENSE_rapidcsv")
copy("external/include/base64.license", target + "/LICENSE_base64")
copy("external/soloud/LICENSE", target + "/LICENSE_soloud")
|
'''
Renames cTAKES XMI output files (named like doc%d.xmi) to use the original document name.
E.g. Plaintext abc.txt -> cTAKES doc0.xmi -> renamed abc.xmi
'''
import os
from ctakes.format import XMI
if __name__ == '__main__':
def _cli():
import optparse
parser = optparse.OptionParser(usage='Usage: %prog XMIDIR')
(options, args) = parser.parse_args()
if len(args) != 1:
parser.print_help()
exit()
(xmidir,) = args
return xmidir
xmidir = _cli()
print("Renaming files in %s..." % xmidir)
for f in os.listdir(xmidir):
if os.path.splitext(f)[1] == '.xmi':
path = os.path.join(xmidir, f)
docID = XMI.getDocumentID(path)
new_path = os.path.join(xmidir, '%s.xmi' % os.path.splitext(docID)[0])
if not os.path.isfile(new_path):
os.rename(path, new_path)
print(" >> Renamed %s to %s" % (path, new_path))
elif path != new_path:
print("[NAME COLLISION] File %s already exists (skipping %s)" % (new_path, path))
|