content stringlengths 5 1.05M |
|---|
from enum import Enum
# Abstract base for every node in the type model.
class AbstractType(object):
    """Common interface: a name, an optional typedef origin, and visitor hooks."""

    def __init__(self, name):
        self.name = name
        # Filled in later when this type turns out to be a typedef alias.
        self.original_typedef = None

    def complete_name(self):
        """Fully-qualified display name; concrete subclasses override this."""
        pass

    def accept_printer(self, printer):
        """Printer-visitor dispatch hook; concrete subclasses override this."""
        pass

    def accept_visitor(self, visitor):
        raise Exception('This type should never be directly visited')

    def __str__(self):
        return self.complete_name()
# Build a fully-qualified name from a namespace and an identifier.
def cat_ns(namespace, name):
    """Return ``namespace::name``; just ``name`` when namespace is empty/None."""
    if not namespace:
        return name
    return namespace + '::' + name
# Primitive (built-in) type.
class PrimitiveType(AbstractType):
    """A built-in type; its complete name is simply its name."""

    def __init__(self, name):
        super().__init__(name)

    def complete_name(self):
        return self.name

    def accept_printer(self, printer):
        return printer.visit_primitive_type(self)

    def accept_visitor(self, visitor):
        return visitor.visit_primitive_type(self)
# A type referenced by name within a namespace.
class RecursiveType(AbstractType):
    """A named type reference qualified by its namespace via cat_ns."""

    def __init__(self, name, namespace):
        super().__init__(name)
        self.namespace = namespace

    def complete_name(self):
        return cat_ns(self.namespace, self.name)

    def accept_printer(self, printer):
        return printer.visit_recursive_type(self)

    def accept_visitor(self, visitor):
        return visitor.visit_recursive_type(self)
# A single enumerator (name/value pair) inside an enum.
class EnumConstant:
    """One enum member: its identifier and its numeric value."""

    def __init__(self, name, value):
        self.value = value
        self.name = name

    def accept_printer(self, printer):
        return printer.visit_enum_constant(self)
# Source location (file / line / column) of a declaration.
class Location:
    """Where a declaration appears; line and column arrive as strings."""

    def __init__(self, filename: str, line: str, column: str):
        self.column = column
        self.line = line
        self.filename = filename
# A fully-defined enum type.
class DefinedEnum(AbstractType):
    """An enum definition: namespace, constants, defining header and location."""

    def __init__(self, name, namespace, constants, defined_in_header, location):
        super().__init__(name)
        self.namespace = namespace
        self.header = defined_in_header
        self.constants = constants
        self.location = location

    def complete_name(self):
        return cat_ns(self.namespace, self.name)

    def accept_printer(self, printer):
        return printer.visit_abstract_defined_enum(self)

    def accept_visitor(self, visitor):
        return visitor.visit_enum(self)
# How a type is referenced.
class RefType(Enum):
    """Reference kind of a type usage: lvalue ref, rvalue ref, or pointer."""
    LVALUE = 1
    RVALUE = 2
    POINTER = 3
# Member access level.
class AccessSpecifier(Enum):
    """C++-style access level of a member or method."""
    PRIVATE = 1
    PROTECTED = 2
    PUBLIC = 3
# Qualifiers of a class method (const/virtual/abstract/static + annotation).
class ApiTraits(object):
    """Method traits; *annotation* optionally carries a codegen marker."""

    def __init__(self, is_const, is_virtual, is_abstract, is_static, annotation=None):
        self.annotation = annotation
        self.is_static = is_static
        self.is_abstract = is_abstract
        self.is_virtual = is_virtual
        self.is_const = is_const
# A class method (API entry).
class Api(object):
    """A method: name, access level, return info, parameters, traits, location."""

    def __init__(self, name, access_specifier, returns, param_types, traits, location):
        self.location = location
        self.traits = traits
        self.param_types = param_types
        self.returns = returns
        self.access_specifier = access_specifier
        self.name = name

    def accept_printer(self, printer):
        return printer.visit_abstract_api(self)

    def has_codegen_tag(self):
        """True when the method is annotated for binding generation."""
        return self.traits.annotation == 'generate_binds'
# A data member of a class.
class Field(object):
    """A member variable: access, type, traits, initializer and location."""

    def __init__(self, name, access_specifier, type_info, traits, init_value, location):
        self.location = location
        self.init_value = init_value
        self.traits = traits
        self.type_info = type_info
        self.access_specifier = access_specifier
        self.name = name

    def accept_printer(self, printer):
        return printer.visit_abstract_field(self)
# A function/method parameter.
class Param(object):
    """One formal parameter: name, type information, traits and location."""

    def __init__(self, name, type_info, traits, location):
        self.location = location
        self.traits = traits
        self.type_info = type_info
        self.name = name

    def accept_printer(self, printer):
        return printer.visit_abstract_param(self)
# A method's return-type description.
class ApiReturns(object):
    """Return type plus its qualifiers (constness / reference kind)."""

    def __init__(self, type_info, traits):
        self.traits = traits
        self.type_info = type_info

    def accept_printer(self, printer):
        return printer.visit_api_return_type(self)
# Qualifiers attached to a type usage.
class TypeTraits(object):
    """Whether the usage is const and how it is referenced (see RefType)."""

    def __init__(self, is_const, ref_type):
        self.ref_type = ref_type
        self.is_const = is_const
# A class declaration (prototype), possibly templated.
class DeclaredClass(AbstractType):
    """A declared class: namespace, declaring header and template arguments."""

    def __init__(self, name, namespace, template_args, header):
        super().__init__(name)
        self.namespace = namespace
        self.header = header
        self.template_args = template_args

    def complete_name(self):
        qualified = cat_ns(self.namespace, self.name)
        if not self.template_args:
            return qualified
        # Only the first template argument participates in the display name.
        first_arg = self.template_args[0].complete_name()
        return '{}<{}>'.format(qualified, first_arg or '')

    def accept_printer(self, printer):
        return printer.visit_abstract_declared_class(self)

    def accept_visitor(self, visitor):
        return visitor.visit_declared_class(self)
# A fully-defined class type.
class DefinedClass(DeclaredClass):
    """A class definition: members, methods, bases plus codegen metadata."""

    def __init__(self, name, namespace, template_args, members, methods, bases, defined_in_header, location, annotation=None):
        super().__init__(name, namespace, template_args, defined_in_header)
        self.members = members
        self.methods = methods
        self.bases = bases
        self.location = location
        # Populated by later passes.
        self.dependent_headers = None
        self.annotation = annotation

    def complete_name(self):
        # Same qualified name as the declaration.
        return super().complete_name()

    def accept_printer(self, printer):
        return printer.visit_abstract_defined_class(self)

    def accept_visitor(self, visitor):
        return visitor.visit_defined_class(self)
|
from typing import Optional
# Definition for singly-linked list.
class ListNode:
    """Singly-linked list node holding a value and a next pointer."""

    def __init__(self, val=0, next=None):
        self.next = next
        self.val = val
class Solution:
    # Time complexity: O(n)
    # Space complexity: O(n) — one stack frame per node
    # Recursive approach
    def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Reverse the list recursively and return the new head."""
        def reverse(prev_node, curr_node):
            # Walked past the tail: prev_node is the new head.
            if curr_node is None:
                return prev_node
            new_head = reverse(curr_node, curr_node.next)
            curr_node.next = prev_node
            return new_head
        return reverse(None, head)

    # Time complexity: O(n)
    # Space complexity: O(1)
    # Iterative approach
    def reverseList2(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Reverse the list in place with pointer swapping."""
        reversed_head = None
        node = head
        while node:
            # RHS is evaluated before assignment, so node.next is the old link.
            node.next, reversed_head, node = reversed_head, node, node.next
        return reversed_head
############################################################################
#
# AVI CONFIDENTIAL
# __________________
#
# [2013] - [2018] Avi Networks Incorporated
# All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains the property
# of Avi Networks Incorporated and its suppliers, if any. The intellectual
# and technical concepts contained herein are proprietary to Avi Networks
# Incorporated, and its suppliers and are covered by U.S. and Foreign
# Patents, patents in process, and are protected by trade secret or
# copyright law, and other laws. Dissemination of this information or
# reproduction of this material is strictly forbidden unless prior written
# permission is obtained from Avi Networks Incorporated.
###
'''This is a custom script for avi test drive arm template.
This is to configure the cloud on controller.
This script is avaialble on test-drive controller at location /var/lib/waagent/custom-script/download/0/
'''
import requests
import json
import time
import urllib3
import logging
log = logging.getLogger('azure test drive custom script')
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def get_resource_group():
    """Return this VM's Azure resource-group name (response body, bytes)
    from the Azure instance-metadata service (IMDS)."""
    res = None
    # NOTE(review): Azure IMDS documents the Metadata header value as the
    # string 'true'; a Python bool works with requests but confirm with IMDS.
    headers = {'Metadata': True}
    uri = 'http://169.254.169.254/metadata/instance/compute/resourceGroupName?api-version=2017-08-01&format=text'
    res = requests.get(uri, headers=headers)
    return res.content
def get_controller_ip():
    """Return this VM's private IP address (response body, bytes) from the
    Azure instance-metadata service.

    Raises:
        Exception: when the metadata endpoint does not answer with HTTP 200.
    """
    headers = {'Metadata': True}
    uri = "http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/privateIpAddress?api-version=2017-04-02&format=text"
    res = requests.get(uri, headers=headers)
    if res.status_code != 200:
        # Bug fix: the original used the Python-2 two-expression form
        # ``raise res.status_code, Exception(...)`` which is a SyntaxError
        # on Python 3; raise a single exception instead.
        raise Exception(str(res.text))
    else:
        return res.content
def get_default_password():
    """Read the bootstrap default admin password.

    Mirrors the original behaviour: keeps the LAST line of the file with its
    final character stripped. Returns None (after logging) on any failure.
    """
    default_password = ''
    default_password_file = '/opt/avi/bootstrap/default_password'
    try:
        # Use a context manager so the file handle is always closed
        # (the original left the handle open).
        with open(default_password_file, "r") as fh:
            for line in fh:
                default_password = str(line[0:-1])
        return default_password
    except Exception as e:
        log.info(str(e))
        return None
def get_headers(controller_ip):
    """Poll the controller's initial-data API (up to 10 tries, 30s apart)
    and build the common request headers carrying the Avi API version.

    Raises:
        RuntimeError: when every retry fails (wraps the last error).
    """
    headers = ''
    count = 10
    for retry in range(count):
        try:
            uri = 'http://%s/api/initial-data' % controller_ip
            res = requests.get(uri, verify=False)
            if res.status_code != 200:
                raise RuntimeError("Initial data API status not 200")
            res = json.loads(res.content)
            avi_version = res['version']['Version']
            headers = {'Content-Type': 'application/json', 'X-Avi-Version': avi_version, 'X-Avi-Tenant': 'admin'}
            return headers
        except Exception as e:
            if retry == (count - 1):
                # Out of retries: surface the last failure.
                raise RuntimeError(str(e))
            else:
                log.info('Retrying number %s' % retry)
                time.sleep(30)
    return headers
def print_user_data():
    """Log the cloud-init user data, best effort: read errors are logged
    and otherwise ignored."""
    try:
        # Context manager closes the handle (the original leaked it);
        # the redundant trailing ``pass`` is removed.
        with open('/var/lib/cloud/instance/user-data.txt', 'r') as fh:
            for line in fh:
                log.info(line)
    except Exception as e:
        log.info(str(e))
def get_cloud_data(controller_ip, headers, default_password):
    """Fetch the Default-Cloud object from the controller API.

    Returns:
        (200, cloud_dict) on success, otherwise (status_code, exception).
    """
    res = ''
    # NOTE(review): ``data`` is built but never sent; the cloud name is
    # passed via the query string instead.
    data = {'name': 'Default-Cloud'}
    uri = 'https://%s/api/cloud?name=Default-Cloud' % controller_ip
    try:
        res = requests.get(uri, auth=('admin', default_password), headers=headers, verify=False)
        if res.status_code != 200:
            raise Exception(str(res.text))
        _data = res.json()
        _data = _data['results'][0]
        return 200, _data
    except Exception as e:
        # NOTE(review): if requests.get itself raised, ``res`` is still the
        # initial '' string and ``res.status_code`` raises AttributeError here.
        return res.status_code, e
# Script entry: configure logging, read Azure metadata, then PUT the
# Default-Cloud object back with this deployment's resource group and VNet id.
try:
    lvl = logging.INFO
    log.setLevel(lvl)
    ch = logging.StreamHandler()
    ch.setLevel(lvl)
    formatter = logging.Formatter(
        '%(asctime)s - %(funcName)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    log.addHandler(ch)
    print_user_data()
    resource_group = get_resource_group()
    controller_ip = get_controller_ip()
    default_password = get_default_password()
    if not default_password:
        # Without the bootstrap password we cannot authenticate; bail quietly.
        exit(0)
    # Give the controller time to come up before the first API call.
    time.sleep(60)
    headers = get_headers(controller_ip)
    status_code, _data = get_cloud_data(controller_ip, headers, default_password)
    if status_code != 200:
        log.info(_data)
        exit()
    _uuid = _data['uuid']
    _data['azure_configuration']['resource_group'] = resource_group
    uri = 'https://%s/api/cloud/%s'%(controller_ip, _uuid)
    # NOTE(review): subscription id and VNet name are hard-coded; confirm they
    # match the test-drive subscription this ARM template deploys into.
    vnet_id = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s"%("6ac6cebc-1d7d-4b3c-8a6f-74093246ae02", resource_group, "servers-vnet")
    _data['azure_configuration']['network_info'][0]["virtual_network_id"] = vnet_id
    log.info('Put api to uri %s'%(uri))
    res = requests.put(uri, auth=('admin', default_password), headers=headers, data=json.dumps(_data), verify=False)
except Exception as e:
    # Best-effort boot script: log and exit quietly on any failure.
    log.info(str(e))
|
import re
from flask import Flask, render_template, redirect, abort
from flask.ext.bootstrap import Bootstrap
from flask.ext.wtf import Form, TextField, ValidationError
# Flask application setup: Bootstrap assets served minified from a CDN;
# settings may be overridden via the FLASKAPP_SETTINGS env var.
app = Flask(__name__)
app.config['BOOTSTRAP_USE_MINIFIED'] = True
app.config['BOOTSTRAP_USE_CDN'] = True
app.config['BOOTSTRAP_FONTAWESOME'] = True
# NOTE(review): hard-coded secret key — fine for dev, replace for production.
app.config['SECRET_KEY'] = 'devkey'
app.config.from_object(__name__)
app.config.from_envvar('FLASKAPP_SETTINGS', silent=True)
Bootstrap(app)
def valid_url(string):
    """True when *string* looks like an Evernote share URL (case-insensitive)."""
    pattern = 'http(s){0,1}://www.evernote.com/shard/.+'
    return re.match(pattern, string, re.I) is not None
class URLForm(Form):
    """Single-field form asking for an Evernote share URL."""
    url = TextField('Evernote share URL',
                    description='Please enter Evernote share URL.')

    def validate_url(form, field):
        if not valid_url(field.data):
            # Bug fix: the original joined the two message halves with '/',
            # which divides two str objects and raises TypeError instead of
            # surfacing the validation message. Implicit string literal
            # concatenation is the intended form.
            raise ValidationError('This does not appear to be '
                                  'a valid Evernote Share URL')
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page: show the URL form; on a valid submit redirect to the
    note view, embedding the submitted URL as a path segment."""
    form = URLForm()
    if form.validate_on_submit():
        url = form.url.data
        # The /note/ route uses a <path:...> converter, so the URL's slashes
        # survive inside the path segment.
        return redirect("/note/{}".format(url))
    return render_template('index.html',
                           form=form,
                           )
@app.route('/note/<path:noteurl>/')
def view_note(noteurl):
    """Render the shared-note page after re-validating the embedded URL;
    404 for anything that is not an Evernote share URL."""
    # Bug fix: ``print noteurl`` is a Python-2 print statement and a
    # SyntaxError on Python 3; the call form works on both.
    print(noteurl)
    if not valid_url(noteurl):
        abort(404)
    return render_template('view.html', url=noteurl)
# Run the development server on all interfaces, port 80, when executed directly.
if '__main__' == __name__:
    app.run(debug=True, host='0.0.0.0', port=80)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 6 09:37:42 2019
@author: Rashid Haffadi
"""
import numpy as np
from functools import partial
# Activations
# Supported activation names; list position doubles as the lookup index
# used by get_act() and Activation.__init__.
acts = ["relu", "sigmoid", "softmax", "tanh", "leaky_relu", "selu", "elu"]
def get_act(index, *args, **kwargs):
    """Map an index into ``acts`` to its activation callable.

    Optional kwargs configure the parameterised activations:
    ``leak`` (leaky_relu), ``alpha`` (selu/elu), ``lamda`` (selu).

    Raises:
        ActivationFunctionNotFound: when *index* is outside 0..6.
    """
    if index == 0:
        return relu
    elif index == 1:
        return sigmoid
    elif index == 2:
        return softmax
    elif index == 3:
        return tanh
    elif index == 4:
        # Bug fix: kwargs.get avoids the KeyError the original raised when
        # 'leak' was not supplied (the ``kwargs['leak'] and ... is not None``
        # test evaluated the subscript before checking presence).
        leak = kwargs.get('leak')
        return partial(leaky_relu, leak=leak) if leak is not None else leaky_relu
    elif index == 5:
        f = selu
        alpha = kwargs.get('alpha')
        if alpha is not None:
            f = partial(f, alpha=alpha)
        lamda = kwargs.get('lamda')
        if lamda is not None:
            f = partial(f, lamda=lamda)
        return f
    elif index == 6:
        alpha = kwargs.get('alpha')
        return partial(elu, alpha=alpha) if alpha is not None else elu
    else:
        raise ActivationFunctionNotFound
class Activation():
    """Callable wrapper resolving an activation by name or callable.

    Note: when choosing leaky_relu/elu/selu, pass leak=/alpha=/lamda= kwargs.
    """

    def __init__(self, act='relu', *args, **kwargs):
        if isinstance(act, str):
            index = acts.index(act)
            # Bug fix: forward the construction kwargs (leak/alpha/lamda) so
            # parameterised activations are actually configured; the original
            # called get_act(index) and silently dropped them.
            self.activation = get_act(index, *args, **kwargs)
        else:
            self.activation = act

    def __call__(self, h, *args, **kwargs):
        if len(h.shape) != 1:
            h = h.squeeze()
        # Bug fix: unpack instead of passing the tuples positionally — the
        # original ``self.activation(h, args, kwargs)`` was a TypeError for
        # every one-argument activation such as relu.
        return self.activation(h, *args, **kwargs)
def relu(h):
    """Rectified linear unit, element-wise: h where h > 0, else 0."""
    if len(h.shape) != 1:
        h = h.squeeze()
    mask = h > 0
    return h * mask
def sigmoid(h):
    """Logistic sigmoid 1 / (1 + e^-h), element-wise."""
    if len(h.shape) != 1:
        h = h.squeeze()
    return 1.0 / (1.0 + np.exp(-h))
def softmax(h):
    """Numerically-stable softmax: shifted exponentials normalised to sum 1."""
    if len(h.shape) != 1:
        h = h.squeeze()
    # Subtracting the max keeps np.exp from overflowing for large scores.
    exps = np.exp(h - np.max(h))
    return exps / exps.sum()
def tanh(h):
    """Hyperbolic tangent computed from exponentials, element-wise."""
    if len(h.shape) != 1:
        h = h.squeeze()
    pos, neg = np.exp(h), np.exp(-h)
    return (pos - neg) / (pos + neg)
def leaky_relu(h, leak: float = 0.2):
    """ReLU with a small slope *leak* on the negative side, element-wise."""
    if len(h.shape) != 1:
        h = h.squeeze()
    positive = h * (h > 0)
    negative = leak * h * (h <= 0)
    return positive + negative
def elu(h, alpha=0.2):
    """Exponential linear unit: h for h > 0, alpha*(e^h - 1) otherwise."""
    if len(h.shape) != 1:
        h = h.squeeze()
    positive = (h > 0) * h
    negative = (h <= 0) * alpha * (np.exp(h) - 1)
    return positive + negative
def selu(h, alpha=1.6732, lamda=1.0507):
    """Scaled ELU: lamda * (h for h > 0, alpha*(e^h - 1) otherwise)."""
    if len(h.shape) != 1:
        h = h.squeeze()
    elu_part = (h > 0) * h + (h <= 0) * alpha * (np.exp(h) - 1)
    return lamda * elu_part
# =============================================================================
# We have to use Linear with each activation
# Activation(linear)
# =============================================================================
def linear(x, w, b):
    """Affine map: matrix product of x and w, shifted by bias b."""
    projected = x @ w
    return projected + b
# =============================================================================
# relu_ = Activation('relu')
# relu_(linear(x, w, b))
# =============================================================================
# =============================================================================
# ##########################"Exceptions########################################
# =============================================================================
class ActivationFunctionNotFound(Exception):
    """Raised when an activation index/name has no known implementation."""
"""Authentication middleware"""
from django.shortcuts import redirect
from django.utils.http import urlquote
from social_core.exceptions import SocialAuthBaseException
from social_django.middleware import SocialAuthExceptionMiddleware
class SocialAuthExceptionRedirectMiddleware(SocialAuthExceptionMiddleware):
    """
    This middleware subclasses SocialAuthExceptionMiddleware and overrides
    process_exception to provide an implementation that does not use
    django.contrib.messages and instead only issues a redirect
    """

    def process_exception(self, request, exception):
        """
        Note: this is a subset of the SocialAuthExceptionMiddleware implementation.
        Returns a redirect to the backend's error URL (with message/backend
        query args) for social-auth exceptions; returns None otherwise so the
        exception propagates normally.
        """
        strategy = getattr(request, "social_strategy", None)
        if strategy is None or self.raise_exception(request, exception):
            # Not a social-auth request, or configured to re-raise: let
            # Django's normal exception handling take over.
            return
        if isinstance(exception, SocialAuthBaseException):
            backend = getattr(request, "backend", None)
            backend_name = getattr(backend, "name", "unknown-backend")
            message = self.get_message(request, exception)
            url = self.get_redirect_uri(request, exception)
            if url:
                # Append with '&' if the URL already carries a query string.
                url += ("?" in url and "&" or "?") + "message={0}&backend={1}".format(
                    urlquote(message), backend_name
                )
                # NOTE(review): indentation reconstructed — redirect only
                # fires when a redirect URI was resolved; confirm against VCS.
                return redirect(url)
|
# Package metadata for proxlib, consumed by setup tooling and docs.
__all__ = [
    "__title__", "__summary__", "__uri__", "__version__", "__author__",
    "__email__", "__license__", "__copyright__",
]

__title__ = "proxlib"
__summary__ = "Prox-operator library for optimization"
__uri__ = "https://github.com/UW-AMO/proxlib"
__version__ = "0.0.1"
__author__ = "Peng Zheng"
# Bug fix: corrected transposed TLD typo "uw.eud" -> "uw.edu".
__email__ = "zhengp@uw.edu"
__license__ = "BSD 2-Clause License"
__copyright__ = f"Copyright 2021 {__author__}"
|
def name_shuffler(name: str) -> str:
    """Return *name* with its whitespace-separated words in reverse order."""
    words = name.split()
    words.reverse()
    return ' '.join(words)
class Solution:
    # NOTE(review): relies on ``List`` (typing) and ``heapq`` being provided
    # by the surrounding environment (typical LeetCode setup) — neither is
    # imported in this snippet.
    def minNumberOfSemesters(self, n: int, dependencies: List[List[int]], k: int) -> int:
        """Greedy heuristic: schedule up to k available courses per semester,
        preferring those with the longest chain of dependents (max depth)."""
        def compute_depth(u):
            # Memoised longest downstream prerequisite chain from u
            # (-1 marks "not yet computed").
            if depths[u] == -1:
                depths[u] = 0
                for v in graph[u]:
                    depths[u] = max(depths[u], 1 + compute_depth(v))
            return depths[u]
        # Build the prerequisite DAG with 0-based course ids.
        graph = [[] for _ in range(n)]
        indegrees = [0] * n
        for u, v in dependencies:
            u -= 1
            v -= 1
            graph[u].append(v)
            indegrees[v] += 1
        depths = [-1] * n
        for u in range(n):
            depths[u] = compute_depth(u)
        # Max-heap (negated depth) of courses with no pending prerequisites.
        frees = [(-depths[u], u) for u in range(n) if not indegrees[u]]
        heapq.heapify(frees)
        ans = 0
        while frees:
            ans += 1
            # Take at most k deepest available courses this semester.
            ntakes = min(len(frees), k)
            nxts = []
            for _ in range(ntakes):
                _, u = heapq.heappop(frees)
                for v in graph[u]:
                    indegrees[v] -= 1
                    if indegrees[v] == 0:
                        nxts.append((-depths[v], v))
            # Newly unlocked courses only become available NEXT semester.
            for item in nxts:
                heapq.heappush(frees, item)
        return ans
|
from matplotlib import pyplot as plt
import tensorflow_addons as tfa
import tensorflow as tf
import numpy as np
import unittest
import os
from dataset_creation.image_transformations.data_augmentation import get_random_params, tf_augment_image
def get_image_directory():
    """Absolute path to the shared unittest color-image fixtures directory."""
    base = os.path.dirname(__file__)
    relative = os.path.join(base, '..', '..', '..', 'unittest_data', 'example_colors')
    return os.path.abspath(relative)
class TestDataAugmentation(unittest.TestCase):
    """Unit tests for get_random_params and tf_augment_image."""

    def test_get_random_params_nochange(self):
        # With all ranges zeroed and flips disabled, the sampled parameters
        # must describe the identity transform.
        rotation, \
        width_shift, height_shift, \
        brightness, \
        horizontal_flip, vertical_flip = get_random_params(seed=None,
                                                           rotation_range=0.,
                                                           width_shift_range=0.,
                                                           height_shift_range=0.,
                                                           brightness_range=0.,
                                                           horizontal_flip=False,
                                                           vertical_flip=False)
        # no parameter should be set to something other than 0 or False
        self.assertEqual(rotation, 0)
        self.assertEqual(width_shift, 0)
        self.assertEqual(height_shift, 0)
        self.assertEqual(brightness, 0)
        self.assertEqual(horizontal_flip, False)
        self.assertEqual(vertical_flip, False)

    def test_get_random_params(self):
        # Sampled values must stay inside the configured ranges.
        # NOTE(review): assertGreater(rotation, 0) assumes the sampler never
        # returns exactly 0 for a non-zero range — potentially flaky; confirm
        # the sampling distribution in get_random_params.
        rotation, \
        width_shift, height_shift, \
        brightness, \
        horizontal_flip, vertical_flip = get_random_params(seed=None,
                                                           rotation_range=90.,
                                                           width_shift_range=10.,
                                                           height_shift_range=20.,
                                                           brightness_range=0.9,
                                                           horizontal_flip=True,
                                                           vertical_flip=False)
        self.assertLess(rotation, 90)
        self.assertGreater(rotation, 0)
        self.assertLess(width_shift, 10)
        self.assertLess(height_shift, 20)
        self.assertLess(brightness, 0.9)
        self.assertGreater(brightness, -0.9)

    def test_augment_image(self):
        # Builds a 4-image batch from one fixture and applies a fixed
        # (non-random) augmentation; correctness is checked visually.
        directory = get_image_directory()
        image = tf.io.read_file(os.path.join(directory, "img_red_0.png"))
        image = tf.image.decode_image(image, 3)
        image = tf.image.resize(image, (2048, 2048))
        image = tf.cast(image, dtype=tf.dtypes.float32)
        image = tf.expand_dims(image, 0)
        image = tf.concat((image, image, image, image), 0)
        image_aug = tf_augment_image(image, random_augmentation=False,
                                     intensity=0.1,
                                     rotation=45.,
                                     width_shift=100.,
                                     height_shift=500.,
                                     brightness=0.3,
                                     horizontal_flip=False,
                                     vertical_flip=False,
                                     fill_mode='constant',
                                     cval=255,
                                     )
        # visually check: image should be rotated by 45° to the left, shifted down (upper part is white),
        # shifted a bit to the right and it should be of lighter color than before
        # NOTE(review): plt.show() blocks in headless CI — consider gating it.
        plt.subplot(1, 2, 1)
        plt.imshow(image[0]/255)
        plt.subplot(1, 2, 2)
        plt.imshow(image_aug[0]/255)
        plt.show()
|
import argparse
from io import open
import random
import time
import os.path as osp
from pathlib import Path
import numpy as np
from ruamel.yaml import YAML
from easydict import EasyDict as edict
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from dataset import TongueMocapDataset
from models import TongueFormer, save_checkpoint
from losses import L2L1Loss, HuberLoss, ShrinkageLoss
from logger.model_logger import ModelLogger
# Fix all RNG seeds (python, torch, numpy) for reproducible training runs.
random.seed(78373)
torch.manual_seed(78373)
np.random.seed(78373)
def parse_args():
    """Parse CLI arguments: machine config path, experiment config path, GPU id.

    Returns:
        argparse.Namespace with local_config_path, config_path and gpu_id.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--local_config_path', type=str,
                        help='Path to the local machine configuration')
    parser.add_argument('-c', '--config_path', type=str,
                        help='Path to the experiment configuration')
    parser.add_argument('-gid', '--gpu_id', type=int, default=0,
                        help='GPU to be used for training')
    return parser.parse_args()
def print_training_header(training):
    """Pretty-print the key training hyperparameters (epochs, batch, loss).

    Args:
        training: config section exposing num_epochs, batch_sz and loss.
    """
    print('\nTraining Model')
    print('=' * 70)
    print(f'Num Epochs: {training.num_epochs}\nbatch size: {training.batch_sz}')
    print(f'Loss: {training.loss.label} Params: {training.loss.params}')
    print('=' * 70)
    print()
def get_criterion(loss):
    """Create a loss criterion from its label; None for unknown labels.

    Args:
        loss (dict): dictionary with the loss label and params to construct the criterion

    Returns:
        nn.Module: pytorch loss calculator, or None when the label is unknown
    """
    if loss.label == 'mse':
        return nn.MSELoss()
    elif loss.label == 'huber':
        return HuberLoss(delta=loss.params.delta)
    elif loss.label == 'smooth_l1':
        return nn.SmoothL1Loss(beta=loss.params.beta)
    elif loss.label == 'l1':
        return nn.L1Loss()
    elif loss.label == 'cross_entropy':
        return nn.CrossEntropyLoss()
    elif loss.label == 'l2l1':
        return L2L1Loss(betas=loss.params.betas)
    elif loss.label == 'shrinkage':
        return ShrinkageLoss(speed=loss.params.speed, loc=loss.params.loc)
    return None
def get_optimizer(model, optim):
    """Create an Adam/AdamW optimizer from config; None for unknown labels.

    Args:
        model: model whose parameters will be optimized
        optim (dict): label plus lr / weight_decay params

    Returns:
        torch.optim.Optimizer or None
    """
    # NOTE(review): the parameter name shadows the module-level
    # ``from torch import optim`` import; the fully-qualified
    # ``torch.optim`` references below keep this working.
    if optim.label == 'adam':
        return torch.optim.Adam(model.parameters(), lr=optim.params.lr, weight_decay=optim.params.weight_decay)
    if optim.label == 'adamw':
        return torch.optim.AdamW(model.parameters(), lr=optim.params.lr, weight_decay=optim.params.weight_decay)
    return None
def get_scheduler(scheduler, optimizer):
    """Create a learning-rate scheduler from config; None for unknown labels.

    (Docstring corrected: the original was copied from get_optimizer.)

    Args:
        scheduler (dict): label plus scheduler params (e.g. gamma)
        optimizer: optimizer whose learning rate will be scheduled

    Returns:
        torch.optim.lr_scheduler scheduler, or None when the label is unknown
    """
    if scheduler.label == 'exp_lr':
        return torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=scheduler.gamma)
    return None
def train(model, model_optimizer, criterion, scheduler, dataloaders, save_dir,
          model_logger, log_start_step,
          batch_size, n_epochs, device, optim,
          output_idx, output_full=False,
          start_epoch=1, early_stop=10,
          multi_gpu=False,
          print_every=200):
    """Run the training loop, validating every 10 epochs.

    Args:
        model: network to train (possibly wrapped for multi-GPU).
        model_optimizer: optimizer stepping the model parameters.
        criterion: loss module comparing predictions against targets.
        scheduler: optional LR scheduler, stepped once per epoch.
        dataloaders: dict with 'train' and 'valid' DataLoaders.
        save_dir: directory where epoch checkpoints are written.
        model_logger: logger exposing .train / .val scalar writers.
        log_start_step: global step to resume logging from.
        batch_size: expected batch size; incomplete batches are skipped.
        n_epochs: number of epochs to run starting at start_epoch.
        device: torch device for tensors.
        optim: optimizer config (its params are saved into checkpoints).
        output_idx: target frame index supervised when output_full is False.
        output_full: supervise the full target sequence when True.
        start_epoch: first epoch number (for resuming).
        early_stop: epochs without validation improvement before stopping.
        multi_gpu: whether model is wrapped (affects params access on save).
        print_every: iterations between progress prints / log points.

    Returns:
        (train_loss, val_loss) from the last completed epoch.

    NOTE(review): val_loss is only assigned inside the every-10-epochs
    validation branch; if n_epochs < 10 the final return raises NameError.
    Early-stop bookkeeping is likewise only updated on validation epochs.
    """
    best_score = edict(value=1e9, epoch=0)
    iter_times_list = list()
    global_step = log_start_step
    last_val_loss = 1e9
    for epoch in range(start_epoch, start_epoch+n_epochs):
        print(f'Epoch {epoch}/{start_epoch+n_epochs-1}')
        print('-' * 70)
        ###------ Train ------###
        print('Training phase')
        phase = 'train'
        model.train(True)
        epoch_start_time = time.time()
        iter_start_time = time.time()
        dataloader_iter = iter(dataloaders['train'])
        running_loss = 0.0
        batch_idx = 0
        train_loss = 0.0
        for source_tensor, target_pos_tensor, _ in dataloader_iter:
            # skip the batch that is not complete
            if source_tensor.shape[0] != batch_size:
                continue
            global_step += 1
            batch_idx += 1
            # Pass to device
            source_tensor = source_tensor.to(device).float()
            output_tensor = target_pos_tensor if output_full else target_pos_tensor[:, output_idx, :]
            pos_pred = model(source_tensor)
            loss = criterion(pos_pred, output_tensor.to(device).float())
            if batch_idx % print_every == 0:
                model_logger.train.add_scalar('loss/iter', loss, global_step)
            # We can perfectly fit a batch in a single pass
            model_optimizer.zero_grad()
            loss.backward()
            model_optimizer.step()
            running_loss += loss.item()
            if batch_idx % print_every == 0:
                iter_time = time.time() - iter_start_time
                iter_times_list.append(iter_time)
                iter_start_time = time.time()
                print(f'[train] Epoch {epoch} Iter Time:{iter_time:.3f} Step:{batch_idx}/{len(dataloaders["train"]):<8} l: {running_loss/batch_idx:<10}')
        train_loss = running_loss/len(dataloaders[phase])
        epoch_time_str = time.strftime("%H:%M:%S", time.gmtime(time.time() - epoch_start_time))
        print(f'Training totals: loss: {train_loss} time: {epoch_time_str}')
        print()
        ###------ Validate every 10 epochs ------###
        if epoch % 10 == 0:
            print('Validation phase')
            phase = 'valid'
            model.eval()
            with torch.no_grad():
                running_loss = 0.0
                epoch_start_time = time.time()
                iter_start_time = time.time()
                dataloader_iter = iter(dataloaders['valid'])
                running_loss = 0.0
                batch_idx = 0
                val_loss = 0.0
                for source_tensor, target_pos_tensor, _ in dataloader_iter:
                    # skip the batch that is not complete
                    if source_tensor.shape[0] != batch_size:
                        continue
                    batch_idx += 1
                    # Pass to device
                    source_tensor = source_tensor.to(device).float()
                    output_tensor = target_pos_tensor if output_full else target_pos_tensor[:, output_idx, :]
                    pos_pred = model(source_tensor)
                    loss = criterion(pos_pred, output_tensor.to(device).float())
                    running_loss += loss.item()
                    if batch_idx % print_every == 0:
                        iter_time = time.time() - iter_start_time
                        iter_times_list.append(iter_time)
                        iter_start_time = time.time()
                        print(f'[valid] Epoch {epoch} Iter Time:{iter_time:.3f} Step:{batch_idx}/{len(dataloaders["valid"]):<8} l: {running_loss/batch_idx:<10}')
                val_loss = running_loss/len(dataloaders['valid'])
                epoch_time_str = time.strftime("%H:%M:%S", time.gmtime(time.time() - epoch_start_time))
                print(f'Validation totals: loss: {val_loss} time: {epoch_time_str}')
                print()
                model_logger.val.add_scalar('loss/iter', val_loss, global_step)
                # -- Early Stop ---
                if val_loss < best_score.value:
                    best_score.value = val_loss
                    best_score.epoch = epoch
                if (epoch - best_score.epoch) >= early_stop:
                    print(f'Early stop at epoch {epoch}, previous best: {best_score.value} @ {best_score.epoch}')
                    break
                last_val_loss = val_loss
        # TODO: HACK! add as a config parameter
        if (last_val_loss < 2.0) or (epoch % 10 == 0):
            save_checkpoint(epoch=epoch,
                            model=model,
                            model_params=model.module.params if multi_gpu else model.params,
                            optimizer=model_optimizer,
                            optimizer_params=dict(optim.params),
                            loss=train_loss,
                            global_step=global_step,
                            save_path=osp.join(save_dir, f'{epoch:02d}.pt'))
        if scheduler is not None:
            scheduler.step()
    total_train_time_str = time.strftime("%H:%M:%S", time.gmtime(sum(iter_times_list)))
    print(f'Total training time: {total_train_time_str}')
    return train_loss, val_loss
def main(local_config, config):
    """Build datasets, model, optimizer and criterion from config, then train.

    Args:
        local_config (edict): machine-specific paths (datasets/models/logs dirs).
        config (edict): experiment configuration (data, model, training, optim).

    NOTE(review): reads the module-global ``args`` (set under __main__) for
    the GPU id — confirm this module is never imported as a library.
    """
    # Training params
    start_epoch = 1
    # Data loading params
    train_dataset_path = osp.join(local_config.datasets_dir, config.data.train.path)
    valid_dataset_path = osp.join(local_config.datasets_dir, config.data.valid.path)
    # Training save dir
    model_save_dir = osp.join(local_config.models_dir, config.model.save_dir)
    Path(model_save_dir).mkdir(parents=True, exist_ok=True)
    # Log
    log_dir = osp.join(local_config.logs_dir, config.log.save_dir)
    Path(log_dir).mkdir(parents=True, exist_ok=True)
    log_start_step = 0
    model_logger = ModelLogger(log_dir, config.name)
    # Create device under which the model will be trained
    device_str = 'cpu'
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        target_gid = int(args.gpu_id)
        if target_gid >= 0 and target_gid < num_gpus:
            device_str = f'cuda:{target_gid}'
    device = torch.device(device_str)
    print(f'Training on device: {device}')
    # Create dataset
    print('Loading Training data')
    train_dataset = TongueMocapDataset(train_dataset_path,
                                       num_files=config.data.train.num_files,
                                       win_sz=config.data.train.win_sz,
                                       stride=config.data.train.win_stride,
                                       pose_only=config.data.train.pose_only)
    print(f'Training samples: {len(train_dataset)}')
    print('Loading Validation data')
    valid_dataset = TongueMocapDataset(valid_dataset_path,
                                       num_files=config.data.valid.num_files,
                                       win_sz=config.data.valid.win_sz,
                                       stride=config.data.valid.win_stride,
                                       pose_only=config.data.valid.pose_only)
    print(f'Validation samples: {len(valid_dataset)}')
    # Create dataloaders
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=config.training.batch_sz,
                                  shuffle=True,
                                  num_workers=config.data.train.num_workers,
                                  pin_memory=True)
    valid_dataloader = DataLoader(valid_dataset,
                                  batch_size=config.training.batch_sz,
                                  shuffle=False,
                                  num_workers=config.data.valid.num_workers,
                                  pin_memory=True)
    dataloaders = dict(train=train_dataloader,
                       valid=valid_dataloader)
    # Build if no checkpoint is given
    if 'checkpoint' not in config:
        print('Building new model')
        model = TongueFormer(**config.model.params)
        model.to(device)
        print('Building new optimizer')
        model_optimizer = get_optimizer(model, config.optim)
    else:
        print('Loading model checkpoint')
        checkpoint_path = osp.join(local_config.models_dir, config.checkpoint.path)
        checkpoint = torch.load(checkpoint_path)
        model = TongueFormer(**config.model.params)
        trained_dict = checkpoint['model_state_dict']
        model_dict = model.state_dict()
        diff = set(model_dict.keys()) - set(trained_dict.keys())
        # NOTE(review): 'froce' is a typo for 'force'; name kept untouched here.
        froce_reset_optim = False
        if not diff:
            model.load_state_dict(trained_dict)
        else:
            if 'deeper_fc' in config.model.params:
                if config.model.params['deeper_fc']:
                    froce_reset_optim = True
                    # let's remove head.1 since it's size is now
                    # different and not matching the checkpoint
                    for i in range(2):
                        del trained_dict['head.{}.weight'.format(i)]
                        del trained_dict['head.{}.bias'.format(i)]
            model_dict.update(trained_dict)
            model.load_state_dict(model_dict)
        model.to(device)
        reset = False
        if 'reset' in config.checkpoint:
            reset = config.checkpoint.reset
        if not (reset or froce_reset_optim):
            print('Loading optimizer checkpoint')
            model_optimizer = optim.Adam(model.parameters(), lr=checkpoint['optimizer_params']['lr'])
            model_optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            # Resume epoch numbering from the checkpoint filename stem.
            start_epoch = int(Path(checkpoint_path).stem) + 1
            log_start_step = checkpoint['global_step']
        else:
            print('RESET is TRUE - Building new optimizer')
            model_optimizer = get_optimizer(model, config.optim)
    # Criterion
    criterion = get_criterion(config.training.loss)
    criterion.to(device)
    # Scheduler
    scheduler = get_scheduler(config.scheduler, model_optimizer) if 'scheduler' in config else None
    print_training_header(config.training)
    if start_epoch > 1:
        print(f'Resuming training from epoch {start_epoch}')
    print(f'Checkpoints save dir: {model_save_dir}')
    print(f'Log save dir: {log_dir}')
    print()
    train(model=model,
          model_optimizer=model_optimizer,
          criterion=criterion,
          scheduler=scheduler,
          dataloaders=dataloaders,
          save_dir=model_save_dir,
          model_logger=model_logger,
          log_start_step=log_start_step,
          batch_size=config.training.batch_sz,
          n_epochs=config.training.num_epochs,
          device=device,
          optim=config.optim,
          output_idx=config.training.output_idx,
          output_full=config.training.output_full,  # TODO: add to all the tongueformer training config files
          start_epoch=start_epoch,
          early_stop=config.training.early_stop,
          multi_gpu=config.model.multi_gpu,
          print_every=200)
if __name__ == '__main__':
    # Parse input arguments
    args = parse_args()
    # Load configurations
    yaml = YAML(typ='safe')
    # -- machine configuration
    local_config = edict(yaml.load(open(args.local_config_path)))
    # -- training configuration
    config = edict(yaml.load(open(args.config_path)))
    # TODO: fix None load
    # Work around YAML loading: force these model params back to None.
    config.model.params.qk_scale = None
    config.model.params.norm_layer = None
    main(local_config, config)
# -*- coding: utf-8 -*-
# Public API of this module: the docstring-extension decorator only.
__all__ = [
    'extend_docs'
]
# cleandoc is needed by both variants; importing it outside the try keeps it
# reliably bound regardless of whether pyment is available.
from inspect import cleandoc

try:
    # Optional dependency: pyment enables docstring-style translation.
    from pyment.docstring import DocString

    def extend_docs(orig_func, translate=False):
        """Decorator factory: extend a function's docstring with *orig_func*'s.

        :param orig_func: function to get docstring from
        :param translate: when True, re-render the source docstring as reST
        """
        def wrapped(func):
            """
            Cleans doc from both functions and concatenates using 2 newlines in between.
            :param func: function whose docstring will be extended
            """
            orig_doc = orig_func.__doc__ or ""
            if translate:
                orig_doc = _parse_docstring(orig_doc)
            func.__doc__ = cleandoc(func.__doc__) + '\n\n' + cleandoc(orig_doc)
            return func
        return wrapped

    def _parse_docstring(docstring):
        """Re-render *docstring* in reST style via pyment."""
        docstring = DocString('', docs_raw=docstring, output_style='reST')
        docstring.parse_docs()
        return docstring.get_raw_docs().replace("'''", "")

except ImportError:
    # Bug fix: narrowed from a bare ``except:`` so that genuine errors inside
    # the pyment-backed definitions are no longer silently swallowed; only a
    # missing pyment triggers the fallback.
    def extend_docs(orig_func, translate=False):
        """Decorator factory: extend a function's docstring with *orig_func*'s.

        Fallback used when pyment is unavailable; *translate* only prints a hint.

        :param orig_func: function to get docstring from
        :param translate: ignored (pyment not installed)
        """
        def wrapped(func):
            """
            Cleans doc from both functions and concatenates using 2 newlines in between.
            :param func: function whose docstring will be extended
            """
            orig_doc = orig_func.__doc__ or ""
            if translate:
                print("Install pyment to translate the docs")
            func.__doc__ = cleandoc(func.__doc__) + '\n\n' + cleandoc(orig_doc)
            return func
        return wrapped
|
import click
from .zotero import zotero
from .markdown import markdown
@click.group()
def sync():
    """
    Collection of Sync commands for various services
    """
    pass

# Register the service-specific subcommands on the group.
sync.add_command(zotero)
sync.add_command(markdown)
|
# Generated by Django 3.2.7 on 2021-09-26 12:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the optional `description` text field to Service."""

    # Must apply after the 0002 title alteration on the same app.
    dependencies = [
        ('servicesApp', '0002_alter_service_service_title'),
    ]

    operations = [
        migrations.AddField(
            model_name='service',
            name='description',
            # blank=True: the field may be left empty in forms/validation.
            field=models.TextField(blank=True),
        ),
    ]
|
# MODULE: Typerig / Proxy / Font (Objects)
# -----------------------------------------------------------
# (C) Vassil Kateliev, 2017-2020 (http://www.kateliev.com)
# (C) Karandash Type Foundry (http://www.karandash.eu)
#------------------------------------------------------------
# www.typerig.com
# No warranties. By using this you agree
# that you use it at your own risk!
# - Dependencies -------------------------
from __future__ import print_function
import json
import json.scanner
import FL as legacy
import fontlab as fl6
import fontgate as fgt
import PythonQt as pqt
from typerig.core.objects.collection import treeDict
from typerig.core.objects.collection import extBiDict
from typerig.proxy.fl.objects.glyph import pGlyph, eGlyph
# - Init ---------------------------------
__version__ = '0.28.2'
# - Classes -------------------------------
class pFontMetrics(object):
    '''
    An Abstract Font Metrics getter/setter of a flPackage.

    Constructor:
        pFontMetrics() - default represents the current glyph and current font
        pFontMetrics(flPackage)
    '''
    def __init__(self, font):
        # font: flPackage (or any object exposing the *_value metric fields
        # plus setMaster()/upm used below).
        self.fl = font

    # - Getters -------------------------------------------------------
    # Every getter optionally switches the active master first (when a
    # layer/master name is given), then reads the packaged metric value.
    def getAscender(self, layer=None):
        if layer is not None:
            self.fl.setMaster(layer)
        return self.fl.ascender_value

    def getCapsHeight(self, layer=None):
        if layer is not None:
            self.fl.setMaster(layer)
        return self.fl.capsHeight_value

    def getDescender(self, layer=None):
        if layer is not None:
            self.fl.setMaster(layer)
        return self.fl.descender_value

    def getLineGap(self, layer=None):
        if layer is not None:
            self.fl.setMaster(layer)
        return self.fl.lineGap_value

    def getUpm(self, layer=None):
        # UPM is font-wide; the layer argument exists only for API symmetry.
        return self.fl.upm

    def getXHeight(self, layer=None):
        if layer is not None:
            self.fl.setMaster(layer)
        return self.fl.xHeight_value

    def getItalicAngle(self, layer=None):
        if layer is not None:
            self.fl.setMaster(layer)
        return self.fl.italicAngle_value

    def getCaretOffset(self, layer=None):
        if layer is not None:
            self.fl.setMaster(layer)
        return self.fl.caretOffset_value

    '''
    Other flPackage metric fields not wrapped yet:
    cornerTension_value
    curveTension_value
    inktrapLen_value
    measurement_value
    underlinePosition_value
    underlineThickness_value
    '''

    # - Setters -------------------------------------------------------
    def setAscender(self, value, layer=None):
        if layer is not None:
            self.fl.setMaster(layer)
        self.fl.ascender_value = value

    def setCapsHeight(self, value, layer=None):
        if layer is not None:
            self.fl.setMaster(layer)
        self.fl.capsHeight_value = value

    def setDescender(self, value, layer=None):
        if layer is not None:
            self.fl.setMaster(layer)
        self.fl.descender_value = value

    def setLineGap(self, value, layer=None):
        if layer is not None:
            self.fl.setMaster(layer)
        self.fl.lineGap_value = value

    def setUpm(self, value, scale=False):
        # NOTE(review): signature differs from the other setters (no layer);
        # see the caveat in fromDict() below.
        self.fl.setUpm(value, scale)

    def setXHeight(self, value, layer=None):
        if layer is not None:
            self.fl.setMaster(layer)
        self.fl.xHeight_value = value

    def setItalicAngle(self, value, layer=None):
        if layer is not None:
            self.fl.setMaster(layer)
        self.fl.italicAngle_value = value

    def setCaretOffset(self, value, layer=None):
        if layer is not None:
            self.fl.setMaster(layer)
        self.fl.caretOffset_value = value

    # - Export & Import -----------------------------------------------
    def asDict(self, layer=None):
        '''Return all metrics as {MetricName: value} for the given layer.'''
        # Collect the get* methods by introspection. startswith('get') is
        # stricter than the former `'get' in func` substring test.
        getter_names = [func for func in dir(self) if callable(getattr(self, func)) and func.startswith('get')]
        return {name[3:]: getattr(self, name)(layer) for name in getter_names}

    def fromDict(self, metricDict, layer=None):
        '''Apply {MetricName: value} pairs via the matching set* methods.

        Fixed: the former eval()-based dispatch interpolated the layer as a
        quoted string, so layer=None was passed as the *string* 'None'
        (wrongly triggering setMaster('None')); getattr dispatch passes the
        real object. Also iteritems() (Python-2-only) replaced with items().
        '''
        # NOTE(review): setUpm takes (value, scale), not (value, layer) — an
        # 'Upm' entry combined with a non-None layer would be misinterpreted.
        for name, value in metricDict.items():
            getattr(self, 'set' + name)(value, layer)
class pMaster(treeDict):
    '''A single font master: a treeDict of per-axis data plus a master name.'''

    def __init__(self, *args, **kwargs):
        # Positional arguments feed the underlying treeDict; the master name
        # travels separately as an optional keyword.
        super(pMaster, self).__init__(*args)
        self.name = kwargs.get('name', None)

    def __repr__(self):
        axes_text = '; '.join(self.keys()).replace('name', '')
        return '<{} name={}; axes={}>'.format(self.__class__.__name__, self.name, axes_text)
class pMasters(object):
    '''Master-related helpers for a parent pFont: thin aliases over the
    wrapped flPackage plus design-space location utilities.'''
    # -- Aliasing some master related commands in common group
    def __init__(self, parent):
        self.parent = parent
        self.add = self.parent.fl.addMaster
        self.clear = self.parent.fl.clearMasters
        self.container = self.parent.fl.mastersContainer
        self.count = self.parent.fl.mastersCount
        self.default = self.parent.fl.defaultMaster
        self.has = self.parent.fl.hasMaster
        self.isActive = self.parent.fl.can_interpolate
        self.location = self.parent.fl.location
        self.names = self.parent.fl.masters
        self.remove = self.parent.fl.removeMaster
        self.rename = self.parent.fl.renameMaster
        self.setActive = self.parent.fl.set_interpolate
        self.setLocation = self.parent.fl.setLocation
        self.setMaster = self.parent.fl.setMaster

    def locate(self, master_name, axes_list=None):
        '''Return a pMaster mapping each axis tag to the master's
        (weight, width) location in the design space.'''
        axes_list = axes_list if axes_list is not None else self.parent.pDesignSpace.axes_list
        master_location = self.location(master_name)
        location_list = []
        for axis in axes_list:
            location_list.append((axis.tag.lower(), (axis.valueWeight(master_location, 0.), axis.valueWidth(master_location, 0.))))
        return pMaster(location_list, name=master_name)

    def locateAxis(self, master_name, axis_tag, width=False):
        '''Return (axis, sorted neighbors): masters sharing this master's
        width (width=False) or weight (width=True) along axis_tag.
        Returns None when axis_tag is not a known axis.'''
        axes_dict = self.parent.pDesignSpace.axes_dict
        # Fixed: dict.has_key() is Python-2-only; membership test works on 2 and 3.
        if axis_tag not in axes_dict:
            return None
        selected_axis = axes_dict[axis_tag]
        master_location = self.location(master_name)
        master_weight = selected_axis.valueWeight(master_location, 0.)
        master_width = selected_axis.valueWidth(master_location, 0.)
        master_neighbors = [pMaster([(selected_axis.tag.lower(), (master_weight, master_width))], name=master_name)]
        for name in self.names:
            if name != master_name:
                temp_location = self.location(name)
                temp_weight = selected_axis.valueWeight(temp_location, 0.)
                temp_width = selected_axis.valueWidth(temp_location, 0.)
                # Tuple index selects the comparison: [0] same width, [1] same weight
                if (temp_width == master_width, temp_weight == master_weight)[width]:
                    master_neighbors.append(pMaster([(selected_axis.tag.lower(), (temp_weight, temp_width))], name=name))
        return selected_axis, sorted(master_neighbors, key=lambda m: m[axis_tag])

    def groupByWidth(self, double=0.):
        '''Group master names by width value, per axis.'''
        master_dict = {}
        axes_dict = {}
        # NOTE(review): master_dict is never reset between axes, so groups
        # accumulate across axes; set() also discards the ordering produced by
        # sorted() just before. Confirm both are intended.
        for axis_name, axis in self.parent.pDesignSpace.axes_dict.items():
            for name in self.names:
                temp_location = self.location(name)
                temp_weight = axis.valueWeight(temp_location, double)
                temp_width = axis.valueWidth(temp_location, double)
                #master_storage.append((name, temp_weight, temp_width, temp_location))
                master_dict.setdefault(temp_width, []).append((name, temp_weight))
            axes_dict[axis_name] = {key:set(sorted(value, key=lambda i:i[1])) for key, value in master_dict.items()}
        return axes_dict

    @property
    def masters(self):
        '''All masters located in the design space (list[pMaster]).'''
        return [self.locate(master_name) for master_name in self.names]

    def __repr__(self):
        return '<{} masters={}>'.format(self.__class__.__name__, '; '.join(self.names))
class pDesignSpace(object):
    '''Thin design-space wrapper: aliases axis commands of the parent font package.'''

    def __init__(self, parent):
        self.parent = parent
        # Straight aliases onto the wrapped flPackage axis API
        self.add = parent.fl.addAxis
        self.prepare = parent.fl.prepareAxes

    def __repr__(self):
        axis_names = '; '.join(axis.name for axis in self.axes_list)
        return '<{} axes={}>'.format(self.__class__.__name__, axis_names)

    @property
    def axes(self):
        '''Axes as a treeDict keyed by axis tag.'''
        return treeDict([(axis.tag, axis) for axis in self.axes_list])

    @property
    def axes_list(self):
        '''Raw axis list as reported by the font package.'''
        return self.parent.fl.axes

    @property
    def axes_dict(self):
        '''Plain dict mapping axis tag -> axis object.'''
        return {axis.tag: axis for axis in self.parent.fl.axes}
class pFont(object):
    '''
    A Proxy Font representation of FontLab fgFont and flPackage.

    Constructor:
        pFont(None) : Default represents the current glyph and current font
        pFont(fgFont) : Creates a pFont object from FontGate fgFont object
        pFont(file_path) : Loads an existing font from file_path (str) and creates a pFont object

    NOTE(review): several methods use Python-2-only constructs (basestring,
    unichr, dict.iteritems, dict.has_key) — this class will not run unmodified
    under Python 3.
    '''
    def __init__(self, font=None):
        if font is not None:
            if isinstance(font, fgt.fgFont):
                # Wrap the given FontGate font object
                self.fg = font
                self.fl = fl6.flPackage(font)
            elif isinstance(font, basestring):
                # Treat the argument as a file path and load that font
                fl6.flItems.requestLoadingFont(font)
                self.fg = fl6.CurrentFont()
                self.fl = fl6.flPackage(fl6.CurrentFont())
        else:
            # Default: proxy the font currently open in the editor
            self.fg = fl6.CurrentFont()
            self.fl = fl6.flPackage(fl6.CurrentFont())

        # - Special
        self.__altMarks = {'liga':'_', 'alt':'.', 'hide':'__'}
        self.__diactiricalMarks = ['grave', 'dieresis', 'macron', 'acute', 'cedilla', 'uni02BC', 'circumflex', 'caron', 'breve', 'dotaccent', 'ring', 'ogonek', 'tilde', 'hungarumlaut', 'caroncomma', 'commaaccent', 'cyrbreve'] # 'dotlessi', 'dotlessj'
        self.__specialGlyphs = ['.notdef', 'CR', 'NULL', 'space', '.NOTDEF']
        self.__kern_group_type = {'L':'KernLeft', 'R':'KernRight', 'B': 'KernBothSide'}
        self.__kern_pair_mode = ('glyphMode', 'groupMode')

        # -- Design space related
        self.pMastersContainer = pMasters(self)
        self.pDesignSpace = pDesignSpace(self)
        # Short aliases for the two containers above
        self.pMasters = self.pMastersContainer
        self.pSpace = self.pDesignSpace

    def __repr__(self):
        return '<{} name={} glyphs={} path={}>'.format(self.__class__.__name__, self.fg.info.familyName, len(self.fg), self.fg.path)

    # - Properties ----------------------------------------------
    # -- Basics -------------------------------------------------
    @property
    def italic_angle(self):
        return self.getItalicAngle()

    @property
    def info(self):
        return self.fg.info

    @property
    def familyName(self):
        return self.fl.tfn

    @property
    def name(self):
        return self.familyName

    @property
    def OTfullName(self):
        return self.info.openTypeNameCompatibleFullName

    @property
    def PSfullName(self):
        return self.info.postscriptFullName

    @property
    def path(self):
        return self.fg.path

    @property
    def ps_stems(self):
        # PostScript stems: stems(0, ...)
        return self.fl.stems(0, True)

    @property
    def tt_stems(self):
        # TrueType stems: stems(1, ...)
        return self.fl.stems(1, True)

    # Functions ---------------------------------------------------
    # - Font Basics -----------------------------------------------
    def getSelectedIndices(self):
        '''Return the glyph indices currently selected in the editor.'''
        # WARN: Legacy syntax used, as of current 6722 build there is no way to get the selected glyphs in editor
        return [index for index in range(len(legacy.fl.font)) if legacy.fl.Selected(index)]

    def setSelectedIndices(self, indList):
        '''Select the glyphs at the given indices in the editor.'''
        # WARN: Legacy syntax used, as of current 6722 build there is no way to get the selected glyphs in editor
        for index in indList:
            legacy.fl.Select(index)

    def selectGlyphs(self, glyphNameList):
        '''Select the named glyphs (names missing from the font are skipped).'''
        for glyphName in glyphNameList:
            if self.fg.has_key(glyphName):
                legacy.fl.Select(self.fg[glyphName].index)

    def unselectAll(self):
        '''Clear the editor glyph selection.'''
        legacy.fl.Unselect()

    def selected_pGlyphs(self):
        '''Return TypeRig proxy glyph object for each selected glyph'''
        selection = self.getSelectedIndices()
        return self.pGlyphs(self.selectedGlyphs()) if len(selection) else []

    def selectedGlyphs(self, extend=None):
        '''Return TypeRig proxy glyph object for each selected glyph'''
        selection = self.getSelectedIndices()
        return self.glyphs(selection, extend) if len(selection) else []

    def glyph(self, glyph, extend=None):
        '''Return TypeRig proxy glyph object (pGlyph) by index (int) or name (str).'''
        if isinstance(glyph, int) or isinstance(glyph, basestring):
            return pGlyph(self.fg, self.fg[glyph]) if extend is None else extend(self.fg, self.fg[glyph])
        else:
            # Assumed to already be a glyph object
            return pGlyph(self.fg, glyph) if extend is None else extend(self.fg, self.fg[glyph])

    def symbol(self, gID):
        '''Return fgSymbol by glyph index (int)'''
        # NOTE(review): this method is shadowed by the later
        # symbols(self, namesOnly=...) — see the Charset section below.
        return fl6.fgSymbol(gID, self.fg)

    def glyphs(self, indexList=[], extend=None):
        '''Return list of FontGate glyph objects (list[fgGlyph]).'''
        # NOTE: mutable default argument; harmless here as indexList is never mutated.
        if extend is None:
            return self.fg.glyphs if not len(indexList) else [self.fg.glyphs[index] for index in indexList]
        else:
            if not len(indexList):
                return [extend(glyph, self.fg) for glyph in self.fg.glyphs]
            else:
                return [extend(glyph, self.fg) for glyph in [self.fg.glyphs[index] for index in indexList]]

    def symbols(self):
        '''Return list of FontGate symbol objects (list[fgSymbol]).'''
        # NOTE(review): this definition is SHADOWED by the later
        # symbols(self, namesOnly=False) in the Charset section — in the final
        # class this version is unreachable. Confirm which one is intended.
        return [self.symbol(gID) for gID in range(len(self.fg.glyphs))]

    def pGlyphs(self, fgGlyphList=[]):
        '''Return list of TypeRig proxy Glyph objects glyph objects (list[pGlyph]).'''
        return [self.glyph(glyph) for glyph in self.fg] if not len(fgGlyphList) else [self.glyph(glyph) for glyph in fgGlyphList]

    def findShape(self, shapeName, master=None, deep=True):
        '''Search for element (flShape) in font and return it'''
        # Returns None implicitly when no glyph contains the shape.
        for glyph in self.pGlyphs():
            if glyph.layer(master) is not None:
                foundShape = glyph.findShape(shapeName, master, deep=deep)
                if foundShape is not None:
                    return foundShape

    def hasGlyph(self, glyphName):
        '''True if the font contains a glyph with the given name.'''
        return self.fg.has_key(glyphName)

    # - Font metrics -----------------------------------------------
    def getItalicAngle(self):
        return self.fl.italicAngle_value

    def fontMetricsInfo(self, layer):
        '''Returns Font(layer) metrics no matter the reference.
        Args:
            layer (int or str): Layer index or name. If None returns ActiveLayer
        Returns:
            FontMetrics (object)
        '''
        if isinstance(layer, int):
            return fl6.FontMetrics(self.fl, self.fl.masters[layer])
        elif isinstance(layer, basestring):
            return fl6.FontMetrics(self.fl, layer)

    def fontMetrics(self):
        '''Returns pFontMetrics Object with getter/setter functionality'''
        return pFontMetrics(self.fl)

    def updateObject(self, flObject, undoMessage='TypeRig', verbose=True):
        '''Updates a flObject sends notification to the editor as well as undo/history item.
        Args:
            flObject (flGlyph, flLayer, flShape, flNode, flContour): Object to be update and set undo state
            undoMessage (string): Message to be added in undo/history list.
        '''
        fl6.flItems.notifyChangesApplied(undoMessage, flObject, True)
        fl6.flItems.notifyPackageContentUpdated(self.fl.fgPackage.id)
        if verbose: print('DONE:\t{}'.format(undoMessage))

    def update(self):
        '''Notify the editor that this font package changed (no undo message).'''
        self.updateObject(self.fl, verbose=False)

    # - Hinting --------------------------------------------------------
    def setStem(self, stem_value, stem_name='', stem_is_horizontal=False, stem_type_TT=False):
        '''Append a new stem (PS or TT; horizontal or vertical) and return it.'''
        new_stem = fl6.flStem(stem_value, stem_name)
        if stem_type_TT:
            if stem_is_horizontal:
                self.fl.tt_stemsH = self.fl.tt_stemsH + [new_stem]
            else:
                self.fl.tt_stemsV = self.fl.tt_stemsV + [new_stem]
        else:
            if stem_is_horizontal:
                self.fl.ps_stemsH = self.fl.ps_stemsH + [new_stem]
            else:
                self.fl.ps_stemsV = self.fl.ps_stemsV + [new_stem]
        return new_stem

    def resetStems(self, stems_horizontal=False, type_TT=False):
        '''Clear the selected stem list (PS or TT; horizontal or vertical).'''
        if type_TT:
            if stems_horizontal:
                self.fl.tt_stemsH = []
            else:
                self.fl.tt_stemsV = []
        else:
            if stems_horizontal:
                self.fl.ps_stemsH = []
            else:
                self.fl.ps_stemsV = []

    # - Axes and MM ----------------------------------------------------
    def axes(self):
        return self.fl.axes

    def masters(self):
        return self.fl.masters

    def hasMaster(self, layerName):
        return self.fl.hasMaster(layerName)

    def instances(self):
        return self.fl.instances

    # - Guides & Hinting Basics ----------------------------------------
    def guidelines(self, hostInf=False, fontgate=False):
        '''Return font guidelines
        Args:
            hostInf (bool): If True Return flHostInfo guidelines host objects
            fontgate (bool): If True return FontGate font guideline objects
        Returns
            list[flGuideline] or list[fgGuideline]
        '''
        if not fontgate:
            return self.fl.guidelines if not hostInf else self.fl.guidelinesHost.guidelines
        else:
            return self.fg.guides

    def addGuideline(self, flGuide):
        '''Adds a guideline (flGuide) to font guidelines'''
        self.fl.guidelinesHost.appendGuideline(flGuide)
        self.fl.guidelinesHost.guidelinesChanged()

    def delGuideline(self, flGuide):
        '''Removes a guideline (flGuide) from font guidelines'''
        self.fl.guidelinesHost.removeGuideline(flGuide)
        self.fl.guidelinesHost.guidelinesChanged()

    def clearGuidelines(self):
        '''Removes all font guidelines'''
        self.fl.guidelinesHost.clearGuidelines()
        self.fl.guidelinesHost.guidelinesChanged()

    def getZones(self, layer=None, HintingDataType=0):
        '''Returns font alignment (blue) zones (list[flGuideline]). Note: HintingDataType = {'HintingPS': 0, 'HintingTT': 1}'''
        # Switch master temporarily, query, then restore the previous master.
        backMasterName = self.fl.master
        if layer is not None: self.fl.setMaster(layer)
        zoneQuery = (self.fl.zones(HintingDataType, True), self.fl.zones(HintingDataType, False)) # tuple(top, bottom) zones
        if self.fl.master != backMasterName: self.fl.setMaster(backMasterName)
        return zoneQuery

    def setZones(self, fontZones, layer=None):
        '''Register the given (top, bottom) zones on the given layer/master.'''
        backMasterName = self.fl.master
        if layer is not None: self.fl.setMaster(layer)
        self.fl.convertZonesToGuidelines(*fontZones) # Dirty register zones
        if self.fl.master != backMasterName: self.fl.setMaster(backMasterName)
        self.update()

    def zonesToTuples(self, layer=None, HintingDataType=0):
        '''Export zones as (position, width, name) tuples; bottom zones get negative width.'''
        fontZones = self.getZones(layer, HintingDataType)
        return [(zone.position, zone.width, zone.name) for zone in fontZones[0]] + [(zone.position, -zone.width, zone.name) for zone in fontZones[1]]

    def zonesFromTuples(self, zoneTupleList, layer=None, forceNames=False):
        '''Rebuild and register zones from (position, width[, name]) tuples.'''
        fontZones = ([], [])
        for zoneData in zoneTupleList:
            isTop = zoneData[1] >= 0  # sign of the width encodes top/bottom
            newZone = fl6.flZone(zoneData[0], abs(zoneData[1]))
            if forceNames and len(zoneData) > 2: newZone.name = zoneData[2]
            newZone.guaranteeName(isTop)
            fontZones[not isTop].append(newZone)
        if not len(fontZones[1]):
            # Keep the bottom group non-empty
            fontZones[1].append(fl6.flZone())
        self.setZones(fontZones, layer)

    def addZone(self, position, width, layer=None):
        ''' A very dirty way to add a new Zone to Font'''
        isTop = width >= 0
        backMasterName = self.fl.master
        fontZones = self.getZones(layer)
        # A placeholder "kill" zone goes to the opposite group
        newZone, killZone = fl6.flZone(position, abs(width)), fl6.flZone()
        newZone.guaranteeName(isTop)
        fontZones[not isTop].append([newZone, killZone][not isTop])
        fontZones[isTop].append([newZone, killZone][isTop])
        if layer is not None: self.fl.setMaster(layer)
        self.fl.convertZonesToGuidelines(*fontZones)
        if self.fl.master != backMasterName: self.fl.setMaster(backMasterName)
        self.update()

    def hinting(self):
        '''Returns fonts hinting'''
        return self.fg.hinting

    # - Charset -----------------------------------------------
    # -- Return Names
    def getGlyphNames(self):
        '''Return all glyph names in the font.'''
        return [glyph.name for glyph in self.glyphs()]

    def getGlyphNameDict(self):
        '''Return {script/case: [glyph names]} for several common Unicode ranges.'''
        # -- Init
        nameDict = {}
        # --- Controls and basic latin: 0000 - 0080
        nameDict['Latin_Upper'] = [self.fl.findUnicode(uni).name for uni in range(0x0000, 0x0080) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).isupper()]
        nameDict['Latin_Lower'] = [self.fl.findUnicode(uni).name for uni in range(0x0000, 0x0080) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).islower()]
        # --- Latin 1 Supplement: 0080 - 00FF
        nameDict['Latin1_Upper'] = [self.fl.findUnicode(uni).name for uni in range(0x0080, 0x00FF) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).isupper()]
        nameDict['Latin1_Lower'] = [self.fl.findUnicode(uni).name for uni in range(0x0080, 0x00FF) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).islower()]
        # --- Latin A: unicode range 0100 - 017F
        nameDict['LatinA_Upper'] = [self.fl.findUnicode(uni).name for uni in range(0x0100, 0x017F) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).isupper()]
        nameDict['LatinA_Lower'] = [self.fl.findUnicode(uni).name for uni in range(0x0100, 0x017F) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).islower()]
        # --- Latin B: unicode range 0180 - 024F
        nameDict['LatinB_Upper'] = [self.fl.findUnicode(uni).name for uni in range(0x0180, 0x024F) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).isupper()]
        nameDict['LatinB_Lower'] = [self.fl.findUnicode(uni).name for uni in range(0x0180, 0x024F) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).islower()]
        # --- Cyrillic: unicode range 0400 - 04FF
        nameDict['Cyrillic_Upper'] = [self.fl.findUnicode(uni).name for uni in range(0x0400, 0x04FF) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).isupper()]
        nameDict['Cyrillic_Lower'] = [self.fl.findUnicode(uni).name for uni in range(0x0400, 0x04FF) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).islower()]
        return nameDict

    def getGlyphUnicodeDict(self, encoding='utf-8'):
        '''Return {script/case: [encoded characters]} for the same ranges as getGlyphNameDict.'''
        # -- Init
        nameDict = {}
        # --- Controls and basic latin: 0000 - 0080
        nameDict['Latin_Upper'] = [unichr(uni).encode(encoding) for uni in range(0x0000, 0x0080) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).isupper()]
        nameDict['Latin_Lower'] = [unichr(uni).encode(encoding) for uni in range(0x0000, 0x0080) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).islower()]
        # --- Latin 1 Supplement: 0080 - 00FF
        nameDict['Latin1_Upper'] = [unichr(uni).encode(encoding) for uni in range(0x0080, 0x00FF) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).isupper()]
        nameDict['Latin1_Lower'] = [unichr(uni).encode(encoding) for uni in range(0x0080, 0x00FF) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).islower()]
        # --- Latin A: unicode range 0100 - 017F
        nameDict['LatinA_Upper'] = [unichr(uni).encode(encoding) for uni in range(0x0100, 0x017F) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).isupper()]
        nameDict['LatinA_Lower'] = [unichr(uni).encode(encoding) for uni in range(0x0100, 0x017F) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).islower()]
        # --- Latin B: unicode range 0180 - 024F
        nameDict['LatinB_Upper'] = [unichr(uni).encode(encoding) for uni in range(0x0180, 0x024F) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).isupper()]
        nameDict['LatinB_Lower'] = [unichr(uni).encode(encoding) for uni in range(0x0180, 0x024F) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).islower()]
        # --- Cyrillic: unicode range 0400 - 04FF
        nameDict['Cyrillic_Upper'] = [unichr(uni).encode(encoding) for uni in range(0x0400, 0x04FF) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).isupper()]
        nameDict['Cyrillic_Lower'] = [unichr(uni).encode(encoding) for uni in range(0x0400, 0x04FF) if isinstance(self.fl.findUnicode(uni), fl6.flGlyph) and unichr(uni).islower()]
        return nameDict

    # -- Return Glyphs
    def uppercase(self, namesOnly=False):
        '''Returns all uppercase characters (list[fgGlyph])'''
        return [glyph if not namesOnly else glyph.name for glyph in self.fg if glyph.unicode is not None and glyph.unicode < 10000 and unichr(glyph.unicode).isupper()] # Skip Private ranges - glyph.unicode < 10000

    def lowercase(self, namesOnly=False):
        '''Returns all lowercase characters (list[fgGlyph])'''
        return [glyph if not namesOnly else glyph.name for glyph in self.fg if glyph.unicode is not None and glyph.unicode < 10000 and unichr(glyph.unicode).islower()]

    def figures(self, namesOnly=False):
        '''Returns all figure/digit characters (list[fgGlyph])'''
        return [glyph if not namesOnly else glyph.name for glyph in self.fg if glyph.unicode is not None and glyph.unicode < 10000 and unichr(glyph.unicode).isdigit()]

    def symbols(self, namesOnly=False):
        '''Returns all symbol (non-digit, non-letter) characters (list[fgGlyph])'''
        # NOTE(review): this SHADOWS the earlier symbols(self) that returned
        # fgSymbol objects — the earlier definition is unreachable. Confirm.
        return [glyph if not namesOnly else glyph.name for glyph in self.fg if glyph.unicode is not None and glyph.unicode < 10000 and not unichr(glyph.unicode).isdigit() and not unichr(glyph.unicode).isalpha()]

    def ligatures(self, namesOnly=False):
        '''Returns all ligature characters (list[fgGlyph])'''
        return [glyph if not namesOnly else glyph.name for glyph in self.fg if self.__altMarks['liga'] in glyph.name and not self.__altMarks['hide'] in glyph.name and glyph.name not in self.__specialGlyphs]

    def alternates(self, namesOnly=False):
        '''Returns all alternate characters (list[fgGlyph])'''
        return [glyph if not namesOnly else glyph.name for glyph in self.fg if self.__altMarks['alt'] in glyph.name and not self.__altMarks['hide'] in glyph.name and glyph.name not in self.__specialGlyphs]

    # - Glyph generation ------------------------------------------
    def addGlyph(self, glyph):
        '''Adds a Glyph (fgGlyph or flGlyph) to font'''
        if isinstance(glyph, fgt.fgGlyph):
            glyph = fl6.flGlyph(glyph)
        self.fl.addGlyph(glyph)

    def addGlyphList(self, glyphList):
        '''Adds a List of Glyphs [fgGlyph or flGlyph] to font'''
        for glyph in glyphList:
            self.addGlyph(glyph)

    def newGlyph(self, glyph_name, layers=[], unicode_int=None):
        '''Creates new glyph and adds it to the font
        Args:
            glyph_name (str): New glyph name
            layers (list(str) or list(flLayer)): List of layers to be added to the new glyph
            unicode_int (int): Unicode int of the new glyph
        Returns:
            pGlyph
        '''
        # - Build
        base_glyph = fl6.flGlyph()
        base_glyph.name = glyph_name
        self.addGlyph(base_glyph)
        # - Get the newly added glyph (all sane methods exhausted)
        new_glyph = self.glyph(glyph_name)
        # - Set Unicode
        if unicode_int is not None: new_glyph.fg.setUnicode(unicode_int)
        # - Add layers
        if len(layers):
            for layer in layers:
                if isinstance(layer, basestring):
                    new_layer = fl6.flLayer()
                    new_layer.name = layer
                    new_glyph.addLayer(new_layer)
                elif isinstance(layer, fl6.flLayer):
                    new_glyph.addLayer(layer)
        # - Add to font
        return new_glyph

    def newGlyphFromRecipe(self, glyph_name, recipe, layers=[], unicode_int=None, rtl=False):
        ''' Generate new glyph (glyph_name) using String Recipe (recipe)
        Args:
            glyph_name (str): New glyph name
            recipe (str): Glyph composition recipe using OLD Fontlab syntax (ex. A+acute=Aacute)
            layers (list(str)): List of layer names to be added
            unicode_int (int): Unicode int of the new glyph
            rtl (bool): Right to left
        Returns:
            pGlyph
        '''
        # - Prepare
        advanceWidth = 0 #!!! Figure it out later
        prepared_layers = []
        for layer_name in layers:
            layer_fontMetrics = fl6.FontMetrics(self.fl, layer_name)
            new_layer = fl6.flLayer(layer_name)
            gen_component = self.fl.generateGlyph(recipe, layer_name, layer_fontMetrics, rtl)
            new_layer.setGlyphComponents(gen_component, advanceWidth, self.fl, True)
            prepared_layers.append(new_layer)
        new_glyph = self.newGlyph(glyph_name, prepared_layers, unicode_int)
        return new_glyph

    def duplicateGlyph(self, src_name, dst_name, dst_unicode=None, options={'out': True, 'gui': True, 'anc': True, 'lsb': True, 'adv': True, 'rsb': True, 'lnk': True, 'ref': True, 'flg': True, 'tag': True}):
        '''Duplicates a glyph and adds it to the font
        Args:
            src_name, dst_name (str): Source and destination names
            dst_unicode (int): Unicode int of the new glyph
            references (bool): Keep existing element references (True) or decompose (False)
        Returns:
            pGlyph
        '''
        # - Init
        src_glyph = self.glyph(src_name)
        # - Copy Layer data
        prepared_layers = []
        for layer in src_glyph.layers():
            # Copy via a temporary '.duplicate' suffix, then strip it
            new_layer = src_glyph.copyLayer(layer.name, layer.name + '.duplicate', options, False, False, True)
            new_layer.name = new_layer.name.replace('.duplicate', '')
            prepared_layers.append(new_layer)
        new_glyph = self.newGlyph(dst_name, prepared_layers, dst_unicode)
        # - Copy Glyph specific stuff
        if options['tag']: new_glyph.tags = src_glyph.tags # Copy tags
        if options['flg']: new_glyph.mark = src_glyph.mark # Copy glyph flag/mark
        return new_glyph

    # - OpenType and features -------------------------------------
    def getFeatures(self):
        return self.fg.features

    def clearFeatures(self):
        return self.fg.features.clear()

    def getFeatureTags(self):
        return self.fg.features.keys()

    def getFeaPrefix(self):
        return self.fg.features.get_prefix()

    def setFeaPrefix(self, feaString):
        # NOTE(review): calls get_prefix(feaString) — for a setter this looks
        # like it should be a set_prefix call; confirm against the FontGate API.
        return self.fg.features.get_prefix(feaString)

    def hasFeature(self, tag):
        return self.fg.features.has_key(tag)

    def getFeature(self, tag):
        return self.fg.features.get_feature(tag)

    def setFeature(self, tag, feaString):
        return self.fg.features.set_feature(tag, feaString)

    def delFeature(self, tag):
        return self.fg.features.remove(tag)

    def newOTgroup(self, groupName, glyphList):
        '''Build (but do not register) a new OpenType class group.'''
        return fgt.fgGroup(groupName, glyphList,'FeaClassGroupMode', 'mainglyphname')

    def addOTgroup(self, groupName, glyphList):
        '''Register a new OpenType group via a full dict round-trip.'''
        temp_groups = self.fg.groups.asDict()
        new_group = self.newOTgroup(groupName, glyphList)
        temp_groups[groupName] = new_group
        self.fg.groups.fromDict(temp_groups)

    def getOTgroups(self):
        return self.fg.groups

    # - Kerning and Groups -------------------------------------
    def kerning(self, layer=None):
        '''Return the fonts kerning object (fgKerning) no matter the reference.'''
        if layer is None:
            return self.fl.kerning()
        else:
            if isinstance(layer, int):
                # NOTE(review): self.masters is a *method*, so self.masters[layer]
                # raises TypeError — probably meant self.masters()[layer] or
                # self.fl.masters[layer]. Confirm and fix.
                return self.fl.kerning(self.masters[layer])
            elif isinstance(layer, basestring):
                return self.fl.kerning(layer)

    def kerning_to_list(self, layer=None):
        '''Flatten layer kerning to [[(glyph, mode), (glyph, mode)], value] lists.'''
        # Structure:
        # fgKerning{fgKernigPair(fgKerningObject(glyph A, mode), fgKerningObject(glyph B, mode)) : kern value, ...}
        layer_kernig = self.kerning(layer)
        kern_list = []
        for key, value in layer_kernig.asDict().iteritems():  # Python-2-only iteritems()
            kern_list.append([[item.asTuple() for item in key.asTuple()], value])
        return kern_list

    def kerning_dump(self, layer=None, mark_groups='@', pairs_only=False):
        '''Dump layer kerning to simple tuples.
        Args:
            layer (None, Int, String): Extract kerning data for layer specified;
            mark_groups (String): Mark group kerning with special symbol
            pairs_only (Bool): Export pairs without value
        Returns:
            pairs_only is False: list(tuple(tuple(str(First), str(Second))), Int(Value)))
            pairs_only is True: list(tuple(str(First), str(Second)))
        '''
        layer_kernig = self.kerning(layer)
        save_pairs = []
        for kern_pair, value in layer_kernig.items():
            current_pair = kern_pair.asTuple()
            a_tup = current_pair[0].asTuple()
            b_tup = current_pair[1].asTuple()
            # Prefix group names with the mark symbol; plain glyphs pass through
            a = mark_groups + a_tup[0] if a_tup[1] == 'groupMode' else a_tup[0]
            b = mark_groups + b_tup[0] if b_tup[1] == 'groupMode' else b_tup[0]
            if pairs_only:
                save_pairs.append((a, b))
            else:
                save_pairs.append(((a, b), value))
        return save_pairs

    def kerning_groups(self, layer=None):
        '''Return the fonts kerning groups object (fgKerningGroups) no matter the reference.'''
        return self.kerning(layer).groups

    def fl_kerning_groups(self, layer=None):
        # Keep only entries with a truthy first element
        return list(filter(lambda x: x[0], self.fl.getAllGroups()))

    def fl_kerning_groups_to_dict(self, layer=None):
        return extBiDict({item[1]: item[-1] for item in self.fl_kerning_groups(layer)})

    def kerning_groups_to_dict(self, layer=None, byPosition=False, sortUnicode=False):
        '''Return dictionary containing kerning groups
        Args:
            layer (None, Int, String): Extract kerning data for layer specified;
            byPosition (bool): Dictionary by class kerning positions - KernLeft(1st), KernRight(2nd) or KernBothSide(Both);
            sortUnicode (bool): Sort members of kern group according to their Unicode value.
        Returns:
            dict
        '''
        kern_groups = self.kerning_groups(layer).asDict()
        if sortUnicode:
            temp_groups = {}
            for groupName, groupData in kern_groups.items():
                temp_groups[groupName] = (sorted(groupData[0], key=lambda glyph_name: self.glyph(glyph_name).unicode), groupData[1])
            kern_groups = temp_groups
        if not byPosition:
            return kern_groups
        else:
            # Re-key by kern position: {position: [(groupName, members), ...]}
            sortedByPosition = {}
            for groupName, groupData in kern_groups.items():
                sortedByPosition.setdefault(groupData[1], []).append((groupName, groupData[0]))
            return sortedByPosition

    def dict_to_kerning_groups (self, groupDict, layer=None):
        # - Build Group kerning from dictionary
        kerning_groups = self.kerning_groups(layer)
        for key, value in groupDict.iteritems():  # Python-2-only iteritems()
            kerning_groups[key] = value

    def reset_kerning_groups(self, layer=None):
        # - Delete all group kerning at given layer
        self.kerning_groups(layer).clear()

    def add_kerning_group(self, key, glyphNameList, type, layer=None):
        '''Adds a new group to fonts kerning groups.
        Args:
            key (string): Group name
            glyphNameList (list(string)): List of glyph names
            type (string): Kern group types: L - Left group (1st), R - Right group (2nd), B - Both (1st and 2nd)
            layer (None, Int, String)
        Returns:
            None
        '''
        self.kerning_groups(layer)[key] = (glyphNameList, self.__kern_group_type[type.upper()])

    def remove_kerning_group(self, key, layer=None):
        '''Remove a group from fonts kerning groups at given layer.'''
        del self.kerning_groups(layer)[key]

    def rename_kerning_group(self, oldkey, newkey, layer=None):
        '''Rename a group in fonts kerning groups at given layer.'''
        self.kerning_groups(layer).rename(oldkey, newkey)

    def newKernPair(self, glyphLeft, glyphRight, modeLeft, modeRight):
        '''Build a fgKerningObjectPair; int modes are mapped via __kern_pair_mode.'''
        if not isinstance(modeLeft, str): modeLeft = self.__kern_pair_mode[modeLeft]
        if not isinstance(modeRight, str): modeRight = self.__kern_pair_mode[modeRight]
        return fgt.fgKerningObjectPair(glyphLeft, glyphRight, modeLeft, modeRight)
# - Extensions ----------------------------
class eFont(pFont):
    '''
    Proxy Font extension, packing some useful tools.

    Constructor:
        eFont() - default represents the current glyph and current font
        eFont(fgFont)
    '''
    def copyZones(self, font):
        '''Copy alignment zones from another font (UNFINISHED — see TODO).'''
        # Normalize the source argument to a pFont proxy
        if isinstance(font, fgt.fgFont):
            srcFont = pFont(font)
        elif isinstance(font, pFont):
            srcFont = font
        # NOTE(review): srcFont is never used — the actual copying is not implemented.
        pass # TODO!
class jFont(object):
    '''
    Proxy VFJ Font (Fontlab JSON Font format)
    Constructor:
        jFont()              -- construct an empty jFont
        jFont(vfj_file_path) -- load VFJ from vfj_file_path (STR)
        jFont(pFont)         -- load the VFJ sitting next to the pFont's VFC
    Methods:
        .load(file_path)    -- load VFJ font from path
        .save_as(file_path) -- save VFJ font to path
        .save()             -- save VFJ in place (overwrite)
    '''
    def __init__(self, source=None):
        # Nothing is loaded until a source is supplied.
        self.data = None
        self.source = None
        self.path = None
        if source is None:
            return
        # A string is taken as a direct path; a pFont points at the .vfj
        # file living beside its .vfc.
        if isinstance(source, basestring):
            self.path = source
        elif isinstance(source, pFont):
            self.path = source.path.replace('vfc', 'vfj')
        self.load(self.path)

    def load(self, file_path):
        """Read and decode a VFJ file, remembering its path."""
        with open(file_path, 'r') as vfj_in:
            self.data = json.load(vfj_in, cls=vfj_decoder)
        self.path = file_path
        return True

    def save_as(self, file_path):
        """Encode the font data and write it to *file_path*."""
        with open(file_path, 'w') as vfj_out:
            json.dump(self.data, vfj_out, cls=vfj_encoder)
        return True

    def save(self):
        """Overwrite the file this font was loaded from."""
        return self.save_as(self.path)
import numpy as np
# Dataset selection: one of 'hpatches', 'hpatches_rot', 'hpatches_s',
# 'strecha', 'webcam'.
DATA = 'hpatches'
DEBUG = False  # fix: was the obfuscated literal (0==1)
WS_DIR = '/home/ws/'
WEIGHT_DIR = '%s/meta/weights/'%WS_DIR
NEW_SIZE = (640,480)
GLOBAL_TIME = True
MIN_MATCH_COUNT = 10 # min num of descriptor matching for H estimation
BATCH_SIZE = 1
MOVING_AVERAGE_DECAY = 0.9999
SCALE_NUM = 5
THRESH_OVERLAP = 5
THRESH_DESC = 10000

if DATA=='hpatches':
    DATA_DIR = '%s/datasets/hpatches-sequences-release/'%WS_DIR
    HP_LIST = 'meta/list/img_hp.txt'
    MAX_IMG_NUM = 6 # number of img per scene to process
    IMG_EXT = 'ppm'
    # Fix: read the scene list through a context manager instead of
    # leaking the file handle.
    with open(HP_LIST) as hp_file:
        SCENE_LIST = [l.split("\n")[0] for l in hp_file.readlines()]
elif DATA=='hpatches_rot':
    DATA_DIR = '%s/datasets/hpatches_rot/'%WS_DIR
    HP_LIST = 'meta/list/img_hp.txt'
    MAX_IMG_NUM = 7 # number of img per scene to process
    IMG_EXT = 'ppm'
    # NOTE(review): SCENE_LIST is not defined on this branch -- confirm
    # callers never read it for this dataset.
elif DATA=='hpatches_s':
    DATA_DIR = '%s/datasets/hpatches_s/'%WS_DIR
    HP_LIST = 'meta/list/img_hp.txt'
    MAX_IMG_NUM = 5 # number of img per scene to process
    IMG_EXT = 'ppm'
    # NOTE(review): SCENE_LIST is not defined on this branch either.
elif DATA=='strecha':
    DATA_DIR = '%s/datasets/strecha/'%WS_DIR
    SCENE_LIST = ['fountain', 'castle_entry','herzjesu']
elif DATA=='webcam':
    SCENE_LIST = ['Chamonix', 'Courbevoie', 'Frankfurt', 'Mexico', 'Panorama', 'StLouis']
    DATA_DIR = '%s/datasets/WebcamRelease'%WS_DIR
else:
    print('Error: unknown dataset: %s. Set DATA correctly in tools/cst.py.'%DATA)
    exit(1)

# copied from superpoint
# Jet colormap for visualization.
myjet = np.array([[0.        , 0.        , 0.5       ],
                  [0.        , 0.        , 0.99910873],
                  [0.        , 0.37843137, 1.        ],
                  [0.        , 0.83333333, 1.        ],
                  [0.30044276, 1.        , 0.66729918],
                  [0.66729918, 1.        , 0.30044276],
                  [1.        , 0.90123457, 0.        ],
                  [1.        , 0.48002905, 0.        ],
                  [0.99910873, 0.07334786, 0.        ],
                  [0.5       , 0.        , 0.        ]])

# lift output sub-directories
KP_DIR = 'kp'
ORI_DIR = 'ori'
DES_DIR = 'des'

# lfnet
LFNET_DIR = '/home/ws/methods/lfnet/lf-net-release/' # docker path
|
from django.shortcuts import render, redirect
from django.views.generic import CreateView, UpdateView, DeleteView, ListView, DetailView
from django.urls import reverse_lazy
from django.contrib.auth import get_user_model
from .models import Comment
from products.models import Book
from .forms import CommentForm
from directory.models import Genre
User = get_user_model()
# Create your views here.
# class CommentCreateView(CreateView):
# model = Comment
# template_name = "comments/create.html"
# success_url = reverse_lazy('products:all_products_list')
# fields = ['comment']
def comment_create_view(request):
    """Render the comment form (GET) or create a comment for a book (POST).

    Expects a ``book`` id in the query string for both methods; a POST
    additionally carries a ``comment`` field in the body.
    """
    form = CommentForm()
    # NOTE(review): raises KeyError / Book.DoesNotExist when 'book' is
    # missing or invalid -- confirm callers always supply a valid id.
    book_id = request.GET['book']
    book = Book.objects.get(pk=book_id)
    user = User.objects.get(pk=request.user.pk)
    if request.method == 'POST':
        # The comment text is stored directly; the form is only used for
        # rendering, not validation.
        Comment.objects.create(product=book, user=user,
                               comment=request.POST.get('comment'))
        return redirect('products:product_detailed', pk=book_id)
    # Fix: removed leftover debug print('goodbye') from the GET path.
    return render(
        request,
        'comments/create.html',
        context={'form': form, 'book': book, 'user': user,
                 'genres': Genre.objects.all()})
|
# HTML template for the sauna status page.  Rendered with str.format():
# the single "{}" placeholder inside <table> receives the generated rows;
# all literal CSS braces are doubled ("{{ ... }}") so format() keeps them.
dashboard_html = """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Sauna status</title>
<style>
body {{
background-color: rgba(114, 254, 149, 0.38);
}}
h1 {{color: blue;}}
.content {{
border-radius: 25px;
border: 1px solid black;
background-color: white;
width: 80%;
margin-left: auto;
margin-right: auto;
}}
.st {{
padding: 5px 10px 5px 10px;
}}
.st_0 {{
background-color: rgba(166, 241, 166, 0.54);
border-radius: 25px;
}}
.st_1 {{
background-color: rgba(249, 215, 46, 0.54);
border-radius: 25px;
}}
.st_2 {{
background-color: rgba(249, 93, 46, 0.54);
border-radius: 25px;
}}
.st_3 {{
background-color: rgba(45, 149, 222, 0.54);
border-radius: 25px;
}}
table caption {{
font-size: 50px;
}}
table {{
width: 80%;
border-collapse: collapse;
margin-left: auto;
margin-right: auto;
}}
th, td {{
padding: 15px;
text-align: left;
border-top:1pt solid black;
}}
</style>
</head>
<body>
<div class="content">
<table>
<caption>Sauna status</caption>
{}
</table>
</div>
</body>
</html>"""
|
from __future__ import print_function
from skimage.feature import peak_local_max
from skimage.morphology import watershed
from scipy import ndimage
import numpy as np
import matplotlib.pyplot as plt
from flask import Flask, render_template, url_for, redirect, request, flash, request
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import argparse
import imutils
import json
import urllib
import cv2
import os
from werkzeug.utils import secure_filename
from PIL import Image
# Upload configuration: files land in ./static/images and must carry one
# of the whitelisted extensions.
UPLOAD_FOLDER = './static/images'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
app = Flask("__app__")
# NOTE(review): the secret key is hard-coded in source; move it to an
# environment variable before deploying.
app.config['SECRET_KEY'] = 'a551d32359baf371b9095f28d45347c8b862183'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    """Return True when *filename* has a whitelisted extension."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
# def delete_File():
# if os.path.exists('./static/images/fin1.png'):
# os.remove('./static/images/fin1.png')
# print("hello")
@app.route('/upload_file', methods=['GET', 'POST'])
def upload_file():
    """Handle the image-upload form.

    POST: validate that a 'file' field is present, non-empty and has an
    allowed extension, save it into UPLOAD_FOLDER, then redirect to the
    processing view.  GET (or an invalid POST): render the upload form.
    """
    if request.method == 'POST':
        # 'file' is the name of the form's file input.
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            # secure_filename strips path components and unsafe characters.
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            return redirect(url_for('uploaded_file',
                filename=filename))
    return render_template('calculator.html', val=1)
from flask import send_from_directory
# Watershed Algorithm
def equalize(img):
    """Histogram-equalize the luma channel of a BGR image, in place.

    Converts to YCrCb, equalizes the Y plane, then converts back into the
    original buffer.  Returns the (mutated) input image.
    """
    ycrcb_img = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    planes = cv2.split(ycrcb_img)
    cv2.equalizeHist(planes[0], planes[0])
    cv2.merge(planes, ycrcb_img)
    cv2.cvtColor(ycrcb_img, cv2.COLOR_YCR_CB2BGR, img)
    return img
def Watershed(location_det,filename):
    """Segment the image at *location_det* with the watershed algorithm and
    save a contour overlay to ./static/images/modified/<filename>.

    Side effects: writes fig1.png into the CWD, overwrites the modified
    image, and opens the result in an external viewer via PIL.
    """
    print(location_det)
    image = cv2.imread(location_det)
    # equalize() mutates `image` in place through cv2's dst argument; the
    # returned alias is not used afterwards.
    im = equalize(image) #call
    shifted = cv2.pyrMeanShiftFiltering(image, 21, 51)
    gray = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    plt.imshow(thresh, cmap=plt.get_cmap('gray'))
    # Distance transform + local maxima produce the watershed seed markers.
    D = ndimage.distance_transform_edt(thresh)
    # NOTE(review): `indices=False` was deprecated and later removed from
    # skimage.feature.peak_local_max -- confirm the pinned version.
    localMax = peak_local_max(D, indices=False, min_distance=20,
        labels=thresh)
    markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
    labels = watershed(-D, markers, mask=thresh)
    print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))
    # Outline every segment; label 0 is the background.
    for label in np.unique(labels):
        if label == 0:
            continue
        mask = np.zeros(gray.shape, dtype="uint8")
        mask[labels == label] = 255
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        for (i, c) in enumerate(cnts):
            ((x, y), _) = cv2.minEnclosingCircle(c)
            # cv2.putText(image, "#{}".format(i + 1), (int(x) - 10, int(y)),
            # cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            cv2.drawContours(image, [c], -1, (255, 0, 0), 1)
    #plt.savefig('./static/images/fig1.png')
    plt.figure()
    plt.imshow(image, cmap='gray')
    plt.axis('off')
    plt.savefig('fig1.png')
    if os.path.exists('./static/images/modified/'+filename):
        os.remove('./static/images/modified/'+filename)
    plt.savefig('./static/images/modified/'+filename)
    imag = Image.open('fig1.png')
    # Opens the OS image viewer -- a side effect on the server host.
    imag.show()
# To measure the dimensions
def midpoint(ptA, ptB):
    """Return the midpoint of two 2-D points as an (x, y) tuple of floats."""
    mid_x = (ptA[0] + ptB[0]) * 0.5
    mid_y = (ptA[1] + ptB[1]) * 0.5
    return (mid_x, mid_y)
def measure_dim(loc):
    """Estimate the area of the largest detected objects in the image at *loc*.

    Edge-detects, finds external contours, draws annotated bounding boxes,
    and returns the area computed from the last processed contour.
    """
    # ap = argparse.ArgumentParser()
    # ap.add_argument("-i", "--image", required = True, help="path to input image")
    # ap.add_argument("-w", "--width", type=float, required=True, help="width of the object")
    # args = vars(ap.parse_args())
    image = cv2.imread(loc)
    # image = cv2.imread(args["image"])
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (7,7), 0)
    # Canny edges, closed with one dilate/erode pass.
    edged = cv2.Canny(gray, 50, 100)
    edged = cv2.dilate(edged, None, iterations = 1)
    edged = cv2.erode(edged, None, iterations = 1)
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    (cnts, _) = contours.sort_contours(cnts)
    pixelsPerMetric = None
    for c in cnts:
        # Skip tiny/noisy contours.
        if cv2.contourArea(c) < 100:
            continue
        orig = image.copy()
        box = cv2.minAreaRect(c)
        box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
        box = np.array(box, dtype="int")
        box = perspective.order_points(box)
        cv2.drawContours(orig, [box.astype("int")], -1, (0,255,255), 2)
        for (x,y) in box:
            cv2.circle(orig,(int(x),int(y)), 5, (0,0,255), -1)
        # Midpoints of the four box edges.
        (tl, tr, br, bl) = box
        (tltrX, tltrY) = midpoint(tl,tr)
        (blbrX, blbrY) = midpoint(bl,br)
        (tlblX,tlblY) = midpoint(tl,bl)
        (trbrX, trbrY) = midpoint(tr,br)
        #Draw the midpoints on the image
        cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
        cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
        cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
        cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
        #intersect the lines between midpoints
        cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
            (255,255, 255), 2)
        cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
            (255, 255, 255), 2)
        #compute the Euclidean distance between midpoints
        dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
        dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
        #We initialize the pixels per metric has not been established
        # NOTE(review): 750 is a magic calibration constant (pixels per
        # unit of the first object's width) -- confirm its provenance.
        if pixelsPerMetric is None:
            pixelsPerMetric = dB / 750
        dimA = dA / pixelsPerMetric
        dimB = dB / pixelsPerMetric
        #to compute the final object size
        # NOTE(review): dimA is scaled by 10 in the label but dimB is not --
        # looks inconsistent; verify intended units.
        cv2.putText(orig, "{:.1f} feet".format(dimA*10),
            (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,
            0.65, (255, 0, 0), 2)
        cv2.putText(orig, "{:.1f} feet".format(dimB),
            (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
            0.65, (255, 0, 0), 2)
        area = dimA*dimB
        dims = area
    print(f'The dims: {dims}')
    return dims
@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Run watershed segmentation on an uploaded image and render the result."""
    # print('./static/images/'+filename)
    Watershed(str('./static/images/'+filename),filename)
    #dims = measure_dim(str('./static/images/'+filename))
    #dims/=10000
    # The measurement path is disabled; dims is currently hard-coded to 1.
    return render_template('calculator.html', val=1, dims=1,filename='images/modified/'+filename)
# Least Geographic Elevation
def elevation(request):
    """Query the Google elevation API for a '|'-separated coordinate string.

    Returns a dict mapping elevation -> list of (lat, lng) tuples, or None
    when the request or JSON decoding fails (a message is printed instead).
    """
    # NOTE(review): the API key is hard-coded in source -- move it to
    # configuration and rotate the key.
    apikey = "AIzaSyDv9C5WnFwlmPtZWMtH6EqfMhSwJrlCcD0"
    url = "https://maps.googleapis.com/maps/api/elevation/json"
    # The parameter named 'request' (a coordinate string) is rebound here
    # to the HTTP response object; it also shadows flask's 'request'.
    request = urllib.request.urlopen(url+"?locations="+str(request)+"&key="+apikey)
    try:
        results = json.load(request).get('results')
        if 0 < len(results):
            mat={}
            # Group coordinates by their elevation value.
            for i in range(0,len(results)):
                elevation = results[i].get('elevation')
                location=results[i].get('location')
                loclat=[]
                loclat.append(location['lat'])
                loclat.append(location['lng'])
                loc=tuple(loclat)
                if elevation not in mat:
                    mat[elevation]=[]
                mat[elevation].append(loc)
            # ELEVATION
            return mat
        else:
            print ('HTTP GET Request failed.')
    except ValueError as e:
        print ('JSON decode failed: '+str(request))
def postion(lat1, lon1, lat2, lon2):
    """Sample a lat/lon grid between two corners (~10 m spacing) and return
    its elevation data as a dict ordered by increasing elevation.

    (The misspelled name is kept: the route handlers call it as 'postion'.)
    """
    # Order the corners so (lat1, lon1) is the minimum corner.
    if lat1 > lat2:
        lat1, lat2 = lat2, lat1
    if lon1 > lon2:
        lon1, lon2 = lon2, lon1
    coords = ''
    i = lat1
    while i < lat2:
        j = lon1
        while j < lon2:
            # "lat,lon" pairs separated by '|', with no trailing separator
            # after the final pair.
            coords = coords + str(i) + ','
            if (i + 0.0001) >= lat2 and (j + 0.0001) >= lon2:
                coords = coords + str(j)
            else:
                coords = coords + str(j) + '|'
            j = j + 0.0001  # ~10 m step in longitude
        i = i + 0.0001      # ~10 m step in latitude
    result = elevation(coords)
    ordered = {}
    for key in sorted(result.keys()):
        ordered[key] = result[key]
    return ordered
# All routes beyond this
@app.route('/')
def home():
    """Landing page."""
    #delete_File()
    return render_template('index.html', title='Major Project')
@app.route('/rooftop')
def rooftop():
    """Rooftop-detection information page."""
    return render_template('roof.html', title='Rooftop Detection')
@app.route('/references')
def references():
    """Bibliography page."""
    return render_template('references.html', title='References')
@app.route('/calculator', methods=['GET','POST'])
def calculator():
    """Area calculator page, seeded with elevation data for a fixed region."""
    # NOTE(review): coordinates are hard-coded and every request triggers a
    # fresh batch of elevation API calls.
    pos = postion(13.00011,77.00011,13.0011,77.00111)
    return render_template('calculator.html', title='Calculator', position=pos, val=1)
@app.route('/trial', methods=['GET'])
def trial():
    """Debug view rendering the raw elevation data."""
    pos = postion(13.00011,77.00011,13.0011,77.00111)
    return render_template('trial.html', position=pos)
@app.route('/dashboard')
def dashboard():
    """Dashboard page."""
    return render_template('dashboard.html')
@app.route('/contour')
def contour():
    """Contour-map page."""
    return render_template('contour_map.html', title='Contour Map')
# Start the development server (debug mode -- not for production use).
app.run(debug=True, port=5003)
#!/usr/bin/env python3
class AstPrinter:
    """Expression visitor that renders an AST as a Lisp-like string."""

    def print(self, expr):
        """Return the string form of *expr* by dispatching through accept()."""
        return expr.accept(self)

    def parenthesize(self, name, *exprs):
        """Wrap *name* and the rendered sub-expressions in parentheses."""
        parts = [name] + [sub.accept(self) for sub in exprs]
        return "(" + " ".join(parts) + ")"

    def visitBinaryExpr(self, expr):
        return self.parenthesize(expr.op.lexeme, expr.left, expr.right)

    def visitGroupingExpr(self, expr):
        return self.parenthesize("group", expr.exp)

    def visitLiteralExpr(self, expr):
        # 'nil' mirrors the Lox representation of a null literal.
        return "nil" if expr.value is None else str(expr.value)

    def visitUnaryExpr(self, expr):
        return self.parenthesize(expr.op.lexeme, expr.exp)
|
import pathlib
import pandas as pd
import pytest
import settings
from local.dividends import comony_ru
def test_dividends(monkeypatch, tmpdir):
    """Integration test: dividends_conomy('CHMF') yields a well-formed Series."""
    # Redirect the module's data directory to a temp dir so downloads land
    # in an isolated location.
    data_path = pathlib.Path(tmpdir.mkdir("conomy"))
    monkeypatch.setattr(settings, 'DATA_PATH', data_path)
    df = comony_ru.dividends_conomy('CHMF')
    assert isinstance(df, pd.Series)
    assert df.name == 'CHMF'
    assert df.index.is_monotonic_increasing
    assert df.index.is_unique
    # Spot-check the first date and one known dividend payment.
    assert df.index[0] == pd.Timestamp('2006-08-06')
    assert df['2018-06-19'] == pytest.approx(66.04)
def test_download_update():
    """download_update must be explicitly unsupported for Conomy data."""
    with pytest.raises(NotImplementedError) as error:
        comony_ru.ConomyDataManager('AKRN').download_update()
    # Redundant with pytest.raises, kept as an explicit type check.
    assert error.type == NotImplementedError
|
from typing import List
from sqlalchemy.orm import Session
from models.schemas import Cve
from .exceptions import CveInfoInfoAlreadyExistError, CveInfoNotFoundError
from models.models import CveInfo
from models.schemas import CreateAndUpdateCve
def get_all_cves(session: Session, limit: int, offset: int) -> List[CveInfo]:
    """Return one page of CveInfo rows, windowed by offset/limit."""
    page_query = session.query(CveInfo).offset(offset).limit(limit)
    return page_query.all()
def get_cve_info_by_id(session: Session, _id: int) -> CveInfo:
    """Fetch one CveInfo by primary key or raise CveInfoNotFoundError."""
    result = session.query(CveInfo).get(_id)
    if result is None:
        raise CveInfoNotFoundError
    return result
def get_tracked_cves(session: Session) -> List[CveInfo]:
    """Return every CVE row currently flagged as tracked."""
    # '== True' (not 'is True') is intentional here: SQLAlchemy overloads
    # '==' to build the SQL comparison expression.
    cve_info = session.query(CveInfo).filter(CveInfo.tracked == True).all()
    return cve_info
def create_cve(session: Session, cve_info: CreateAndUpdateCve) -> CveInfo:
    """Insert a new CVE row.

    Raises:
        CveInfoInfoAlreadyExistError: when a row with the same cve_id exists.
    """
    duplicate = (
        session.query(CveInfo)
        .filter(CveInfo.cve_id == cve_info.cve_id)
        .first()
    )
    if duplicate is not None:
        raise CveInfoInfoAlreadyExistError
    row = CveInfo(**cve_info.dict())
    session.add(row)
    session.commit()
    session.refresh(row)
    return row
def update_cve_info(session: Session, _id: int, info_update: CreateAndUpdateCve) -> CveInfo:
    """Overwrite all mutable fields of a stored CVE and return the refreshed row.

    Raises:
        CveInfoNotFoundError: when no CVE with ``_id`` exists
        (raised by get_cve_info_by_id).
    """
    cve_info = get_cve_info_by_id(session, _id)
    # get_cve_info_by_id already raised for a missing row, so the old
    # 'if cve_info is None' check was unreachable and has been removed.
    # Copy every updatable field from the payload.
    for field in (
        'cve_id', 'assigner', 'description', 'severity', 'attack_vector',
        'confidentiality_impact', 'integrity_impact', 'availability_impact',
        'external_links', 'published_date', 'last_modified_date', 'tracked',
    ):
        setattr(cve_info, field, getattr(info_update, field))
    session.commit()
    session.refresh(cve_info)
    return cve_info
def delete_cve_info(session: Session, _id: int):
    """Delete the CVE row with the given primary key.

    Raises CveInfoNotFoundError (from get_cve_info_by_id) when absent.
    NOTE(review): this function is re-defined identically later in this
    module; one of the two copies should be deleted.
    """
    cve_info = get_cve_info_by_id(session, _id)
    # The helper raises for a missing id, so the old None check was dead code.
    session.delete(cve_info)
    session.commit()
def track_cve(session: Session, _id: int) -> CveInfo:
    """Mark a CVE as tracked and return the refreshed row.

    Raises CveInfoNotFoundError (from get_cve_info_by_id) when absent.
    """
    cve_info = get_cve_info_by_id(session, _id)
    # The helper raises for a missing id, so the old None check was dead code.
    cve_info.tracked = True  # was the int 1; the field is used as a boolean
    session.commit()
    session.refresh(cve_info)
    return cve_info
def delete_cve_info(session: Session, _id: int):
    """Delete the CVE row with the given primary key.

    Raises CveInfoNotFoundError (from get_cve_info_by_id) when absent.
    NOTE(review): duplicate of the identical function defined earlier in
    this module; keep only one copy.
    """
    cve_info = get_cve_info_by_id(session, _id)
    # The helper raises for a missing id, so the old None check was dead code.
    session.delete(cve_info)
    session.commit()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Print the Russian words for an integer in [0, 1000].
if __name__ == '__main__':
    n = int(input("Введите число: "))
    r = []
    if n >= 1001:
        print('Число больше 1000')
        # Bug fix: without this exit the code fell through and crashed with
        # an IndexError on h[x - 1] for any n > 1000.
        exit(0)
    # Hundreds, units, teens and tens word tables (index = digit - 1).
    h = ["сто", "двести", "триста", "четыреста", "пятьсот", "шестьсот", "семьсот", "восемьсот", "девятьсот"]
    o = ["один", "два", "три", "четыре", "пять", "шесть", "семь", "восемь", "девять"]
    # Bug fix: "семьнадцать" -> "семнадцать".
    s = ["одиннадцать", "двенадцать", "тринадцать", "четырнадцать", "пятнадцать", "шестнадцать", "семнадцать",
         "восемнадцать", "девятнадцать"]
    # Bug fix: "восемьдясят" -> "восемьдесят".
    t = ["десять", "двадцать", "тридцать", "сорок", "пятьдесят", "шестьдесят", "семьдесят", "восемьдесят", "девяносто"]
    if n == 1000:
        print('Тысяча')
        exit(0)
    # Hundreds digit.
    if n // 100 > 0:
        x = n // 100
        r.append(h[x - 1])
    # Tens >= 20: tens word plus an optional units word, then done.
    if (n % 100) // 10 >= 2:
        x = (n % 100) // 10
        r.append(t[x - 1])
        if (n % 100) % 10 != 0:
            x = (n % 100) % 10
            r.append(o[x - 1])
        r = " ".join(r)
        print(r)
        exit(0)
    # 11..19 have dedicated words.
    if 11 <= n % 100 < 20:
        x = (n % 100) % 10
        r.append(s[x - 1])
        r = " ".join(r)
        print(r)
        exit(0)
    # Exactly one ten (i.e. 10), then plain units.
    if n % 100 // 10:
        x = n % 100 // 10
        r.append(t[x - 1])
    if n % 10 > 0:
        x = (n % 10)
        r.append(o[x - 1])
    r = " ".join(r)
    print(r)
|
# Generated by Django 2.0.3 on 2018-04-04 09:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds an integer 'flag' column (initialized to 0) to the voter model."""
    dependencies = [
        ('Registration', '0003_candidate_reward'),
    ]
    operations = [
        migrations.AddField(
            model_name='voter',
            name='flag',
            field=models.IntegerField(default=0),
            # default=0 only back-fills existing rows; it is not kept on
            # the model afterwards.
            preserve_default=False,
        ),
    ]
|
from gekitchensdk.erd.converters.abstract import ErdReadWriteConverter, ErdReadOnlyConverter
from gekitchensdk.erd.erd_codes import ErdCodeType
def erd_decode_int(value: str) -> int:
    """Decode an integer value sent as a hex encoded string."""
    return int(value, base=16)
def erd_encode_int(value: "int | str", length: int = 2) -> str:
    """Encode an integer value as a big-endian hex string.

    Args:
        value: the number to encode; anything ``int()`` accepts.
        length: number of bytes in the encoded representation.

    Fix: the parameter was annotated with the builtin function ``any``,
    which is not a type; the string annotation states the real contract.
    """
    value = int(value)
    return value.to_bytes(length, 'big').hex()
class ErdIntConverter(ErdReadWriteConverter[int]):
    """Read/write ERD integer converter with a configurable byte length."""
    def __init__(self, erd_code: ErdCodeType = "Unknown", length: int = 2):
        super().__init__(erd_code)
        self.length = length  # number of bytes used when encoding
    def erd_decode(self, value: str) -> int:
        """Decode an integer value sent as a hex encoded string."""
        return erd_decode_int(value)
    def erd_encode(self, value) -> str:
        """Encode an integer value as a hex string of ``self.length`` bytes."""
        # Bug fix: self.length was stored but never used -- encoding always
        # fell back to the 2-byte default.
        return erd_encode_int(value, self.length)
class ErdReadOnlyIntConverter(ErdReadOnlyConverter[int]):
    """Decode-only ERD integer converter for read-only codes."""
    def erd_decode(self, value: str) -> int:
        """Decode an integer value sent as a hex encoded string."""
        return erd_decode_int(value)
|
import unittest
import cq_examples.Ex101_InterpPlate as ex
class TestExample101(unittest.TestCase):
    """Smoke-test Ex101_InterpPlate: every generated plate must be a valid solid."""
    def test_Ex101(self):
        # Import and validate each example plate in order.
        plates = (ex.plate_0, ex.plate_1, ex.plate_2, ex.plate_3, ex.plate_4)
        for plate in plates:
            self.assertTrue(plate.val().isValid())
|
"""
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
# pylint: disable=no-member
from cibyl.models.model import Model
class Feature(Model):
    """Represents a Feature present (or not) in a CI environment."""
    # Model API schema: each entry declares an attribute's type and the
    # CLI arguments that can populate it.
    API = {
        'name': {
            'attr_type': str,
            'arguments': []
        },
        'present': {
            'attr_type': bool,
            'arguments': []
        },
    }
    def __init__(self, name, present):
        # Let IDEs know this model's attributes
        self.name = None      # feature name (str)
        self.present = None   # whether the feature was found (bool)
        # Model.__init__ fills the attributes from this mapping.
        super().__init__({'name': name, 'present': present})
|
from __future__ import unicode_literals
from django.apps import AppConfig
class FilesystemConfig(AppConfig):
    """Django application configuration for the 'filesystem' app."""
    name = 'filesystem'
|
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# importing the Kratos Library
from KratosMultiphysics import *
from KratosMultiphysics.LagrangianMPMApplication import *
from materials import *
class ConstitutiveLawUtility:
    """Assigns constitutive-law materials to a model part's properties."""

    def __init__(self, model_part, domain_size):
        # Keep references for later use by Initialize().
        self.model_part = model_part
        self.domain_size = domain_size

    def Initialize(self):
        """Entry point: set up the constitutive law on the stored model part."""
        self.SetConstitutiveLaw()

    def SetConstitutiveLaw(self):
        """Delegate material assignment to the imported AssignMaterial helper."""
        AssignMaterial(self.model_part.Properties)
|
from collections import namedtuple
import sys
import gym
# Algorithms
# Record types describing the pluggable (I)RL algorithm interfaces; the
# shared trailing fields are documented once in RES_FLDS_DOC.
RES_FLDS = ['sample', 'vectorized', 'uses_gpu']
RES_FLDS_DOC = '''\n
sample has signature (env, policy, num_episodes, seed) where
num_episodes is the number of trajectories to sample, and seed is used
to sample deterministically. It returns a list of 3-tuples
(states, actions, rewards), each of which is a list.
vectorized is a boolean flag indicating if the algorithm takes VecEnv's.
uses_gpu is a boolean flag indicating whether the algorithm requires a GPU.
'''
RLAlgorithm = namedtuple('RLAlgorithm', ['train', 'value'] + RES_FLDS)
RLAlgorithm.__doc__ = '''\
train has signature (env, discount, seed, log_dir), where env is a gym.Env,
discount is a float, seed is an integer and log_dir is a writable directory.
They return a policy (algorithm-specific object).
value has signature (env, policy, discount, seed).
It returns (mean, se) where mean is the estimated reward and se is the
standard error (0 for exact methods).''' + RES_FLDS_DOC
IRLAlgorithm = namedtuple('IRLAlgorithm',
    ['train', 'reward_wrapper', 'value'] + RES_FLDS)
IRLAlgorithm.__doc__ = '''\
train signature (env, trajectories, discount, seed, log_dir) where:
- env is a gym.Env.
- trajectories is a dict of environment IDs to lists of trajectories.
- discount is a float in [0,1].
- seed is an integer.
- log_dir is a directory which may be used for logging or other temporary output.
It returns a tuple (reward, policy), both of which are algorithm-specific
objects. reward must be comprehensible to RL algorithms (if any) specified in
the 'eval' key in the experimental config.
reward_wrapper is a class with signature __init__(env, reward).
It wraps environment (that may be a vector environment) and overrides step()
to return the reward learnt by the IRL algorithm.
value has signature (env, policy, discount, seed) where:
- env is a gym.Env.
- policy is as returned by the IRL algorithm.
- discount is a float in [0,1].
- seed is an integer.
It returns (mean, se) where mean is the estimated reward and se is the
standard error (0 for exact methods).''' + RES_FLDS_DOC
MetaIRLAlgorithm = namedtuple('MetaIRLAlgorithm',
    ['metalearn', 'finetune',
     'reward_wrapper', 'value'] + RES_FLDS)
MetaIRLAlgorithm.__doc__ = '''\
Values take the form: (metalearn, finetune, reward_wrapper, compute_value).
metalearn has signature (envs, trajectories, discount, seed, log_dir), where:
- envs is a dictionary mapping to gym.Env
- trajectories is a dictionary mapping to trajectories
- discount, seed and log_dir are as in the single-IRL case.
It returns an algorithm-specific object.
finetune has signature (metainit, env, trajectories, discount, seed, log_dir),
where metainit is the return value of metalearn; the remaining arguments and
the return value are as in the single-IRL case.
reward_wrapper and compute_value are the same as for IRLAlgorithm.'''
def validate_config(rl_algos, single_irl_algos, population_irl_algos):
    '''Checks the defined algorithms are of the appropriate type,
    and there is no ambiguity based on their keys (for single v.s.
    population IRL algorithms.)'''
    # Keys must be unambiguous between single and population IRL registries.
    overlap = set(population_irl_algos.keys()) & set(single_irl_algos.keys())
    assert len(overlap) == 0
    # Every registered algorithm must be of the expected record type.
    for name, algo in rl_algos.items():
        assert isinstance(algo, RLAlgorithm), name
    for name, algo in single_irl_algos.items():
        assert isinstance(algo, IRLAlgorithm), name
    for name, algo in population_irl_algos.items():
        assert isinstance(algo, MetaIRLAlgorithm), name
# Per-experiment configuration
def _list_of(converter):
def f(xs):
return list(map(converter, xs))
return f
# All fields in the parsed configuration.
# 'key': type_converter
# Schema for experiment configs: maps each recognized key to the converter
# applied by parse_config().
FIELD_TYPES = {
    # (Lists of) algorithms
    'expert': str,
    'irl': _list_of(str),
    'eval': _list_of(str),
    # Lists of environments
    'train_environments': _list_of(str),
    'test_environments': _list_of(str),
    # Lists of trajectories
    'train_trajectories': _list_of(int),
    'test_trajectories': _list_of(int),
    'discount': float,
    # Number of seeds to use
    'seeds': int,
    'parallel_rollouts': int,
}
# Keys that must appear in every experiment config.
MANDATORY_FIELDS = ['expert', 'irl', 'eval', 'test_trajectories']
OPTIONAL_FIELDS = {
    # 'field': default_value
    'discount': 0.99,
    'seeds': 3,
    'train_trajectories': None,
    # note parallel_rollouts is ignored for non-vectorized (I)RL algorithms
    'parallel_rollouts': 4,
}
def parse_config(experiment, cfg,
        rl_algos, single_irl_algos, population_irl_algos):
    '''Returns a canonical configuration from user-specified configuration
    dictionary cfg. Fills in defaults from OPTIONAL_FIELDS, verifies all
    MANDATORY_FIELDS are present, type checks in FIELD_TYPES, and performs
    some additional custom validation.'''
    try:
        # Fill in defaults
        res = {}
        for k in MANDATORY_FIELDS:
            res[k] = cfg[k]
        for k, default in OPTIONAL_FIELDS.items():
            v = cfg.get(k, default)
            if v is not None:
                res[k] = v
        # 'environments' is shorthand for identical train/test lists and is
        # mutually exclusive with the explicit per-phase keys.
        for k in ['train_environments', 'test_environments']:
            if 'environments' in cfg:
                assert k not in cfg
                res[k] = cfg['environments']
            else:
                res[k] = cfg[k]
        # Type checking/conversion
        for fld, converter in FIELD_TYPES.items():
            if fld in res:
                res[fld] = converter(res[fld])
        # Check environments are registered
        for fld in ['train_environments', 'test_environments']:
            for env in res[fld]:
                gym.envs.registry.spec(env)
        # Check RL & IRL algorithms are registered (KeyError when missing).
        rl_algos[res['expert']]
        for rl in res['eval']:
            rl_algos[rl]
        for irl in res['irl']:
            assert (irl in population_irl_algos or irl in single_irl_algos)
        # train_trajectories only makes sense with a meta-IRL algorithm
        meta_irl = any([irl in population_irl_algos for irl in res['irl']])
        assert ('train_trajectories' in res) == meta_irl
        return res
    except Exception as e:
        # Re-raise the original exception type with the experiment name
        # prepended, preserving the traceback.
        fstr = "Error parsing config for experiment '{}': {}"
        msg = fstr.format(experiment, str(e))
        raise type(e)(msg).with_traceback(sys.exc_info()[2])
#!/usr/bin/env python
from pymongo import MongoClient
import json, ast
import time as time_mod
from bokeh.plotting import figure, output_file, show
from bokeh.models import LinearAxis, Range1d
import numpy as np
# Connect to the local MongoDB instance.
client = MongoClient('localhost', 27017)
# NOTE(review): DB-NAME / COLLECTION-NAME are placeholders; as written
# Python parses them as subtraction (client.DB - NAME), which fails at
# runtime until replaced with real identifiers.
db = client.DB-NAME # SET THIS WITH YOUR DATABASE NAME
posts = db.COLLECTION-NAME # SET THIS WITH YOUR COLLECTION NAME
posts_out = posts.find({'ident': '1'}) # SENSOR IDENT VAL TO PLOT
temps = []
times = []
humids = []
for post in posts_out:
    temps.append(post["temp"])
    times.append(post["time"])
    humids.append(post["humid"])
    print(post["time"])
# Pad each y-range by 30% of its observed span.
spacefact = 0.3
mintemp = (np.floor(float(str(min(temps)))))
maxtemp = (np.ceil(float(str(max(temps)))))
tempdelta = maxtemp-mintemp
mintemp = int(mintemp-tempdelta*spacefact)
maxtemp = int(maxtemp+tempdelta*spacefact)
minhumid = (np.floor(float(str(min(humids)))))
maxhumid = (np.ceil(float(str(max(humids)))))
humiddelta = maxhumid-minhumid
minhumid = int(minhumid-humiddelta*spacefact)
maxhumid = int(maxhumid+humiddelta*spacefact)
print(mintemp)
print(maxtemp)
# Times relative to the first sample (computed but unused below).
times_zse = [time - times[0] for time in times]
systime = time_mod.strftime("%H:%M:%S %d/%m/%y")
# Begin BOKEH plotting code
output_file("test.html")
# Generate Figure
titlestr = "Enviromental Condition Plot, Generated: " + systime
p = figure(plot_width=800,plot_height=400,title=titlestr)
# Plotting Code
p.y_range=Range1d(mintemp,maxtemp)
# NOTE(review): the 'legend' keyword was deprecated in newer bokeh
# releases in favour of 'legend_label' -- confirm the pinned version.
p.line(times,temps, line_width=2, legend="Temperature", line_color="navy")
p.extra_y_ranges = {"humidrng": Range1d(start=minhumid, end=maxhumid)}
p.add_layout(LinearAxis(y_range_name="humidrng"), 'right')
p.line(times,humids, line_width=2, legend="Humidity", y_range_name="humidrng", line_color="olive")
p.xaxis.axis_label = "Unix Epoch Time"
show(p)
|
from setuptools import setup, find_packages
# Read the long description once; decoding explicitly as UTF-8 keeps the
# build independent of the platform's default locale encoding.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(name='Multiple smi',
      version='2.0.3',
      url='https://github.com/ClementPinard/multiple-smi',
      license='MIT',
      author='Clément Pinard',
      author_email='clempinard@gmail.com',
      description='Look up GPU/CPU/RAM usage on multiple servers at the same time',
      long_description=long_description,
      long_description_content_type="text/markdown",
      packages=find_packages(),
      entry_points={
          'console_scripts': [
              'client_smi = multiple_smi.client.client_smi:main',
              'discover_hosts = multiple_smi.client.update_hosts_list:main',
              'server_smi = multiple_smi.server.server_smi:main',
              'install_server_service = multiple_smi.server.install_server_service:main'
          ]
      },
      install_requires=[
          'numpy',
          'python-nmap',
          'colorspacious',
          'py-cpuinfo',
          'pycairo',
          'nvidia-ml-py3',
          'pyzmq',
          'psutil'
      ],
      classifiers=[
          "Programming Language :: Python :: 3",
          "License :: OSI Approved :: MIT License",
          "Intended Audience :: Science/Research"
      ]
)
|
# coding=utf-8
# from .helpers import disconnect_client
# from .helpers import get_connections
# from .shadowsocks import Keys
from .shadowsocks import ShadowSocks
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Простой скрипт, автоматически обновляющий резюме на hh.ru
Для автоматического обновления - создайте задачу в `crontab`.
"""
import requests
from secrets import URL, HEADER, RESPONSE_STATUS
def main():
    """Send the resume-bump POST request and print the mapped response status."""
    # Fix: a timeout keeps this cron-driven script from hanging forever on
    # an unresponsive server (requests has no default timeout).
    req = requests.post(URL, headers=HEADER, timeout=30)
    err: str = "Ошибка! Неправильный код ответа."
    message = RESPONSE_STATUS.get(req.status_code, err)
    print(message)
if __name__ == "__main__":
main()
|
#
# Copyright (c) 2013-present, Anoop Kunchukuttan
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
#Program for detokenizing Indian language input
#
# @author Anoop Kunchukuttan
#
import string, re, sys
from indicnlp.common import IndicNlpException
## detokenizer patterns

# Punctuation that attaches to the token on its LEFT (the space before it is
# removed): closing brackets, sentence punctuation and the Devanagari
# danda/double danda (\u0964, \u0965).
left_attach=r'!%)\]},.:;>?\u0964\u0965'
pat_la=re.compile(r'[ ](['+left_attach+r'])')

# Punctuation that attaches to the token on its RIGHT (the space after it is
# removed): opening brackets and sigils.
right_attach=r'#$(\[{<@'
pat_ra=re.compile(r'(['+right_attach+r'])[ ]')

# Punctuation that attaches on BOTH sides, e.g. hyphen and slashes.
lr_attach=r'-/\\'
pat_lra=re.compile(r'[ ](['+lr_attach+r'])[ ]')

#donknow=u'&*+=^_|~'

## date, numbers, section/article numbering: digit groups separated by
## spaced ", . : /" (e.g. "12 . 3 . 2020") that should be rejoined.
pat_num_seq=re.compile(r'([0-9]+ [,.:/] )+[0-9]+')

### e-mail address
#pat_num=re.compile(ur'[a-zA-Z]+[ ]?
def trivial_detokenize_indic(s):
    """Detokenize Indic-language text by rule.

    - rejoins number/date sequences split around ", . : /"
    - re-attaches punctuation to the left, right or both sides
    - alternates quote characters between right- and left-attaching

    returns a detokenized string
    """
    # Rejoin digit sequences such as dates and section numbers: every match
    # of pat_num_seq has its internal spaces removed.
    pieces = []
    cursor = 0
    for match in pat_num_seq.finditer(s):
        pieces.append(s[cursor:match.start()])
        pieces.append(match.group().replace(' ', ''))
        cursor = match.end()
    pieces.append(s[cursor:])
    s = ''.join(pieces)

    # Drop the space on the attaching side(s) of punctuation.
    s = pat_lra.sub('\\1', s)
    s = pat_la.sub('\\1', s)
    s = pat_ra.sub('\\1', s)

    # Quotes are assumed well formed: occurrences of each quote character
    # alternate between right-attaching (opening) and left-attaching (closing).
    for quote in '\'"`':
        seen = 0
        marked = []
        for ch in s:
            if ch != quote:
                marked.append(ch)
                continue
            marked.append('@RA' if seen % 2 == 0 else '@LA')
            seen += 1
        s = (''.join(marked)
             .replace('@RA ', quote)
             .replace(' @LA', quote)
             .replace('@RA', quote)
             .replace('@LA', quote))
    return s
def trivial_detokenize(s, lang='hi'):
    """Trivial detokenizer for languages in the Indian sub-continent.

    :param s: tokenized text
    :param lang: two-letter language code (default ``'hi'``)
    :raises IndicNlpException: for Urdu, which has no detokenizer
    """
    if lang == 'ur':
        raise IndicNlpException('No detokenizer available for Urdu')
    return trivial_detokenize_indic(s)
if __name__ == '__main__':
    # CLI usage: read <infile>, detokenize line by line, write <outfile>.
    if len(sys.argv)<4:
        print("Usage: python indic_detokenize.py <infile> <outfile> <language>")
        sys.exit(1)
    with open(sys.argv[1],'r', encoding='utf-8') as ifile:
        with open(sys.argv[2],'w', encoding='utf-8') as ofile:
            for line in ifile:
                detokenized_line=trivial_detokenize(line,sys.argv[3])
                ofile.write(detokenized_line)
|
import datetime
from collections import OrderedDict
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.paginator import InvalidPage
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils.translation import ngettext
from django.views.generic import ListView, TemplateView
from wagtail.admin import messages
from wagtail.admin.views.mixins import SpreadsheetExportMixin
from wagtail.contrib.forms.forms import SelectDateForm
from wagtail.contrib.forms.utils import get_forms_for_user
from wagtail.core.models import Locale, Page
def get_submissions_list_view(request, *args, **kwargs):
    """Delegate to the submissions-list view of the requested form page."""
    form_page = get_object_or_404(Page, id=kwargs.get("page_id")).specific
    return form_page.serve_submissions_list_view(request, *args, **kwargs)
class SafePaginateListView(ListView):
    """Listing view with safe pagination, allowing incorrect or out of range values"""

    paginate_by = 20
    page_kwarg = "p"

    def paginate_queryset(self, queryset, page_size):
        """Paginate the queryset if needed with nice defaults on invalid param.

        Non-numeric page values fall back to page 1 (via ``InvalidPage``),
        ``"last"`` selects the final page, and out-of-range numbers clamp to
        the last page instead of raising a 404.
        """
        paginator = self.get_paginator(
            queryset,
            page_size,
            orphans=self.get_paginate_orphans(),
            allow_empty_first_page=self.get_allow_empty(),
        )
        page_kwarg = self.page_kwarg
        page_request = (
            self.kwargs.get(page_kwarg) or self.request.GET.get(page_kwarg) or 0
        )
        try:
            page_number = int(page_request)
        except ValueError:
            # "last" is a supported alias for the final page; any other
            # non-numeric value becomes 0, which paginator.page() rejects
            # below, landing us on page 1.
            page_number = paginator.num_pages if page_request == "last" else 0
        try:
            if page_number > paginator.num_pages:
                page_number = paginator.num_pages  # page out of range, show last page
            page = paginator.page(page_number)
        except InvalidPage:
            page = paginator.page(1)
        # BUGFIX: this return previously lived in a `finally:` block, which
        # silently swallowed any exception raised above (pylint W0150) and
        # raised UnboundLocalError when `page` was never assigned.
        return paginator, page, page.object_list, page.has_other_pages()
class FormPagesListView(SafePaginateListView):
    """Lists the available form pages for the current user"""

    template_name = "wagtailforms/index.html"
    context_object_name = "form_pages"

    def get_queryset(self):
        """Return the queryset of form pages for this view"""
        pages = get_forms_for_user(self.request.user)
        if self.locale:
            pages = pages.filter(locale=self.locale)
        ordering = self.get_ordering()
        if ordering:
            if isinstance(ordering, str):
                ordering = (ordering,)
            pages = pages.order_by(*ordering)
        return pages

    def get(self, request, *args, **kwargs):
        # Resolve the locale filter up front so get_queryset() can use it.
        self.locale = None
        if getattr(settings, "WAGTAIL_I18N_ENABLED", False):
            requested = request.GET.get("locale")
            if requested:
                self.locale = get_object_or_404(Locale, language_code=requested)
            else:
                self.locale = Locale.get_default()
        return super().get(request, *args, **kwargs)

    def get_context_data(self, *, object_list=None, **kwargs):
        locale_context = {"locale": None, "translations": []}
        if self.locale:
            # Offer links to this same listing filtered by every other locale.
            index_url = reverse("wagtailforms:index")
            locale_context = {
                "locale": self.locale,
                "translations": [
                    {
                        "locale": other,
                        "url": index_url + "?locale=" + other.language_code,
                    }
                    for other in Locale.objects.all().exclude(pk=self.locale.pk)
                ],
            }
        context = super().get_context_data(object_list=object_list, **kwargs)
        context.update(locale_context)
        return context
class DeleteSubmissionsView(TemplateView):
    """Delete the selected submissions"""

    template_name = "wagtailforms/confirm_delete.html"
    page = None
    submissions = None
    success_url = "wagtailforms:list_submissions"

    def get_queryset(self):
        """Returns a queryset for the selected submissions"""
        selected = self.request.GET.getlist("selected-submissions")
        submission_class = self.page.get_submission_class()
        return submission_class._default_manager.filter(id__in=selected)

    def handle_delete(self, submissions):
        """Deletes the given queryset and reports the count to the user"""
        count = submissions.count()
        submissions.delete()
        messages.success(
            self.request,
            ngettext(
                "One submission has been deleted.",
                "%(count)d submissions have been deleted.",
                count,
            )
            % {"count": count},
        )

    def get_success_url(self):
        """Returns the success URL to redirect to after a successful deletion"""
        return self.success_url

    def dispatch(self, request, *args, **kwargs):
        """Check permissions, set the page and submissions, handle delete"""
        page_id = kwargs.get("page_id")
        # Only forms the user is allowed to manage may have submissions deleted.
        if not get_forms_for_user(self.request.user).filter(id=page_id).exists():
            raise PermissionDenied
        self.page = get_object_or_404(Page, id=page_id).specific
        self.submissions = self.get_queryset()
        if self.request.method == "POST":
            self.handle_delete(self.submissions)
            return redirect(self.get_success_url(), page_id)
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        """Get the context for this view"""
        context = super().get_context_data(**kwargs)
        context.update({"page": self.page, "submissions": self.submissions})
        return context
class SubmissionsListView(SpreadsheetExportMixin, SafePaginateListView):
    """Lists submissions for the provided form page"""
    template_name = "wagtailforms/index_submissions.html"
    context_object_name = "submissions"
    form_page = None  # set in dispatch() from the URL kwargs
    ordering = ("-submit_time",)  # default listing order: newest first
    ordering_csv = ("submit_time",)  # keep legacy CSV ordering
    orderable_fields = (
        "id",
        "submit_time",
    )  # used to validate ordering in URL
    select_date_form = None  # populated by get_filtering()

    def dispatch(self, request, *args, **kwargs):
        """Check permissions and set the form page"""
        self.form_page = kwargs.get("form_page")
        if not get_forms_for_user(request.user).filter(pk=self.form_page.id).exists():
            raise PermissionDenied
        self.is_export = self.request.GET.get("export") in self.FORMATS
        if self.is_export:
            # Exports are unpaginated: the whole queryset goes to the file.
            self.paginate_by = None
            data_fields = self.form_page.get_data_fields()
            # Set the export fields and the headings for spreadsheet export
            self.list_export = [field for field, label in data_fields]
            self.export_headings = dict(data_fields)
        return super().dispatch(request, *args, **kwargs)

    def get_queryset(self):
        """Return queryset of form submissions with filter and order_by applied"""
        submission_class = self.form_page.get_submission_class()
        queryset = submission_class._default_manager.filter(page=self.form_page)
        filtering = self.get_filtering()
        if filtering and isinstance(filtering, dict):
            queryset = queryset.filter(**filtering)
        ordering = self.get_ordering()
        if ordering:
            if isinstance(ordering, str):
                ordering = (ordering,)
            queryset = queryset.order_by(*ordering)
        return queryset

    def get_paginate_by(self, queryset):
        """Get the number of items to paginate by, or ``None`` for no pagination"""
        if self.is_export:
            return None
        return self.paginate_by

    def get_validated_ordering(self):
        """Return a dict of field names with ordering labels if ordering is valid"""
        orderable_fields = self.orderable_fields or ()
        ordering = {}
        if self.is_export:
            # Revert to CSV order_by submit_time ascending for backwards compatibility
            default_ordering = self.ordering_csv or ()
        else:
            default_ordering = self.ordering or ()
        if isinstance(default_ordering, str):
            default_ordering = (default_ordering,)
        ordering_strs = self.request.GET.getlist("order_by") or list(default_ordering)
        for order in ordering_strs:
            try:
                # rpartition splits "-field" into ('', '-', 'field') and
                # "field" into ('', '', 'field'); prefix is '-' or ''.
                _, prefix, field_name = order.rpartition("-")
                if field_name in orderable_fields:
                    ordering[field_name] = (
                        prefix,
                        "descending" if prefix == "-" else "ascending",
                    )
            except (IndexError, ValueError):
                continue  # invalid ordering specified, skip it
        return ordering

    def get_ordering(self):
        """Return the field or fields to use for ordering the queryset"""
        ordering = self.get_validated_ordering()
        return [values[0] + name for name, values in ordering.items()]

    def get_filtering(self):
        """Return filtering as a dict for submissions queryset"""
        self.select_date_form = SelectDateForm(self.request.GET)
        result = {}
        if self.select_date_form.is_valid():
            date_from = self.select_date_form.cleaned_data.get("date_from")
            date_to = self.select_date_form.cleaned_data.get("date_to")
            if date_to:
                # careful: date_to must be increased by 1 day
                # as submit_time is a time so will always be greater
                date_to += datetime.timedelta(days=1)
                if date_from:
                    result["submit_time__range"] = [date_from, date_to]
                else:
                    result["submit_time__lte"] = date_to
            elif date_from:
                result["submit_time__gte"] = date_from
        return result

    def get_filename(self):
        """Returns the base filename for the generated spreadsheet data file"""
        return "{}-export-{}".format(
            self.form_page.slug, datetime.datetime.today().strftime("%Y-%m-%d")
        )

    def render_to_response(self, context, **response_kwargs):
        """Stream a spreadsheet for exports, otherwise render the template"""
        if self.is_export:
            return self.as_spreadsheet(
                context["submissions"], self.request.GET.get("export")
            )
        return super().render_to_response(context, **response_kwargs)

    def to_row_dict(self, item):
        """Orders the submission dictionary for spreadsheet writing"""
        row_dict = OrderedDict(
            (field, item.get_data().get(field)) for field in self.list_export
        )
        return row_dict

    def get_context_data(self, **kwargs):
        """Return context for view"""
        context = super().get_context_data(**kwargs)
        submissions = context[self.context_object_name]
        data_fields = self.form_page.get_data_fields()
        data_rows = []
        context["submissions"] = submissions
        if not self.is_export:
            # Build data_rows as list of dicts containing model_id and fields
            for submission in submissions:
                form_data = submission.get_data()
                data_row = []
                for name, label in data_fields:
                    val = form_data.get(name)
                    if isinstance(val, list):
                        val = ", ".join(val)
                    data_row.append(val)
                data_rows.append({"model_id": submission.id, "fields": data_row})
            # Build data_headings as list of dicts containing model_id and fields
            ordering_by_field = self.get_validated_ordering()
            orderable_fields = self.orderable_fields
            data_headings = []
            for name, label in data_fields:
                order_label = None
                if name in orderable_fields:
                    order = ordering_by_field.get(name)
                    if order:
                        order_label = order[1]  # 'ascending' or 'descending'
                    else:
                        order_label = "orderable"  # not ordered yet but can be
                data_headings.append(
                    {
                        "name": name,
                        "label": label,
                        "order": order_label,
                    }
                )
            context.update(
                {
                    "form_page": self.form_page,
                    "select_date_form": self.select_date_form,
                    "data_headings": data_headings,
                    "data_rows": data_rows,
                }
            )
        return context
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 08:28:34 2020
@author: Marcin
"""
from math import fmod
from src.globals import *
from timeit import default_timer as timer
from time import sleep
from collections import deque
import warnings
import pandas as pd
from tqdm import tqdm
# warnings.warn("Warning...........Message")
def normalize_angle_rad(angle):
    """Wrap an angle in radians into the interval [-pi, pi]."""
    wrapped = fmod(angle, 2 * np.pi)  # remainder keeps the sign of `angle`
    if wrapped > np.pi:
        wrapped -= 2 * np.pi
    elif wrapped < -np.pi:
        wrapped += 2 * np.pi
    return wrapped
class loop_timer():
    """ Simple game loop timer that sleeps for leftover time (if any) at end of each iteration"""

    LOG_INTERVAL_SEC = 10   # minimum seconds between diagnostic printouts
    NUM_SAMPLES = 1000      # size of the circular statistics buffers

    def __init__(self, rate_hz: float = None, dt_target: float = None, do_diagnostics: bool = False) -> None:
        """ Make a new loop_timer, specifying the target frame rate in Hz or time interval dt_target in seconds

        :param rate_hz: the target loop rate in Hz. The rate can be changed anytime by modifying rate_hz.
        :param dt_target: the time interval dt in seconds. It can be changed anytime by modifying dt.
        :param do_diagnostics: if True, periodically print/warn with timing statistics.
        :returns: new instance of loop_timer
        """
        self.rate_hz = None
        self.dt_target = None
        if (rate_hz is not None) and (dt_target is not None):
            raise Exception('You should provide either rate_hz OR dt, not both!')
        elif (rate_hz is None) and (dt_target is None):
            raise Exception('You must provide either rate_hz or dt!')
        elif dt_target is not None:
            self.dt_target = dt_target  # rate_hz set automatically by the property setter
        else:
            # BUGFIX: this branch used to test `self.rate_hz`/`self.dt_target`
            # (both just cleared to None above) instead of the constructor
            # arguments, so calling loop_timer(rate_hz=...) never actually
            # set the rate.
            self.rate_hz = rate_hz  # dt_target set automatically by the property setter
        self.first_call_done = False
        self.last_iteration_start_time = 0
        self.do_diagnostics = do_diagnostics
        self.last_log_time = 0
        # Circular buffers pre-filled with zeros; used only for diagnostics.
        self.circ_buffer_dt = deque(iterable=np.zeros(self.NUM_SAMPLES), maxlen=self.NUM_SAMPLES)
        self.circ_buffer_leftover = deque(iterable=np.zeros(self.NUM_SAMPLES), maxlen=self.NUM_SAMPLES)
        self.circ_buffer_dt_real = deque(iterable=np.zeros(50), maxlen=50)

    @property
    def rate_hz(self):
        return self._rate_hz

    @property
    def dt_target(self):
        return self._dt_target

    @rate_hz.setter
    def rate_hz(self, new_rate):
        # Keeps rate and interval consistent: setting one updates the other.
        if new_rate is None:
            self._rate_hz = None
        elif new_rate > 0.0:
            self._rate_hz = float(new_rate)
            self._dt_target = 1.0 / new_rate
        else:
            raise Exception('{} is not valid target rate!'.format(new_rate))

    @dt_target.setter
    def dt_target(self, new_dt):
        # dt == 0 is allowed and means "run as fast as possible" (rate = inf).
        if new_dt is None:
            self._dt_target = None
        elif new_dt > 0.0:
            self._dt_target = float(new_dt)
            self._rate_hz = 1.0 / new_dt
        elif new_dt == 0:
            self._dt_target = float(new_dt)
            self._rate_hz = np.inf
        else:
            raise Exception('{} is not valid target dt!'.format(new_dt))

    def start_loop(self):
        """ should be called to initialize the timer just before the entering the first loop"""
        self.last_iteration_start_time = timer()
        self.last_log_time = self.last_iteration_start_time
        self.first_call_done = True

    def sleep_leftover_time(self):
        """
        Call at the very end of each iteration; sleeps for whatever time is
        left to reach dt_target (if any) and records timing statistics.
        """
        now = timer()
        if not self.first_call_done:
            raise Exception('Loop timer was not initialized properly')
        dt = (now - self.last_iteration_start_time)
        leftover_time = self.dt_target - dt
        if leftover_time > 0:
            sleep(leftover_time)
            dt_real = self.dt_target
        else:
            dt_real = dt
        self.last_iteration_start_time = timer()
        self.circ_buffer_dt.append(dt)
        self.circ_buffer_leftover.append(leftover_time)
        self.circ_buffer_dt_real.append(dt_real)
        # Main functionality ends here. Lines below are just for diagnostics
        if self.do_diagnostics:
            if now - self.last_log_time > self.LOG_INTERVAL_SEC:
                self.last_log_time = now
                if leftover_time > 0:
                    print('Loop_timer slept for {:.3f} ms leftover time for desired loop interval {:.3f} ms.'
                          .format(leftover_time * 1000, self.dt_target * 1000))
                else:
                    if self.dt_target == 0.0:
                        warnings.warn('\nYou target the maximal simulation speed, '
                                      'the average time for simulation step is {:.3f} ms.\n'
                                      .format(-leftover_time * 1000))
                    else:
                        warnings.warn('\nTime ran over by {:.3f}ms the allowed time of {:.3f} ms.\n'
                                      .format(-leftover_time * 1000, self.dt_target * 1000))
                print('Average leftover time is {:.3f} ms and its variance {:.3f} ms'
                      .format(np.mean(self.circ_buffer_leftover) * 1000,
                              np.std(self.circ_buffer_leftover) * 1000))
                print('Average total time of calculations is {:.3f} ms and its variance {:.3f} ms'
                      .format(np.mean(self.circ_buffer_dt) * 1000,
                              np.std(self.circ_buffer_dt) * 1000))
def Generate_Experiment(MyCart, exp_len=random_length_globals, dt=dt_main_simulation_globals, track_complexity=N_globals, csv=None, mode=1):
    """
    This function runs a random CartPole experiment
    and returns the history of CartPole states, control inputs and desired cart position

    :param MyCart: instance of CartPole containing CartPole dynamics
    :param exp_len: How many time steps should the experiment have
        (default: 64+640+1 this format is used as it can )
    :param dt: simulation time step assigned to the CartPole instance
    :param track_complexity: complexity (N) of the generated random target-position track
    :param csv: optional csv file name; when given the run history is saved via save_history_csv
    :param mode: controller mode; 1 - control with LQR, 2 - with do-mpc
    :returns: pandas.DataFrame built from MyCart.dict_history
    """
    # Set CartPole in the right (automatic control) mode
    MyCart.set_mode(mode)  # 1 - you are controlling with LQR, 2- with do-mpc
    MyCart.save_data = 1
    MyCart.use_pregenerated_target_position = 1
    # Start from a clean history and state before the run.
    MyCart.reset_dict_history()
    MyCart.reset_state()
    # Generate new random function returning desired target position of the cart
    MyCart.dt = dt
    MyCart.random_length = exp_len
    MyCart.N = track_complexity  # Complexity of generated target position track
    MyCart.Generate_Random_Trace_Function()
    # Randomly set the initial state
    MyCart.time = 0.0
    # Cart starts somewhere in the middle half of the track.
    MyCart.s.position = np.random.uniform(low=-MyCart.HalfLength / 2.0,
                                          high=MyCart.HalfLength / 2.0)
    MyCart.s.positionD = np.random.uniform(low=-10.0,
                                           high=10.0)
    # Pole starts within +/-17.5 degrees of upright (angles in radians).
    MyCart.s.angle = np.random.uniform(low=-17.5 * (np.pi / 180.0),
                                       high=17.5 * (np.pi / 180.0))
    MyCart.s.angleD = np.random.uniform(low=-15.5 * (np.pi / 180.0),
                                        high=15.5 * (np.pi / 180.0))
    # Initial control input within 90% of the actuator limit.
    MyCart.u = np.random.uniform(low=-0.9 * MyCart.p.u_max,
                                 high=0.9 * MyCart.p.u_max)
    # Target position at time 0 (should be always 0)
    MyCart.target_position = MyCart.random_track_f(MyCart.time)  # = 0
    # Run the CartPole experiment for number of time
    for i in tqdm(range(int(exp_len)-1)):
        # Print an error message if it runs already to long (should stop before)
        if MyCart.time > MyCart.t_max_pre:
            raise Exception('ERROR: It seems the experiment is running too long...')
        MyCart.update_state()
        MyCart.augment_dict_history()
    data = pd.DataFrame(MyCart.dict_history)
    if csv is not None:
        MyCart.save_history_csv(csv_name=csv)
    # Leave the CartPole clean for the next experiment.
    MyCart.reset_dict_history()
    MyCart.reset_state()
    return data
|
# Mad-libs style form-letter template: the {placeholder} fields are meant to
# be filled via str.format(**fields); this script only prints the raw template.
# NOTE(review): "reciept" in the template text is a typo for "receipt" — left
# untouched here because the template is runtime data, not a comment.
letter = '''
Dear {salutation} {name},
Thank you for your letter. We are sorry that our {product} {verbed} in your {room}.
Please note that it should never be used in a {room}, especially near any {animals}.
Send us your reciept and {amount} for shipping and handling. We will send you another
{product} that, in our tests, is {percent}% less likely to have {verbed}.
Thank you for your support.
Sincerely,
{spokesman}
{job_title}
'''
print (letter)
|
# coding: utf-8

# Package version string; keep in sync with the release tag.
__version__ = '0.1.2'

# Re-export the public decorator at package level.
from .decorator import scheduledTask
|
from django.contrib import admin

from .models import Notification_Student

# Register your models here.
# Expose Notification_Student in the Django admin with the default ModelAdmin.
admin.site.register(Notification_Student)
class Design:
    """Simple apartment-design record: style, required objects, furniture, place.

    ``furniture`` and ``place`` start with placeholder defaults and are
    expected to be overwritten by the caller after construction.
    """

    _TEMPLATE = ('Stil kvartiry - {0.style}, Predmety neobhodimosti - {0.objects}, '
                 'Mebel - {0.furniture}, Mesto paspolojeniya kvartiry: {0.place}')

    def __init__(self, style, objects):
        self.style = style
        self.objects = objects
        self.furniture = 0
        self.place = 'place'

    def __str__(self):
        # Render the current attribute values into the fixed description line.
        return self._TEMPLATE.format(self)
# Demo: build a Design, then override the attributes __init__ defaulted.
design = Design('Minimalizma','Tolko nujnie predmety')
design.furniture = 'VSE 4TO VY HOTELI'
design.place = 'Djal'
print(design)
|
from django.db import migrations
def operation_document_file_filename_copy(apps, schema_editor):
    """Copy each document's label into its files' new ``filename`` column.

    Joins every document file with its parent document and issues one UPDATE
    per row, quoting all identifiers for the active database backend.
    """
    cursor_main = schema_editor.connection.cursor()
    cursor_document_file = schema_editor.connection.cursor()
    # BUGFIX: the SET target used to be the literal text `(unknown)` even
    # though the quoted `filename` identifier was passed to .format() below,
    # so the UPDATE was invalid SQL and the copy never happened.
    query_document_file = '''
        UPDATE {documents_documentfile} SET {filename} = %s WHERE {documents_documentfile}.{id} = %s;
    '''.format(
        documents_documentfile=schema_editor.connection.ops.quote_name(
            name='documents_documentfile'
        ),
        filename=schema_editor.connection.ops.quote_name(name='filename'),
        id=schema_editor.connection.ops.quote_name(name='id')
    )
    query = '''
        SELECT
            {documents_document}.{label},
            {documents_documentfile}.{id}
        FROM {documents_documentfile}
        INNER JOIN {documents_document} ON (
            {documents_documentfile}.{document_id} = {documents_document}.{id}
        )
    '''.format(
        document_id=schema_editor.connection.ops.quote_name(
            name='document_id'
        ),
        documents_document=schema_editor.connection.ops.quote_name(
            name='documents_document'
        ),
        documents_documentfile=schema_editor.connection.ops.quote_name(
            name='documents_documentfile'
        ),
        id=schema_editor.connection.ops.quote_name(name='id'),
        label=schema_editor.connection.ops.quote_name(name='label')
    )
    cursor_main.execute(query)
    # Each fetched row is (label, file_id), matching the (%s, %s) order of
    # the UPDATE's parameters.
    for row in cursor_main.fetchall():
        cursor_document_file.execute(
            query_document_file, row
        )
class Migration(migrations.Migration):
    # Data migration: fill the `filename` column added in 0066 from the
    # parent document labels. Reversal is a no-op (data is left in place).
    dependencies = [
        ('documents', '0066_documentfile_filename'),
    ]

    operations = [
        migrations.RunPython(
            code=operation_document_file_filename_copy,
            reverse_code=migrations.RunPython.noop
        ),
    ]
|
"""Some utility functions for commands (e.g., for cmdline handling)."""
# Authors: Yaroslav Halchenko <debian@onerussian.com>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD (3-clause)
import glob
import importlib
import os
import os.path as op
import sys
from optparse import OptionParser

# BUGFIX: was `import MNE.mne`, which binds the name `MNE` (and fails on a
# case-sensitive install); the rest of this module uses `mne.__version__`.
import mne
def _add_verbose_flag(parser):
parser.add_option("--verbose", dest='verbose',
help="Enable verbose mode (printing of log messages).",
default=None, action="store_true")
def load_module(name, path):
    """Load module from .py/.pyc file.

    Parameters
    ----------
    name : str
        Name of the module.
    path : str
        Path to .py/.pyc file.

    Returns
    -------
    mod : module
        Imported module.
    """
    from importlib.util import module_from_spec, spec_from_file_location

    # Build a spec for the file, create an empty module from it, then run
    # the file's code in that module's namespace.
    spec = spec_from_file_location(name, path)
    module = module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
def get_optparser(cmdpath, usage=None, prog_prefix='mne', version=None):
    """Create OptionParser with cmd specific settings (e.g., prog value).

    Parameters
    ----------
    cmdpath : str
        Path to the command's .py file. Its module docstring supplies the
        parser description (first line) and epilog (remaining lines).
    usage : str | None
        Usage string passed through to OptionParser.
    prog_prefix : str
        Prefix stripped from the file name to derive the command name.
    version : str | None
        Version string; defaults to mne.__version__.
    """
    # Fetch description
    mod = load_module('__temp', cmdpath)
    # BUGFIX: description/epilog were previously only assigned inside the
    # `if mod.__doc__:` branch, so a command module without a docstring
    # raised NameError when the parser was built below.
    description = None
    epilog = None
    if mod.__doc__:
        doc_lines = mod.__doc__.split('\n')
        description = doc_lines[0]
        if len(doc_lines) > 1:
            epilog = '\n'.join(doc_lines[1:])
    # Get the name of the command
    command = os.path.basename(cmdpath)
    command, _ = os.path.splitext(command)
    command = command[len(prog_prefix) + 1:]  # +1 is for `_` character
    # Set prog
    prog = prog_prefix + ' {}'.format(command)
    # Set version
    if version is None:
        version = mne.__version__
    # monkey patch OptionParser to not wrap epilog
    OptionParser.format_epilog = lambda self, formatter: self.epilog
    parser = OptionParser(prog=prog,
                          version=version,
                          description=description,
                          epilog=epilog, usage=usage)
    return parser
def main():
    """Entrypoint for mne <command> usage."""
    # Discover available sub-commands from mne/commands/mne_*.py file names.
    mne_bin_dir = op.dirname(op.dirname(__file__))
    command_files = sorted(glob.glob(op.join(mne_bin_dir, 'commands', 'mne_*.py')))
    valid_commands = [path.split(op.sep)[-1][4:-3] for path in command_files]

    def print_help():  # noqa
        print("Usage : mne command options\n")
        print("Accepted commands :\n")
        for c in valid_commands:
            print("\t- %s" % c)
        print("\nExample : mne browse_raw --raw sample_audvis_raw.fif")
        print("\nGetting help example : mne compute_proj_eog -h")

    if len(sys.argv) == 1 or "help" in sys.argv[1] or "-h" in sys.argv[1]:
        print_help()
    elif sys.argv[1] == "--version":
        print("MNE %s" % mne.__version__)
    elif sys.argv[1] not in valid_commands:
        print('Invalid command: "%s"\n' % sys.argv[1])
        print_help()
    else:
        # Import the matching command module and hand over argv (shifted so
        # the command sees itself as the program).
        command_mod = importlib.import_module('.mne_%s' % (sys.argv[1],),
                                              'mne.commands')
        sys.argv = sys.argv[1:]
        command_mod.run()
|
#!/usr/bin/python
from socket import *
from django.core.management import BaseCommand
class BroadcastListener(BaseCommand):
    """Management command: answer HLK discovery broadcasts on UDP port 988."""

    def handle(self, *args, **options):
        sock = socket(AF_INET, SOCK_DGRAM)
        sock.bind(('192.168.8.120', 988))
        while True:
            data, address = sock.recvfrom(1024)
            # BUGFIX: this handler used Python 2 `print` statements and
            # compared the UDP payload against str; on Python 3 recvfrom()
            # returns bytes, so the comparison and the reply must use bytes.
            print(':received:', data, "from ", address)
            if data == b"HLK":
                sock.sendto(b"HLK-RM(V1.78(Jul 23))", address)


# Django discovers the command class via the module-level `Command` name.
Command = BroadcastListener
|
import socket
import struct
import textwrap
# IPv4 protocol-number -> name mapping for the protocols we label.
PROTOCOLS = {1: 'ICMP', 2: 'IGMP', 6: 'TCP', 9: 'IGP', 17: 'UDP'}
# Receive buffer size: maximum possible IP packet length.
BUF = 65535
def textwrapping(prefix, string, size=80):
    """Wrap *string* to *size* columns, prepending *prefix* to every line.

    Bytes input is rendered as ``\\xNN`` escapes; the wrap width is reduced
    by the prefix length and, for bytes, kept even so escapes are not split.
    """
    width = size - len(prefix)
    if isinstance(string, bytes):
        string = ''.join(r'\x{:02x}'.format(b) for b in string)
        if width % 2:
            width -= 1
    return '\n'.join(prefix + chunk for chunk in textwrap.wrap(string, width))
def parse_mac(mac):
    """Format a 6-byte MAC address as colon-separated upper-case hex."""
    return ':'.join('{:02X}'.format(octet) for octet in mac)
def tcp_parse(packet):
    """Parse a TCP header; return (payload, fields-dict)."""
    res = {}
    (res['src_port'], res['dest_port'], res['sequence'],
     res['acknowledgment'], orf) = struct.unpack('! H H L L H', packet[:14])
    # Upper 4 bits of the offset/reserved/flags word give the header length
    # in 32-bit words; the payload starts after that many bytes.
    offset = (orf >> 12) * 4
    res['flag_urg'] = (orf & 32) >> 5
    res['flag_ack'] = (orf & 16) >> 4
    res['flag_psh'] = (orf & 8) >> 3
    # ... remaining flags (RST/SYN/FIN) are not extracted
    return packet[offset:], res
def udp_parse(packet):
    """Parse an 8-byte UDP header; return (payload, fields-dict).

    Header layout (RFC 768): src port (2), dest port (2), length (2),
    checksum (2).
    """
    res = {}
    # BUGFIX: the old format '! H H 2x H' skipped the length field and read
    # the *checksum* as the size, and `return packet[4:]` leaked the second
    # half of the header into the payload. Read the real length field and
    # skip the full 8-byte header.
    (res['src_port'], res['dest_port'], res['size']) = struct.unpack('! H H H 2x', packet[:8])
    return packet[8:], res
def ether_frame(packet):
    """Parse a 14-byte Ethernet II frame header; return (payload, fields-dict)."""
    dest, src, ether_type = struct.unpack('! 6s 6s H', packet[:14])
    res = {
        'destination_mac': parse_mac(dest),
        'src_mac': parse_mac(src),
        # Byte-swapped to match the htons()-filtered raw socket.
        'ethernet_type': socket.htons(ether_type),
    }
    return packet[14:], res
def icmp_parse(packet):
    """Parse a 4-byte ICMP header; return (payload, fields-dict)."""
    res = {}
    # BUGFIX: this used to unpack from the module-global `raw_data` captured
    # by the sniffer loop instead of the `packet` argument, so the function
    # only worked by accident inside that loop.
    (res['type'], res['code'], res['checksum']) = struct.unpack('! B B H', packet[:4])
    return packet[4:], res
def header_parse(packet):
    """Parse an IPv4 header; return (payload, fields-dict)."""
    res = {}
    version_header = packet[0]
    res['version'] = version_header >> 4
    # IHL is in 32-bit words; options can make the header longer than 20 bytes.
    header_length = (version_header & 15) * 4
    (res['ttl'], pro, src, target) = struct.unpack('! 8x B B 2x 4s 4s', packet[:20])
    res['src_ip'] = '.'.join(map(str, src))
    res['destination_ip'] = '.'.join(map(str, target))
    res['header_length'] = str(header_length) + 'byte'
    # Reverse-DNS lookups are best effort only. BUGFIX: the bare `except:`
    # clauses (which also swallowed KeyboardInterrupt/SystemExit) are
    # narrowed to OSError, the base of socket.herror/socket.gaierror.
    try:
        res['destination_host'] = socket.gethostbyaddr(res['destination_ip'])[0]
    except OSError:
        pass
    try:
        res['src_host'] = socket.gethostbyaddr(res['src_ip'])[0]
    except OSError:
        pass
    res['protocol'] = PROTOCOLS.get(pro, 'unknown')
    return packet[header_length:], res
# Capture loop: requires Linux (AF_PACKET raw socket) and root privileges.
# htons(0x0800) filters for IPv4 frames on the 'wlan0' interface.
with socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0800)) as conn:
    conn.bind(('wlan0', 0x0003))
    while True:
        recv = conn.recvfrom(BUF)
        # Peel the Ethernet layer, then the IP layer, printing each field.
        (raw_data, ether) = ether_frame(recv[0])
        print('Ethernet frame:')
        for i in ether:
            print(f"\t{i}: {ether[i]}")
        print('Header:')
        (raw_data, header) = header_parse(raw_data)
        for i in header:
            print(f"\t{i}: {header[i]}")
        # Transport-layer decoding is currently disabled:
        # if header['protocol'] == 'TCP':
        #     data, tcp = tcp_parse(raw_data)
        #     print('TCP:')
        #     for i in tcp:
        #         print(f"\t{i}: {tcp[i]}")
        #     if len(data):
        #         print('TCP data:')
        #         if 80 in (tcp['src_port'], tcp['dest_port']):
        #             try:
        #                 data = data.decode('u8')
        #                 for line in data.split('\n'):
        #                     print(f"\t{line}")
        #             except:
        #                 print(textwrapping('\t', data))
        #         else:
        #             print(textwrapping('\t', data))
        # elif header['protocol'] == 'UDP':
        #     (data, udp) = udp_parse(raw_data)
        #     print('UDP:')
        #     for i in udp:
        #         print(f"\t{i}: {udp[i]}")
        #     print('UDP data:')
        #     print(textwrapping('\t', data))
        # elif header['protocol'] == 'ICMP':
        #     (data, icmp) = icmp_parse(raw_data)
        #     print('ICMP:')
        #     for i in icmp:
        #         print(f"\t{i}: {icmp[i]}")
        #     print('ICMP data:')
        #     print(textwrapping('\t', data))
        # else:
        #     print('Other protocols:')
        #     print(textwrapping('\t', raw_data))
        # print('-'*40)
from floodsystem.geo import stations_by_distance
from floodsystem.stationdata import build_station_list
def run():
    """Task 1B: print the 10 closest and 10 furthest stations from Cambridge."""
    stations = build_station_list()
    # Stations sorted by distance from the Cambridge city centre coordinate.
    ranked = stations_by_distance(stations, (52.2053, 0.1218))
    print("the closest 10 stations are")
    print(ranked[:10])
    print("the farther 10 stations are")
    print(ranked[-10:])
if __name__ == "__main__":
    # Script entry point: banner, then the Task 1B demonstration.
    print("*** Task 1B: CUED Part IA Flood Warning System ***")
    run()
"""
Twisted PythonCard PbEchoClient
"""
from PythonCard import model, twistedModel
from twisted.cred.credentials import UsernamePassword
from twisted.spread import pb
from twisted.internet import reactor
from PythonCard.templates.dialogs.loginDialog import loginDialog
class DefinedError(pb.Error):
    """Application-defined Perspective Broker error, marshalled to the peer."""
    pass
class EchoClient(model.Background):
    """
    TPC PB Echo GUI Panel
    """

    def on_initialize(self, event):
        # The PB factory is created once; the TCP connection itself is only
        # made after a successful login (see on_buttonLogin_mouseClick).
        self.pbfactory = pb.PBClientFactory()
        # KEA the Send button and SendTextField should be disabled
        # until a successful login
        self.components.SendTextField.enabled = False
        self.components.buttonSend.enabled = False

    def on_SendTextField_keyPress(self, event):
        # if user presses return, send text
        if event.keyCode == 13:
            self.sendAndClearText()
        else:
            event.skip()

    # KEA 2004-04-27
    # this should popup a custom dialog
    # to prompt the user for the host, port number,
    # username, and password
    # with defaults of "localhost", pb.portno
    # "guest", and "guest"
    # this dialog is going to be pretty common so we'll stick
    # in PythonCard/templates/dialogs to simplify usage from
    # other twisted apps
    def on_buttonLogin_mouseClick(self, event):
        result = loginDialog(self, port=pb.portno)
        if result.accepted:
            # verify input here?
            host = result.host
            port = result.port
            username = result.username
            password = result.password
            reactor.connectTCP(host, port, self.pbfactory)
            # Deferred: loginsuccess on success, loginfailure on error.
            self.pbfactory.login(
                UsernamePassword(username, password)
            ).addCallbacks(self.loginsuccess,
                           self.loginfailure)

    def loginsuccess(self, perspective):
        # Login callback: remember the remote perspective and unlock the UI.
        self.statusBar.text = 'Connected'
        self.components.SendTextField.enabled = True
        self.components.buttonSend.enabled = True
        self.components.SendTextField.setFocus()
        self.perspective = perspective

    def loginfailure(self, error):
        self.displaycontent("Error on login: %s" % error)

    def sendAndClearText(self):
        # Send the field contents to the remote 'echo' method, then clear it.
        fld = self.components.SendTextField
        self.perspective.callRemote('echo', fld.text
                                    ).addCallbacks(self.echosuccess,
                                                   self.echofailure)
        fld.text = ""

    def on_buttonSend_mouseClick(self, event):
        self.sendAndClearText()

    def echosuccess(self, message):
        self.displaycontent(message)

    def echofailure(self, error):
        # trap() re-raises anything that is not a DefinedError.
        t = error.trap(DefinedError)
        self.displaycontent("error received"+t)

    def displaycontent(self, text):
        # Append a line to the received-messages text area.
        self.components.ReceivedTextArea.appendText(text + "\n")
if __name__ == '__main__':
    # TwistedApplication integrates the Twisted reactor with the GUI MainLoop.
    app = twistedModel.TwistedApplication(EchoClient)
    app.MainLoop()
|
import os
def write_additional_table_file(table_folder):
    """Write 'additional_tables.txt' listing every non-default .csv table.

    Raises RuntimeError if the mandatory 'default.csv' is missing; writes
    nothing at all when the folder holds only the default table.
    """
    # Sorted listing keeps the output file deterministic.
    file_names = sorted(os.listdir(table_folder))

    # make sure we have the default table
    default_name = 'default.csv'
    if default_name not in file_names:
        raise RuntimeError("Did not find the default table ('default.csv') in the table folder %s" % table_folder)

    # don't write anything if we don't have additional tables
    if len(file_names) == 1:
        return

    # Only csv files other than the default are listed.
    additional = [
        name for name in file_names
        if os.path.splitext(name)[1] == '.csv' and name != default_name
    ]
    out_file = os.path.join(table_folder, 'additional_tables.txt')
    with open(out_file, 'w') as f:
        f.writelines(name + '\n' for name in additional)
|
import pandas as pd

# BUGFIX: pd.DataFrame on a dict of plain scalars raises
# "ValueError: If using all scalar values, you must pass an index",
# so an explicit one-row index is supplied.
diction = {"A": 1,
           "B": 2,
           "C": 3,
           "D": 4}
df = pd.DataFrame(diction, index=[0])
print(df)
print('this is an edit')
|
import sys

# Make the locally built extension module importable.
sys.path.append('build')
import AvTrajectoryPlanner as av
import time

# Fixed scenario used for the timing run.
scenario_file = "benchmark/speed_test_scenario.txt"
with open(scenario_file) as file:
    envFileStr = file.read()

planner = av.Planner()
planner.loadFromJson(envFileStr)

start = time.time()
# run your code
for i in range(100):
    vehicleTrajectory = planner.solveTrajectory()
end = time.time()
elapsed = end - start

print("Benchmark results:")
# Throughput: trajectory solves per second over 100 iterations.
print(100.0/elapsed)
|
#!/usr/bin/env python3
# Copyright 2022 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import unittest
from common.common_arguments import build_common_argument_parser
class CommonArgumentsTest(unittest.TestCase):
    """Smoke tests for the shared benchmark argument parser."""

    def test_build_common_argument_parser(self):
        """A fully specified, valid command line parses without error."""
        parser = build_common_argument_parser()
        parser.parse_args([
            "--normal_benchmark_tool_dir=/tmp", "--traced_benchmark_tool_dir=/tmp",
            "--trace_capture_tool=/bin/ls", "."
        ])

    def test_build_common_argument_parser_check_build_dir(self):
        """A nonexistent build directory is rejected with SystemExit."""
        parser = build_common_argument_parser()
        with self.assertRaises(SystemExit):
            parser.parse_args(["nonexistent"])

    def test_build_common_argument_parser_check_normal_benchmark_tool(self):
        """A nonexistent normal benchmark tool dir is rejected."""
        parser = build_common_argument_parser()
        with self.assertRaises(SystemExit):
            parser.parse_args(["--normal_benchmark_tool_dir=nonexistent", "."])

    def test_build_common_argument_parser_check_traced_benchmark_tool(self):
        """A nonexistent traced benchmark tool dir is rejected."""
        parser = build_common_argument_parser()
        with self.assertRaises(SystemExit):
            parser.parse_args(["--traced_benchmark_tool_dir=nonexistent", "."])

    def test_build_common_argument_parser_check_trace_capture_tool(self):
        """A nonexistent trace capture tool path is rejected."""
        parser = build_common_argument_parser()
        with self.assertRaises(SystemExit):
            parser.parse_args(["--trace_capture_tool=nonexistent", "."])
# Allow running this test file directly (outside a test runner).
if __name__ == "__main__":
  unittest.main()
|
import unittest
from escore_python.observers import MockDataStoreObserver, TestCaseObservable


class DsToDsTest(unittest.TestCase, TestCaseObservable):
    """Unit test for the DsToDs link: moving a key within the DataStore."""

    def setUp(self):
        """Attach a mock datastore observer before each test."""
        super(DsToDsTest, self).set_up_observers([MockDataStoreObserver()])

    def test_execute(self):
        """Moving 'test' to 'moved_test' removes the old key and keeps the value."""
        from escore import process_manager, DataStore
        from escore.core_ops.links import DsToDs

        ds = process_manager.service(DataStore)
        ds['test'] = 1

        link = DsToDs()
        link.read_key = 'test'
        link.store_key = 'moved_test'
        link.execute()

        self.assertIn('moved_test', ds, 'new key not in datastore')
        self.assertNotIn('test', ds, 'old key still in datastore')
        self.assertEqual(ds['moved_test'], 1, 'key-value pair not consistent')

    def tearDown(self):
        """Detach observers and reset the eskapade execution state."""
        super(DsToDsTest, self).tear_down_observers()
        from escore.core import execution
        execution.reset_eskapade()
|
# third party imports
from rest_framework import serializers
# covid_app imports
from src.covid_project.covid_app.api.v1.serializers.region_serializer import RegionSerializer
class EstimatorSerializer(serializers.Serializer):
    """Serializer for covid-19 estimator input payloads."""

    region = RegionSerializer()
    periodType = serializers.CharField(max_length=50000)
    # Bug fix: this line used an annotation (':') instead of an assignment
    # ('='), so DRF never registered the field and it was silently dropped
    # from validation and serialized output.
    timeToElapse = serializers.IntegerField()
    reportedCases = serializers.IntegerField()
    population = serializers.IntegerField()
    totalHospitalBeds = serializers.IntegerField()

    def create(self, validated_data):
        # Read-only serializer: object creation is intentionally unsupported.
        pass

    def update(self, instance, validated_data):
        # Read-only serializer: updates are intentionally unsupported.
        pass
|
'''
Class that gathers information from the XmlDictConfig class.
This class parses all the information stacked up in an XmlDictConfig instance
and creates a file based on it.
'''
import xml.etree.ElementTree as ET
import errno
import os.path
try:
from builder.xml_converter import XmlDictConfig
from builder.conf import FILEERROR, ATTRIBERROR, DEFAULTTITLE, PYERROR,\
PYCONFERROR, PYERROREXISTS, PYCONFERROREXISTS
from builder.file_constructor import FileConstructor
from builder.recursive_packager import RecursivePackager
except:
from xml_converter import XmlDictConfig
from conf import FILEERROR, ATTRIBERROR, DEFAULTTITLE, PYERROR,\
PYCONFERROR, PYERROREXISTS, PYCONFERROREXISTS
from file_constructor import FileConstructor
from recursive_packager import RecursivePackager
class ParseIntoCreate:
    """This class is meant to create a tkinter code. It takes as argument
    uifile a .ui file, created on pygubu-designer It will convert and create a
    new document coded in python in newfile.
    if you don't give uifile any argument, it will load a default template
    you can consult in your target path.
    newfile is the file that's going to be created.
    defaultconf is the file that will include all variables for newfile.
    For more informations, please consult the README.md file.
    Have fun !
    """
    def __init__(self, newfile, uifile="tests/template_ui_file.ui", defaultconf="conf.py"):
        # newfile is the file that this class will create
        self.newfile = newfile
        # ui file is the file that's going to be converted
        self.uifile = uifile
        # defaultconf is the file that will be created and will include all
        # variables for newfile
        self.defaultconf = defaultconf
        # getting all informations from ui file
        # NOTE(review): the early returns below leave a partially initialised
        # object (later attributes such as self.constructor never get set);
        # callers that ignore the printed error will hit AttributeError.
        try:
            tree = ET.parse(self.uifile)
            root = tree.getroot()
        except OSError as er:
            #if file isn't an xml file
            if er.errno == errno.ENOENT:
                print(FILEERROR)
            return
        try:
            # Converting xml data into dictionnary
            self.xmldict = XmlDictConfig(root)
        except UnboundLocalError:
            # if file can't be read
            print(ATTRIBERROR)
            return
        # Loading constructor class
        self.constructor = FileConstructor()
        # Converting object into dictionnary
        self.creating_new_dicts()
        # self.realdict is now a packaged list
        self.real_list = RecursivePackager(self.realdict)
        self.real_list = self.real_list.return_converted_list()
        # dictionnary of text for conf.py file
        # List valors goes like this : [["LABEL_FRAME_TEXT", "some text"],
        #                               ...
        #                              ]
        self.conf_text = []
        # Adding erros if self.newfile or self.default_conf isn't .py
        if self.newfile[-3:] != ".py":
            print(PYERROR)
            return
        if self.defaultconf[-3:] != ".py":
            print(PYCONFERROR)
            return
        # Adding erros if self.newfile or self.default_conf already exists
        if os.path.isfile(self.newfile):
            print(PYERROREXISTS)
            return
        if os.path.isfile(self.defaultconf):
            print(PYCONFERROREXISTS)
            return
        # Running creating_new_file()
        self.creating_new_file()
    def creating_new_dicts(self):
        """This function is taking data inside xmldict and converts them into a
        new dictionnary.
        XmlDictConfig looks like a dictionnary, but it renders an
        object. This class also prevents the code from being spread out
        in the new file.
        """
        # removing useless data
        self.xmldict = self.xmldict["object"]
        # Creating a new dictionnary from self.xmldict
        # xmldict is actually an instance of XmlDictConfig
        # class, and by no mean a dictionnary
        self.realdict = {}
        # Adding xmldict values to realdict
        # cant do for x, y for some reasons
        for keys in self.xmldict:
            self.realdict[keys] = self.xmldict[keys]
    def creating_new_file(self):
        """This function takes self.realdict datas and converts them into code,
        using conf.py file as database."""
        widget_list = self.getting_master_widgets()
        # Fullfilling self.newfile with data
        with open(self.newfile, "w") as newdoc:
            #Documentation
            # Removing .py in self.defaultconf using [:-3]
            newdoc.write(self.constructor.create_stock_class(self.defaultconf[:-3]))
            #Creating class and init
            self.conf_text.append(["TITLE", DEFAULTTITLE])
            newdoc.write(self.constructor.create_class_and_init("text."+"TITLE"))
            # Adding functions in init
            for widgets in widget_list:
                # If widget is master widget
                # and instanciates from tk()
                if widgets[1]:
                    newdoc.write(self.constructor.add_main_widget_function_to_init(widgets[0]))
                    newdoc.write(self.constructor.add_widgets_to_master_widgets_func(widgets[0]))
                else:
                    newdoc.write(self.constructor.add_widgets_to_master_widgets_func(widgets[0]))
            # Creating functions, fulfilling them
            # Know which widgets gets two functions passes
            for widgets in widget_list:
                # If widgets[0] is an instance of Tk()
                if widgets[1]:
                    # Create master widget in its own function
                    self.creating_function(self.who_s_your_master(widgets[0], True),
                                           newdoc,
                                           True)
                # Add slave widgets
                self.creating_function(self.who_s_your_master(widgets[0]),
                                       newdoc)
            # Add launch function
            newdoc.write(self.constructor.add_launch_function())
            # Add if name == main function
            newdoc.write(self.constructor.add_name_eq_main())
            # Finally
            # NOTE(review): close() is redundant here — the 'with' block
            # already closes the file on exit.
            newdoc.close()
        ###########################
        # Now we can finally
        # create document for conf
        ###########################
        self.creating_conf_file()
    def who_s_your_master(self, arg1, master=False):
        """This function takes arg1, parses self.real_list and returns a list
        only containing widgets that have arg1 as master.
        Optionnal argument as "master" is given if we're looking for all
        informations of arg1 only.
        """
        new_list = []
        # If arg1 is instance of Tk()
        if master:
            for widgets in self.real_list:
                if arg1 == widgets[1]:
                    new_list.append(widgets)
        # If we're looking for all widgets that arg1 has
        elif not master:
            for widgets in self.real_list:
                if arg1 == widgets[0]:
                    new_list.append(widgets)
        # Return new_list once completed
        return new_list
    def creating_function(self, list_widgets, document, master=False):
        """This function helps creating_new_file function. It parses
        RecursivePackager result to create a function for the new file.
        Change master to True ONLY if you need to create a master
        function.
        """
        # Each entry of list_widgets is presumably
        # [master_id, widget_id, widget_class, properties, layout]
        # — TODO confirm against RecursivePackager.return_converted_list().
        # If master = True
        # Unique case
        if master:
            document.write(self.constructor.add_master_function(list_widgets[0][1]))
        elif not master:
            document.write(self.constructor.add_widgets_function(list_widgets[0][0]))
        # Create loop, adding all widgets in list_widgets inside the function
        for widgets in list_widgets:
            # Add id and class for current widget
            # if master = True, no arg3
            if master:
                document.write(self.constructor.add_identify_id_class_master(widgets[1],
                                                                             widgets[2]))
            # Add arg3 if master = False and widgets[0] is not null
            elif not master and widgets[0]:
                document.write(self.constructor.add_identify_id_class_master(widgets[1],
                                                                             widgets[2],
                                                                             "self." + widgets[0]))
            elif not master and not widgets[0]:
                document.write(self.constructor.add_identify_id_class_master(widgets[1],
                                                                             widgets[2]))
            if widgets[3]:
                # if there is text in properties
                if len(widgets[3]) > 1:
                    # Add text in conf_text list
                    self.conf_text.append([self.cap_text(widgets[1]), widgets[3][1]])
                    document.write(self.constructor.add_widget_conf(widgets[1],
                                                                    widgets[3][0].format("text." + self.cap_text(widgets[1]))))
                elif len(widgets[3]) == 1:
                    document.write(self.constructor.add_widget_conf(widgets[1],
                                                                    widgets[3][0]))
            if widgets[4]:
                document.write(self.constructor.add_widget_loc(widgets[1],
                                                               widgets[4][0]))
                # If _propagate == False
                # Add place_propagate(0)
                if len(widgets[4]) > 1:
                    document.write(self.constructor.add_widget_loc(widgets[1],
                                                                   widgets[4][1]))
            # Add spaces between widgets / functions
            # for each iteration
            document.write("\n")
    def cap_text(self, arg):
        """This function takes arg and converts it to ARG_TEXT.
        This function is usefull for the conf.py text.
        """
        return arg.upper() + "_TEXT"
    def getting_master_widgets(self):
        """This function works with creating_functions_for_new_file It returns
        a list with all master widgets. Initial list is self.real_list.
        Returns valors like this : [[example_widget, True]...]
        True means example_widget is a master widget that instanciates
        directly from tk()
        False means example_widget is an instance of another widget.
        """
        return_list = []
        # Loop that gets all master widgets
        for valors in self.real_list:
            if valors[0]not in return_list:
                return_list.append(valors[0])
        list_valors = []
        # Checking which widget is main widget.
        # A master with an empty valors[0] (no master of its own) is taken to
        # instantiate directly from Tk(); otherwise it is a nested widget.
        for masters in return_list:
            for valors in self.real_list:
                # Do not count [] empty list
                if isinstance(masters, str):
                    if masters == valors[1] and not valors[0]:
                        list_valors.append([masters, True])
                    if masters == valors[1] and valors[0]:
                        list_valors.append([masters, False])
        return list_valors
    def creating_conf_file(self):
        """This function is going to create a conf file. Data are stocked in
        the self.conf_text list They are gathered during the writing of newfile
        process, in the creating_function function.
        conf file name is by default conf.py Can be changed during class
        creation, by changing defaultconf arg
        """
        # Fullfilling self.defaultconf with data
        with open(self.defaultconf, "w") as newconf:
            # Documentation
            newconf.write(self.constructor.add_intro_conf(self.newfile))
            # Adding all variables and text for self.newfile file
            for text in self.conf_text:
                newconf.write(self.constructor.add_text(text[0], text[1]))
            # NOTE(review): redundant inside the 'with' block.
            newconf.close()
# Manual smoke test: converting the template .ui file writes newdocument.py
# and conf.py into the working directory (fails if they already exist).
if __name__ == '__main__':
    # test to make sure everything is working properly
    parser = ParseIntoCreate("newdocument.py", "tests/template_ui_file.ui")
|
from flask import Flask,request,url_for,g,render_template
from sqlalchemy import create_engine
import csv
import os
import pandas as pd

# NOTE(review): credentials are hard-coded in the connection URI — move them
# into environment variables or a config file before sharing this code.
mysql_uri = 'mysql://root:2080kk757@localhost:3306/echart_data?charset=utf8'
engine = create_engine(mysql_uri)

# Load the whole 'tt' table inside one transaction, then print the first
# 100 rows rendered as an HTML table.
with engine.connect() as conn, conn.begin():
    data = pd.read_sql_table('tt', conn)
    data_len = data.shape[0]
    new_df = data[0:100]
    print(new_df.to_html())

#
# app = Flask(__name__)
#
#
# @app.route('/')
# def index():
#     return new_df.to_html()
#
#
# if '__main__' == __name__:
#     app.run(debug=True)
# -*- coding: utf-8 -*-
import requests
import json
from lxml import etree
class Crawl(object):
    """Scrape JD.com for an item's price and display name by item id."""

    # Desktop Chrome user agent; sent with every request.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'}

    def get_price(self, item_id_inner):
        """Return the price string for the given item id.

        The price endpoint wraps its JSON payload in a JSONP-style
        callback; the wrapper characters are stripped before parsing.
        """
        url = 'https://p.3.cn/prices/mgets?callback=&skuIds=J_' + item_id_inner
        # Bug fix: this was a Python 2 print statement — a SyntaxError under
        # Python 3, while the rest of this file already uses print().
        print('该商品价格URL:', url)
        r = requests.get(url, headers=self.headers)
        price = r.text
        # Strip the JSONP wrapper around the JSON object.
        # TODO(review): confirm the exact wrapper against a live response.
        price = price[2:-4]
        js = json.loads(str(price))
        return js['p']

    def get_name(self, item_id_inner):
        """Return the item's display name, trying two known page layouts."""
        url = 'https://item.jd.com/' + item_id_inner + '.html'
        r = requests.get(url, headers=self.headers)
        selector = etree.HTML(r.text)
        name = selector.xpath("//*[@class='sku-name']/text()")  # list
        try:
            name = name[0].strip()
        except IndexError:
            print('尝试第二种名称捕获方式')
            try:
                name = selector.xpath("//*[@id='name']/h1/text()")
                name = name[0].strip()
            except IndexError:
                # Both layouts failed; 'name' is left as the (empty) xpath
                # result list from the second attempt.
                print('名称捕获失败')
        return name
|
from execution.executeSentence import executeSentence
from execution.AST.sentence import *
from execution.AST.expression import *
def up():
    """Print the constant 1 (generated placeholder routine)."""
    print(1)
def SALES_TAX(SUBTOTAL: float) -> float:
    """Return the generated total: 23*5 + 6/2 == 118.0.

    Bug fix: the function was annotated ``-> float`` but never returned
    TOTAL (callers always got None); it now returns the computed value.
    NOTE(review): SUBTOTAL is unused by the generated arithmetic — confirm
    against the source AST this code was generated from.
    """
    t0 = 23
    t1 = 5
    t2 = t0 * t1        # 115
    t3 = 6
    t4 = 2
    t5 = t3 / t4        # 3.0 (true division)
    t6 = t2 + t5        # 118.0
    TOTAL: float = t6
    return TOTAL
print(1)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training Keras model with multi-worker strategies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
def input_fn():
  """Return a repeated, batched dataset of random binary-classification data."""
  features = np.random.random((1024, 10))
  labels = np.random.randint(2, size=(1024, 1))
  features = tf.cast(features, tf.float32)
  dataset = tf.data.Dataset.from_tensor_slices((features, labels))
  # 100 repeats of 1024 examples, served in batches of 32.
  return dataset.repeat(100).batch(32)
def main(args):
  """Train and evaluate a small Keras binary classifier under multi-worker
  DistributionStrategy via the Estimator API.

  args[1] is the model_dir used for checkpoints.  NOTE(review): this relies
  on tf.contrib and the TF1 Estimator API — it will not run on TF2.
  """
  if len(args) < 2:
    print('You must specify model_dir for checkpoints such as'
          ' /tmp/tfkeras_example/.')
    return
  model_dir = args[1]
  print('Using %s to store checkpoints.' % model_dir)
  # Define a Keras Model.
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(16, activation='relu', input_shape=(10,)))
  model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
  # Compile the model.
  optimizer = tf.train.GradientDescentOptimizer(0.2)
  model.compile(loss='binary_crossentropy', optimizer=optimizer)
  model.summary()
  tf.keras.backend.set_learning_phase(True)
  # Define DistributionStrategies and convert the Keras Model to an
  # Estimator that utilizes these DistributionStrateges.
  # Evaluator is a single worker, so using MirroredStrategy.
  config = tf.estimator.RunConfig(
      experimental_distribute=tf.contrib.distribute.DistributeConfig(
          train_distribute=tf.contrib.distribute.CollectiveAllReduceStrategy(
              num_gpus_per_worker=2),
          eval_distribute=tf.contrib.distribute.MirroredStrategy(
              num_gpus_per_worker=2)))
  keras_estimator = tf.keras.estimator.model_to_estimator(
      keras_model=model, config=config, model_dir=model_dir)
  # Train and evaluate the model. Evaluation will be skipped if there is not an
  # "evaluator" job in the cluster.
  tf.estimator.train_and_evaluate(
      keras_estimator,
      train_spec=tf.estimator.TrainSpec(input_fn=input_fn),
      eval_spec=tf.estimator.EvalSpec(input_fn=input_fn))
# Entry point: enable INFO logging and dispatch to main() via tf.app.run.
if __name__ == '__main__':
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.app.run(argv=sys.argv)
|
''' Count number of declines with uncertain dates.
'''
import os
import pandas as pd
import numpy as np

input_path = "output/cohort.pickle"
output_path = "output/cohort_pickle_checks.csv"
backend = os.getenv("OPENSAFELY_BACKEND", "expectations")

# Keep only patients with a recorded first-decline date.
cohort = pd.read_pickle(input_path)
cohort = cohort.loc[pd.notnull(cohort["decl_first_dat"])]

# Flag decline dates that fall before 2020-12-08 as incorrect (1/0).
cohort["decline date incorrect"] = np.where(
    cohort["decl_first_dat"] < "2020-12-08", 1, 0)

# Percentage of patients per flag value.
checks = cohort.groupby(["decline date incorrect"])["sex"].count()
checks = 100 * checks / checks.sum()
print(checks)

#checks = cohort.agg({"max","min", "count"}).transpose()
checks.to_csv(output_path)
|
r"""
Finite dimensional semisimple algebras with basis
"""
from __future__ import absolute_import
#*****************************************************************************
# Copyright (C) 2011-2015 Nicolas M. Thiery <nthiery at users.sf.net>
# 2014-2015 Aladin Virmaux <aladin.virmaux at u-psud.fr>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.categories.category_with_axiom import CategoryWithAxiom_over_base_ring
from sage.misc.cachefunc import cached_method
from .algebras import Algebras
from .semisimple_algebras import SemisimpleAlgebras
class FiniteDimensionalSemisimpleAlgebrasWithBasis(CategoryWithAxiom_over_base_ring):
    """
    The category of finite dimensional semisimple algebras with a distinguished basis
    EXAMPLES::
        sage: from sage.categories.finite_dimensional_semisimple_algebras_with_basis import FiniteDimensionalSemisimpleAlgebrasWithBasis
        sage: C = FiniteDimensionalSemisimpleAlgebrasWithBasis(QQ); C
        Category of finite dimensional semisimple algebras with basis over Rational Field
    This category is best constructed as::
        sage: D = Algebras(QQ).Semisimple().FiniteDimensional().WithBasis(); D
        Category of finite dimensional semisimple algebras with basis over Rational Field
        sage: D is C
        True
    TESTS::
        sage: TestSuite(C).run()
    """
    # Axiom wiring: lets Sage's category framework resolve
    # SemisimpleAlgebras(...).FiniteDimensional().WithBasis() to this class.
    _base_category_class_and_axiom = (SemisimpleAlgebras.FiniteDimensional, "WithBasis")
    class ParentMethods:
        # This is needed to override the one in finite_dimensional_algebras_with_basis
        def radical_basis(self, **keywords):
            r"""
            Return a basis of the Jacobson radical of this algebra.
            - ``keywords`` -- for compatibility; ignored.
            OUTPUT: the empty list since this algebra is semisimple.
            EXAMPLES::
                sage: A = SymmetricGroup(4).algebra(QQ)
                sage: A.radical_basis()
                ()
            TESTS::
                sage: A.radical_basis.__module__
                'sage.categories.finite_dimensional_semisimple_algebras_with_basis'
            """
            return ()
        @cached_method
        def central_orthogonal_idempotents(self):
            r"""
            Return a maximal list of central orthogonal
            idempotents of ``self``.
            *Central orthogonal idempotents* of an algebra `A`
            are idempotents `(e_1, \dots, e_n)` in the center
            of `A` such that `e_i e_j = 0` whenever `i \neq j`.
            With the maximality condition, they sum up to `1`
            and are uniquely determined (up to order).
            INPUT:
            - ``self`` -- a semisimple algebra.
            EXAMPLES:
            For the algebra of the symmetric group `S_3`, we
            recover the sum and alternating sum of all
            permutations, together with a third idempotent::
                sage: A3 = SymmetricGroup(3).algebra(QQ)
                sage: idempotents = A3.central_orthogonal_idempotents()
                sage: idempotents
                (1/6*() + 1/6*(2,3) + 1/6*(1,2) + 1/6*(1,2,3) + 1/6*(1,3,2) + 1/6*(1,3),
                 2/3*() - 1/3*(1,2,3) - 1/3*(1,3,2),
                 1/6*() - 1/6*(2,3) - 1/6*(1,2) + 1/6*(1,2,3) + 1/6*(1,3,2) - 1/6*(1,3))
                sage: A3.is_identity_decomposition_into_orthogonal_idempotents(idempotents)
                True
            For the semisimple quotient of a quiver algebra,
            we recover the vertices of the quiver::
                sage: A = FiniteDimensionalAlgebrasWithBasis(QQ).example(); A
                An example of a finite dimensional algebra with basis:
                the path algebra of the Kronecker quiver (containing
                the arrows a:x->y and b:x->y) over Rational Field
                sage: Aquo = A.semisimple_quotient()
                sage: Aquo.central_orthogonal_idempotents()
                (B['x'], B['y'])
            """
            # Delegate to the (commutative) center and lift the idempotents
            # back into this algebra.
            return tuple([x.lift()
                          for x in self.center().central_orthogonal_idempotents()])
    class Commutative(CategoryWithAxiom_over_base_ring):
        class ParentMethods:
            @cached_method
            def _orthogonal_decomposition(self, generators=None):
                r"""
                Return a maximal list of orthogonal quasi-idempotents of
                this finite dimensional semisimple commutative algebra.
                INPUT:
                - ``generators`` -- a list of generators of
                  ``self`` (default: the basis of ``self``)
                OUTPUT:
                A list of quasi-idempotent elements of ``self``.
                Each quasi-idempotent `e` spans a one
                dimensional (non unital) subalgebra of
                ``self``, and cannot be decomposed as a sum
                `e=e_1+e_2` of quasi-idempotents elements.
                All together, they form a basis of ``self``.
                Up to the order and scalar factors, the result
                is unique. In particular it does not depend on
                the provided generators which are only used
                for improved efficiency.
                ALGORITHM:
                Thanks to Schur's Lemma, a commutative
                semisimple algebra `A` is a direct sum of
                dimension 1 subalgebras. The algorithm is
                recursive and proceeds as follows:
                0. If `A` is of dimension 1, return a non zero
                   element.
                1. Otherwise: find one of the generators such
                   that the morphism `x \mapsto ax` has at
                   least two (right) eigenspaces.
                2. Decompose both eigenspaces recursively.
                EXAMPLES:
                We compute an orthogonal decomposition of the
                center of the algebra of the symmetric group
                `S_4`::
                    sage: Z4 = SymmetricGroup(4).algebra(QQ).center()
                    sage: Z4._orthogonal_decomposition()
                    (B[0] + B[1] + B[2] + B[3] + B[4],
                     B[0] + 1/3*B[1] - 1/3*B[2] - 1/3*B[4],
                     B[0] + B[2] - 1/2*B[3],
                     B[0] - 1/3*B[1] - 1/3*B[2] + 1/3*B[4],
                     B[0] - B[1] + B[2] + B[3] - B[4])
                .. TODO::
                    Improve speed by using matrix operations
                    only, or even better delegating to a
                    multivariate polynomial solver.
                """
                # Base case: a 1-dimensional algebra is spanned by any
                # non-zero element, e.g. its single basis element.
                if self.dimension() == 1:
                    return self.basis().list()
                # Category for the eigenspace subalgebras built below.
                category = Algebras(self.base_ring()).Semisimple().WithBasis().FiniteDimensional().Commutative().Subobjects()
                if generators is None:
                    generators = self.basis().list()
                # Searching for a good generator ...
                for gen in generators:
                    # Computing the eigenspaces of the
                    # linear map x -> gen*x
                    phi = self.module_morphism(
                        on_basis=lambda i:
                        gen*self.term(i),
                        codomain=self)
                    eigenspaces = phi.matrix().eigenspaces_right()
                    # At least two eigenspaces means gen splits the algebra.
                    if len(eigenspaces) >= 2:
                        # Gotcha! Let's split the algebra according to the eigenspaces
                        subalgebras = [
                            self.submodule(map(self.from_vector, eigenspace.basis()),
                                           category=category)
                            for eigenvalue, eigenspace in eigenspaces]
                        # Decompose recursively each eigenspace
                        return tuple([idempotent.lift()
                                      for subalgebra in subalgebras
                                      for idempotent in subalgebra._orthogonal_decomposition()])
                # TODO: Should this be an assertion check?
                raise Exception("Unable to fully decompose %s!"%self)
            @cached_method
            def central_orthogonal_idempotents(self):
                r"""
                Return the central orthogonal idempotents of
                this semisimple commutative algebra.
                Those idempotents form a maximal decomposition
                of the identity into primitive orthogonal
                idempotents.
                OUTPUT:
                A list of orthogonal idempotents of ``self``.
                EXAMPLES::
                    sage: A4 = SymmetricGroup(4).algebra(QQ)
                    sage: Z4 = A4.center()
                    sage: idempotents = Z4.central_orthogonal_idempotents()
                    sage: idempotents
                    (1/24*B[0] + 1/24*B[1] + 1/24*B[2] + 1/24*B[3] + 1/24*B[4],
                     3/8*B[0] + 1/8*B[1] - 1/8*B[2] - 1/8*B[4],
                     1/6*B[0] + 1/6*B[2] - 1/12*B[3],
                     3/8*B[0] - 1/8*B[1] - 1/8*B[2] + 1/8*B[4],
                     1/24*B[0] - 1/24*B[1] + 1/24*B[2] + 1/24*B[3] - 1/24*B[4])
                Lifting those idempotents from the center, we
                recognize among them the sum and alternating
                sum of all permutations::
                    sage: [e.lift() for e in idempotents]
                    [1/24*() + 1/24*(3,4) + 1/24*(2,3) + 1/24*(2,3,4) + 1/24*(2,4,3)
                     + 1/24*(2,4) + 1/24*(1,2) + 1/24*(1,2)(3,4) + 1/24*(1,2,3)
                     + 1/24*(1,2,3,4) + 1/24*(1,2,4,3) + 1/24*(1,2,4) + 1/24*(1,3,2)
                     + 1/24*(1,3,4,2) + 1/24*(1,3) + 1/24*(1,3,4) + 1/24*(1,3)(2,4)
                     + 1/24*(1,3,2,4) + 1/24*(1,4,3,2) + 1/24*(1,4,2) + 1/24*(1,4,3)
                     + 1/24*(1,4) + 1/24*(1,4,2,3) + 1/24*(1,4)(2,3),
                     ...,
                     1/24*() - 1/24*(3,4) - 1/24*(2,3) + 1/24*(2,3,4) + 1/24*(2,4,3)
                     - 1/24*(2,4) - 1/24*(1,2) + 1/24*(1,2)(3,4) + 1/24*(1,2,3)
                     - 1/24*(1,2,3,4) - 1/24*(1,2,4,3) + 1/24*(1,2,4) + 1/24*(1,3,2)
                     - 1/24*(1,3,4,2) - 1/24*(1,3) + 1/24*(1,3,4) + 1/24*(1,3)(2,4)
                     - 1/24*(1,3,2,4) - 1/24*(1,4,3,2) + 1/24*(1,4,2) + 1/24*(1,4,3)
                     - 1/24*(1,4) - 1/24*(1,4,2,3) + 1/24*(1,4)(2,3)]
                We check that they indeed form a decomposition
                of the identity of `Z_4` into orthogonal idempotents::
                    sage: Z4.is_identity_decomposition_into_orthogonal_idempotents(idempotents)
                    True
                """
                # Rescale each quasi-idempotent e so that e*e == e.
                return tuple([(e.leading_coefficient()/(e*e).leading_coefficient())*e
                              for e in self._orthogonal_decomposition()])
|
import copy
import multiprocessing
import re
import requests
import setproctitle
import time
from shakenfist.config import config
from shakenfist.daemons import daemon
from shakenfist import db
from shakenfist import exceptions
from shakenfist.images import Image
from shakenfist import logutil
from shakenfist import net
from shakenfist import scheduler
from shakenfist import util
from shakenfist import virt
from shakenfist.tasks import (QueueTask,
DeleteInstanceTask,
ErrorInstanceTask,
FetchImageTask,
InstanceTask,
PreflightInstanceTask,
StartInstanceTask,
)
# Module-level structured logger for the queues daemon.
LOG, _ = logutil.setup(__name__)
def handle(jobname, workitem):
    """Execute all tasks of one dequeued work item.

    Each task is dispatched by type (fetch image, preflight, start, delete,
    error).  Failures are recorded against the instance; the work item is
    always resolved in the DB on exit.
    """
    log = LOG.withField('workitem', jobname)
    log.info('Processing workitem')
    setproctitle.setproctitle(
        '%s-%s' % (daemon.process_name('queues'), jobname))
    instance_uuid = None
    task = None
    try:
        for task in workitem.get('tasks', []):
            # Reject anything that didn't decode into a known QueueTask.
            if not QueueTask.__subclasscheck__(type(task)):
                raise exceptions.UnknownTaskException(
                    'Task was not decoded: %s' % task)
            if (InstanceTask.__subclasscheck__(type(task)) or
                    isinstance(task, FetchImageTask)):
                instance_uuid = task.instance_uuid()
            if instance_uuid:
                log_i = log.withInstance(instance_uuid)
            else:
                log_i = log
            log_i.withField('task_name', task.name()).info('Starting task')
            # TODO(andy) Should network events also come through here eventually?
            # Then this can be generalised to record events on networks/instances
            # TODO(andy) This event should be recorded when it is recorded as
            # dequeued in the DB. Currently it's reporting action on the item
            # and calling it 'dequeue'.
            if instance_uuid:
                # TODO(andy) move to QueueTask
                db.add_event('instance', instance_uuid, task.pretty_task_name(),
                             'dequeued', None, 'Work item %s' % jobname)
            if isinstance(task, FetchImageTask):
                image_fetch(task.url(), instance_uuid)
            elif isinstance(task, PreflightInstanceTask):
                redirect_to = instance_preflight(instance_uuid, task.network())
                if redirect_to:
                    # Placement chose another node: requeue the whole work
                    # item there and stop processing locally.
                    log_i.info('Redirecting instance start to %s'
                               % redirect_to)
                    db.enqueue(redirect_to, workitem)
                    return
            elif isinstance(task, StartInstanceTask):
                instance_start(instance_uuid, task.network())
                db.enqueue('%s-metrics' % config.NODE_NAME, {})
            elif isinstance(task, DeleteInstanceTask):
                try:
                    instance_delete(instance_uuid, 'deleted')
                except Exception as e:
                    util.ignore_exception(daemon.process_name('queues'), e)
            elif isinstance(task, ErrorInstanceTask):
                try:
                    instance_delete(instance_uuid, 'error', task.error_msg())
                    db.enqueue('%s-metrics' % config.NODE_NAME, {})
                except Exception as e:
                    util.ignore_exception(daemon.process_name('queues'), e)
            else:
                log_i.withField('task', task).error('Unhandled task - dropped')
            log_i.info('Task complete')
    except exceptions.ImageFetchTaskFailedException as e:
        # Usually caused by external issue and not an application error
        log.info('Fetch Image Error: %s', e)
        if instance_uuid:
            db.enqueue_instance_error(instance_uuid,
                                      'failed queue task: %s' % e)
    except Exception as e:
        util.ignore_exception(daemon.process_name('queues'), e)
        if instance_uuid:
            db.enqueue_instance_error(instance_uuid,
                                      'failed queue task: %s' % e)
    finally:
        # Always mark the work item resolved, even on failure.
        db.resolve(config.NODE_NAME, jobname)
        if instance_uuid:
            db.add_event('instance', instance_uuid, 'tasks complete',
                         'dequeued', None, 'Work item %s' % jobname)
        log.info('Completed workitem')
def image_fetch(url, instance_uuid):
    """Fetch the image at url under a per-image lock.

    On a network/HTTP failure, records an error event (with a cleaned-up
    message) and raises ImageFetchTaskFailedException.
    """
    instance = virt.Instance.from_db(instance_uuid)
    try:
        # TODO(andy): Wait up to 15 mins for another queue process to download
        # the required image. This will be changed to queue on a
        # "waiting_image_fetch" queue but this works now.
        with db.get_lock('image', config.NODE_NAME, Image.calc_unique_ref(url),
                         timeout=15*60, op='Image fetch') as lock:
            img = Image.from_url(url)
            img.get([lock], instance)
            db.add_event('image', url, 'fetch', None, None, 'success')
    except (exceptions.HTTPError, requests.exceptions.RequestException) as e:
        LOG.withField('image', url).info('Failed to fetch image')
        if instance_uuid:
            db.enqueue_instance_error(instance_uuid,
                                      'Image fetch failed: %s' % e)
        # Clean common problems to store in events
        msg = str(e)
        # Extract the human-readable part of urllib3 NewConnectionError text.
        re_conn_err = re.compile(r'.*NewConnectionError\(\'\<.*\>: (.*)\'')
        m = re_conn_err.match(msg)
        if m:
            msg = m.group(1)
        db.add_event('image', url, 'fetch', None, None, 'Error: '+msg)
        raise exceptions.ImageFetchTaskFailedException(
            'Failed to fetch image %s' % url)
def instance_preflight(instance_uuid, network):
    """Try to place the instance on this node, else pick another node.

    Returns None when the instance is placed locally, or the name of the
    node the start should be redirected to.  Raises
    AbortInstanceStartException when no placement is possible.
    """
    instance = virt.Instance.from_db(instance_uuid)
    if not instance:
        raise exceptions.InstanceNotInDBException(instance_uuid)
    instance.update_instance_state('preflight')
    # Try to place on this node
    s = scheduler.Scheduler()
    try:
        s.place_instance(instance, network, candidates=[config.NODE_NAME])
        return None
    except exceptions.LowResourceException as e:
        instance.add_event('schedule', 'retry', None,
                           'insufficient resources: ' + str(e))
    # Unsuccessful placement, check if reached placement attempt limit
    if instance.placement_attempts > 3:
        raise exceptions.AbortInstanceStartException(
            'Too many start attempts')
    # Try placing on another node
    try:
        if instance.requested_placement:
            # TODO(andy): Ask Mikal why this is not the current node?
            candidates = [instance.requested_placement]
        else:
            # All known nodes except ourselves (local placement just failed).
            candidates = []
            for node in s.metrics.keys():
                if node != config.NODE_NAME:
                    candidates.append(node)
        candidates = s.place_instance(instance, network,
                                      candidates=candidates)
        instance.place_instance(candidates[0])
        return candidates[0]
    except exceptions.LowResourceException as e:
        instance.add_event('schedule', 'failed', None,
                           'insufficient resources: ' + str(e))
        # This raise implies delete above
        raise exceptions.AbortInstanceStartException(
            'Unable to find suitable node')
def instance_start(instance_uuid, network):
    """Create the instance's networks and boot it under a per-instance lock.

    On missing/dead networks or known libvirt configuration errors the
    instance is put into the error state instead of raising.
    """
    log = LOG.withField('instance', instance_uuid)
    with db.get_lock(
            'instance', None, instance_uuid, ttl=900, timeout=120,
            op='Instance start') as lock:
        instance = virt.Instance.from_db(instance_uuid)
        # Collect the networks
        nets = {}
        for netdesc in network:
            if netdesc['network_uuid'] not in nets:
                n = net.from_db(netdesc['network_uuid'])
                if not n:
                    db.enqueue_instance_error(instance_uuid, 'missing network')
                    return
                nets[netdesc['network_uuid']] = n
        # Create the networks
        with util.RecordedOperation('ensure networks exist', instance):
            for network_uuid in nets:
                n = nets[network_uuid]
                try:
                    n.create()
                    n.ensure_mesh()
                    n.update_dhcp()
                except exceptions.DeadNetwork as e:
                    log.withField('network', n).warning(
                        'Instance tried to use dead network')
                    db.enqueue_instance_error(
                        instance_uuid, 'tried to use dead network: %s' % e)
                    return
        # Allocate console and VDI ports
        instance.allocate_instance_ports()
        # Now we can start the instance
        libvirt = util.get_libvirt()
        try:
            with util.RecordedOperation('instance creation',
                                        instance):
                instance.create(lock=lock)
        except libvirt.libvirtError as e:
            code = e.get_error_code()
            # Unrecoverable configuration errors: error the instance out.
            # Other libvirt errors fall through.
            if code in (libvirt.VIR_ERR_CONFIG_UNSUPPORTED,
                        libvirt.VIR_ERR_XML_ERROR):
                db.enqueue_instance_error(instance_uuid,
                                          'instance failed to start: %s' % e)
                return
        for iface in db.get_instance_interfaces(instance_uuid):
            db.update_network_interface_state(iface['uuid'], 'created')
        instance.update_instance_state('created')
def instance_delete(instance_uuid, new_state, error_msg=None):
    """Delete an instance and clean up networks it no longer shares.

    Args:
        instance_uuid: UUID of the instance to delete.
        new_state: state to record on the instance after deletion.
        error_msg: optional error message to record alongside the state.
    """
    with db.get_lock('instance', None, instance_uuid, timeout=120,
                     op='Instance delete'):
        db.add_event('instance', instance_uuid, 'queued', 'delete', None, None)

        # Create list of networks used by instance
        instance_networks = []
        for iface in list(db.get_instance_interfaces(instance_uuid)):
            if not iface['network_uuid'] in instance_networks:
                instance_networks.append(iface['network_uuid'])

        # Create list of networks used by all other instances on this node
        host_networks = []
        for inst in list(db.get_instances(only_node=config.NODE_NAME)):
            if not inst['uuid'] == instance_uuid:
                for iface in db.get_instance_interfaces(inst['uuid']):
                    if not iface['network_uuid'] in host_networks:
                        host_networks.append(iface['network_uuid'])

        instance = virt.Instance.from_db(instance_uuid)
        if instance:
            instance.delete()
            instance.update_instance_state(new_state)
            if error_msg:
                instance.update_error_message(error_msg)

        # Check each network used by the deleted instance
        for network in instance_networks:
            n = net.from_db(network)
            if n:
                # If network used by another instance, only update DHCP so the
                # deleted instance's lease goes away.
                if network in host_networks:
                    with util.RecordedOperation('deallocate ip address',
                                                instance):
                        n.update_dhcp()
                else:
                    # Network not used by any other instance therefore delete
                    with util.RecordedOperation('remove network', n):
                        n.delete()
class Monitor(daemon.Daemon):
    """Queue worker daemon: dequeues jobs for this node and runs each one
    in its own subprocess, capping concurrency at half the present CPUs."""

    def run(self):
        workers = []
        LOG.info('Starting Queues')

        # Use the CPU map to size the worker pool.
        libvirt = util.get_libvirt()
        conn = libvirt.open(None)
        present_cpus, _, _ = conn.getCPUMap()

        while True:
            try:
                # Reap finished workers (iterate a copy so removal is safe).
                for w in copy.copy(workers):
                    if not w.is_alive():
                        w.join(1)
                        workers.remove(w)

                # Only dequeue new work while below the concurrency cap.
                if len(workers) < present_cpus / 2:
                    jobname, workitem = db.dequeue(config.NODE_NAME)
                else:
                    workitem = None
                if not workitem:
                    time.sleep(0.2)
                    continue

                p = multiprocessing.Process(
                    target=handle, args=(jobname, workitem,),
                    name='%s-worker' % daemon.process_name('queues'))
                p.start()
                workers.append(p)

            except Exception as e:
                # Log and keep the daemon loop alive on any worker error.
                util.ignore_exception(daemon.process_name('queues'), e)
|
# -*- coding=utf-8 -*-
import argparse
import buildplug
import shutil
import utils
import os
import logging
import glob
import sys
import cmdrunner
# Relative path to the template Unity host project copied by `create`.
__hostunityprojpath = r"..\..\HostUnityProj"
# Default build configuration name.
config = "debug"
def create(args):
    """
    Create a Unity project from the host project template and the plugs
    listed in the module config.

    Args:
        args: argparse namespace with `pathname` (destination folder) and
              `config` ("Debug"/"Release", used when a plug ships as a dll).
    """
    try:
        logging.info("copy host project...")
        shutil.copytree(__hostunityprojpath, args.pathname)
    except shutil.Error as e:
        logging.error("create project [%s] error: [%s]", args.pathname, e.args)
        return
    except FileExistsError:
        logging.error("target project already exist.")
        return

    logging.info("copy plugs...")
    cfg = utils.Config(buildplug.moduleini)
    plugs = buildplug.getusedplugs(cfg)
    uniplugroot = cfg.get("DEFAULT", "uniplugroot")
    for plug in plugs:
        root = cfg.get(plug, "root")
        projname = cfg.get(plug, "projname")
        srcroot = cfg.get(plug, "srcroot")
        srcdirs = cfg.get(plug, "srcdir")
        usedll = cfg.getbool(plug, "usedll")
        plugfolder = cfg.get(plug, "plugfolder", "Assets/Plugins")
        plugdestfolder = os.path.join(args.pathname, plugfolder, uniplugroot)
        logging.info("copy plug : " + plug)

        # Copy referenced libraries (skipping Unity's own assemblies).
        ref_dll_list = [cfg.get(plug, x) for x in cfg.options(plug) if x.startswith("refdll_")]
        for dll in ref_dll_list:
            if "UnityAssemblies" not in dll:
                logging.info("copy ref lib dll: " + dll)
                utils.copy(os.path.join(buildplug.libroot, dll), plugdestfolder, buildplug.libroot)

        if not usedll:
            # Copy source files from the plug's source root.
            plugsrcroot = os.path.join(buildplug.modulefolder, plug, root, srcroot)
            if not os.path.isdir(plugsrcroot):
                logging.warning("plug [%s] src files not exist?", plug)
                continue
            exts = cfg.get("DEFAULT", "srcexts", None)
            if exts:
                exts = exts.split(';')
            copyrootdir = os.path.join(plugdestfolder, projname)
            if srcdirs:
                for srcdir in srcdirs.split(';'):
                    # BUGFIX: the source directory lives under the plug's
                    # source root, not under the destination plug folder
                    # (the original joined against `plugfolder`).
                    copysrcdir = os.path.join(plugsrcroot, srcdir)
                    if not os.path.isdir(copysrcdir):
                        continue
                    destfolder = os.path.join(copyrootdir, srcdir)
                    utils.copy(copysrcdir, destfolder, copysrcdir, exts)
                    # Remove configured sub-folders from the copied tree.
                    delfolders = cfg.get(plug, "del")
                    if delfolders:  # guard: "del" may be absent in the cfg
                        for delfolder in delfolders.split(';'):
                            cmdrunner.system("rd /s /q " + os.path.join(destfolder, delfolder))
            else:
                utils.copy(plugsrcroot, copyrootdir, plugsrcroot, exts)
        else:
            # Copy a prebuilt dll (plus .pdb/.xml side files when present).
            dllfolder = buildplug.modulefolder + "Dll"
            # Renamed from `config` to avoid shadowing the module-level
            # `config` constant.
            buildcfg = args.config
            if not os.path.isdir(dllfolder):
                logging.warning("can't find plug [%s] dll for dll folder not exist. ", plug)
                continue
            globpattern = os.path.join(dllfolder, "*", buildcfg, projname + ".dll")
            ret = glob.glob(globpattern)
            if len(ret) == 0:
                logging.warning("can't find plug [%s] dll for dll not exist:%s", plug, globpattern)
                continue
            srcdllfile = ret[0]
            shutil.copy(srcdllfile, plugdestfolder)
            basename = os.path.splitext(srcdllfile)[0]
            for sidecar_ext in (".pdb", ".xml"):
                sidecar = basename + sidecar_ext
                if os.path.exists(sidecar):
                    shutil.copy(sidecar, plugdestfolder)
    logging.info("create project over.")
def compilemodule(args):
    """Compile every dll-enabled plug from the module config.

    Command-line overrides (module ini, build config, dll/module folders)
    are applied onto the `buildplug` module before compiling.
    """
    # Apply optional CLI overrides to buildplug's module-level settings.
    overrides = (("moduleini", args.moduleini),
                 ("buildconfig", args.config),
                 ("moduledllfolder", args.moduledllfolder),
                 ("modulefolder", args.modulefolder))
    for attr, value in overrides:
        if value:
            setattr(buildplug, attr, value)

    clean = utils.str2bool(args.clean)
    cfg = utils.Config(buildplug.moduleini)
    for plug in buildplug.getusedplugs(cfg):
        if not utils.str2bool(cfg.get(plug, "usedll")):
            logging.warning("skip compile plug for %s marked not using dll.", plug)
            continue
        csproj = buildplug.gen_csproj(plug)
        if not csproj:
            continue
        buildplug.gen_dll(csproj)
        buildplug.gain_dll(plug, clean)
# Command-line interface: two sub-commands, "create" and "compile", each
# dispatching to its handler via the `func` default.
parser = argparse.ArgumentParser()
parser.set_defaults(help="useless")
subparsers = parser.add_subparsers(help="projector sub-command", description='valid subcommands')

parser_create = subparsers.add_parser("create", help="create unity project using HostUnityProj")
parser_create.add_argument("--pathname", required=True, help="project destination folder.")
parser_create.add_argument("--config", help="it works when using dll is true in cfg",
                           choices=["Debug", "Release"], default="Debug")
parser_create.set_defaults(func=create)

parser_compile = subparsers.add_parser("compile", help="compile a module by cfg, default using plugs.ini")
parser_compile.add_argument("--modulefolder", help="module root folder.")
parser_compile.add_argument("--moduleini", help="module cfg file, see Plugs.ini.")
parser_compile.add_argument("--moduledllfolder", help="dll folder copied after compile.")
parser_compile.add_argument("--config", help="build config type",
                            choices=["Debug", "Release"], default="Debug")
parser_compile.add_argument("--clean", help="clean build folder. this is not moduledllfolder")
parser_compile.set_defaults(func=compilemodule)

# With no arguments, show the help text instead of failing on the missing
# sub-command.
sys_args = sys.argv[1:]
if len(sys_args) == 0:
    sys_args.append("--help")
args = parser.parse_args(sys_args)
args.func(args)
|
from app.common.util import formatDatetime
from app.models.model import Entry, Feed, db
from feedparser import parse as feed_parse
from flask_apscheduler import APScheduler
from app.common.util import get_tags
# Global APScheduler instance; the tasks below register themselves on it
# via the @scheduler.task decorators.
scheduler = APScheduler()
@scheduler.task('interval', id='title', minutes=10, misfire_grace_time=900)
def fresh_feed_info():
    """Every 10 minutes, re-fetch each feed and update its stored title
    when the remote title has changed."""
    for feed in Feed.query.all():
        try:
            title = feed_parse(feed.feedURL).feed.title
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit). Unreachable or malformed
            # feeds are simply skipped.
            continue
        if title != feed.title:
            feed.title = title
            db.session.commit()
@scheduler.task('interval', id='tags', minutes=10, misfire_grace_time=900)
def fresh_tags():
    """Every 10 minutes, recompute the tags of each feed.

    Feeds whose subtitle mentions RSSHub are skipped (RSSHub-generated
    content is filtered out).
    """
    non_rsshub_feeds = Feed.query.filter(
        Feed.subtitle.notlike("%RSSHub%")).all()
    for feed in non_rsshub_feeds:
        feed.tag = get_tags(feed)
        db.session.commit()
def _entry_content(entry):
    """Best-available body of a feedparser entry: full content if present,
    else the summary, else None."""
    if hasattr(entry, 'content'):
        return entry.content[0]["value"]
    if hasattr(entry, 'summary'):
        return entry.summary
    return None


@scheduler.task('interval', id='fresh_entry', minutes=5)
def fresh_entry():
    """Every 5 minutes, refresh articles.

    For each feed: skip it when its remote updated date is unchanged;
    otherwise update existing articles whose date changed, insert new
    ones, and finally record the new feed updated date.
    """
    for feed in Feed.query.all():
        try:
            feed_updated = formatDatetime(
                feed_parse(feed.feedURL).feed.updated_parsed)
        except Exception:
            # Narrowed from a bare `except:`; unreachable feeds or feeds
            # without an updated date are skipped.
            continue
        if str(feed.updateddate) == feed_updated:
            # Feed has not changed since the last run.
            continue
        for entry in feed_parse(feed.feedURL).entries:
            article = Entry.query.filter_by(link=entry.link).one_or_none()
            if article:
                article_update = formatDatetime(entry.updated_parsed)
                if article_update != str(article.publisheddate):
                    # Existing article changed upstream: refresh it.
                    article.title = entry.title
                    article.content = _entry_content(entry)
                    article.updateddate = article_update
                    db.session.commit()
            else:
                # New article: attach it to the feed.
                feed_entry = Entry(entry.title, entry.link, _entry_content(entry),
                                   entry.published_parsed)
                feed.entries.append(feed_entry)
                db.session.commit()
        feed.updateddate = feed_updated
        db.session.commit()
|
import torch.nn as nn
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.layer = nn.Sequential(
nn.Conv2d(1,16,5),
nn.ReLU(),
nn.Conv2d(16,32,5),
nn.ReLU(),
nn.MaxPool2d(2,2),
nn.Conv2d(32,64,5),
nn.ReLU(),
nn.MaxPool2d(2,2)
)
self.fc_layer = nn.Sequential(
nn.Linear(64*3*3,100),
nn.ReLU(),
nn.Linear(100,10)
)
def forward(self,x):
out = self.layer(x)
out = out.view(-1,64*3*3)
out = self.fc_layer(out)
return out |
import numpy as np
import torch
import torch.nn.functional as F
from xnas.core.timer import Timer
from xnas.search_space.cellbased_DARTS_cnn import DartsCNN
from xnas.search_space.cellbased_NASBench201_cnn import NASBench201CNN
def basic_darts_cnn_test():
    """Smoke test for DartsCNN: run one forward pass with dense random
    architecture weights and one with one-hot weights, printing the output
    shape and average forward time each time (requires CUDA)."""
    timer = Timer()
    print("Testing darts CNN")
    search_net = DartsCNN().cuda()
    num_ops = len(search_net.basic_op_list)
    num_rows = search_net.num_edges * 2

    # Dense (real-valued) random architecture weights.
    dense_weights = torch.randn([num_rows, num_ops]).cuda()
    batch = torch.randn([2, 3, 32, 32]).cuda()
    timer.tic()
    logits = search_net(batch, dense_weights)
    timer.toc()
    print(logits.shape)
    print(timer.average_time)

    timer.reset()
    # One-hot architecture weights: a single random op per edge.
    one_hot_weights = torch.Tensor(
        np.eye(num_ops)[np.random.choice(num_ops, num_rows)]).cuda()
    batch = torch.randn([2, 3, 32, 32]).cuda()
    timer.tic()
    logits = search_net(batch, one_hot_weights)
    timer.toc()
    print(logits.shape)
    print(timer.average_time)
def basic_nas_bench_201_cnn_test():
    """Smoke test for NASBench201CNN on CPU: one forward pass with dense
    random architecture weights and one with one-hot weights, printing the
    output shape and average forward time each time."""
    timer = Timer()
    print("Testing nas bench 201 CNN")
    search_net = NASBench201CNN()
    num_ops = len(search_net.basic_op_list)

    # Dense (real-valued) random architecture weights.
    dense_weights = torch.randn([search_net.num_edges, num_ops])
    batch = torch.randn([2, 3, 32, 32])
    timer.tic()
    logits = search_net(batch, dense_weights)
    timer.toc()
    print(logits.shape)
    print(timer.average_time)

    timer.reset()
    # One-hot architecture weights: a single random op per edge.
    one_hot_weights = torch.Tensor(
        np.eye(num_ops)[np.random.choice(num_ops, search_net.num_edges)])
    batch = torch.randn([2, 3, 32, 32])
    timer.tic()
    logits = search_net(batch, one_hot_weights)
    timer.toc()
    print(logits.shape)
    print(timer.average_time)
if __name__ == "__main__":
    # Run both smoke tests when executed directly.
    basic_darts_cnn_test()
    basic_nas_bench_201_cnn_test()
    pass
|
from abc import ABC
class EYELink(ABC):
    """Abstract marker class used as a type hint for an eye-link device."""
|
from traitlets.config.configurable import SingletonConfigurable, Configurable
import traitlets
import atexit
import logging
from jetbot import AStar
import sys
class RobotAStar(SingletonConfigurable):
    """Singleton motor controller for a jetbot AStar board.

    Speeds are expressed in [-1, 1] and scaled to the board's [-400, 400]
    range. NOTE(review): only the upper bound is clamped by min(speed, 1);
    values below -1 pass through unscaled-confirm this is intended.
    """

    a_star = AStar()

    def __init__(self, *args, **kwargs):
        super(RobotAStar, self).__init__(*args, **kwargs)

    def set_motors(self, left_speed, right_speed):
        """Drive both motors; speeds in [-1, 1], clamped above at 1."""
        scaled_left = int(min(left_speed, 1) * 400)
        scaled_right = int(min(right_speed, 1) * 400)
        self.a_star.motors(scaled_left, scaled_right)

    def forward(self, speed=1.0, duration=None):
        # NOTE(review): duration is accepted but currently ignored.
        self.set_motors(speed, speed)

    def backward(self, speed=1.0):
        self.set_motors(-speed, -speed)

    def left(self, speed=1.0):
        self.set_motors(-speed, speed)

    def right(self, speed=1.0):
        self.set_motors(speed, -speed)

    def stop(self):
        self.set_motors(0, 0)
class MotorSpeed(Configurable):
    """Traitlet-backed left/right motor speed pair.

    Observers on l_value/r_value push every change straight to the AStar
    board; an atexit hook stops the motors when the process exits.
    """

    l_value = traitlets.Float()
    r_value = traitlets.Float()

    def __init__(self, astar, *args, **kwargs):
        super(MotorSpeed, self).__init__(*args, **kwargs)  # initializes traitlets
        self._astar = astar
        # Make sure the motors are stopped when the process exits.
        atexit.register(self._release)

    @traitlets.observe('l_value')
    def _observe_value_left(self, change):
        logging.basicConfig(stream=sys.stdout, level=logging.INFO)
        # Lazy %-style args instead of eager string concatenation; the
        # unused local `logger` from the original has been removed.
        logging.info("Updating l_value: %s using existing r_value: %s",
                     change['new'], self.r_value)
        self._write_value(change['new'], self.r_value)

    @traitlets.observe('r_value')
    def _observe_value_right(self, change):
        logging.basicConfig(stream=sys.stdout, level=logging.INFO)
        logging.info("Updating r_value: %s using existing l_value: %s",
                     change['new'], self.l_value)
        self._write_value(self.l_value, change['new'])

    def _write_value(self, left_speed, right_speed):
        # Scale [-1, 1] speeds to the board's [-400, 400] range
        # (upper-bound clamp only, matching the rest of the module).
        self._astar.motors(int(min(left_speed, 1) * 400), int(min(right_speed, 1) * 400))

    def _release(self):
        """Stops motor by releasing control"""
        self._write_value(0, 0)
class RobotAStarWithTrait(SingletonConfigurable):
    """Singleton AStar robot whose motors are driven through a MotorSpeed
    traitlet pair (see MotorSpeed)."""

    motor_speed = traitlets.Instance(MotorSpeed)
    a_star = AStar()

    def __init__(self, *args, **kwargs):
        super(RobotAStarWithTrait, self).__init__(*args, **kwargs)
        self.motor_speed = MotorSpeed(self.a_star)

    def set_motors(self, left_speed, right_speed):
        """Drive both motors; speeds in [-1, 1], clamped above at 1."""
        scaled_left = int(min(left_speed, 1) * 400)
        scaled_right = int(min(right_speed, 1) * 400)
        self.a_star.motors(scaled_left, scaled_right)

    def forward(self, speed=1.0, duration=None):
        # NOTE(review): duration is accepted but currently ignored.
        self.set_motors(speed, speed)

    def backward(self, speed=1.0):
        self.set_motors(-speed, -speed)

    def left(self, speed=1.0):
        self.set_motors(-speed, speed)

    def right(self, speed=1.0):
        self.set_motors(speed, -speed)

    def stop(self):
        self.set_motors(0, 0)
from unittest import TestCase
from mock import mock
from ..tf_generic_log import TFGenericLog
class TestTFGenericLog(TestCase):
    """Unit tests for TFGenericLog's generic log-line IP filtering."""

    def setUp(self):
        # Three lines with parseable IPs plus one the ip regex rejects.
        self.tf_generic_log = TFGenericLog([
            '192.168.1.10 - - [13/Sep/2006:07:01:53 -0700] foo bar baz',
            '192.168.1.20 - - [28/Jul/2006:10:27:10 -0300] foo bar baz',
            '192.178.1.30 - - [28/Jul/2006:10:27:32 -0300] foo bar baz',
            'Line that does not match ip regex'
        ], 'foo', ports=['123:udp'])

    def test_ports_must_be_specified(self):
        # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
        # use assertRaisesRegex.
        with self.assertRaisesRegex(Exception, "Ports must be specified for generic parsing"):
            TFGenericLog([], 'foo')

    def test_running_with_no_input_succeeds_but_tracks_no_log_lines(self):
        self.tf_generic_log = TFGenericLog([], 'foo', ports=['123:udp'])
        self.tf_generic_log.run()
        self.assertEqual(self.tf_generic_log.noisy_logs, [])
        self.assertEqual(self.tf_generic_log.quiet_logs, [])

    def test_can_filter_noisy_and_quiet_lines(self):
        # IPs reported by _send_features are "noisy"; everything else,
        # including the unparseable line, is "quiet".
        with mock.patch.object(self.tf_generic_log, '_send_features', return_value={'ips': ['192.168.1.20']}):
            self.tf_generic_log.run()
            self.assertEqual(self.tf_generic_log.noisy_logs, [
                {'ip': '192.168.1.20',
                 'raw': '192.168.1.20 - - [28/Jul/2006:10:27:10 -0300] foo bar baz'}
            ])
            self.assertEqual(self.tf_generic_log.quiet_logs, [
                {'ip': '192.168.1.10',
                 'raw': '192.168.1.10 - - [13/Sep/2006:07:01:53 -0700] foo bar baz'},
                {'ip': '192.178.1.30',
                 'raw': '192.178.1.30 - - [28/Jul/2006:10:27:32 -0300] foo bar baz'},
                {'raw': 'Line that does not match ip regex'}
            ])
|
import re
from os import listdir
from os.path import join, getsize
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from zstandard import ZstdDecompressor
import config
class Collate(object):
    """Collate a batch of lowercase alphabetic strings into one-hot
    encoder/decoder tensors for a transformer denoiser.

    Returns (src, tgt_inp, tgt, src_pad_mask, tgt_pad_mask, subsequent_mask);
    the two padding masks are the same tensor object.
    """

    # Character -> index mapping. NOTE(review): 'v' (20) and 'u' (21) are
    # swapped relative to alphabetical order - preserved as-is.
    alphabet_to_num = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7, 'i': 8, 'j': 9, 'k': 10,
                       'l': 11, 'm': 12, 'n': 13, 'o': 14, 'p': 15, 'q': 16, 'r': 17, 's': 18, 't': 19, 'v': 20,
                       'u': 21, 'w': 22, 'x': 23, 'y': 24, 'z': 25}
    num_to_alphabet = dict((v, k) for k, v in alphabet_to_num.items())

    def __init__(self, min_noise: int, max_noise: int):
        # Noise bounds: lower clamped at 0, upper wrapped into the alphabet.
        self.min_noise = max(0, min_noise)
        self.max_noise = max_noise % len(self.alphabet_to_num)

    def __call__(self, batch: list) -> torch.tensor:
        sequences = [list(entry) for entry in batch]
        lengths = [len(seq) for seq in sequences]
        batch_size = len(sequences)
        seq_len = max(lengths)
        token_size = len(self.alphabet_to_num)

        src = torch.zeros((batch_size, seq_len, token_size), dtype=torch.float)
        tgt_inp = torch.zeros((batch_size, seq_len, token_size), dtype=torch.float)
        padding_mask = torch.zeros((batch_size, seq_len), dtype=torch.bool)
        targets = []

        for i, seq in enumerate(sequences):
            # Unfilled (padding) positions keep target label -1.
            row_tgt = torch.full((seq_len,), fill_value=-1, dtype=torch.long)
            for j, ch in enumerate(seq):
                idx = self.alphabet_to_num[ch]
                src[i, j, idx] = 1
                tgt_inp[i, j, idx] = 1
                row_tgt[j] = idx
            # Inject random extra one-hots into the source.
            # NOTE(review): `j` here is the last loop index, so the noise
            # lands only on the final character position - confirm intended.
            noise_size = np.random.randint(low=self.min_noise, high=self.max_noise, size=1)[0]
            noise_indexes = np.random.randint(low=0, high=len(self.alphabet_to_num), size=noise_size)
            src[i, j, noise_indexes] = 1
            targets.append(row_tgt)
            padding_mask[i, lengths[i]:] = True

        # Shift the decoder input right by one, prepending a zero token.
        empty_token = torch.zeros(batch_size, 1, token_size)
        tgt_inp = torch.cat([empty_token, tgt_inp[:, :-1, :]], dim=1)
        tgt = torch.stack(targets)
        subsequent_mask = self.get_subsequent_mask(seq_len)
        return src, tgt_inp, tgt, padding_mask, padding_mask, subsequent_mask

    @staticmethod
    def get_subsequent_mask(size: int) -> torch.tensor:
        """Boolean upper-triangular (strictly above diagonal) causal mask."""
        return torch.triu(torch.ones((size, size), dtype=torch.float), diagonal=1) == 1
class PileDataset(Dataset):
    """Randomly samples short alphabetic strings from zstd-compressed text
    files (Pile-style shards), retrying until a sample is mostly letters.
    """

    def __init__(self, filenames: list, min_threshold: int = 150, max_threshold: int = 200,
                 drop_threshold: float = 0.62, dataset_size: int = 16_384):
        # filenames: paths to .zst shards; sizes are used to pick offsets.
        self.filenames = filenames
        self.n_files = len(self.filenames)
        self.file_sizes = [getsize(file) for file in self.filenames]
        # Sample length is drawn uniformly from [min_threshold, max_threshold).
        self.min_threshold = min_threshold
        self.max_threshold = max_threshold
        # Minimum fraction of alphabetic characters a sample must have.
        self.drop_threshold = drop_threshold
        # Nominal epoch length reported by __len__.
        self.dataset_size = dataset_size

    def __getitem__(self, idx=None):
        # idx is ignored: every access returns a fresh random sample.
        file_id = np.random.randint(low=0, high=self.n_files, size=1)[0]
        # NOTE(review): shift is drawn against the *compressed* file size
        # but used to seek in the decompressed stream - confirm intended.
        shift = np.random.randint(low=0, high=self.file_sizes[file_id] - self.max_threshold, size=1)[0]
        line_size = np.random.randint(low=self.min_threshold, high=self.max_threshold, size=1)[0]
        try:
            with open(self.filenames[file_id], mode="rb") as f:
                with ZstdDecompressor().stream_reader(f) as reader:
                    reader.seek(shift)
                    wiki_page = reader.read(line_size)
                    sentence = wiki_page.decode("unicode_escape", errors='ignore')
                    # Strip whitespace, then keep only ASCII letters.
                    sentence = re.sub(r'\s', '', sentence)
                    cleaned_sentence = re.sub(r'[^A-Za-z]', '', sentence)
                    # Resample recursively when the window is empty or too
                    # non-alphabetic.
                    if len(sentence) == 0 or len(cleaned_sentence) / len(sentence) < self.drop_threshold:
                        return self.__getitem__()
                    return cleaned_sentence.lower()
        except EOFError:
            # Seek/read ran past the end of the stream: resample.
            return self.__getitem__()

    def __len__(self):
        return self.dataset_size
class WikiDataset(Dataset):
def __init__(self, filenames: list, min_threshold: int = 150, max_threshold: int = 200, dataset_size: int = 16_384):
self.filenames = filenames
self.n_files = len(self.filenames)
self.file_sizes = [getsize(file) for file in self.filenames]
self.min_threshold = min_threshold
self.max_threshold = max_threshold
self.dataset_size = dataset_size
def __getitem__(self, idx=None):
np.random.seed(None)
file_id = np.random.randint(low=0, high=self.n_files, size=1)[0]
shift = np.random.randint(low=0, high=self.file_sizes[file_id] - self.max_threshold, size=1)[0]
line_size = np.random.randint(low=self.min_threshold, high=self.max_threshold, size=1)[0]
with open(self.filenames[file_id], mode="r") as f:
f.seek(shift)
return f.read(line_size)
def __len__(self):
return self.dataset_size
def clean_wiki_text(filename: str, drop_threshold: float = 0.62) -> None:
    """Strip a wiki text dump down to lowercase ASCII letters.

    Writes the result to '<filename>.clean', skipping blank lines, section
    headings (lines starting with ' = ') and lines whose alphabetic
    fraction is below drop_threshold. Prints the total cleaned size.

    BUGFIX: the output file was opened as the literal f'(unknown).clean'
    (an f-string with no placeholder), ignoring the filename parameter;
    it now derives the output path from `filename`.
    """
    result_size = 0
    with open(filename, mode='r') as wiki_file, open(f'{filename}.clean', mode='w') as writer:
        while True:
            try:
                line = wiki_file.readline()
            except UnicodeDecodeError:
                # Skip undecodable lines rather than aborting the whole file.
                continue
            if not line:
                break  # EOF
            if not line.strip() or line.startswith(' = '):
                continue  # blank line or section heading
            cleaned_line = re.sub(r'[^A-Za-z]', '', line)
            if len(cleaned_line) / len(line) < drop_threshold:
                continue  # mostly non-alphabetic content
            writer.write(cleaned_line.lower())
            result_size += len(cleaned_line)
    print(f'Result size: {result_size}')
if __name__ == '__main__':
    # Smoke test: stream tiny random windows through the Collate pipeline
    # and print the resulting tensors.
    train_files = [join(config.train_path, file) for file in listdir(config.train_path)]
    dataset = WikiDataset(train_files, min_threshold=10, max_threshold=11)
    loader = DataLoader(dataset=dataset, batch_size=2, shuffle=False, num_workers=4, pin_memory=True,
                        collate_fn=Collate(min_noise=0, max_noise=8))
    for _src, _tgt_inp, _tgt, _src_pad_mask, _tgt_inp_pad_mask, _tgt_inp_attn_mask in loader:
        # print(f'| src: {_src.size()} '
        #       f'| tgt_inp: {_tgt_inp.size()} '
        #       f'| tgt: {_tgt.size()} '
        #       f'| src_pad_mask: {_src_pad_mask.size()} '
        #       f'| tgt_inp_pad_mask: {_tgt_inp_pad_mask.size()} '
        #       f'| tgt_inp_attn_mask: {_tgt_inp_attn_mask.size()}')
        print(f'| src: {_src} '
              f'| tgt_inp: {_tgt_inp} '
              f'| tgt: {_tgt} '
              f'| src_pad_mask: {_src_pad_mask} '
              f'| tgt_inp_pad_mask: {_tgt_inp_pad_mask} '
              f'| tgt_inp_attn_mask: {_tgt_inp_attn_mask}')
|
from coza.objects import Context, Result
from coza.api import BacktestApi
from coza.errors import InputValueValidException
from coza.utils import now
from coza.logger import logger
from datetime import datetime, timedelta
from copy import deepcopy
import pandas as pd
import sys
import os
import time
class BacktestContext(Context):
    """Backtesting harness: wires user strategy callbacks to a simulated
    exchange and replays minute-by-minute market data.

    running_mode 'LIVE' reports results through BacktestApi; 'LOCAL'
    returns (or just acknowledges) a Result object.
    """

    def __init__(self, initialize, run_strategy, make_orders, user_uuid=None, model_name=None, running_mode='LOCAL',
                 use_data='LIVE', data_path='data', save_result=False, return_result=True):
        # Validate each configuration value, failing fast on bad input.
        if use_data.upper() not in ('LOCAL', 'LIVE'):
            raise InputValueValidException(msg='at init', use_data=use_data)
        else:
            self.use_data = use_data.upper()
        if not isinstance(data_path, str):
            raise InputValueValidException(msg='at init', data_path=data_path)
        else:
            self.data_path = data_path
        if not isinstance(save_result, bool):
            raise InputValueValidException(msg='at init', save_result=save_result)
        else:
            self.save_result = save_result
        if not isinstance(return_result, bool):
            raise InputValueValidException(msg='at init', return_result=return_result)
        else:
            self.return_result = return_result
        self.context = dict()
        self.result = dict()
        super().__init__(
            initialize=initialize, run_strategy=run_strategy, make_orders=make_orders, running_mode=running_mode)
        if self.running_mode == 'LIVE':
            # LIVE mode needs API credentials and a registered model.
            try:
                if not isinstance(user_uuid, str):
                    raise InputValueValidException(msg='at init', user_uuid=user_uuid)
                else:
                    BacktestApi.initialize(user_uuid)
                if not isinstance(model_name, str):
                    raise InputValueValidException(msg='at init', model_name=model_name)
                else:
                    self.model_info = BacktestApi.get_model(model_name)
            except Exception as e:
                logger.info('Initialize failed')
                sys.exit()
        # Run the user's initialize callback to populate self.context.
        self.initialize(self)
        if use_data == 'LOCAL':
            # Probe the local data directory so missing paths fail early.
            try:
                os.listdir(data_path)
            except FileNotFoundError:
                logger.info("data path를 찾지 못 하였습니다.")
                sys.exit()

    def run(self, exchange, start_date=None, end_date=None, init_budget=10000000.0, backtest_type=None, slippage_rate=None):
        """Resolve the date window and kick off a backtest on `exchange`.

        Either pass backtest_type ('day'/'week'/'month', relative to now)
        or explicit start_date/end_date.
        """
        logger.debug('Start backtest')
        if backtest_type is not None:
            backtest_type = backtest_type.lower()
            if backtest_type.lower() in ('day', 'week', 'month'):
                self.backtest_type = backtest_type
                # Window ends one minute before now, rounded to the minute.
                end_date = now(exchange=exchange, rounding_seconds=True) - timedelta(minutes=1)
                if backtest_type == 'day':
                    start_date = end_date - timedelta(days=1)
                elif backtest_type == 'week':
                    start_date = end_date - timedelta(days=7)
                elif backtest_type == 'month':
                    start_date = end_date - timedelta(days=30)
            else:
                raise InputValueValidException(msg='at run', backtest_type=backtest_type)
        else:
            # NOTE(review): str is accepted here but datetime.strftime on a
            # str raises TypeError - confirm whether str inputs ever worked.
            if isinstance(start_date, (datetime, str)):
                # Round to minute precision via a strftime/strptime round trip.
                start_date = datetime.strptime(datetime.strftime(start_date, "%Y-%m-%dT%H:%M"), "%Y-%m-%dT%H:%M")
            else:
                raise InputValueValidException(msg='at run', start_date=start_date)
            if isinstance(end_date, (datetime, str)):
                end_date = datetime.strptime(datetime.strftime(end_date, "%Y-%m-%dT%H:%M"), "%Y-%m-%dT%H:%M")
            else:
                raise InputValueValidException(msg='at run', end_date=end_date)
        if not isinstance(init_budget, (int, float)):
            raise InputValueValidException(msg='at run', init_budget=init_budget)
        if exchange in ('coinone', 'upbit'):
            self.exchange = exchange
            trade_info = self.context['trade_info'].get(exchange)
            logger.info(f'start_date : {start_date}')
            logger.info(f'end_date : {end_date}')
            logger.info(f'trade_info of exchange {exchange} : {trade_info}')
            if trade_info is not None:
                self.exchanges[exchange] = self.make_exchange(
                    exchange=exchange, start_date=start_date, end_date=end_date, init_budget=init_budget,
                    currency_list=trade_info['currency'], interval_list=trade_info['interval'],
                    fiat=trade_info['fiat'], slippage_rate=slippage_rate, use_data=self.use_data,
                    data_path=self.data_path)
                return self.backtest(self.exchanges[exchange])
            else:
                return dict(result=False, msg=f'입력한 거래소 {exchange}가 Context Trade Info에 없습니다.')
        else:
            logger.info(f'잘 못된 거래소명을 입력하셨습니다. {exchange}')
            sys.exit()

    def make_exchange(self, exchange, start_date, end_date, init_budget, currency_list, interval_list, fiat,
                      slippage_rate, use_data, data_path):
        """Build the simulated exchange backend for the given exchange name
        ('upbit' or 'coinone'); imports are local to avoid loading both."""
        if exchange == 'upbit':
            from coza.exchange import UpbitBacktest
            exchange = UpbitBacktest(
                start_date=start_date, end_date=end_date, init_budget=init_budget, currency_list=currency_list,
                interval_list=interval_list, fiat=fiat, slippage_rate=slippage_rate, use_data=use_data, data_path=data_path)
        elif exchange == 'coinone':
            from coza.exchange import CoinoneBacktest
            exchange = CoinoneBacktest(
                start_date=start_date, end_date=end_date, init_budget=init_budget, currency_list=currency_list,
                interval_list=interval_list, fiat=fiat, slippage_rate=slippage_rate, use_data=use_data, data_path=data_path)
        return exchange

    def backtest(self, exchange):
        """Replay the date window minute by minute, invoking the strategy
        callbacks and tracking the estimated balance curve.

        Returns a Result (LOCAL + return_result), a status dict, or None
        on the LIVE success path.
        """
        logger.debug('Running Backtest...')
        created_time = now(exchange=exchange.name, rounding_seconds=True)
        base_time = time.time()
        exchange.init_dataframe()
        logger.debug('Running run_strategy...')
        # One-off strategy setup over the full candle history.
        self.run_strategy(
            self, is_update=exchange.is_update, trade_info=self.context['trade_info'],
            update_len=exchange.updated_len, data=exchange.data)
        exchange.init_test_dataframe()
        # Seed the equity curve with the initial fiat balance.
        exchange.estimated_list.append({'date': exchange.start_date,
                                        'estimated': deepcopy(exchange.balance['fiat'])})
        logger.debug('Running make_orders...')
        # NOTE(review): if no minute ever updates, `estimated_dict` below is
        # referenced before assignment - confirm this cannot happen.
        for _datetime in pd.date_range(start=exchange.start_date, end=exchange.end_date, freq='1min'):
            is_updated = exchange.update_dataframe(_datetime)
            if is_updated:
                self.make_orders(
                    self, is_update=exchange.is_update, trade_info=self.context['trade_info'],
                    update_len=exchange.updated_len, data=exchange.data)
                exchange.update_balance(_datetime)
            else:
                continue
            # Track running max profit/loss and the equity curve.
            estimated_dict = exchange.calc_estimated()
            exchange.max_profit = estimated_dict.get('earning_rate') if estimated_dict.get('earning_rate') > exchange.max_profit else exchange.max_profit
            exchange.max_loss = estimated_dict.get('earning_rate') if estimated_dict.get('earning_rate') < exchange.max_loss else exchange.max_loss
            exchange.estimated_list.append({'date':exchange._get_df_datetime(), 'estimated': round(estimated_dict.get('estimated'), 4)})
        elapsed_time = time.time() - base_time
        if self.running_mode == 'LIVE':
            if BacktestApi.user_uuid is not None:
                # Serialize the run summary and push it to the API.
                result_data = dict(
                    model_id=self.model_info['id'], exchange=exchange.name, created_time=created_time.strftime("%Y-%m-%d %H:%M"),
                    backtest_type=self.backtest_type, start_date=exchange.start_date.strftime("%Y-%m-%d %H:%M"),
                    end_date=exchange.end_date.strftime("%Y-%m-%d %H:%M"), init_budget=exchange.init_budget,
                    final_balance=estimated_dict.get('estimated'), total_fee=exchange.total_fee, total_slippage=exchange.total_slippage,
                    fee_rate=exchange.fee_rate, slippage_rate=exchange.slippage_rate, earning_rate=estimated_dict.get('earning_rate'),
                    max_profit=exchange.max_profit, max_loss=exchange.max_loss, estimated_list=[{'date': i['date'].strftime(
                        "%Y-%m-%d %H:%M"), 'estimated': i['estimated']} for i in exchange.estimated_list]
                )
                # NOTE(review): this success path returns None (no explicit
                # return) - confirm callers expect that.
                BacktestApi.result(result_data)
            else:
                return dict(result=False, msg='user_uuid가 입력되지 않았습니다.')
        else:
            if self.return_result:
                logger.debug(f'Backtest finished {elapsed_time}')
                self.result[exchange.name] = Result(
                    exchange=exchange.name, currencies=exchange.currencies, intervals=exchange.intervals,
                    created_time=created_time, elapsed_time=elapsed_time, start_date=exchange.start_date, end_date=exchange.end_date,
                    init_budget=exchange.init_budget, final_balance=estimated_dict.get('estimated'), estimated_list = exchange.estimated_list,
                    fiat=exchange.fiat, total_fee = exchange.total_fee, total_slippage=exchange.total_slippage,
                    fee_rate=exchange.fee_rate, slippage_rate=exchange.slippage_rate, earning_rate= estimated_dict.get('earning_rate'),
                    max_profit=exchange.max_profit, max_loss=exchange.max_loss, trade_history=exchange.trade_history,
                    data=exchange.data
                )
                # Free the large per-run frames before returning.
                del(exchange.test_df)
                del(exchange.data)
                return self.result[exchange.name]
            else:
                return dict(result=True, msg='Backtest를 완료했습니다.')

    def set_order(self, exchange, o, t=None):
        # Forward an order to the named simulated exchange.
        self.exchanges[exchange].set_order(o=o, t=t)

    def set_cancel(self, exchange, currency=None, order_id=None, qty=None):
        # Forward a cancel request to the named simulated exchange.
        self.exchanges[exchange].send_cancel(currency=currency, order_id=order_id, qty=qty)

    def get_balance(self, exchange):
        return self.exchanges[exchange].get_balance()

    def get_order_list(self, exchange):
        return self.exchanges[exchange].get_order_list()

    def get_orders(self, exchange):
        return self.exchanges[exchange].get_orders()

    def get_time(self, exchange):
        return self.exchanges[exchange].get_time()

    def get_estimated(self, exchange):
        return self.exchanges[exchange].calc_estimated()

    def clear_balance(self, exchange):
        return self.exchanges[exchange].clear_balance()
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Reads n, then n words, and prints the number of distinct words followed
# by each word's occurrence count in first-appearance order.
from collections import Counter

# Counter preserves first-insertion order (dict semantics), matching the
# original hand-rolled dict counting; the wildcard import is replaced
# with an explicit one.
counts = Counter(input() for _ in range(int(input())))
print(len(counts))
print(*counts.values())
|
import requests
import logging
import weibo
from config import tuling_apiUrl, tuling_key, weibo_username, weibo_password, robot_tuling, robot_xiaoice
# Module-level logger for the robot backends.
logger = logging.getLogger('MyItChatDemo.robot')
# Lazily-created Weibo session; only set when the xiaoice robot is enabled.
weibo_instance = None


def get_reply_msg(info, userid):
    # Placeholder; rebound below to one of the concrete robot backends
    # depending on configuration.
    raise NotImplementedError()
def get_reply_from_tuling(info, userid):
    """Ask the Tuling chatbot API for a reply to `info` on behalf of
    `userid` and return the reply text."""
    payload = {
        'key': tuling_key,
        'info': info,
        'userid': userid,
    }
    response = requests.post(tuling_apiUrl, data=payload)
    return response.json()['text']
def get_reply_from_xiaoice(info, userid):
    # NOTE: userid is accepted for interface parity but ignored by the
    # xiaoice backend.
    return weibo_instance.get_msg_from_xiaoice(info)


# set reply robot tuling
if robot_tuling:
    get_reply_msg = get_reply_from_tuling

# set reply robot xiaoice (takes precedence when both flags are set, and
# eagerly logs in to Weibo at import time)
if robot_xiaoice:
    weibo_instance = weibo.Weibo(weibo_username, weibo_password)
    weibo_instance.login()
    weibo_instance.im_init()
    get_reply_msg = get_reply_from_xiaoice
|
# Copyright 2021 chris
# See LICENSE file for licensing details.
#
# Learn more about testing at: https://juju.is/docs/sdk/testing
import unittest
import unittest.mock as mock
from charm import KingfisherCharm
from ops.model import (
BlockedStatus,
MaintenanceStatus,
)
from ops.testing import Harness
class TestCharm(unittest.TestCase):
    def setUp(self):
        # Fresh Harness per test; begin() instantiates the charm without
        # emitting any hooks.
        self.harness = Harness(KingfisherCharm)
        self.addCleanup(self.harness.cleanup)
        self.harness.begin()
    @mock.patch('charm.KingfisherCharm.install_pkgs')
    @mock.patch('charm.ch_templating')
    @mock.patch('charm.subprocess')
    def test_install(self, mock_subprocess, mock_templating, mock_install_pkgs):
        """install hook snaps kubectl/yq/jq, calls install_pkgs and leaves
        the unit in MaintenanceStatus."""
        self.harness.charm.on.install.emit()
        self.assertEqual(
            self.harness.model.unit.status,
            MaintenanceStatus(''))
        mock_subprocess.check_call.assert_has_calls([
            mock.call(['snap', 'install', '--classic', 'kubectl']),
            mock.call(['snap', 'install', 'yq']),
            mock.call(['snap', 'install', 'jq']),
        ])
        mock_install_pkgs.assert_called_once()
    @mock.patch('charm.os')
    @mock.patch('charm.subprocess')
    @mock.patch('charm.ch_templating')
    @mock.patch('charm.KingfisherCharm._get_credentials')
    def test_config_changed(self, mock_get_credentials, mock_templating, mock_subprocess,
                            mock_os):
        """config-changed without credentials blocks the unit but still
        renders the proxy config and reloads systemd."""
        # Three consecutive os.getenv reads (presumably http proxy, https
        # proxy, no-proxy list - confirm against charm.py).
        mock_os.getenv.side_effect = [
            'http://squid.internal:3128',
            'http://squid.internal:3128',
            '10.0.0.0/8']
        mock_get_credentials.return_value = None
        self.harness.update_config({})
        self.assertEqual(
            self.harness.model.unit.status,
            BlockedStatus('missing credentials access; grant with: juju trust')
        )
        mock_get_credentials.assert_called()
        mock_templating.render.assert_called_once_with(
            'http-proxy.conf',
            '/etc/systemd/system/docker.service.d/http-proxy.conf',
            context={
                'http_proxy': 'http://squid.internal:3128',
                'https_proxy': 'http://squid.internal:3128',
                'no_proxy': '10.0.0.0/8'})
        mock_subprocess.check_call.assert_has_calls([
            mock.call(['systemctl', 'daemon-reload']),
        ])
@mock.patch('charm.os')
@mock.patch('charm.KingfisherCharm._get_credentials')
@mock.patch('charm.ch_templating')
@mock.patch('charm.subprocess')
def test_config_changed_with_trust(self, mock_subprocess, mock_templating,
mock_get_credentials, mock_os):
mock_os.getenv.return_value = None
mock_get_credentials.return_value = {'name': 'value'}
self.harness.update_config({})
self.assertEqual(
self.harness.model.unit.status,
MaintenanceStatus(message="")
)
expected_context = {
'name': 'value',
'config': {
'kubernetes_version': '1.21.1',
'kubernetes_controllers': 3, 'kubernetes_workers': 3,
'control_plane_machine_flavor': 'm1.medium',
'worker_machine_flavor': 'm1.medium',
'dns_nameservers': '10.245.160.2', 'availability_zones': 'nova',
'image_name': 'cluster-api', 'ssh_key_name': 'cluster-api',
'source': None, 'key': None, 'ssl_ca': None, 'timeout': 60}}
mock_templating.render.assert_has_calls([
mock.call(
'http-proxy.conf',
'/etc/systemd/system/docker.service.d/http-proxy.conf',
context={}),
mock.call(
'clouds.yaml',
'/root/.config/openstack/clouds.yaml',
context=expected_context),
mock.call(
'os_environment.sh',
'/etc/profile.d/os_environment.sh',
context=expected_context),
mock.call(
'env.sh',
'/etc/profile.d/env.sh',
context=expected_context)
])
@mock.patch('charm.KingfisherCharm._check_deploy_done')
@mock.patch('charm.subprocess.check_output')
def test_deploy_action(self, mock_check_output, mock_check_deploy_done):
# the harness doesn't (yet!) help much with actions themselves
action_event = mock.Mock(params={"fail": ""})
mock_check_deploy_done.return_value = True
mock_output = mock.MagicMock()
mock_check_output.return_value = mock_output
self.harness.charm._on_deploy_action(action_event)
mock_check_deploy_done.assert_called_once()
mock_check_output.assert_has_calls([
mock.call([
'/bin/bash', '-c',
'source /etc/profile; None config cluster test-cluster '
'--kubernetes-version 1.21.1 --control-plane-machine-count 3 '
'--worker-machine-count 3'], cwd='/root'),
mock.call(['kubectl', 'apply', '-f', '-'], input=mock_output, cwd='/root')])
# self.assertTrue(action_event.set_results.called)
@mock.patch('charm.subprocess.check_call')
def test_destroy_action(self, mock_check_call):
# the harness doesn't (yet!) help much with actions themselves
action_event = mock.Mock(params={"fail": ""})
self.harness.charm._on_destroy_action(action_event)
mock_check_call.assert_called_once_with(
['kubectl', '--kubeconfig=/root/.kube/config',
'delete', 'cluster', 'test-cluster'],
cwd="/root"
)
# self.assertTrue(action_event.set_results.called)
# def test_action_fail(self):
# action_event = Mock(params={"fail": "fail this"})
# self.harness.charm._on_fortune_action(action_event)
# self.assertEqual(action_event.fail.call_args, [("fail this",)])
# def test_httpbin_pebble_ready(self):
# # Check the initial Pebble plan is empty
# initial_plan = self.harness.get_container_pebble_plan("httpbin")
# self.assertEqual(initial_plan.to_yaml(), "{}\n")
# # Expected plan after Pebble ready with default config
# expected_plan = {
# "services": {
# "httpbin": {
# "override": "replace",
# "summary": "httpbin",
# "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent",
# "startup": "enabled",
# "environment": {"thing": "🎁"},
# }
# },
# }
# # Get the httpbin container from the model
# container = self.harness.model.unit.get_container("httpbin")
# # Emit the PebbleReadyEvent carrying the httpbin container
# self.harness.charm.on.httpbin_pebble_ready.emit(container)
# # Get the plan now we've run PebbleReady
# updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict()
# # Check we've got the plan we expected
# self.assertEqual(expected_plan, updated_plan)
# # Check the service was started
# service = self.harness.model.unit.get_container("httpbin").get_service("httpbin")
# self.assertTrue(service.is_running())
# # Ensure we set an ActiveStatus with no message
# self.assertEqual(self.harness.model.unit.status, ActiveStatus())
|
#! /usr/bin/env python2.7
# -*- coding: utf-8 -*-
# new version of Bruno Cuconato's script
# changes made by Leonel F. de Alencar
import codecs
import re
from ConvertDELAF import EndsInNasalDiphthong
def parse_entry(e):
[f,ts] = e.split("\t")
parts = ts.split("+")
return (f,parts[0],parts[1:])
def enrich_clitic(ve):
    """Expand a verb entry with the morphological features of its clitic, if any.

    *ve* is a (form, lemma, feature-list) triple as produced by parse_entry.
    Returns a list of entries: an ambiguous clitic reading (e.g. "nos") can
    yield several entries; anything else yields a one-element list.
    """
    def clitic_tags(f):
        # Maps a clitic form to its feature string(s). Entries with two
        # readings are disambiguated below via EndsInNasalDiphthong.
        # NOTE(review): some values ("na", "o", "te") lack the leading dot
        # that all other entries have -- confirm whether that is intentional.
        cmap = {"a":[".ele.ACC.3.F.SG"],"as":[".ele.ACC.3.F.PL"],"la":
                [".ele.ACC.3.F.SG"],"las":[".ele.ACC.3.F.PL"],"lhe":
                [".ele.DAT.3.SG"],"lhes":[".ele.DAT.3.PL"],"lo":
                [".ele.ACC.3.M.SG"],"los":[".ele.ACC.3.M.PL"],"me":
                [".eu.AD.1.SG"],"na":["ele.ACC.3.F.SG"],"nas":
                [".ele.ACC.3.F.PL"],"no":[".ele.ACC.3.M.SG"],"nos":
                [".nós.AD.1.PL".decode("utf-8"), ".ele.ACC.3.M.PL"], # the "nós" reading is more basic, so it is now the first element in the list
                "o":["ele.ACC.3.M.SG"],"os":[".ele.ACC.3.M.PL"],"se":
                [".ele.REFL"],"te":["tu.AD.2.SG"],"vos":[".vós.AD.2.PL".decode("utf-8")]}
        # the original version doesn't cope with mesoclisis, e.g. "ababadar-nos-emos ababadar+V+PRO+FUT+1+PL"
        # i = f.rfind("-")
        parts=re.split(r"\-",f)
        if len(parts) == 1:
            # no hyphen in the form: no clitic attached
            return [""]
        else:
            verb_form=parts[0]
            c = parts[1]
            clitic_feats=cmap[c]
            if len(clitic_feats) == 2 and EndsInNasalDiphthong(verb_form): # handling ambiguity of "nos"
                return clitic_feats
            else:
                return [clitic_feats[0]]
    f, l, fs = ve
    #cl, *fs = fs
    cl= fs.pop(0)
    # NOTE(review): this condition is too weak, because there are verb forms
    # with a hyphen which don't contain any clitic (e.g. "pré-selecionar");
    # one should instead check for the existence of a PRO tag in the input entry.
    if cl == "V":
        return [(f, l, [(cl + cts)] + fs) for cts in clitic_tags(f)]
    else:
        return [ve]
def print_entry(pe):
    """Serialise a (form, lemma, tag list) triple back into a TAB-separated line."""
    form, lemma, tags = pe
    return "\t".join([form, "+".join([lemma] + tags)])
def read_dict(fp):
    """Lazily yield the lines of the UTF-8 encoded dictionary file at path *fp*."""
    with codecs.open(fp, mode='r', encoding="utf8") as stream:
        for line in stream:
            yield line
if __name__ == "__main__":
    # CLI usage (Python 2.7): python script.py <dictionary-file>
    from sys import argv
    ls = read_dict(argv[1])
    for l in ls:
        e = parse_entry(l)
        ee = enrich_clitic(e)
        # enrich_clitic returns a list (ambiguous clitics expand to several
        # entries), hence the inner loop.
        for i in ee:
            print print_entry(i).encode("utf-8")
|
import logging
import matplotlib.axes
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import interp1d
import alf.io
from brainbox.core import Bunch
import ibllib.io.spikeglx as spikeglx
from ibllib.misc import log2session_static
from ibllib.io.extractors.ephys_fpga import _get_sync_fronts, get_ibl_sync_map
_logger = logging.getLogger('ibllib')
def apply_sync(sync_file, times, forward=True):
    """
    Interpolate times through the synchronisation model stored on disk.
    :param sync_file: probe sync file (usually of the form _iblrig_ephysData.raw.imec1.sync.npy)
    :param times: times in seconds to interpolate
    :param forward: if True goes from probe time to session time, from session time to probe time
     otherwise
    :return: interpolated times
    """
    sync_points = np.load(sync_file)
    # Column 0 holds probe times and column 1 session times; choose the
    # mapping direction and extrapolate beyond the synchronisation range.
    src, dst = (0, 1) if forward else (1, 0)
    interpolator = interp1d(sync_points[:, src], sync_points[:, dst],
                            fill_value='extrapolate')
    return interpolator(times)
@log2session_static('ephys')
def sync(ses_path, **kwargs):
    """
    Wrapper for sync_probes.version3A and sync_probes.version3B that automatically determines
    the version
    :param ses_path: session path containing the extracted _spikeglx_sync arrays
    :return: bool True on a successful sync
    """
    version = spikeglx.get_neuropixel_version_from_folder(ses_path)
    # Propagate the QC boolean returned by the version-specific routine: the
    # previous code dropped it and always returned None despite the documented
    # bool return value.
    if version == '3A':
        return version3A(ses_path, **kwargs)
    elif version == '3B':
        return version3B(ses_path, **kwargs)
def version3A(ses_path, display=True, type='smooth', tol=2.1):
    """
    From a session path with _spikeglx_sync arrays extracted, locate ephys files for 3A and
    outputs one sync.timestamps.probeN.npy file per acquired probe. By convention the reference
    probe is the one with the most synchronisation pulses.
    Assumes the _spikeglx_sync datasets are already extracted from binary data
    :param ses_path: session path
    :param display: if True (or a matplotlib Axes), plot the residual drift
    :param type: linear, exact or smooth
    :param tol: QC tolerance in samples on the interpolation residual
    :return: bool True on a successful sync
    """
    ephys_files = spikeglx.glob_ephys_files(ses_path)
    nprobes = len(ephys_files)
    if nprobes == 1:
        # Single probe: identity mapping, nothing to synchronise against.
        timestamps = np.array([[0., 0.], [1., 1.]])
        sr = _get_sr(ephys_files[0])
        _save_timestamps_npy(ephys_files[0], timestamps, sr)
        return True
    def get_sync_fronts(auxiliary_name):
        # Collect the fronts of one auxiliary sync channel on every probe;
        # returns None when the channel is missing or silent on any probe.
        d = Bunch({'times': [], 'nsync': np.zeros(nprobes, )})
        # auxiliary_name: frame2ttl or right_camera
        for ind, ephys_file in enumerate(ephys_files):
            sync = alf.io.load_object(ephys_file.ap.parent, '_spikeglx_sync', short_keys=True)
            sync_map = get_ibl_sync_map(ephys_file, '3A')
            # exits if sync label not found for current probe
            if auxiliary_name not in sync_map:
                return
            isync = np.in1d(sync['channels'], np.array([sync_map[auxiliary_name]]))
            # only returns syncs if we get fronts for all probes
            if np.all(~isync):
                return
            d.nsync[ind] = len(sync.channels)
            d['times'].append(sync['times'][isync])
        return d
    # Prefer frame2ttl; fall back to the right camera channel.
    d = get_sync_fronts('frame2ttl')
    if not d:
        _logger.warning('Ephys sync: frame2ttl not detected on both probes, using camera sync')
        d = get_sync_fronts('right_camera')
    if not min([t[0] for t in d['times']]) > 0.2:
        raise(ValueError('Cameras started before ephys, no sync possible'))
    # chop off to the lowest number of sync points
    nsyncs = [t.size for t in d['times']]
    if len(set(nsyncs)) > 1:
        _logger.warning("Probes don't have the same number of synchronizations pulses")
    d['times'] = np.r_[[t[:min(nsyncs)] for t in d['times']]].transpose()
    # the reference probe is the one with the most sync pulses detected
    iref = np.argmax(d.nsync)
    # islave = np.setdiff1d(np.arange(nprobes), iref)
    # get the sampling rate from the reference probe using metadata file
    sr = _get_sr(ephys_files[iref])
    qc_all = True
    # output timestamps files as per ALF convention
    for ind, ephys_file in enumerate(ephys_files):
        if ind == iref:
            # the reference probe maps onto itself (identity model)
            timestamps = np.array([[0., 0.], [1., 1.]])
        else:
            timestamps, qc = sync_probe_front_times(d.times[:, ind], d.times[:, iref], sr,
                                                    display=display, type=type, tol=tol)
            qc_all &= qc
        _save_timestamps_npy(ephys_file, timestamps, sr)
    return qc_all
def version3B(ses_path, display=True, type=None, tol=2.5):
    """
    From a session path with _spikeglx_sync arrays extracted, locate ephys files for 3B and
    outputs one sync.timestamps.probeN.npy file per acquired probe. By convention the reference
    probe is the one with the most synchronisation pulses.
    Assumes the _spikeglx_sync datasets are already extracted from binary data
    :param ses_path: session path
    :param display: if True, plot the residual drift
    :param type: linear, exact or smooth (None selects automatically based on QC)
    :param tol: QC tolerance in samples on the interpolation residual
    :return: bool True if the sync QC passed on every probe
    """
    DEFAULT_TYPE = 'smooth'
    ephys_files = spikeglx.glob_ephys_files(ses_path, bin_exists=False)
    for ef in ephys_files:
        ef['sync'] = alf.io.load_object(ef.path, '_spikeglx_sync', short_keys=True)
        ef['sync_map'] = get_ibl_sync_map(ef, '3B')
    nidq_file = [ef for ef in ephys_files if ef.get('nidq')]
    ephys_files = [ef for ef in ephys_files if not ef.get('nidq')]
    # should have at least 2 probes and only one nidq
    assert(len(nidq_file) == 1)
    nidq_file = nidq_file[0]
    # In 3B the nidq board carries the common reference sync channel.
    sync_nidq = _get_sync_fronts(nidq_file.sync, nidq_file.sync_map['imec_sync'])
    qc_all = True
    for ef in ephys_files:
        sync_probe = _get_sync_fronts(ef.sync, ef.sync_map['imec_sync'])
        sr = _get_sr(ef)
        assert(sync_nidq.times.size == sync_probe.times.size)
        # if the qc of the diff finds anomalies, do not attempt to smooth the interp function
        qcdiff = _check_diff_3b(sync_probe)
        if not qcdiff:
            qc_all = False
            type_probe = type or 'exact'
        else:
            type_probe = type or DEFAULT_TYPE
        timestamps, qc = sync_probe_front_times(sync_probe.times, sync_nidq.times, sr,
                                                display=display, type=type_probe, tol=tol)
        qc_all &= qc
        _save_timestamps_npy(ef, timestamps, sr)
    return qc_all
def sync_probe_front_times(t, tref, sr, display=False, type='smooth', tol=2.0):
    """
    From 2 timestamps vectors of equivalent length, output timestamps array to be used for
    linear interpolation
    :param t: time-serie to be synchronized
    :param tref: time-serie of the reference
    :param sr: sampling rate of the slave probe
    :param display: if True (or a matplotlib Axes) plot the residual drift
    :param type: 'smooth', 'exact' or 'linear' interpolation model
    :param tol: QC tolerance in samples on the interpolation residual
    :return: a 2 columns by n-sync points array where each row corresponds
     to a sync point: sample_index (0 based), tref
    :return: quality Bool. False if tolerance is exceeded
    """
    qc = True
    """
    the main drift is computed through linear regression. A further step compute a smoothed
    version of the residual to add to the linear drift. The precision is enforced
    by ensuring that each point lies less than one sampling rate away from the predicted.
    """
    pol = np.polyfit(t, tref, 1)  # higher order terms first: slope / int for linear
    residual = tref - np.polyval(pol, t)
    if type == 'smooth':
        """
        the interp function from camera fronts is not smooth due to the locking of detections
        to the sampling rate of digital channels. The residual is fit using frequency domain
        smoothing
        """
        import ibllib.dsp as dsp
        CAMERA_UPSAMPLING_RATE_HZ = 300
        PAD_LENGTH_SECS = 60
        STAT_LENGTH_SECS = 30  # median length to compute padding value
        SYNC_SAMPLING_RATE_SECS = 20
        # upsample the residual on a regular grid before smoothing
        t_upsamp = np.arange(tref[0], tref[-1], 1 / CAMERA_UPSAMPLING_RATE_HZ)
        res_upsamp = np.interp(t_upsamp, tref, residual)
        # padding needs extra care as the function oscillates and numpy fft performance is
        # abysmal for non prime sample sizes
        nech = res_upsamp.size + (CAMERA_UPSAMPLING_RATE_HZ * PAD_LENGTH_SECS)
        lpad = 2 ** np.ceil(np.log2(nech)) - res_upsamp.size
        lpad = [int(np.floor(lpad / 2) + lpad % 2), int(np.floor(lpad / 2))]
        res_filt = np.pad(res_upsamp, lpad, mode='median',
                          stat_length=CAMERA_UPSAMPLING_RATE_HZ * STAT_LENGTH_SECS)
        fbounds = [0.001, 0.002]
        # low-pass the padded residual, then strip the padding again
        res_filt = dsp.lp(res_filt, 1 / CAMERA_UPSAMPLING_RATE_HZ, fbounds)[lpad[0]:-lpad[1]]
        tout = np.arange(0, np.max(tref) + SYNC_SAMPLING_RATE_SECS, 20)
        sync_points = np.c_[tout, np.polyval(pol, tout) + np.interp(tout, t_upsamp, res_filt)]
        if display:
            if isinstance(display, matplotlib.axes.Axes):
                ax = display
            else:
                ax = plt.axes()
            ax.plot(tref, residual * sr, label='residual')
            ax.plot(t_upsamp, res_filt * sr, label='smoothed residual')
            ax.plot(tout, np.interp(tout, t_upsamp, res_filt) * sr, '*', label='interp timestamps')
            ax.legend()
            ax.set_xlabel('time (sec)')
            ax.set_ylabel('Residual drift (samples @ 30kHz)')
    elif type == 'exact':
        # one sync point per detected front: exact pass-through of the fronts
        sync_points = np.c_[t, tref]
        if display:
            plt.plot(tref, residual * sr, label='residual')
            plt.ylabel('Residual drift (samples @ 30kHz)')
            plt.xlabel('time (sec)')
            pass
    elif type == 'linear':
        # two points are enough to encode a purely linear model
        sync_points = np.c_[np.array([0., 1.]), np.polyval(pol, np.array([0., 1.]))]
        if display:
            plt.plot(tref, residual * sr)
            plt.ylabel('Residual drift (samples @ 30kHz)')
            plt.xlabel('time (sec)')
    # test that the interp is within tol sample
    fcn = interp1d(sync_points[:, 0], sync_points[:, 1], fill_value='extrapolate')
    if np.any(np.abs((tref - fcn(t)) * sr) > (tol)):
        _logger.error(f'Synchronization check exceeds tolerance of {tol} samples. Check !!')
        qc = False
    # plt.plot((tref - fcn(t)) * sr)
    # plt.plot( (sync_points[:, 0] - fcn(sync_points[:, 1])) * sr)
    return sync_points, qc
def _get_sr(ephys_file):
    """Read the sampling rate from the .meta file sitting next to the ap binary."""
    meta_file = ephys_file.ap.with_suffix('.meta')
    return spikeglx._get_fs_from_meta(spikeglx.read_meta_data(meta_file))
def _save_timestamps_npy(ephys_file, tself_tref, sr):
    """Write the sync model next to the ap binary as ALF .sync / .timestamps files."""
    parent = ephys_file.ap.parent
    ap_name = ephys_file.ap.name
    # raw sync model: columns are (self_time_secs, ref_time_secs)
    file_sync = parent.joinpath(ap_name.replace('.ap.', '.sync.')).with_suffix('.npy')
    np.save(file_sync, tself_tref)
    # timestamps file: first column converted from seconds to sample indices
    file_ts = parent.joinpath(ap_name.replace('.ap.', '.timestamps.')).with_suffix('.npy')
    timestamps = np.copy(tself_tref)
    timestamps[:, 0] *= np.float64(sr)
    np.save(file_ts, timestamps)
def _check_diff_3b(sync):
"""
Checks that the diff between consecutive sync pulses is below 150 PPM
Returns True on a pass result (all values below threshold)
"""
THRESH_PPM = 150
d = np.diff(sync.times[sync.polarities == 1])
dt = np.median(d)
qc_pass = np.all(np.abs((d - dt) / dt * 1e6) < THRESH_PPM)
if not qc_pass:
_logger.error(f'Synchronizations bursts over {THRESH_PPM} ppm between sync pulses. '
'Sync using "exact" match between pulses.')
return qc_pass
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import generateLaunch, logger, os, sys
cf_logger = logger.get_logger(__name__)  # debug(), info(), warning(), error(), exception(), critical()
# Where the generated ROS launch file is written.
LAUNCH_FOLDER = "../launch"
LAUNCH_FILE = "crazy_game.launch"
LAUNCH_PATH = "{}/{}".format(LAUNCH_FOLDER, LAUNCH_FILE)
# Inventory of the active drones/LEDs, one name per line.
INVENTORY_FILE = "inventory.txt"
# Address/port passed to generateLaunch.generate (server details; see that module).
IP = "172.16.1.1"
PORT = "3883"
def renameArguments(arguments):
    """Normalise CLI arguments into drone names and LED names.

    Accepts "crazyflieN" as-is, a bare digit "N" (expanded to "crazyflieN"),
    and "ledN"; any other argument is silently dropped.
    Returns [drones, leds] as two lists.
    """
    digits = "0123456789"
    drones = []
    leds = []
    for arg in arguments:
        if len(arg) == 10 and arg.startswith("crazyflie") and arg[-1] in digits:
            drones.append(arg)
        elif len(arg) == 1 and arg in digits:
            drones.append("crazyflie{}".format(arg))
        elif len(arg) == 4 and arg.startswith("led") and arg[-1] in digits:
            leds.append(arg)
    return [drones, leds]
def main(arguments):
    """Generate the launch file for the requested drones/LEDs and hand off to roslaunch."""
    drones, leds = renameArguments(arguments)
    # Between 1 and 4 drones are supported.
    if (len(drones) == 0) or (4 < len(drones)):
        cf_logger.error("Invalid call, Usage:")
        cf_logger.error("python main.py {[crazyflie0] ..}")
        cf_logger.error("For example:")
        cf_logger.error("python main.py crazyflie2 crazyflie3")
        cf_logger.error("Another equivalent usage example is:")
        cf_logger.error("python main.py 2 3")
    elif not generateLaunch.generate(LAUNCH_PATH, IP, PORT, drones, leds):
        cf_logger.info("Failed to create {}".format(LAUNCH_PATH))
    else:
        cf_logger.info("File {} created successfully".format(LAUNCH_PATH))
        # Persist the active inventory, then launch ROS (blocking call).
        with open(INVENTORY_FILE, "w+") as f:
            f.write("\n".join(drones + leds))
        os.system("roslaunch crazyflie_demo {}".format(LAUNCH_FILE))
if __name__ == "__main__":
    # Log a banner so individual runs are easy to spot in the log file.
    cf_logger.info("######################################################")
    cf_logger.info("####                 Started                     ####")
    cf_logger.info("######################################################")
    main(sys.argv[1:])
|
# -*- coding: utf-8 -*-
from PyQt4 import QtSql
from DB.BaseDBRoutine import CDatabaseRoutine
from DB.Database import CDatabase
from DB.MySQLRoutine import CMySqlRoutineMap
from DB.Tools import CSqlExpression
from Utils.Exceptions import CDatabaseException
from Utils.Forcing import forceRef
class CMySqlDatabase(CDatabase):
    """MySQL-specific CDatabase implementation on top of Qt's QMYSQL driver."""
    # LIMIT clause templates: limit1 = first N rows, limit2 = offset + count.
    limit1 = 'LIMIT 0, %d'
    limit2 = 'LIMIT %d, %d'
    # MySQL client error codes that indicate a lost server connection.
    CR_SERVER_GONE_ERROR = 2006
    CR_SERVER_LOST = 2013
    returnedDeadlockErrorText = u'Deadlock found when trying to get lock;'
    def __init__(
        self,
        serverName,
        serverPort,
        databaseName,
        userName,
        password,
        connectionName=None,
        compressData=False,
        **kwargs
    ):
        # Open the QMYSQL connection and apply session-wide SQL settings.
        CDatabase.__init__(self)
        self.createConnection('QMYSQL', connectionName, serverName, serverPort, databaseName, userName, password)
        options = []
        if compressData:
            # enable MySQL protocol compression for this client connection
            options.append('CLIENT_COMPRESS=1')
        if options:
            self.db.setConnectOptions(';'.join(options))
        self.connectUp()
        self.query('SET NAMES \'utf8\' COLLATE \'utf8_general_ci\';')
        self.query('SET SQL_AUTO_IS_NULL=0;')
        self.query('SET SQL_MODE=\'\';')
        # Lazily populated maps of stored functions/procedures (see loadFunctions).
        self._func = None
        self._proc = None
    def escapeFieldName(self, name):
        """Quote an identifier with backticks unless it is already quoted."""
        u = unicode(name)
        if u.startswith('`') and u.endswith('`'):
            return u
        else:
            return '`' + u + '`'
    # MySQL quotes table and schema names the same way as field names.
    escapeTableName = escapeFieldName
    escapeSchemaName = escapeFieldName
    NULL = property(lambda self: CSqlExpression(self, 'NULL'))
    # Accessors for stored routines; loading is deferred until first use.
    func = property(lambda self: self.loadFunctions()._func)
    proc = property(lambda self: self.loadFunctions()._proc)
    def loadFunctions(self):
        """Populate the stored function/procedure maps once; returns self for chaining."""
        if self._func is None:
            self._func = CMySqlRoutineMap(self, CDatabaseRoutine.FUNCTION)
        if self._proc is None:
            self._proc = CMySqlRoutineMap(self, CDatabaseRoutine.PROCEDURE)
        return self
    def getConnectionId(self):
        """Return the server-side CONNECTION_ID() of this session, or None."""
        query = self.query('SELECT CONNECTION_ID();')
        return forceRef(query.record().value(0)) if query.first() else None
    def prepareLimit(self, limit):
        """Render a LIMIT clause from an int (count) or an (offset, count) pair."""
        if isinstance(limit, (list, tuple)):
            assert len(limit) == 2
            return self.limit2 % limit
        elif isinstance(limit, int):
            return self.limit1 % limit
        else:
            return ''
    def nestedTransaction(self):
        """Emulate a nested transaction with a numbered SAVEPOINT."""
        QtSql.QSqlQuery(self.db).exec_('SAVEPOINT LEVEL_%d' % (self._openTransactionsCount + 1))
        if self.db.lastError().isValid():
            raise CDatabaseException(CDatabase.errTransactionError, self.db.lastError())
    def nestedCommit(self):
        """Release the savepoint of the current nesting level."""
        QtSql.QSqlQuery(self.db).exec_('RELEASE SAVEPOINT LEVEL_%d' % self._openTransactionsCount)
        if self.db.lastError().isValid():
            raise CDatabaseException(CDatabase.errNestedCommitTransactionError, self.db.lastError())
    def nestedRollback(self):
        """Roll back to the savepoint of the current nesting level."""
        QtSql.QSqlQuery(self.db).exec_('ROLLBACK TO SAVEPOINT LEVEL_%d' % self._openTransactionsCount)
        if self.db.lastError().isValid():
            raise CDatabaseException(CDatabase.errNestedRollbackTransactionError, self.db.lastError())
    def isConnectionLostError(self, sqlError):
        """True if sqlError indicates the MySQL server connection was lost."""
        if sqlError and sqlError.number() in [CMySqlDatabase.CR_SERVER_GONE_ERROR,
                                              CMySqlDatabase.CR_SERVER_LOST]:
            return True
        return CDatabase.isConnectionLostError(self, sqlError)
|
"""Circle model"""
#django
from django.db import models
#utilities
from cride.utils.models import CRideModel
class Circle(CRideModel):
    """Circle model.

    A circle is a private group where rides are offered and taken
    by its members. To join a circle a user must receive an unique
    invitation code from an existing circle member.
    """
    name = models.CharField('circle name', max_length=140)
    # Equivalent to a username for the group; used in URLs.
    slug_name = models.SlugField(unique=True, max_length=40)
    about = models.CharField('circle description', max_length=255)
    picture = models.ImageField(upload_to='circles/pictures', blank=True, null=True)
    members = models.ManyToManyField(
        'users.User',
        # The through model adds extra fields to the membership relation.
        through='circles.Membership',
        # Required because Membership has more than one FK to the same model.
        through_fields=('circle', 'user')
    )
    # Stats
    rides_offered = models.PositiveIntegerField(default=0)
    rides_taken = models.PositiveIntegerField(default=0)
    verified = models.BooleanField(
        'verified circle',
        default=False,
        help_text='Verified circles are also known as official communities'
    )
    is_public = models.BooleanField(
        default=True,
        help_text='Public circles are listed in the main page so everyone knows about their existence'
    )
    is_limited = models.BooleanField(
        'limited',
        default=False,
        help_text='Limited circles can grow up to a fixed number of members'
    )
    members_limit = models.PositiveIntegerField(
        default=0,
        help_text='If circle is limited, this will be the limit on the number of members'
    )

    def __str__(self):
        """Return circle name."""
        return self.name

    class Meta(CRideModel.Meta):
        """Meta class."""
        # The leading '-' sorts descending: most active circles first.
        ordering = ['-rides_taken', '-rides_offered']
|
# -*- coding: utf-8 -*-
import six
class Broker(object):
    """Routes published events to named topics backed by observable instances."""

    def __init__(self, observable_class):
        """
        Parameters
        ----------
        observable_class : clare.common.event_driven.Observable
        """
        self._observable_class = observable_class
        self._topics_index = {}

    def create_topic(self, name):
        """
        A new topic is created only if one by the same name does not
        already exist.
        Parameters
        ----------
        name : str
        """
        if name in self._topics_index:
            return
        self._topics_index[name] = self._observable_class()

    def list_topics(self):
        """
        Returns
        -------
        collections.KeysView
        """
        return six.viewkeys(self._topics_index)

    def publish(self, event, topic_name):
        """
        Parameters
        ----------
        event : str
        topic_name : str
        """
        self._topics_index[topic_name].notify(event=event)

    def subscribe(self, subscriber, topic_name):
        """
        Parameters
        ----------
        subscriber : clare.common.event_driven.interfaces.INotifyable
        topic_name : str
        """
        self._topics_index[topic_name].register(observer=subscriber)

    def __repr__(self):
        template = '{}(observable_class={})'
        return template.format(type(self).__name__, self._observable_class)
|
from concurrent.futures import TimeoutError
from google.cloud import pubsub_v1
from mementos import MementoMetaclass
from utils import get_logger
logger = get_logger(__name__)
class Subscription(metaclass=MementoMetaclass):
    """Thin wrapper around Google Cloud Pub/Sub subscription management.

    Memoized via MementoMetaclass, so constructing with equivalent config
    yields the same instance.
    """
    def __init__(self, config):
        self.project_id = config.application.project_id
        self.topic_id = config.application.topic_id
        self.subscription_id = config.application.subscription_id
        self.endpoint = config.application.endpoint
    def create_pull_subscription(self):
        """Create a new pull subscription on the given topic."""
        publisher = pubsub_v1.PublisherClient()
        subscriber = pubsub_v1.SubscriberClient()
        topic_path = publisher.topic_path(self.project_id, self.topic_id)
        subscription_path = subscriber.subscription_path(self.project_id, self.subscription_id)
        with subscriber:
            subscription = subscriber.create_subscription(
                request={
                    "name": subscription_path,
                    "topic": topic_path
                }
            )
        logger.debug("Subscription created: {}".format(subscription))
    def create_push_subscription(self):
        """Create a new push subscription on the given topic."""
        publisher = pubsub_v1.PublisherClient()
        subscriber = pubsub_v1.SubscriberClient()
        topic_path = publisher.topic_path(
            self.project_id,
            self.topic_id
        )
        subscription_path = subscriber.subscription_path(
            self.project_id,
            self.subscription_id
        )
        push_config = pubsub_v1.types.PushConfig(
            push_endpoint=self.endpoint
        )
        with subscriber:
            subscription = subscriber.create_subscription(
                request={
                    "name": subscription_path,
                    "topic": topic_path,
                    "push_config": push_config,
                }
            )
        logger.debug(f"Push subscription created: {subscription}.")
        # BUGFIX: was the bare name `endpoint`, which is undefined here and
        # raised NameError whenever a push subscription was created.
        logger.debug(f"Endpoint for subscription is: {self.endpoint}")
    def delete_subscription(self):
        """Deletes an existing Pub/Sub subscription."""
        subscriber = pubsub_v1.SubscriberClient()
        subscription_path = subscriber.subscription_path(
            self.project_id,
            self.subscription_id
        )
        with subscriber:
            subscriber.delete_subscription(request={"subscription": subscription_path})
        logger.debug("Subscription deleted: {}".format(subscription_path))
    def receive_messages(self, timeout=None):
        """Receives messages from a pull subscription.

        Blocks until *timeout* seconds have elapsed (or forever if None).
        """
        subscriber = pubsub_v1.SubscriberClient()
        subscription_path = subscriber.subscription_path(
            self.project_id,
            self.subscription_id
        )
        # BUGFIX: was `self.subscription_path`, which is not an attribute of
        # this class (AttributeError); use the local path built above.
        streaming_pull_future = subscriber.subscribe(
            subscription_path,
            callback=self._on_receive_callback
        )
        logger.debug(f"Listening for messages on {subscription_path}..\n")
        with subscriber:
            try:
                streaming_pull_future.result(timeout=timeout)
            except TimeoutError:
                streaming_pull_future.cancel()
    def receive_messages_with_flow_control(self, timeout=None):
        """Receives messages from a pull subscription with flow control."""
        subscriber = pubsub_v1.SubscriberClient()
        subscription_path = subscriber.subscription_path(
            self.project_id,
            self.subscription_id
        )
        # Limit the subscriber to only have ten outstanding messages at a time.
        flow_control = pubsub_v1.types.FlowControl(max_messages=10)
        streaming_pull_future = subscriber.subscribe(
            subscription_path,
            callback=self._on_receive_callback,
            flow_control=flow_control
        )
        logger.debug(f"Listening for messages on {subscription_path}..\n")
        with subscriber:
            try:
                streaming_pull_future.result(timeout=timeout)
            except TimeoutError:
                streaming_pull_future.cancel()
    def _on_receive_callback(self, message):
        # Ack immediately; message processing is limited to logging here.
        logger.debug(f"Received {message.data}.")
        message.ack()
|
from collections import defaultdict
class Graph:
    """Undirected graph stored as an adjacency list."""
    def __init__(self):
        self.graph = defaultdict(list)

    def addEdge(self, u, v):
        """Add an undirected edge between u and v."""
        self.graph[u].append(v)
        self.graph[v].append(u)

    def printGraph(self):
        """Print each vertex followed by its adjacency list."""
        for i in self.graph.keys():
            print(i, '->', ' -> '.join([str(j) for j in self.graph[i]]))

    def BFS(self, start):
        """Breadth-first traversal from start, printing vertices in visit order."""
        # deque gives O(1) popleft; list.pop(0) made the traversal O(V^2).
        from collections import deque
        explored = {start}
        queue = deque([start])
        while queue:
            v = queue.popleft()
            print(v, end=" ")
            for w in self.graph[v]:
                if w not in explored:
                    explored.add(w)
                    queue.append(w)
g = Graph()
# Alternative numeric demo graph (disabled):
# g.addEdge(1, 2)
# g.addEdge(1, 3)
# g.addEdge(2, 4)
# g.addEdge(2, 5)
# g.addEdge(3, 5)
# g.addEdge(4, 6)
# g.addEdge(5, 6)
# g.addEdge(6, 7)
# Demo letter graph: a 9-cycle A-F-C-B-H-E-D-G-I-A plus the chord C-E.
g.addEdge('A', 'F')
g.addEdge('F', 'C')
g.addEdge('C', 'B')
g.addEdge('C', 'E')
g.addEdge('B', 'H')
g.addEdge('H', 'E')
g.addEdge('E', 'D')
g.addEdge('D', 'G')
g.addEdge('G', 'I')
g.addEdge('I', 'A')
# g.printGraph()
print("Breadth First Search: ")
g.BFS('C')
print()
# 1
# / \
# 2 3
# | \ /
# 4 5
# \ /
# 6 -- 7
# https://www.koderdojo.com/media/default/articles/directed-acyclic-graph-computer-science.png
# def BFS(self, start):
# queue = []
# visited = [False] * self.E
# queue.append(start)
# visited[start] = True
# while queue:
# current = queue.pop(0)
# print(current, end=" ")
# for i in self.graph[current]:
# if not visited[i]:
# queue.append(i)
# visited[i] = True
|
import subprocess
# Demo: shell=True is needed because the command string relies on '~'
# expansion and ';' sequencing; the string is a fixed literal (no untrusted
# input), so the shell invocation is acceptable here.
subprocess.run(["ls -l ~; sleep 4"], shell=True)
print('lalal')
|
# Copyright 2021 Universität Tübingen, DKFZ and EMBL for the German Human Genome-Phenome Archive (GHGA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy.orm import joinedload
from sqlalchemy import or_, and_
from collections import defaultdict, Counter
from . import resource, linting, errors
from .models import File, MetaDataSet, MetaDatumRecord, MetaDatum
from .security import authz
from .api.metadata import get_all_metadata
from .utils import get_record_from_metadataset
def validate_submission_access(db, db_files, db_msets, auth_user):
    """Validates a submission with regard to
    - Entities that cannot be found
    - Entities the user does not have access to
    Raises:
        400 HTTPBadRequest
    """
    def entity(eid):
        # Both keys carry the same id; downstream error rendering uses either.
        return { 'site' : eid, 'uuid' : eid }
    val_errors = []
    # Missing files
    for file_id, db_file in db_files.items():
        if db_file is None:
            val_errors.append((entity(file_id), None, "Not found"))
    # Files the user may not submit
    for file_id, db_file in db_files.items():
        if db_file is not None and not authz.submit_file(auth_user, db_file):
            val_errors.append((entity(file_id), None, "Access denied"))
    # Missing metadatasets
    for mset_id, db_mset in db_msets.items():
        if db_mset is None:
            val_errors.append((entity(mset_id), None, "Not found"))
    # Metadatasets the user may not submit
    for mset_id, db_mset in db_msets.items():
        if db_mset is not None and not authz.submit_mset(auth_user, db_mset):
            val_errors.append((entity(mset_id), None, "Access denied"))
    # Raise a single 400 covering everything collected so far, to avoid
    # exposing internals about data the user should not be able to access.
    if val_errors:
        entities, fields, messages = zip(*val_errors)
        raise errors.get_validation_error(messages=messages, fields=fields, entities=entities)
def validate_submission_association(db_files, db_msets, ignore_submitted_metadatasets = False):
    """Validates a submission with regard to
    - All submitted files being associated to metadata
    - All files referenced in the metadata being part of the submission
    - All referenced entities have not been submitted before
    Returns (tuple):
        f_names_obj - A dict mapping file names to database file object
        ref_fnames - A dict mapping file names referenced by metadatumrecords to the metadatumrecord
        errors - A list of tuples (entity, field, message) describing the errors that occurred
    """
    # NOTE(review): this local list shadows the 'errors' module imported at
    # the top of the file. The module is not used in this function, but
    # renaming the local (e.g. val_errors) would avoid confusion.
    errors = []
    # Collect files with no data associated
    errors += [ (db_file, None, "No data uploaded") for file_id, db_file in db_files.items() if not db_file.content_uploaded ]
    # Collect files which already have other metadata associated
    errors += [ (db_file, None, "Already submitted") for file_id, db_file in db_files.items() if db_file.metadatumrecord is not None ]
    # Collect metadatasets that were already submitted
    if not ignore_submitted_metadatasets:
        errors += [ (db_mset, None, "Already submitted") for mset_id, db_mset in db_msets.items() if db_mset.submission_id is not None ]
    # Collect the file names of the provided files
    f_names_obj = defaultdict(list)
    for db_file in db_files.values():
        if db_file is not None:
            f_names_obj[db_file.name].append(db_file)
    # Make sure the filenames are unique
    for fname, db_objs in f_names_obj.items():
        if len(db_objs) > 1:
            errors += [ (db_file, None, "Filename occurs multiple times among provided files") for db_file in db_objs ]
    # Collect the file names referenced by the metadata sets - null values are
    # not considered here, neither are records for which the file has already
    # been linked (this case can occur when this function is called during a
    # service execution)
    mdat_fnames_obj = defaultdict(list)
    for mset in db_msets.values():
        for mdatrec in mset.metadatumrecords:
            if mdatrec.value is not None and mdatrec.metadatum.isfile and mdatrec.file_id is None:
                mdat_fnames_obj[mdatrec.value].append(mdatrec)
    ref_fnames = { mdatrec.value : mdatrec for mset in db_msets.values() for mdatrec in mset.metadatumrecords if mdatrec.metadatum.isfile and mdatrec.value and mdatrec.file_id is None}
    # Make sure referenced file names are unique
    ref_fname_counts = Counter(mdatrec.value for mdatrecs in mdat_fnames_obj.values() for mdatrec in mdatrecs)
    errors += [ (mdatrec.metadataset, mdatrec.metadatum.name, "Filename occurs multiple times in metadata")
                for ref_fname, count in ref_fname_counts.items() if count > 1
                for mdatrec in mdat_fnames_obj[ref_fname]]
    # Make sure the files' filenames and the referenced filenames match
    errors += [ (db_file, None, "File included without reference in metadata") for db_file in db_files.values() if db_file.name not in ref_fnames.keys() ]
    errors += [ (mdatrec.metadataset, mdatrec.metadatum.name, "Referenced file not provided") for ref_fname, mdatrec in ref_fnames.items() if ref_fname not in f_names_obj ]
    return f_names_obj, ref_fnames, errors
def validate_submission_uniquekeys(db, db_files, db_msets):
    """Validate unique-key constraints over the metadatasets of a submission.

    Checks two classes of constraints defined on MetaDatum:
      * submission-unique keys (site-unique implies submission-unique), whose
        values must not repeat within the provided metadatasets, and
      * site-unique keys, whose values must additionally not collide with any
        already-submitted metadataset in the database.

    Returns a list of (metadataset, key_name, message) error tuples; empty if
    all constraints hold. `db_files` is accepted for interface symmetry with
    the sibling validators and is not used here.
    """
    def values_by_key(key_name):
        # Map every value recorded for `key_name` to the metadatasets
        # in which that value occurs.
        collected = defaultdict(list)
        for db_mset in db_msets.values():
            for record in db_mset.metadatumrecords:
                if record.metadatum.name == key_name:
                    collected[record.value].append(db_mset)
        return collected

    errors = []
    # Keys that must be unique within the submission (includes site-unique keys)
    submission_unique_keys = [
        md.name
        for md in db.query(MetaDatum).filter(
            or_(MetaDatum.submission_unique.is_(True), MetaDatum.site_unique.is_(True))
        )
    ]
    # Keys that must be unique across the whole site
    site_unique_keys = [
        md.name for md in db.query(MetaDatum).filter(MetaDatum.site_unique.is_(True))
    ]
    # Intra-submission uniqueness: flag every metadataset sharing a value
    # that occurs in more than one metadataset.
    for key in submission_unique_keys:
        duplicated = {value: msets
                      for value, msets in values_by_key(key).items()
                      if len(msets) > 1}
        errors += [
            (db_mset, key, "Violation of intra-submission unique constraint")
            for msets in duplicated.values()
            for db_mset in msets
        ]
    # Site-wide uniqueness: compare the supplied values against records of
    # metadatasets that were already submitted.
    for key in site_unique_keys:
        value_msets = values_by_key(key)
        query = (
            db.query(MetaDatumRecord)
            .join(MetaDataSet)
            .join(MetaDatum)
            .filter(and_(
                MetaDataSet.submission_id.isnot(None),
                MetaDatum.name == key,
                MetaDatumRecord.value.in_(value_msets.keys())
            ))
        )
        db_values = [record.value for record in query]
        errors += [
            (db_mset, key, "Violation of global unique constraint")
            for value, msets in value_msets.items() if value in db_values
            for db_mset in msets
        ]
    return errors
def validate_submission(request, auth_user):
    """Validate a submission request end to end.

    Collects the referenced files and metadatasets (deduplicated by id),
    verifies access rights, file/metadata association, per-metadataset lint
    rules and unique-key constraints. All validation problems are collected
    and raised together as one validation error (HTTP 400).

    Returns (fnames, ref_fnames, db_files, db_msets) on success, where
    `fnames` maps each (unique) file name to its database file object.
    """
    db = request.dbsession
    # Collect files, drop duplicates
    db_files = { file_id : resource.resource_query_by_id(db, File, file_id).options(joinedload(File.metadatumrecord)).one_or_none() for file_id in set(request.openapi_validated.body['fileIds']) }
    # Collect metadatasets, drop duplicates
    db_msets = { mset_id : resource.resource_query_by_id(db, MetaDataSet, mset_id).options(joinedload(MetaDataSet.metadatumrecords).joinedload(MetaDatumRecord.metadatum)).one_or_none()
            for mset_id in set(request.openapi_validated.body['metadatasetIds']) }
    # An empty submission is rejected outright.
    if not db_files and not db_msets:
        raise errors.get_validation_error(messages=["Neither data nor metadata provided in submission."])
    # Check for access critical failures (raises on failure)
    validate_submission_access(db, db_files, db_msets, auth_user)
    # Validate file and metadata association and submit status
    fnames, ref_fnames, val_errors = validate_submission_association(db_files, db_msets)
    # Get all non-service metadata definitions
    metadata = get_all_metadata(db, include_service_metadata=False)
    # Convert metadatasets to dictionaries
    msets = { mset_id : get_record_from_metadataset(db_mset, metadata, False) for mset_id, db_mset in db_msets.items() }
    # Validate every metadataset individually against the lint rules
    for mset_id, mset_values in msets.items():
        mset_errors = linting.validate_metadataset_record(metadata, mset_values, return_err_message=True, rendered=True)
        val_errors += [ (db_msets[mset_id], mset_error['field'], mset_error['message']) for mset_error in mset_errors ]
    # Validate unique field constraints
    val_errors += validate_submission_uniquekeys(db, db_files, db_msets)
    # If we collected any val_errors, raise 400 with all of them at once
    if val_errors:
        entities, fields, messages = zip(*val_errors)
        raise errors.get_validation_error(messages=messages, fields=fields, entities=entities)
    # Given that validation hasn't failed, we know that file names are unique. Flatten the dict.
    fnames = { k : v[0] for k, v in fnames.items() }
    return fnames, ref_fnames, db_files, db_msets
|
from django.conf import settings

# Upload app configuration; projects may override any key by defining an
# UPLOAD dict in their Django settings.
UPLOAD = getattr(settings, 'UPLOAD', {
    'collection_model': 'upload.Collection',  # dotted path of the collection model
    'media_root': settings.MEDIA_ROOT,        # directory where uploads are stored
    'downsize_to': (1024, 768),               # max (width, height) after downsizing
    'fill_transparent': (255, 255, 255),      # Use False to keep PNG alpha
})
|
import sys
import json
import csv
# Paths to the validation-split annotation files of each source dataset.
COCO_ANN_PATH = 'datasets/coco/annotations/instances_val2017.json'
OID_ANN_PATH = 'datasets/oid/annotations/openimages_challenge_2019_val_v2_expanded.json'
OBJECTS365_ANN_PATH = 'datasets/objects365/annotations/objects365_val.json'
MAPILLARY_ANN_PATH = 'datasets/mapillary/annotations/validation.json'
# Column of the unified-label CSV that carries each dataset's category name.
COL = {'coco': 4, 'objects365': 3, 'oid': 1, 'mapillary': 5}
def csvread(file):
    """Read a CSV file and return its rows as a list of lists of strings."""
    with open(file, 'r', encoding='utf-8') as handle:
        return list(csv.reader(handle))
def get_unified_label_map(unified_label, cats):
    '''
    Build the mapping from per-dataset category ids to unified label ids.

    Inputs:
        unified_label: rows of the unified-label CSV (first row is the header)
        cats: dict mapping dataset name -> list of category dicts
    Return:
        unified_label_map: dict of dict
            (dataset (string), cat_id (int)) --> unified_id (int)
    '''
    unified_label_map = {}
    for dataset_name in cats:
        unified_label_map[dataset_name] = {}
        column = COL[dataset_name]
        # Normalized category names of this dataset from the CSV (header skipped).
        table_names = [row[column].lower().strip() for row in unified_label[1:]]
        # Remap the dataset's (possibly sparse) raw ids to contiguous ids.
        sorted_ids = sorted(cat['id'] for cat in cats[dataset_name])
        contiguous = {raw_id: idx for idx, raw_id in enumerate(sorted_ids)}
        for cat_info in cats[dataset_name]:
            # OID categories are matched by freebase id rather than display name.
            key = cat_info['freebase_id'] if dataset_name == 'oid' else cat_info['name']
            normalized = key.lower().strip()
            cont_id = contiguous[cat_info['id']]
            if normalized in table_names:
                unified_label_map[dataset_name][cont_id] = table_names.index(normalized)
            else:
                print('ERROR!', key, 'not find!')
        print(dataset_name, 'OK')
    return unified_label_map
if __name__ == '__main__':
    # The only command-line argument is the path of the unified-label CSV.
    unified_label_path = sys.argv[1]
    unified_label = csvread(unified_label_path)
    cats = {}
    print('Loading')
    # Category lists of every source dataset's validation annotations.
    cats['coco'] = json.load(open(COCO_ANN_PATH, 'r'))['categories']
    cats['oid'] = json.load(open(OID_ANN_PATH, 'r'))['categories']
    cats['objects365'] = json.load(open(OBJECTS365_ANN_PATH, 'r'))['categories']
    cats['mapillary'] = json.load(open(MAPILLARY_ANN_PATH, 'r'))['categories']
    unified_label_map = get_unified_label_map(unified_label, cats)
    # Dense per-dataset list: contiguous category id -> unified id.
    unified_label_map_list = {d: [unified_label_map[d][i] \
        for i in range(len(cats[d]))] for d in cats}
    # Unified ids covered by each dataset.
    dataset_inds = {d: sorted(unified_label_map[d].values()) \
        for d in cats}
    # 0/1 mask over the unified label space for each dataset.
    dataset_mask = {d: [1 if i in dataset_inds[d] else 0 \
        for i in range(len(unified_label) - 1)] for d in cats}
    # Unified categories; names come from the CSV's first column.
    categories = [{'id': i, 'name': x[0]} for i, x in \
        enumerate(unified_label[1:])]
    out = {'categories': categories, 'label_map_dict': unified_label_map,
        'label_map': unified_label_map_list,
        'raw_data': unified_label, 'dataset_inds': dataset_inds,
        'dataset_mask': dataset_mask}
    # Write next to the input CSV, swapping the .csv suffix for _4d.json.
    json.dump(out, open(
        '{}_4d.json'.format(unified_label_path[:-4]), 'w'))
    # Echo the unified categories in copy-pasteable form.
    for x in categories:
        print(' {' + "'id': {}, 'name': '{}'".format(x['id'], x['name']) + '},')
|
import os
from aiogram import Bot, Dispatcher
from aiogram.contrib.fsm_storage.files import PickleStorage
import dotenv

# Load environment variables from a local .env file
dotenv.load_dotenv()

# The bot token is read from the API_TOKEN environment variable.
bot = Bot(token=os.getenv("API_TOKEN"))
# FSM state is persisted on disk so conversations survive restarts.
storage = PickleStorage("db.pickle")
dp = Dispatcher(bot, storage=storage)
|
#!/usr/bin/env python
# coding: utf-8
# # Tarea #2
# # Estudiante: Heiner Romero Leiva
# ### Importacion de paquetes
# In[1]:
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.metrics import confusion_matrix
from pandas import DataFrame
from matplotlib import colors as mcolors
# ### Función para calcular los índices de calidad de la predicción
# In[2]:
def indices_general(MC, nombres = None):
    """Compute global quality indices from a confusion matrix.

    Args:
        MC: square confusion matrix (numpy array) with true classes in rows.
        nombres: optional list of category names used as column labels of the
            per-category precision frame.

    Returns:
        dict with the confusion matrix, global precision, global error and a
        one-row DataFrame of per-category precision.
    """
    precision_global = np.sum(MC.diagonal()) / np.sum(MC)
    error_global = 1 - precision_global
    # Per-category precision: diagonal over the row (true-class) totals.
    precision_categoria = pd.DataFrame(MC.diagonal() / np.sum(MC, axis=1)).T
    # Fix: use `is not None` instead of `!= None` — the equality form is
    # unidiomatic and breaks for array-like arguments (elementwise compare).
    if nombres is not None:
        precision_categoria.columns = nombres
    return {"Matriz de Confusión":MC,
            "Precisión Global":precision_global,
            "Error Global":error_global,
            "Precisión por categoría":precision_categoria}
# ### Función para graficar la distribución de la variable a predecir
# In[3]:
def distribucion_variable_predecir(data:DataFrame,variable_predict:str):
    """Plot the distribution of the target variable as one stacked horizontal bar.

    Each category's relative frequency is drawn as a segment, with its
    percentage printed at the segment midpoint.
    """
    # CSS4 color names; offset 11 below skips the first palette entries —
    # presumably to avoid very light colors, TODO confirm.
    colors = list(dict(**mcolors.CSS4_COLORS))
    # Relative frequency of each category of the target variable.
    df = pd.crosstab(index=data[variable_predict],columns="valor") / data[variable_predict].count()
    fig = plt.figure(figsize=(10,9))
    g = fig.add_subplot(111)
    countv = 0
    titulo = "Distribución de la variable %s" % variable_predict
    # Stack one horizontal segment per category, accumulating the left offset.
    for i in range(df.shape[0]):
        g.barh(1,df.iloc[i],left = countv, align='center',color=colors[11+i],label= df.iloc[i].name)
        countv = countv + df.iloc[i]
    vals = g.get_xticks()
    g.set_xlim(0,1)
    g.set_yticklabels("")
    g.set_title(titulo)
    g.set_ylabel(variable_predict)
    # Format the x axis ticks as percentages.
    g.set_xticklabels(['{:.0%}'.format(x) for x in vals])
    # Annotate each segment with its percentage at the segment midpoint.
    countv = 0
    for v in df.iloc[:,0]:
        g.text(np.mean([countv,countv+v]) - 0.03, 1 , '{:.1%}'.format(v), color='black', fontweight='bold')
        countv = countv + v
    g.legend(loc='upper center', bbox_to_anchor=(1.08, 1), shadow=True, ncol=1)
# ### Funciones para ver la distribución de una variable respecto a la predecir (poder predictivo)
# ### Función para ver la distribución de una variable categórica respecto a la predecir
# In[4]:
def poder_predictivo_categorica(data:DataFrame, var:str, variable_predict:str):
    """Plot the distribution of a categorical variable against the target.

    Draws one stacked horizontal bar per category of `var`, split by the
    classes of `variable_predict` (row-normalized), to visualize predictive
    power.
    """
    df = pd.crosstab(index= data[var],columns=data[variable_predict])
    # Normalize each row so segments show per-category class proportions.
    df = df.div(df.sum(axis=1),axis=0)
    titulo = "Distribución de la variable %s según la variable %s" % (var,variable_predict)
    g = df.plot(kind='barh',stacked=True,legend = True, figsize = (10,9), xlim = (0,1),title = titulo, width = 0.8)
    vals = g.get_xticks()
    # Format the x axis ticks as percentages.
    g.set_xticklabels(['{:.0%}'.format(x) for x in vals])
    g.legend(loc='upper center', bbox_to_anchor=(1.08, 1), shadow=True, ncol=1)
    # Widen the bars slightly after plotting.
    for bars in g.containers:
        plt.setp(bars, width=.9)
    # Annotate every segment with its percentage at the segment midpoint.
    for i in range(df.shape[0]):
        countv = 0
        for v in df.iloc[i]:
            g.text(np.mean([countv,countv+v]) - 0.03, i , '{:.1%}'.format(v), color='black', fontweight='bold')
            countv = countv + v
# ### Función para ver la distribución de una variable numérica respecto a la predecir
# In[5]:
def poder_predictivo_numerica(data:DataFrame, var:str, variable_predict:str):
    """Plot the density of a numeric variable per class of the target variable.

    NOTE(review): `sns` (seaborn) is never imported in this file — calling
    this function as-is raises NameError; confirm the original notebook
    imported seaborn in a cell that was not exported.
    """
    sns.FacetGrid(data, hue=variable_predict, height=6).map(sns.kdeplot, var, shade=True).add_legend()
# ### Ejercicio 1:
# ### Pregunta 1: [25 puntos] En este ejercicio usaremos los datos (voces.csv). Se trata de un problema de reconocimiento de genero mediante el analisis de la voz y el habla. Esta base de datos fue creada para identificar una voz como masculina o femenina, basandose en las propiedades acusticas de la voz y el habla. El conjunto de datos consta de 3.168 muestras de voz grabadas, recogidas de hablantes masculinos y femeninos.
# ### 1. Cargue la tabla de datos voces.csv en Python
# In[6]:
# Load the voices dataset (3,168 recorded voice samples).
voces = pd.read_csv("voces.csv", delimiter = ',', decimal = '.')
voces
# In[7]:
# Inspect the basic structure of the table.
voces.info()
voces.shape
voces.info
# In[8]:
voces.describe()
# Basic statistics are computed to inspect distributions and decide whether
# the table needs centering and scaling.
# In[9]:
# Center and scale the table since the variables use different scales.
voices = voces.iloc[:,0:20]
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(voices)
voices.loc[:,:] = scaled_values
voices.head()
# #### Distribution of the variable to predict
# In[10]:
distribucion_variable_predecir(voces,"genero")
# The target variable is fully balanced, so the testing predictions should
# come out very similar for both classes.
# ### 2. Generate a random 20% testing table; use the rest for training.
# #### Keep the predictor variables in X (the categorical column is dropped).
# In[11]:
X = voices.iloc[:,0:20]
X.head()
# #### Keep the variable to predict in y.
# In[12]:
y = voces.iloc[:,20:21]
y.head()
# #### Split: 80% of the data for training and 20% for testing.
# In[13]:
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
# ### 3. Usando los metodos de Bayes, Discriminante Lineal y Discriminante Cuadratico genere modelos predictivos para la tabla de aprendizaje
# In[65]:
# === Naive Bayes ===
cadena = "-- Utilizando Metodo de Bayes --"
print(cadena.center(120," "))
# In[15]:
# Default parameters are used.
bayes = GaussianNB()
print(bayes)
# #### Train the model.
# In[16]:
bayes.fit(X_train, y_train.iloc[:,0].values)
# #### Print the predictions.
# In[17]:
print("Las predicciones en Testing son: {}".format(bayes.predict(X_test)))
# #### Quality indices of the model.
# In[18]:
prediccion = bayes.predict(X_test)
MC = confusion_matrix(y_test, prediccion)
indices = indices_general(MC,list(np.unique(y)))
for k in indices:
    print("\n%s:\n%s"%(k,str(indices[k])))
# In[64]:
# === Linear Discriminant Analysis ===
cadena = "-- Utilizando Metodo de Discriminante Lineal --"
print(cadena.center(120, " "))
# In[26]:
# Default parameters are used.
lda = LinearDiscriminantAnalysis(solver = 'lsqr', shrinkage = 'auto')
print(lda)
# #### Train the model.
# In[27]:
lda.fit(X_train, y_train.iloc[:,0].values)
# #### Print the predictions.
# In[28]:
print("Las predicciones en Testing son: {}".format(lda.predict(X_test)))
# #### Quality indices of the model.
# In[29]:
prediccion = lda.predict(X_test)
MC = confusion_matrix(y_test, prediccion)
indices = indices_general(MC,list(np.unique(y)))
for k in indices:
    print("\n%s:\n%s"%(k,str(indices[k])))
# In[66]:
# === Quadratic Discriminant Analysis ===
cadena = "-- Utilizando Metodo de Discriminante Cuadratico --"
print(cadena.center(120, " "))
# In[15]:
# Default parameters are used, except store_covariance (see note below).
qda = QuadraticDiscriminantAnalysis(store_covariance=True)
print(qda)
# #### Train the model.
# In[16]:
qda.fit(X_train, y_train.iloc[:,0].values)
# #### Python raises a warning here:
#
# Explanation: QDA inverts a covariance matrix, which is ill-conditioned when
# the determinant is close to 0 (e.g. two variables are nearly collinear),
# making the coefficients impossible to interpret: an increase in X1 forces a
# decrease in X2 and vice versa.
#
# Evaluate the model via global precision; since the first run gave poor
# results (below 70% precision), store_covariance was changed from False to
# True so the covariance matrix is computed and stored, which improved the
# precision.
#
# #### Print the predictions.
# In[17]:
print("Las predicciones en Testing son: {}".format(qda.predict(X_test)))
# #### Quality indices of the model.
# In[18]:
prediccion = qda.predict(X_test)
MC = confusion_matrix(y_test, prediccion)
indices = indices_general(MC,list(np.unique(y)))
for k in indices:
    print("\n%s:\n%s"%(k,str(indices[k])))
# ### 4. Compute confusion matrix and derived indices; build a comparison table.
# In[67]:
cadena = "-- Desplegando indices Globales Personalizados de Metodo de Bayes --"
print(cadena.center(120, " "))
# In[22]:
# Desplegando funcion programada
def indices_personalizados(MC):
    """Print the confusion matrix and return custom binary quality indices.

    MC is a 2x2 confusion matrix (DataFrame) with the negative class in row 0
    and the positive class in row 1.
    """
    print(MC)
    # Unpack the four cells once instead of re-indexing in every formula.
    neg_ok = MC.iloc[0, 0]   # true negatives
    neg_bad = MC.iloc[0, 1]  # false positives
    pos_bad = MC.iloc[1, 0]  # false negatives
    pos_ok = MC.iloc[1, 1]   # true positives
    total = neg_ok + neg_bad + pos_bad + pos_ok
    precision_global = (neg_ok + pos_ok) / total
    return {"Precisión Global":precision_global,
            "Error Global":1 - precision_global,
            "Precision Positiva (PP)":pos_ok / (pos_bad + pos_ok),
            "Precision Negativa (PN)":neg_ok / (neg_ok + neg_bad),
            "Falsos Positivos (PFP)":neg_bad / (neg_ok + neg_bad),
            "Falsos Negativos (PFN)":pos_bad / (pos_bad + pos_ok),
            "Asertividad Positiva (AP)":pos_ok / (neg_bad + pos_ok),
            "Asertividad Negativa (AN)":neg_ok / (neg_ok + pos_bad)}
# #### Desplegando Indices Personalizados
# In[23]:
# Confusion matrix hard-coded from the Bayes run above.
datos = (([284, 42],[33, 275]))
df = pd.DataFrame(datos, columns = ["Masculino", "Femenino"])
MC = df
indices_personalizados(MC)
# In[68]:
cadena = "-- Desplegando indices Globales Personalizados de Metodo Discriminante Lineal --"
print(cadena.center(120, " "))
# #### Display the custom indices.
# In[25]:
# Confusion matrix hard-coded from the LDA run above.
datos = (([315, 11],[5, 303]))
df = pd.DataFrame(datos, columns = ["Masculino", "Femenino"])
MC = df
indices_personalizados(MC)
# In[69]:
cadena = "-- Desplegando indices Globales Personalizados de Metodo Discriminante Cuadratico --"
print(cadena.center(120, " "))
# #### Display the custom indices.
# In[28]:
# Confusion matrix hard-coded from the QDA run above.
datos = (([276, 39],[11, 308]))
df = pd.DataFrame(datos, columns = ["Masculino", "Femenino"])
MC = df
indices_personalizados(MC)
# ### Construya un cuadro comparativo con respecto a la tarea anterior y las tareas del curso anterior. ¿Cual metodo es mejor?
# In[31]:
# Comparison table across all supervised models trained so far; the values
# were recorded from previous homework runs and are printed verbatim.
cadena = "Cuadro Comparativo entre Modelos Supervisados"
print(cadena.center(35," "))
print(" ========================================")
print(" Modelo K Vecinos Mas Cercanos:\n**************************")
print("Precisión Global: 0.9479495268138801\nError Global: 0.05205047318611988\nPrecision Positiva (PP): 0.9779874213836478\nPrecision Negativa (PN): 0.9177215189873418\nFalsos Positivos (PFP): 0.08227848101265822\nFalsos Negativos (PFN): 0.0220125786163522\nAsertividad Positiva (AP): 0.9228486646884273\nAsertividad Negativa (AN): 0.9764309764309764\n**************************")
print(" Arbol de decision:\n**************************")
print("Precisión Global: 0.9684542586750788\nError Global: 0.03154574132492116\nPrecision Positiva (PP): 0.9688473520249221\nPrecision Negativa (PN): 0.9680511182108626\nFalsos Positivos (PFP): 0.03194888178913738\nFalsos Negativos (PFN): 0.03115264797507788\nAsertividad Positiva (AP): 0.9688473520249221\nAsertividad Negativa (AN): 0.9680511182108626\n**************************")
print(" Arboles Aleatorios:\n**************************")
print("Precisión Global: 0.9889589905362776\nError Global: 0.01104100946372244\nPrecision Positiva (PP): 0.99375\nPrecision Negativa (PN): 0.9840764331210191\nFalsos Positivos (PFP): 0.01592356687898089\nFalsos Negativos (PFN): 0.00625\nAsertividad Positiva (AP): 0.9845201238390093\nAsertividad Negativa (AN): 0.9935691318327974\n**************************")
print(" Modelo ADA Boosting:\n**************************")
print("Precisión Global: 0.9810725552050473,\nError Global: 0.018927444794952675\nPrecision Positiva (PP): 0.990625\nPrecision Negativa (PN): 0.9713375796178344\nFalsos Positivos (PFP): 0.028662420382165606\nFalsos Negativos (PFN): 0.009375\nAsertividad Positiva (AP): 0.9723926380368099\nAsertividad Negativa (AN): 0.9902597402597403\n**************************")
print(" Modelo XG Boosting:\n**************************")
print("Precisión Global: 0.9889589905362776,\nError Global: 0.01104100946372244\nPrecision Positiva (PP): 0.99375\nPrecision Negativa (PN): 0.9840764331210191\nFalsos Positivos (PFP): 0.01592356687898089\nFalsos Negativos (PFN): 0.00625\nAsertividad Positiva (AP): 0.9845201238390093\nAsertividad Negativa (AN): 0.9935691318327974\n**************************")
print(" Modelo Maquinas de Soporte Vectorial:\n**************************")
print("Precisión Global: 0.9826498422712934\nError Global: 0.017350157728706628\nPrecision Positiva (PP): 0.9821958456973294\nPrecision Negativa (PN): 0.9831649831649831\nFalsos Positivos (PFP): 0.016835016835016835\nFalsos Negativos (PFN): 0.017804154302670624\nAsertividad Positiva (AP): 0.9851190476190477\nAsertividad Negativa (AN): 0.9798657718120806\n**************************")
print(" Modelo Redes Neuronales - MLPClassifier\n**************************")
print("Precisión Global: 0.9842271293375394\nError Global: 0.01577287066246058\nPrecision Positiva (PP): 0.9797101449275363\nPrecision Negativa (PN): 0.9896193771626297\nFalsos Positivos (PFP): 0.010380622837370242\nFalsos Negativos (PFN): 0.020289855072463767\nAsertividad Positiva (AP): 0.9912023460410557\nAsertividad Negativa (AN): 0.9761092150170648\n**************************")
print(" Modelo Redes Neuronales - Keras - TensorFlow\n**************************")
print("Precisión Global: 0.9794952681388013\nError Global: 0.02050473186119872\nPrecision Positiva (PP): 0.975975975975976\nPrecision Negativa (PN): 0.9833887043189369\nFalsos Positivos (PFP): 0.016611295681063124\nFalsos Negativos (PFN): 0.024024024024024024\nAsertividad Positiva (AP): 0.9848484848484849\nAsertividad Negativa (AN): 0.9736842105263158\n**************************")
print(" Modelo Metodo de Bayes\n**************************")
print("Precisión Global: 0.8817034700315457\nError Global: 0.1182965299684543\nPrecision Positiva (PP): 0.8928571428571429\nPrecision Negativa (PN): 0.8711656441717791\nFalsos Positivos (PFP): 0.12883435582822086\nFalsos Negativos (PFN): 0.10714285714285714\nAsertividad Positiva (AP): 0.8675078864353313\nAsertividad Negativa (AN): 0.8958990536277602\n**************************")
print(" Modelo Metodo de Discriminante Lineal\n**************************")
print("Precisión Global: 0.9747634069400631\nError Global: 0.025236593059936863\nPrecision Positiva (PP): 0.9837662337662337\nPrecision Negativa (PN): 0.9662576687116564\nFalsos Positivos (PFP): 0.03374233128834356\nFalsos Negativos (PFN): 0.016233766233766232\nAsertividad Positiva (AP): 0.964968152866242\n Asertividad Negativa (AN): 0.984375\n**************************")
print(" Modelo Metodo de Discriminante Cuadratico\n**************************")
print("Precisión Global: 0.9211356466876972\nError Global: 0.07886435331230279\nPrecision Positiva (PP): 0.9655172413793104\nPrecision Negativa (PN): 0.8761904761904762\nFalsos Positivos (PFP): 0.12380952380952381\nFalsos Negativos (PFN): 0.034482758620689655\nAsertividad Positiva (AP): 0.8876080691642652\nAsertividad Negativa (AN): 0.9616724738675958\n**************************")
print(" ========================================")
# #### Analisis
#
# * Haciendo la comparacion con todos los modelos que se han visto hasta el momento y con respecto al cuadro comparativo se puede ver que el Modelo que da los mejores resultados es el de Arboles Aleatorios junto con el XG Boosting, ya que ambos tienen la precision global mas alta de casi un 99%, ademas que la Asertividad Positiva es de mas de un 98% mientras que la negativa es de mas de un 99% lo que los hace modelos bastante confiables. Sin embargo, para este caso el Modelo de Discriminante Lineal da muy buenos resultados, junto con el Discriminante cuadratico, no asi el de Bayes.
# ### Ejercicio 2:
# ### Esta pregunta utiliza los datos (tumores.csv). Se trata de un conjunto de datos de caracteristicas del tumor cerebral que incluye cinco variables de primer orden y ocho de textura y cuatro parametros de evaluacion de la calidad con el nivel objetivo. La variables son: Media, Varianza, Desviacion estandar, Asimetria, Kurtosis, Contraste, Energia, ASM (segundo momento angular), Entropıa, Homogeneidad, Disimilitud, Correlacion, Grosor, PSNR (Pico de la relacion senal-ruido), SSIM (Indice de Similitud Estructurada), MSE (Mean Square Error), DC (Coeficiente de Dados) y la variable a predecir tipo (1 = Tumor, 0 = No-Tumor).
# ### 1. Usando Bayes, Discriminante Lineal y Discriminante Cuadratico genere modelospredictivos para la tabla tumores.csv usando 70 % de los datos para tabla aprendizaje y un 30 % para la tabla testing.
# In[32]:
# Load the brain-tumor dataset.
tumores = pd.read_csv("tumores.csv", delimiter = ',', decimal = '.')
tumores.head()
# In[33]:
tumores.info()
# In[34]:
# Convert the object column to categorical.
tumores['imagen'] = tumores['imagen'].astype('category')
print(tumores.info())
print(tumores.head())
# Recode the categories as numbers.
tumores["imagen"] = tumores["imagen"].cat.codes
print(tumores.info())
print(tumores.head())
# Convert the integer codes back to a categorical dtype.
tumores['imagen'] = tumores['imagen'].astype('category')
print(tumores.info())
print(tumores.head())
# In[35]:
tumores.tail() # the categorical variable has been converted to numbers
# #### Distribution of the variable to predict.
# In[37]:
distribucion_variable_predecir(tumores,"tipo") # highly imbalanced problem
# In[38]:
# Center and scale the table since the variables use different scales.
tumores_1 = tumores.iloc[:,0:17]
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(tumores_1)
tumores_1.loc[:,:] = scaled_values
tumores_1.head()
# Variables on different scales have been rescaled.
# #### Keep the predictor variables in X (the categorical column is dropped).
# In[39]:
X = tumores_1.iloc[:,0:17]
X.head()
# #### Keep the variable to predict in y.
# In[40]:
y = tumores.iloc[:,17:18]
y.head()
# #### Split: 70% of the data for training and 30% for testing.
# In[41]:
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)
# In[70]:
# === Naive Bayes (tumores) ===
cadena = "-- Utilizando Metodo de Bayes --"
print(cadena.center(120," "))
# In[43]:
# Default parameters are used.
bayes = GaussianNB()
print(bayes)
# #### Train the model.
# In[44]:
bayes.fit(X_train, y_train.iloc[:,0].values)
# #### Print the testing predictions.
# In[45]:
print("Las predicciones en Testing son: {}".format(bayes.predict(X_test)))
# #### Quality indices of the model.
# In[46]:
prediccion = bayes.predict(X_test)
MC = confusion_matrix(y_test, prediccion)
indices = indices_general(MC,list(np.unique(y)))
for k in indices:
    print("\n%s:\n%s"%(k,str(indices[k])))
# In[71]:
# === Linear Discriminant Analysis (tumores) ===
cadena = "-- Utilizando Metodo de Discriminante Lineal --"
print(cadena.center(120, " "))
# In[48]:
# Default parameters are used.
lda = LinearDiscriminantAnalysis(solver = 'lsqr', shrinkage = 'auto')
print(lda)
# #### Train the model.
# In[50]:
lda.fit(X_train, y_train.iloc[:,0].values)
# #### Print the predictions.
# In[52]:
print("Las predicciones en Testing son: {}".format(lda.predict(X_test)))
# #### Quality indices of the model.
# In[53]:
prediccion = lda.predict(X_test)
MC = confusion_matrix(y_test, prediccion)
indices = indices_general(MC,list(np.unique(y)))
for k in indices:
    print("\n%s:\n%s"%(k,str(indices[k])))
# In[72]:
# === Quadratic Discriminant Analysis (tumores) ===
cadena = "-- Utilizando Metodo de Discriminante Cuadratico --"
print(cadena.center(120, " "))
# In[55]:
# Default parameters are used.
qda = QuadraticDiscriminantAnalysis()
print(qda)
# #### Train the model.
# In[56]:
qda.fit(X_train, y_train.iloc[:,0].values)
# #### Print the predictions.
# In[57]:
print("Las predicciones en Testing son: {}".format(qda.predict(X_test)))
# #### Quality indices of the model.
# In[58]:
prediccion = qda.predict(X_test)
MC = confusion_matrix(y_test, prediccion)
indices = indices_general(MC,list(np.unique(y)))
for k in indices:
    print("\n%s:\n%s"%(k,str(indices[k])))
# ### 2. Global precision / confusion matrix comparison against earlier runs.
# In[60]:
cadena = "Cuadro Comparativo entre Calidades de los Modelos Supervisados"
print(cadena.center(100," "))
print(" ========================================")
print(" Modelo K Vecinos Mas Cercanos:\n**************************")
print("Precisión Global: 0.9479495268138801\nError Global: 0.05205047318611988\n**************************")
print(" Arbol de decision:\n**************************")
print("Precisión Global: 0.9684542586750788\nError Global: 0.03154574132492116\n**************************")
print(" Arboles Aleatorios:\n**************************")
print("Precisión Global: 0.9889589905362776\nError Global: 0.01104100946372244\n**************************")
print(" Modelo ADA Boosting:\n**************************")
print("Precisión Global: 0.9810725552050473,\nError Global: 0.018927444794952675\n**************************")
print(" Modelo XG Boosting:\n**************************")
print("Precisión Global: 0.9889589905362776,\nError Global: 0.01104100946372244\n**************************")
print(" Modelo Maquinas de Soporte Vectorial:\n**************************")
print("Precisión Global: 0.9826498422712934\nError Global: 0.017350157728706628\n**************************")
print(" Modelo utilizando paquete MLPClassifier\n**************************")
print("Precisión Global: 0.9686684073107049\nError Global: 0.031331592689295085\n**************************")
print(" Modelo Redes Neuronales - TensorFlow y Keras\n**************************")
print("Precisión Global: 0.9712793733681462\nError Global: 0.02872062663185382\n**************************")
print(" Modelo Metodo de Bayes\n**************************")
print("Precisión Global: 0.9817232375979112\nError Global:0.018276762402088753\n**************************")
print(" Modelo Metodo de Discriminante Lineal\n**************************")
print("Precisión Global: 0.9765013054830287\nError Global: 0.023498694516971286\n**************************")
print(" Modelo Metodo de Discriminante Cuadratico\n**************************")
# Bug fix: the string below previously ended in "\nn***" (stray 'n' after the
# newline escape), garbling the printed separator line.
print("Precisión Global: 0.9869451697127938\nError Global: 0.01305483028720622\n**************************")
print(" ========================================")
# #### Analisis
#
# * De acuerdo a las predicciones obtenidas con el Metodo de Bayes, Discriminante Lineal y Cuadratico, se puede ver como las tres dan bastante bien, no obstante, la que da mejor es la del Discriminante Cuadratico, ya que la Precision Global es de un 98.69%. La precision por categoria del "no posee cancer" es de un 100%, mientras que la del "posee cancer" es de un 98.62%. Seguida del Metodo de Bayes con valores muy similares, la del Discriminante Lineal pese a que la prediccion no es mala, la prediccion del no es de un 75% mucho menor a las de los dos otros metodos.
#
# * Comparando los resultados obtenidos con las redes neuronales con los de las tareas anteriores se puede ver como se mantienen los mejores resultados usando los Arboles Aleatorios y el XG Boosting a nivel de Precision Global (es casi de un 99%) mientras que el error es poco mas de un 1%.
# ### Ejercicio 3:
# ### [Filmina 19 de Bayes] Supongamos que se tiene una nueva fila o registro de la base de datos t = (Pedro, M, 4, ?), prediga (a mano) si Pedro corresponde a la clase pequeno, mediano o alto.
# In[61]:
# Hand-computed answer for exercise 3 (slide 19, Bayes), shown as an image.
from IPython.display import Image
Image(filename="/Users/heinerleivagmail.com/probabilidad.png")
# ### Exercise 4:
# #### Hand-computed prediction for record number 101 (slide 24, Bayes).
# In[62]:
from IPython.display import Image
Image(filename="/Users/heinerleivagmail.com/evento.png")
# In[73]:
cadena = "=============== FIN =============== "
print(cadena.center(120, " "))
# In[ ]:
|
#!/usr/bin/env python3
from app.main import app |
from setuptools import setup

# Packaging metadata for the py-certgenerator distribution.
setup(
    name='py-certgenerator',
    # NOTE(review): importable package directories cannot contain a hyphen —
    # confirm a 'py-certgenerator' directory actually exists for setuptools
    # to find, or whether the package name should use an underscore.
    version='1',
    packages=['py-certgenerator'],
    url='https://the-cyber-squad.com',
    license='MIT ',  # NOTE(review): trailing space looks unintentional — confirm
    author='will',
    author_email='will@theapiguys.com',
    description='Generates SSL x509 certificates'
)
|
from urldl import download
from pycallisto import fitsfile

# Base URL of the e-Callisto solar radio data archive.
callisto_archives = 'http://soleil80.cs.technik.fhnw.ch/' \
                    'solarradio/data/2002-20yy_Callisto/'

# FITS files covering the M1.6-class flare of 2011-02-16 (BLEN7M instrument).
filelist = [
    "BLEN7M_20110216_133009_24.fit.gz", "BLEN7M_20110216_134510_24.fit.gz",
    "BLEN7M_20110216_140011_24.fit.gz", "BLEN7M_20110216_141512_24.fit.gz",
    "BLEN7M_20110216_143014_24.fit.gz", "BLEN7M_20110216_144515_24.fit.gz",
    "BLEN7M_20110216_150016_24.fit.gz", "BLEN7M_20110216_151517_24.fit.gz",
    "BLEN7M_20110216_153019_24.fit.gz"]

for filename in filelist:
    # Archive layout is <base>/<year>/<month>/<day>/<filename>; the date is
    # encoded in the second underscore-separated field of the file name.
    fits_year = filename.split('_')[1][:4]
    fits_month = filename.split('_')[1][4:6]
    fits_day = filename.split('_')[1][-2:]
    # Bug fix: the URL previously ended in a literal "(unknown)" placeholder
    # instead of the file name, so no file could ever be fetched.
    fits_url = f'{callisto_archives}/{fits_year}/{fits_month}/' \
               f'{fits_day}/{filename}'
    download(fits_url)

title = "Flare classe M1.6, 16/02/2011 (BLEN7M)"
plot_filename = "for_publication"
fitsfile.ECallistoFitsFile.plot_fits_files_list(filelist,
                                                title=title,
                                                plot_filename=plot_filename,
                                                show=True)
|
from . import parsing
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
import numpy as np
# Bokeh imports
import bokeh.document as document
from bokeh.core.properties import Instance, Int, Nullable
from bokeh.document.events import (
ColumnDataChangedEvent,
ColumnsPatchedEvent,
ColumnsStreamedEvent,
ModelChangedEvent,
RootAddedEvent,
RootRemovedEvent,
)
from bokeh.model import Model
from bokeh.models import ColumnDataSource
from bokeh.protocol import Protocol
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
# Shared Protocol instance used by all tests below to build PATCH-DOC messages.
proto = Protocol()
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class AnotherModelInTestPatchDoc(Model):
    """Minimal Bokeh model used as the child model in the patch-doc tests."""
    # Integer property with default 1.
    bar = Int(1)
class SomeModelInTestPatchDoc(Model):
    """Minimal Bokeh model used as document roots in the patch-doc tests."""
    # Integer property with default 2.
    foo = Int(2)
    # Optional reference to another model; None unless set explicitly.
    child = Nullable(Instance(Model))
class TestPatchDocument:
    """Tests for creating and applying PATCH-DOC protocol messages."""
    def _sample_doc(self):
        # Build a document with two SomeModelInTestPatchDoc roots; only the
        # first root has a (non-None) child.
        doc = document.Document()
        another = AnotherModelInTestPatchDoc()
        doc.add_root(SomeModelInTestPatchDoc(child=another))
        doc.add_root(SomeModelInTestPatchDoc())
        return doc
    def test_create_no_events(self) -> None:
        """Creating a PATCH-DOC with an empty event list is rejected."""
        with pytest.raises(ValueError):
            proto.create("PATCH-DOC", [])
    def test_create_multiple_docs(self) -> None:
        """Events originating from two different documents cannot share one message."""
        sample1 = self._sample_doc()
        obj1 = next(iter(sample1.roots))
        event1 = ModelChangedEvent(sample1, obj1, 'foo', 42)
        sample2 = self._sample_doc()
        obj2 = next(iter(sample2.roots))
        event2 = ModelChangedEvent(sample2, obj2, 'foo', 42)
        with pytest.raises(ValueError):
            proto.create("PATCH-DOC", [event1, event2])
    def test_create_model_changed(self) -> None:
        """A single ModelChangedEvent serializes without error."""
        sample = self._sample_doc()
        obj = next(iter(sample.roots))
        event = ModelChangedEvent(sample, obj, 'foo', 42)
        proto.create("PATCH-DOC", [event])
    def test_create_then_apply_model_changed(self) -> None:
        """A serialized change applied to a JSON round-tripped copy mutates exactly one root."""
        sample = self._sample_doc()
        foos = []
        for r in sample.roots:
            foos.append(r.foo)
        assert foos == [ 2, 2 ]
        obj = next(iter(sample.roots))
        assert obj.foo == 2
        event = ModelChangedEvent(sample, obj, 'foo', 42)
        msg = proto.create("PATCH-DOC", [event])
        # Apply the patch to a fresh copy rebuilt from JSON, not the original.
        copy = document.Document.from_json(sample.to_json())
        msg.apply_to_document(copy)
        foos = []
        for r in copy.roots:
            foos.append(r.foo)
        foos.sort()
        # Exactly one of the two roots received the new value 42.
        assert foos == [ 2, 42 ]
    def test_patch_event_contains_setter(self) -> None:
        """Every event kind forwards the `setter` argument to document callbacks."""
        sample = self._sample_doc()
        root = None
        other_root = None
        # Identify the root that has a child vs. the one that does not.
        for r in sample.roots:
            if r.child is not None:
                root = r
            else:
                other_root = r
        assert root is not None
        assert other_root is not None
        new_child = AnotherModelInTestPatchDoc(bar=56)
        cds = ColumnDataSource(data={'a': np.array([0., 1., 2.])})
        sample.add_root(cds)
        # Sentinel object standing in for a session; the callback below checks
        # that each applied event carries it as `event.setter`.
        mock_session = object()
        def sample_document_callback_assert(event):
            """Asserts that setter is correctly set on event"""
            assert event.setter is mock_session
        sample.on_change(sample_document_callback_assert)
        # Model property changed
        event = ModelChangedEvent(sample, root, 'child', new_child)
        msg = proto.create("PATCH-DOC", [event])
        msg.apply_to_document(sample, mock_session)
        assert msg.buffers == []
        # RootAdded
        event2 = RootAddedEvent(sample, root)
        msg2 = proto.create("PATCH-DOC", [event2])
        msg2.apply_to_document(sample, mock_session)
        assert msg2.buffers == []
        # RootRemoved
        event3 = RootRemovedEvent(sample, root)
        msg3 = proto.create("PATCH-DOC", [event3])
        msg3.apply_to_document(sample, mock_session)
        assert msg3.buffers == []
        # ColumnsStreamed
        event4 = ColumnsStreamedEvent(sample, cds, "data", {"a": [3]}, None, mock_session)
        msg4 = proto.create("PATCH-DOC", [event4])
        msg4.apply_to_document(sample, mock_session)
        assert msg4.buffers == []
        # ColumnsPatched
        event5 = ColumnsPatchedEvent(sample, cds, "data", {"a": [(0, 11)]})
        msg5 = proto.create("PATCH-DOC", [event5])
        msg5.apply_to_document(sample, mock_session)
        assert msg5.buffers == []
        # ColumnDataChanged -- the only event kind here that attaches a buffer.
        event7 = ColumnDataChangedEvent(sample, cds, "data")
        msg7 = proto.create("PATCH-DOC", [event7])
        msg7.apply_to_document(sample, mock_session)
        assert len(msg7.buffers) == 1
        # reports CDS buffer *as it is* Normally events called by setter and
        # value in local object would have been already mutated.
        [buf] = msg7.buffers
        assert bytes(buf.data) == np.array([11., 1., 2., 3]).tobytes()
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
from django.utils.translation import ugettext_lazy as _
# NOTE(review): ugettext_lazy was deprecated in Django 3.0 and removed in 4.0;
# migrate to gettext_lazy when upgrading -- confirm the project's Django version.

# Public API of this constants module.
__all__ = (
    'MERCHANT_TYPES',
    'DEFAULT_MERCHANT_TYPE',
    'DEFAULT_PARTS_COUNT',
    'CREATED',
    'SUCCESS',
    'CANCELED',
    'FAIL',
    'CLIENT_WAIT',
    'OTP_WAITING',
    'PP_CREATION',
    'LOCKED',
    'STATE_CHOICES',
    'LOG_CHOICES'
)
# (code, display) pairs for the supported merchant types.
MERCHANT_TYPES = (
    ('II', 'II'),
    ('PP', 'PP'),
    ('PB', 'PB'),
    ('IA', 'IA')
)
DEFAULT_MERCHANT_TYPE = 'II'
DEFAULT_PARTS_COUNT = 2
# Payment lifecycle state codes.
CREATED = 'CREATED'
SUCCESS = 'SUCCESS'
CANCELED = 'CANCELED'
FAIL = 'FAIL'
CLIENT_WAIT = 'CLIENT_WAIT'
OTP_WAITING = 'OTP_WAITING'
PP_CREATION = 'PP_CREATION'
LOCKED = 'LOCKED'
# (value, lazily translated label) pairs -- presumably used as choices for a
# state field on a payment model; not referenced further in this module.
STATE_CHOICES = (
    (CREATED, _('Created')),
    (SUCCESS, _('Success')),
    (FAIL, _('Fail')),
    (CANCELED, _('Canceled')),
    (CLIENT_WAIT, _('Client wait')),
    (OTP_WAITING, _('OTP waiting')),
    (PP_CREATION, _('PP creation')),
    (LOCKED, _('Locked'))
)
# Sources of payment log entries.
LOG_CHOICES = (
    ('payment_create', _('Creation of payment')),
    ('callback', _('Payment callback'))
)
|
import numpy as np
from PIL import Image
from radiomics import featureextractor
import sys
import nipy
import SimpleITK as sitk
# Hard-coded, Windows-specific path to the pyradiomics example parameter file.
radiomics_parameter_path = r'C:\projects\pyradiomics\examples\exampleSettings\Params.yaml'
# NOTE(review): newer pyradiomics releases renamed this class to
# RadiomicsFeatureExtractor (no 's'); confirm against the installed version.
extractor = featureextractor.RadiomicsFeaturesExtractor(radiomics_parameter_path)
# Enable only the texture (glcm/glrlm/glszm/gldm/ngtdm), first-order and shape
# feature classes.
extractor.disableAllFeatures()
extractor.enableFeatureClassByName('glcm')
extractor.enableFeatureClassByName('glrlm')
extractor.enableFeatureClassByName('glszm')
extractor.enableFeatureClassByName('gldm')
extractor.enableFeatureClassByName('ngtdm')
extractor.enableFeatureClassByName('firstorder')
extractor.enableFeatureClassByName('shape')
def test_radiomics(image, roi, width, height, slice):
    """Extract the enabled radiomics features from an image/ROI pair.

    ``image`` and ``roi`` are array-likes converted to SimpleITK images;
    ``width``, ``height`` and ``slice`` are accepted for caller compatibility
    but unused here.  Returns ``[1, feature_values, feature_count]``.
    """
    print('Invoking Method: [test_radiomics].')
    image1 = sitk.GetImageFromArray(np.array(image))
    image2 = sitk.GetImageFromArray(np.array(roi))
    features = extractor.execute(image1, image2)
    # Drop the general-info metadata entries so only feature values remain.
    # pop(key, None) keeps this safe on pyradiomics versions that do not emit
    # these keys (the original chained .pop(key) calls raised KeyError there).
    for key in (
        'general_info_BoundingBox',
        'general_info_EnabledImageTypes',
        'general_info_GeneralSettings',
        'general_info_ImageHash',
        'general_info_ImageSpacing',
        'general_info_MaskHash',
        'general_info_Version',
    ):
        features.pop(key, None)
    f_value = list(features.values())
    return [1, f_value, len(f_value)]
|
def add_variable_to_context(request):
    """Django context processor exposing the 'uuid' cookie to templates.

    Returns ``{'uuid': <cookie value>}``, falling back to the placeholder
    ``'000'`` when the cookie is absent.
    """
    # Single dict lookup with a default replaces the original
    # check-then-fetch double lookup of request.COOKIES.
    return {'uuid': request.COOKIES.get('uuid', '000')}
|
from django.conf.urls import url
from .views import (
ExpenseCreateView,
ExpenseListView,
ExpenseUpdateView,
ExpenseDeleteView,
)
# URL routes for the expense CRUD views.
# NOTE(review): django.conf.urls.url() was deprecated in Django 3.1 and removed
# in 4.0; migrate to django.urls.re_path (or path) when upgrading -- confirm
# the project's Django version.
urlpatterns = [
    url(r'^$', ExpenseListView.as_view(), name='list'),
    url(r'^create/$', ExpenseCreateView.as_view(), name='create'),
    # NOTE(review): route name 'detail' on the update view looks inconsistent
    # with its purpose -- verify templates/reverse() callers before renaming.
    url(r'^update/(?P<pk>\d+)/$', ExpenseUpdateView.as_view(), name='detail'),
    url(r'^delete/(?P<pk>\d+)/$', ExpenseDeleteView.as_view(), name='delete'),
]
|
import subprocess
import sys
import os
def run_web_app(
    url: str, width: int = 800, height: int = 600, min_size: tuple = (800, 600)
):
    """Open *url* in a native pywebview window via a child Python process.

    Blocks until the window is closed and returns the child's exit code.
    """
    # Build the inline script executed with `python -c`.
    cmd_py = [
        "import webview",
        f"window = webview.create_window("
        f"'TINerator', url='{url}', width={width}, "
        f"height={height}, min_size={min_size})",
        "window.closing += window.destroy",
        "webview.start()",
    ]
    # TODO: on quit, this should kill the server and return
    # so, use Popen with wait() while server.isAlive
    # BUG FIX: pass argv as a list with shell=False instead of joining into a
    # manually quoted shell string -- removes the fragile quoting and the
    # shell-injection surface.  NOTE(review): `url` is still interpolated into
    # the child script, so callers must not pass untrusted values.
    exit_code = subprocess.call(
        [sys.executable, "-c", ";".join(cmd_py)], cwd=os.getcwd()
    )
    return exit_code
|
from django.db import models
# Create your models here.
from django.db import models
# (code, display) choices for Report.country.
COUNTRIES = (
    ('uk', 'UK'),
    ('us', 'USA'),
    ('in', 'India'),
)
# Sentiment label codes -- presumably intended as choices for Report.goodness,
# though no field in this module applies them; verify against callers.
LABELS = (
    ('0', 'NEGATIVE'),
    ('1', 'POSITIVE'),
    ('2', 'NEUTRAL'),
)
class Report(models.Model):
    """A per-country sentiment report record."""
    # Two-character label code; LABELS above is not applied as choices=, so
    # any 2-char string is accepted at the model level.
    goodness = models.CharField(max_length=2)
    country = models.CharField(max_length=2, choices=COUNTRIES)
    # NOTE(review): blank=True without null=True on a DateTimeField lets forms
    # skip the value but the database column still rejects NULL -- confirm
    # whether null=True was intended.
    pub_date = models.DateTimeField('date published', blank=True)
    def __str__(self):
        # Display the report by its sentiment code.
        return self.goodness
|
from moceansdk.modules.command.template import wa_template_basic
class WaTextTemplate(wa_template_basic.WaTemplateBasic):
    """WhatsApp template carrying a plain text message (moceansdk)."""

    def type(self):
        # Template-type discriminator string.
        # BUG FIX: removed a stray trailing '|' that made this line a syntax error.
        return 'text'
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 8 14:27:49 2019
@author: positiveoutlier
A digital root is the recursive sum of all the digits in a number.
Given n, take the sum of the digits of n. If the resulting value has more than
one digit, continue reducing in this way until a single-digit number is
produced. The digital root can only be calculated for natural numbers.
Expected input: integer
Expected output: integer
"""
def digitalroot(n):
    """Return the digital root of the natural number *n*.

    The digital root is the single digit obtained by repeatedly summing the
    decimal digits of *n* (e.g. 132189 -> 24 -> 6).
    """
    # sum() of ints is already an int -- the original's extra int() cast and
    # intermediate list were redundant.
    result = sum(int(digit) for digit in str(n))
    # Recurse until a single digit remains.
    return digitalroot(result) if result > 9 else result


if __name__ == "__main__":
    # Demo call; previously executed unconditionally at import time with the
    # result silently discarded.
    print(digitalroot(132189))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.