'''
04 - Sorting by index values:
Previously, you changed the order of the rows in a DataFrame by calling .sort_values().
It's also useful to be able to sort by elements in the index. For this, you need to
use .sort_index().
pandas is loaded as pd. temperatures_ind has a multi-level index of country and city,
and is available.
Instructions:
- Sort temperatures_ind by the index values.
- Sort temperatures_ind by the index values at the "city" level.
- Sort temperatures_ind by ascending country then descending city.
-------------------------------------------
temperatures_ind
                             date  avg_temp_c
country       city
Côte D'Ivoire Abidjan  2000-01-01      27.293
              Abidjan  2000-02-01      27.685
              Abidjan  2000-03-01      29.061
              Abidjan  2000-04-01      28.162
              Abidjan  2000-05-01      27.547
---------------------------------------------
'''
# Sort temperatures_ind by index values
print(temperatures_ind.sort_index())
# Sort temperatures_ind by index values at the city level
print(temperatures_ind.sort_index(level="city"))
# Sort temperatures_ind by ascending country then descending city
print(temperatures_ind.sort_index(
level=["country", "city"], ascending=[True, False]))
|
import numpy as np
import torch
import torchvision
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
CPKT = 'checkpoints/'
PLOTS = 'plots/'
ROOT = './data'
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
D_STEPS_PER_ITER = 5 # Number of discriminator updates per iteration
G_STEPS_PER_ITER = 5 # Number of Generator updates per iteration
PLOT_NUM = 9 # Number of images to plot
LR_G = 1e-4 # Learning rate for generator
LR_D = 4e-4 # Learning rate for discriminator
LR_C = 3e-4 # Learning rate for classifier
BATCH_SIZE = 256 # Batch size
LATENT_DIM = 100 # Latent dimension of noise vector
EPOCHS = 100 # Epochs for GANs
C_EPOCHS = 10 # Epochs for classifier
G_CHANNELS = 256 # Generator input channels for conv layer after linear layer
DROPOUT = 0.3 # Dropout rate for discriminator
PRINT_FREQ = 100 # Print after this number of iterations
INTERPOLATE_STEPS = 10 # Number of steps to interpolate between 2 points while traversing in latent dimension
NROW = 10 # Number of images to print in row
NUM_SAMPLES = 20 # Number of images to generate for classifier
######### FLAGS ##########
G_SPECTRAL_NORM = True # Spectral norm in generator
D_SPECTRAL_NORM = True # Spectral norm in discriminator
BATCH_NORM = True # Batch Norm flag
BOOL_PLOT = True # Plot images while training
TRAIN_CLASSIFIER = True # Trains the classifier model [Better to train GANs first then train classifier]
PRED_ON_GAN_OUTPUT = True # Make predictions on GAN-generated images using the classifier model
PLOT_DISTRIBUTION = False # Plot distribution of generated images over classes
PLOT_20 = True # Plot 20 random images from generator with predictions
SELF_ATTENTION = True # Apply Self Attention in Generator and Discriminator before Last conv. layer
GET_NOISE = lambda x : torch.rand(size = (x, LATENT_DIM), device = DEVICE)
TRANSFORM = transforms.Compose([
transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))
])
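# --- Minimal usage sketch (illustrative; not part of the original project) ---
# Assuming an MNIST-like torchvision dataset stored under ROOT (the actual dataset
# and the Generator/Discriminator models are defined elsewhere), the constants and
# helpers above would typically be wired together like this:
if __name__ == "__main__":
    train_set = datasets.MNIST(ROOT, train=True, download=True, transform=TRANSFORM)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)
    noise = GET_NOISE(BATCH_SIZE)  # shape (BATCH_SIZE, LATENT_DIM), already on DEVICE
    print(noise.shape, len(train_loader))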
|
from flask_login.utils import login_required
from my_project import app
from flask import render_template, redirect, url_for, flash, request
from my_project.forms import RegisterForm, LoginForm
from my_project.models import User
from my_project import db
from flask_login import login_user, logout_user, current_user
# register page  # TODO: only accessible when not logged in
@app.route("/register", methods=["GET", "POST"])
def register_page():
status_code = 200
if current_user.is_authenticated:
return redirect(url_for("landing_page"))
form = RegisterForm() # get the register form from import at top (Forms.py)
if request.method == "POST":
if form.validate_on_submit(): # validators
user_to_create = User( # set user_to_create to form data
username=form.username.data,
email_address=form.email_address.data,
password=form.password1.data,
)
db.session.add(
user_to_create
) # add and commit user_to_create to the Database, User
db.session.commit()
login_user(user_to_create) # log the user_to_create in
            flash(
                f"Account created successfully and logged in as {user_to_create.username}",
                category="success",
            )
return redirect(url_for("landing_page"))
if (
form.errors != {}
    ):  # check for validation errors; flash each one if form.errors is not empty
for err_msg in form.errors.values():
flash(f"Error with creating your account: {err_msg[0]}", category="danger")
status_code = 400
return render_template("register.html", page_title="Register", form=form), status_code
# login page
@app.route("/login", methods=["GET", "POST"])
def login_page():
form = LoginForm() # get the LoginForm from import at top (Forms.py)
if form.validate_on_submit():
attempted_user = User.query.filter_by(
username=form.username.data
).first() # query the attempted login user by the username (the form input)
# check if the username and the password
if attempted_user and attempted_user.check_password_correction(
attempted_password=form.password.data
):
login_user(attempted_user) # login success
            flash(
                f"Successfully logged in as: {attempted_user.username}",
                category="success",
            )
return redirect(url_for("landing_page"))
else: # login failed
flash(
"Username and password did not match! Please try again",
category="danger",
)
return render_template("login.html", form=form, page_title="Login")
# logout page
@app.route("/logout") # logout the current user and return the landing page #default method "GET"
@login_required
def logout_page():
logout_user()
    flash("Successfully logged out!", category="info")
return redirect(url_for("landing_page"))
|
# M6 #4
def add_html_tags(tag, words):
'''
Given a tag and words to enclose within the tag
return the result string
tag == the tag to enclose with
words == the words to enclose
'''
return '<' + tag + '>' + words + '</' + tag + '>'
print(add_html_tags('p', 'This is my first page.'))
print(add_html_tags('h2', 'A secondary header.'))
print(add_html_tags('p', 'Some more text.'))
|
# Copyright 2022 Maximilien Le Clei.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
import torch
def compute_padding(d_input):
    # Build a per-dimension (left, right) padding tuple in torch.nn.functional.pad
    # order (last dimension first, first dimension skipped): size-1 dims get (1, 1),
    # size-2 dims get (0, 1), larger dims get (0, 0).
    padding = ()
for d in d_input[-1:0:-1]:
if d == 1:
padding += (1,1)
elif d == 2:
padding += (0,1)
else:
padding += (0,0)
return padding
def neg(tup):
return tuple(-x for x in tup)
def avg_pool(x, d):
    # Downsample a (1, C, H, W) tensor by a factor of d using OpenCV's
    # INTER_AREA interpolation (an average-pooling-like resize).
    _, _, h, w = x.shape
    x = x.numpy()
    x = x[0]
    x = np.transpose(x, (1, 2, 0))
    # cv2.resize expects the target size as (width, height)
    x = cv2.resize(x, (w//d, h//d), interpolation=cv2.INTER_AREA)
    if x.ndim == 2:
        x = x[:, :, None]
    x = np.transpose(x, (2, 0, 1))
    x = x[None, :, :, :]
    x = torch.Tensor(x)
    return x
def torch_cat(x, i):
    # Concatenate a sequence of tensors along dimension i (via NumPy).
    x = [x_i.numpy() for x_i in x]
    return torch.Tensor(np.concatenate(x, i))
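# Illustrative sketch (not from the original file): avg_pool expects a 4-D tensor
# of shape (1, C, H, W) and shrinks the spatial dimensions by a factor of d.
if __name__ == "__main__":
    x = torch.rand(1, 3, 8, 8)
    print(avg_pool(x, 2).shape)  # torch.Size([1, 3, 4, 4])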
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Script to run time-lagged ensembles."""
from improver import cli
@cli.clizefy
@cli.with_output
def process(*cubes: cli.inputcube):
"""Module to time-lag ensembles.
Combines the realization from different forecast cycles into one cube.
Takes an input CubeList containing forecasts from different cycles and
merges them into a single cube.
Args:
cubes (list of iris.cube.Cube):
List of individual ensemble cubes
Returns:
iris.cube.Cube:
Merged cube
Raises:
ValueError: If ensembles have mismatched validity times
"""
import warnings
from improver.utilities.time_lagging import GenerateTimeLaggedEnsemble
if len(cubes) == 1:
warnings.warn(
"Only a single cube input, so time lagging will have " "no effect."
)
return cubes[0]
# raise error if validity times are not all equal
time_coords = [cube.coord("time") for cube in cubes]
time_coords_match = [coord == time_coords[0] for coord in time_coords]
if not all(time_coords_match):
raise ValueError("Cubes with mismatched validity times are not compatible.")
return GenerateTimeLaggedEnsemble()(cubes)
|
from django.urls import path, include
from . import views
urlpatterns = (
    # approval urls
    path('approval/', views.ApprovalListView.as_view(), name='approval_approval_list'),
    path('approval/detail/<int:pk>/', views.ApprovalDetailView.as_view(), name='approval_approval_detail'),
    path('approval/create/', views.ApprovalCreateView.as_view(), name='approval_approval_create'),
    path('approval/update/<int:pk>/', views.ApprovalUpdateView.as_view(), name='approval_approval_update'),
    path('approval/delete/<int:pk>/', views.ApprovalDeleteView.as_view(), name='approval_approval_delete'),
    path('approval/post/<int:pk>/', views.post_approval, name='approval_post'),
    path('approval/unpost/<int:pk>/', views.unpost_approval, name='approval_unpost'),
)
urlpatterns += (
    path('approvalline/create/', views.ApprovalLineCreateView.as_view(),
         name='approval_approvalline_create'),
    path('approvallinereturn/', views.ApprovalLineReturnView,
         name='approval_approvallinereturn_create'),
    path('approvallinereturnlist/', views.ApprovalLineReturnListView.as_view(),
         name='approval_approvallinereturn_list'),
    path('approvallinereturn/delete/<int:pk>/', views.ApprovalLineReturnDeleteView.as_view(),
         name='approval_approvallinereturn_delete'),
    path('approvallinereturn/post/<int:pk>/', views.post_approvallinereturn, name='approvallinereturn_post'),
    path('approvallinereturn/unpost/<int:pk>/',
         views.unpost_approvallinereturn, name='approvallinereturn_unpost'),
    path('approval/convert_sale/<int:pk>/', views.convert_sales, name='approval_convert_sale'),
)
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension, is_fully_defined, shape_array, strict_compare_tensors, dynamic_dimension_value
from openvino.tools.mo.graph.graph import Node, Graph
from openvino.tools.mo.ops.op import Op
class SparseReshape(Op):
"""
SparseReshape operation reshapes a sparse tensor in Coordinate list (COO) format
It recomputes indices for a new dense shape.
"""
op = 'SparseReshape'
def __init__(self, graph: Graph, attrs: dict):
mandatory_props = {
'type': None,
'op': __class__.op,
'infer': self.infer,
'in_ports_count': 3,
'out_ports_count': 2,
}
super().__init__(graph, mandatory_props, attrs)
def supported_attrs(self):
return []
@staticmethod
def infer(node: Node):
name = node.soft_get('name', node.id)
input_indices_shape = node.in_port(0).data.get_shape()
input_indices_value = node.in_port(0).data.get_value()
input_shape = node.in_port(1).data.get_value()
new_shape = node.in_port(2).data.get_value()
new_shape_shape = node.in_port(2).data.get_shape()
assert input_shape is not None and new_shape is not None, \
"Values for input shape and new shape must be defined"
assert len(np.argwhere(new_shape == -1)) <= 1, \
"Value -1 occurs in new shape value more than once"
assert len(np.argwhere(new_shape < -1)) == 0, \
"Only non-negative or -1 values are allowed"
output_shape = np.ma.masked_array(new_shape, mask=new_shape == -1, fill_value=dynamic_dimension_value)
assert not is_fully_defined(input_shape) or not is_fully_defined(output_shape) or \
np.prod(input_shape) == np.prod(output_shape), \
"Number of elements in input {} and output {} of dynamic reshape node {} mismatch" \
"".format(input_shape, output_shape, name)
# we can deduce -1 only if input_shape is fully defined and
# there is one dynamic dimension in output_shape
if is_fully_defined(input_shape) and np.ma.count_masked(output_shape) == 1:
undefined_dim_size = np.prod(input_shape) // np.prod(output_shape)
undefined_idx = np.where(output_shape == dynamic_dimension)[0][0]
output_shape[undefined_idx] = undefined_dim_size
output_shape.mask[undefined_idx] = False
node.out_port(1).data.set_value(shape_array(output_shape))
output_indices_shape = np.concatenate((input_indices_shape[0:1], new_shape_shape))
node.out_port(0).data.set_shape(output_indices_shape)
# TODO: implement constant value propagation for common case with scipy.sparse.coo_matrix.reshape
        # instead of compatible_shapes we intentionally use strict_compare_tensors (exact equality)
if strict_compare_tensors(input_shape, output_shape) and input_indices_value is not None:
node.out_port(0).data.set_value(input_indices_value)
|
import unittest
from pprint import pprint
from random import randint
from pysyspol.util import random_alphanumeric_str
from pydsalg.hash_table import (HashTable0)
class TestHashTable0(unittest.TestCase):
@classmethod
def setUpClass(cls):
min_key_len = 8
max_key_len = 16
min_value_len = 1
max_value_len = 8
testdict0_len = 150
cls.testdict0 = {}
for i in range(testdict0_len):
key_len = randint(min_key_len, max_key_len)
value_len = randint(min_value_len, max_value_len)
key = random_alphanumeric_str(key_len)
value = random_alphanumeric_str(value_len)
cls.testdict0[key] = value
def test_misc_00(self):
ht0 = HashTable0()
self.assertRaises(KeyError, lambda: ht0['x'])
self.assertEqual(ht0.length, HashTable0._ARR_DEFAULT_LENGTH)
self.assertEqual(ht0.count, 0)
ht0["670ag;bn's"] = 'value00'
ht0["njdfg789d"] = 'value01'
ht0["z76g6234"] = 'value02'
self.assertEqual(ht0.count, 3)
def test_misc_01(self):
ht0 = HashTable0()
for k, v in self.testdict0.items():
ht0[k] = v
self.assertEqual(ht0.count, len(self.testdict0))
for k, v in self.testdict0.items():
ht0[k] = v
def test_membership(self):
ht0 = HashTable0()
for k in self.testdict0:
self.assertTrue(k not in ht0)
for k, v in self.testdict0.items():
ht0[k] = v
for k in self.testdict0:
self.assertTrue(k in ht0)
if __name__ == '__main__':
unittest.main()
|
import numpy as np
import pandas as pd
mensaje="there are three kinds of lies lies damn lies and statistics mark twain i could not claim that i was smarter than sixty five other guys but the average of sixty five other guys certainly feynman a single death is a tragedy a million deaths is a statistic joseph stalin coincidences in general are great stumbling blocks in the way of that class of thinkers who have been educated to know nothing of the theory of probabilities that theory to which the most glorious objects of human research are indebted for the most glorious of illustrations edgar allen poe morgue morpheus this is your last chance after this there is no turning back you take the blue pill the story ends you wake up in your bed and believe whatever you want to believe you take the red pill you stay in wonderland and i show you how deep the rabbit hole goes "
ocurrencias = pd.read_csv("AustenCount.txt",sep='\t',header=None).values
"We take logarithms to keep the numbers manageable (products of occurrence counts would otherwise be huge)"
logocurrencias=np.log(ocurrencias+1)
print(pd.DataFrame(ocurrencias))
abc="abcdefghijklmnopqrstuvwxyz"
#charIndex={"a":0,"b":1,"c":2,"d":3,"e":4,"f":5,"g":6,"h":7,"i":8,"j":9,"k":10,"l":11,"m":12,"n":13,"ñ":14,"o":15,"p":16,"q":17,"r":18,"s":19,"t":20,"u":21,"v":22,"w":23,"x":24,"y":25,"z":26," ":27}
charIndex={"a":0,"b":1,"c":2,"d":3,"e":4,"f":5,"g":6,"h":7,"i":8,"j":9,"k":10,"l":11,"m":12,"n":13,"o":14,"p":15,"q":16,"r":17,"s":18,"t":19,"u":20,"v":21,"w":22,"x":23,"y":24,"z":25," ":26}
to_Char=list(charIndex.keys())
def score(m):
p=0
    '''For each pair of adjacent letters in the message m, add the score given by the
    occurrence counts. Since the raw counts are very large, we work in logarithms
    (a sum of log-occurrences instead of a product of occurrences).'''
for i in range(len(m)-1):
ic1=charIndex[m[i]]
ic2=charIndex[m[i+1]]
p+=logocurrencias[ic1,ic2]
return p
def desencriptar(mensaje,func):
desencriptared=list(mensaje)
    func=list(func)
    "For each character of the message, decrypt it according to the given mapping"
for i in range(len(desencriptared)):
value=charIndex[desencriptared[i]]
if value<26:
newchar=func[value]
desencriptared[i]=to_Char[newchar]
return "".join(desencriptared)
def permutar2(f):
fperm=[i for i in f]
swap=np.random.choice(26,size=2,replace=False)
fperm[swap[0]]=f[swap[1]]
fperm[swap[1]]=f[swap[0]]
return fperm
#####################################################
'''
TEST BENCH: the following values are a valid example of an encryption mapping and its corresponding decryption mapping
encriptadora=[i-1 for i in [23,5,6,18,22,26,9,3,25,1,4,17,7,2,19,8,12,14,16,15,11,10,21,13,24,20]]
desencriptadora=[i-1 for i in [10,14,8,11,2,3,13,16,7,22,21,24,26,18,20,19,12,4,15,17,23,5,1,25,9,6,27]]
encriptado=desencriptar(mensaje,encriptadora)
print(encriptado)
print(desencriptar(encriptado,desencriptadora))
exit()
'''
###################################################
funcAleatoria=list(np.random.choice(26,size=26,replace=False))
"Encrypt the message with a random mapping"
encriptado=desencriptar(mensaje,funcAleatoria)
funcActual=list(range(27))
funcPropuesta=None
scoreActual = score(desencriptar(encriptado,funcActual))
iteraciones=300000
for i in range(iteraciones):
    "New proposed mapping obtained by swapping two entries"
funcPropuesta=permutar2(funcActual)
    scorePropuesta=score(desencriptar(encriptado,funcPropuesta))
    "If Uniform < a(i,j) = scorePropuesta/scoreActual, accept the proposed mapping (and update scoreActual);"
    "since the scores are log-values, the acceptance ratio becomes the exponential of the score difference"
if np.random.uniform() <= np.exp(scorePropuesta-scoreActual):
funcActual=funcPropuesta
scoreActual=scorePropuesta
if(i%100==0):
print(i,desencriptar(encriptado,funcActual))
print("FIN")
|
from __future__ import absolute_import
from pgoapi.exceptions import PleaseInstallProtobufVersion3
import pkg_resources
protobuf_exist = False
protobuf_version = 0
try:
protobuf_version = pkg_resources.get_distribution("protobuf").version
protobuf_exist = True
except:
pass
if (not protobuf_exist) or (int(protobuf_version[:1]) < 3):
raise PleaseInstallProtobufVersion3()
from pgoapi.pgoapi import PGoApi
from pgoapi.rpc_api import RpcApi
from pgoapi.auth import Auth
try:
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
except:
pass
|
import h5py
import os
from .imports import code_dir, SensorInfo
def load_ordnance_dict(
directory=code_dir,
filenames=[
"ordnance_DoD_UltraTEM_5F_APG.h5",
"ordnance_DoD_UltraTEM_5F_ISOsmall.h5",
"ordnance_DoD_UltraTEM_NATA_dyn_F_scale0o86.h5"
]
):
"""
create a dictionary of ordnance object from h5 files
"""
ord_dict = {}
for file in filenames:
        ord_file = os.path.join(directory, file)
f = h5py.File(ord_file, 'r')
for i in f['ordnance']:
ord_name = str(f[f'ordnance/{i}/Name'][()][0]).split("'")[1]
L3 = f[f'ordnance/{i}/L1ref'][()].flatten()
L2 = f[f'ordnance/{i}/L2ref'][()].flatten()
L1 = f[f'ordnance/{i}/L3ref'][()].flatten()
size_mm = int(f[f'ordnance/{i}/size_mm'][()].flatten())
common_name = f[f'ordnance/{i}/h5_Common_Name'][()].flatten()[0]
if isinstance(common_name, list):
common_name = common_name[0]
if ord_name not in ord_dict.keys():
times = f[f'ordnance/{i}/time'][()].flatten()
                ord_dict[ord_name] = {
                    "L3": [L3],
                    "L2": [L2],
                    "L1": [L1],
                    "size mm": [size_mm],
                    "common name": [common_name],
                    "times": times,
                }
else:
for key, val in zip(
["L3", "L2", "L1", "size mm", "common name"],
[L3, L2, L1, size_mm, common_name]
):
ord_dict[ord_name][key].append(val)
return ord_dict
def load_sensor_info(
filename = os.path.join(
code_dir, 'config','sensor_definitions','UltraTEMArrayNA___Default.yaml'
)
):
return SensorInfo.fromYAML(filename)[0]
|
"""GTLVQ example using the MNIST dataset."""
import argparse
import prototorch as pt
import pytorch_lightning as pl
import torch
from torchvision import transforms
from torchvision.datasets import MNIST
if __name__ == "__main__":
# Command-line arguments
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
# Dataset
train_ds = MNIST(
"~/datasets",
train=True,
download=True,
transform=transforms.Compose([
transforms.ToTensor(),
]),
)
test_ds = MNIST(
"~/datasets",
train=False,
download=True,
transform=transforms.Compose([
transforms.ToTensor(),
]),
)
# Dataloaders
train_loader = torch.utils.data.DataLoader(train_ds,
num_workers=0,
batch_size=256)
test_loader = torch.utils.data.DataLoader(test_ds,
num_workers=0,
batch_size=256)
# Hyperparameters
num_classes = 10
prototypes_per_class = 1
hparams = dict(
input_dim=28 * 28,
latent_dim=28,
distribution=(num_classes, prototypes_per_class),
proto_lr=0.01,
bb_lr=0.01,
)
# Initialize the model
model = pt.models.ImageGTLVQ(
hparams,
optimizer=torch.optim.Adam,
prototypes_initializer=pt.initializers.SMCI(train_ds),
        # Use one batch of data for subspace initialization.
omega_initializer=pt.initializers.PCALinearTransformInitializer(
next(iter(train_loader))[0].reshape(256, 28 * 28)))
# Callbacks
vis = pt.models.VisImgComp(
data=train_ds,
num_columns=10,
show=False,
tensorboard=True,
random_data=100,
add_embedding=True,
embedding_data=200,
flatten_data=False,
)
pruning = pt.models.PruneLoserPrototypes(
threshold=0.01,
idle_epochs=1,
prune_quota_per_epoch=10,
frequency=1,
verbose=True,
)
es = pl.callbacks.EarlyStopping(
monitor="train_loss",
min_delta=0.001,
patience=15,
mode="min",
check_on_train_epoch_end=True,
)
# Setup trainer
# using GPUs here is strongly recommended!
trainer = pl.Trainer.from_argparse_args(
args,
callbacks=[
vis,
pruning,
# es,
],
terminate_on_nan=True,
weights_summary=None,
accelerator="ddp",
)
# Training loop
trainer.fit(model, train_loader)
|
"""
Class to simulate the queen piece
"""
class Queen:
"""Queen class"""
@staticmethod
    def attack(x, y, available):
        """Simulate an attack
        * Validate that row x and column y are safe
        * Validate that the diagonals are safe
        * Remove the attacked possibilities from the board
        """
if Queen.safe_xy(x, y, available):
if Queen.safe_diagonal(x, y, available):
return True, available
return False, available
@staticmethod
    def safe_xy(x, y, available):
        """Validate row x and column y
        Remove row x from the available positions
        Remove column y from every remaining row
        If any row is left with no free columns, the placement fails
        """
if x in available.keys():
available.pop(x)
for row, col in available.items():
if y in col:
col.pop(y)
if len(col) == 0:
return False
return True
@staticmethod
    def safe_diagonal(x, y, available):
        """Validate the diagonals
        Remove every remaining position on the diagonals of (x, y) from the board
        """
for j, k in available.items():
# Right up
if j < x and y + x - j in k:
k.pop(y + x - j)
# Left up
if j < x and y - x + j in k:
k.pop(y - x + j)
# Right down
if j >= x and y + j - x in k:
k.pop(y + j - x)
# Left down
if j > x and y - j + x in k:
k.pop(y - j + x)
if len(k) == 0:
return False
return True
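# Illustrative sketch (not part of the original module): 'available' is assumed to be
# a dict mapping each remaining row to a dict of still-free columns. Placing a queen
# at (0, 1) on a 4x4 board removes row 0, column 1 and the attacked diagonals.
if __name__ == "__main__":
    board = {row: {col: True for col in range(4)} for row in range(4)}
    ok, board = Queen.attack(0, 1, board)
    print(ok)     # True: every remaining row still has at least one free column
    print(board)  # {1: {3: True}, 2: {0: True, 2: True}, 3: {0: True, 2: True, 3: True}}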
|
from base64 import b64decode
from hashlib import md5
from uuid import uuid1, uuid4
from bottle import HTTPError
from .nodes.node import Node
from .session import Session
def authenticate(session: Session, node: Node, header: str, method: str = "GET"):
# from bottlepy
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, str) else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
else:
return str(s or ("" if s is None else s))
raise_login = True
realm = "user@TheOnionBox"
auth_scheme = "TheOnionBox"
if header != '':
try:
scheme, data = header.split(None, 1)
if scheme == auth_scheme:
# Basic Authentication
if session['auth'] == 'basic':
user, pwd = touni(b64decode(tob(data))).split(':', 1)
if user == session.id:
node.controller.authenticate(password=pwd)
raise_login = False
# Digest Authentication
elif session['auth'] == 'digest':
                    # the data comes in as 'key1="xxx...", key2="xxx...", ..., key_x="..."'
# therefore we split @ ', '
# then remove the final '"' & split @ '="'
# to create a nice dict.
request_data = dict(item[:-1].split('="') for item in data.split(", "))
ha1_prep = (session.id + ":" + realm + ":" + node.controller.password).encode('utf-8')
ha1 = md5(ha1_prep).hexdigest()
ha2_prep = (method + ":" + request_data['uri']).encode('utf-8')
ha2 = md5(ha2_prep).hexdigest()
resp_prep = (ha1 + ":{}:".format(session['nonce']) + ha2).encode('utf-8')
response = md5(resp_prep).hexdigest()
# print(response)
# print(request_data['response'])
if response == request_data['response']:
node.controller.authenticate(password=node.controller.password)
raise_login = False
except Exception as e:
print(e)
pass
if raise_login:
acc_denied = HTTPError(401, 'Access denied!')
# Request Basic Authentication
if session['auth'] == 'basic':
acc_denied.add_header('WWW-Authenticate', '{} realm={}'.format(auth_scheme, realm))
# Request Digest Authentication
else:
session['nonce'] = uuid1().hex
session['opaque'] = uuid4().hex
header = '{} realm={}, nonce={}, opaque={}'
header = header.format(auth_scheme, realm, session['nonce'], session['opaque'])
acc_denied.add_header('WWW-Authenticate', header)
raise acc_denied
# at this stage, authentication was passed!
return node.controller.password
|
version_info = (1, 11, 2)
__version__ = '.'.join(map(str, version_info))
|
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import errno
import os
import sys
import subprocess
from subprocess import CalledProcessError
REPOS_PATH = os.path.abspath('repos')
# http://stackoverflow.com/a/600612/545027
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as e: # Python >2.5
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
def run_cmd(args, bypass_exit_code=False, **kwargs):
try:
return subprocess.check_output(args, **kwargs)
except CalledProcessError:
if not bypass_exit_code:
raise
def git_cmd(repo, args, no_git_dir=False):
repo_path = os.path.join(REPOS_PATH, repo)
env = {}
if not no_git_dir:
env['GIT_DIR'] = repo_path
return run_cmd(['git'] + args, env=env, cwd=repo_path)
def git_revlist(repo, *commits):
try:
out = git_cmd(repo, ['rev-list'] + list(commits) + ['--'])
except CalledProcessError:
return []
else:
return out.splitlines()
def git_show(repo, commit, format='%B'):
format_arg = '--format={}'.format(format) # deal with it
out = git_cmd(repo, ['show', '--no-patch', format_arg, commit])
return out
def git_diff_check(repo, commit):
try:
git_cmd(repo, ['diff-tree', '--check', '--no-color', commit])
except CalledProcessError as e:
out = e.output
return sum(line[0] == '+' for line in out.splitlines() if line) or 1
else:
return 0
|
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from mezzanine.core.models import TimeStamped
from group.models import Group, GroupMember
# Create your models here.
class Bill(TimeStamped):
who_paid = models.ForeignKey(GroupMember, related_name='bill_paid')
description = models.TextField()
quantity_paid = models.FloatField()
group = models.ForeignKey(Group, related_name='bill')
who_owes = models.ManyToManyField(GroupMember)
# class Debt(TimeStamped):
# who_incurred_cost = models.ForeignKey(HouseholdMember, related_name='money_owed')
# bill = models.ForeignKey(Bill, related_name='who_owes')
|
import requests
import logging
import json
class MPulseAPIHandler:
def __init__(self, logger, simulate = False):
self.logger = logger
self.simulate = simulate
def getSecurityToken(self, apiToken, tenant):
"""curl -X PUT -H "Content-type: application/json" --data-binary '{"apiToken":"<token>", "tenant": "<tenant>"}' \
"https://mpulse.soasta.com/concerto/services/rest/RepositoryService/v1/Tokens"
:param apiToken: mPulse API token
:type apiToken: a String
:param tenant: mPulse tenant
:type tenant: a String
:returns: a String with the security token, None in case of error
"""
payload = "{\"apiToken\": \"" + apiToken + "\", \"tenant\": \"" + tenant + "\"}"
self.logger.info("requesting an mPulse security token with: " + payload)
url = 'https://mpulse.soasta.com/concerto/services/rest/RepositoryService/v1/Tokens'
result = requests.put(url, data = payload, headers={'Content-Type':'application/json'})
if (result.status_code == 200):
json_data = result.json()
self.logger.info('mPulse security token returned: ' + str(json_data['token']) )
return str(json_data['token'])
else:
self.logger.error('Error ' + str(result.status_code) + ': no security token returned')
return None
def addAnnotation(self, token, title, text, start, end = None):
"""Add a new Annotation to mPulse dashboard.
:param token: the security token as returned by getSecurityToken()
:type token: a String object
:param title: the annotation title
:type title: a String object
:param text: the annotation body text
:type text: a String object
:param start: start time of the annotation in epoch time format in milliseconds
:type start: an int
:param end: (optional) end time of the annotation in epoch time format in milliseconds
:type end: an int
"""
if end is None:
payload = "{\"title\":\"" + title + "\", \"start\": \"" + str(start) + "\", \"text\":\"" + text + "\"}"
else:
payload = "{\"title\":\"" + title + "\", \"start\": \"" + str(start) + "\", \"end\":\"" + str(end) + "\", \"text\":\"" + text + "\"}"
if self.simulate:
self.logger.info("[SIMULATE] adding new annotation: " + payload)
return
self.logger.info("adding new annotation: " + payload)
#self.logger.info("WARNING: mpulse API handler disabled!")
#return
url = "https://mpulse.soasta.com/concerto/mpulse/api/annotations/v1"
result = requests.post(url, data = payload, headers={'Content-Type':'application/json', 'X-Auth-Token': token })
if (result.status_code == 200):
json_data = result.json()
self.logger.info('annotation successfully added')
else:
self.logger.error('Error ' + str(result.status_code) + ': annotation not added!')
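# Illustrative usage sketch (not part of the original module), with placeholder
# credentials; simulate=True keeps addAnnotation from issuing a real HTTP request.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    handler = MPulseAPIHandler(logging.getLogger("mpulse"), simulate=True)
    token = handler.getSecurityToken("<api-token>", "<tenant>")  # placeholder values
    if token is not None:
        handler.addAnnotation(token, "Deployment", "Release finished", start=1522000000000)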
|
# -*- coding: utf-8 -*-
#########
# Lists #
#########
ma_liste = list() # initialization 1
ma_liste = [] # initialization 2
ma_liste = ['c', 'f', 'm']
ma_liste[0] # Access the first element of the list ('c')
ma_liste[2] # Third element ('m')
ma_liste[1] = 'Z' # Replace 'f' with 'Z'
ma_liste # ['c', 'Z', 'm']
# Append an element to the end of the list
ma_liste.append("O") # ['c', 'Z', 'm', 'O']
# Insert an element into the list
ma_liste.insert(2, "a") # insert "a" at index 2: ['c', 'Z', 'a', 'm', 'O']
# Concatenate 2 lists
ma_liste1 = [3, 4, 5]
ma_liste2 = [8, 9, 10]
ma_liste1.extend(ma_liste2) # 1st method: ma_liste2 is appended to the end of ma_liste1: [3, 4, 5, 8, 9, 10]
ma_liste1 = [3, 4, 5]
ma_liste1 + ma_liste2 # 2nd method (display only: does not modify the lists, just returns the concatenation): [3, 4, 5, 8, 9, 10]
ma_liste1 += ma_liste2 # 3rd method: identical to extend: [3, 4, 5, 8, 9, 10]
# Removing elements from a list
ma_liste = ['c', 'Z', 'a', 'm', 'O']
del ma_liste[0] # With del: give the index of the element to remove ('c')
ma_liste.remove('m') # With remove: give the element itself
########################
# Iterating over lists #
########################
# 1st method
ma_liste = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
i = 0 # Our index for the while loop
while i < len(ma_liste):
    print(ma_liste[i])
    i += 1 # Increment i, don't forget!
# 2nd method: this one is preferable
for elt in ma_liste: # elt takes the successive values of the elements of ma_liste
    print(elt)
# The enumerate function
ma_liste = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
for i, elt in enumerate(ma_liste): # i is the index, elt the current element
    print("Index {} holds {}.".format(i, elt))
##########################
# A quick look at tuples #
##########################
tuple_vide = ()
tuple_non_vide = (1,) # equivalent to the line below
tuple_non_vide = 1,
tuple_avec_plusieurs_valeurs = (1, 2, 5)
# Multiple assignment
a, b = 3, 4
# A function returning several values
def decomposer(entier, divise_par):
    """This function returns the integer part and the remainder of
    entier / divise_par"""
    p_e = entier // divise_par
    reste = entier % divise_par
    return p_e, reste
partie_entiere, reste = decomposer(20, 3) # partie_entiere = 6 and reste = 2
#############################
# Between strings and lists #
#############################
# From strings to lists: split("separator")
ma_chaine = "Bonjour à tous"
ma_chaine.split(" ") # ['Bonjour', 'à', 'tous']
# From lists to strings: "separator".join(list_to_join)
ma_liste = ['Bonjour', 'à', 'tous']
" ".join(ma_liste) # 'Bonjour à tous'
# Example:
def afficher_flottant(flottant):
    """Function taking a float and returning a string representing the truncation
    of that number: the fractional part is kept to at most 3 characters,
    and the decimal point is replaced by a comma."""
    if type(flottant) is not float:
        raise TypeError("The expected parameter must be a float")
    flottant = str(flottant)
    partie_entiere, partie_flottante = flottant.split(".")
    # The integer part is left unchanged;
    # only the fractional part needs to be truncated
    return ",".join([partie_entiere, partie_flottante[:3]])
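# Example calls (added for illustration):
print(afficher_flottant(3.99999)) # 3,999
print(afficher_flottant(0.5))     # 0,5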
##################################################
# Functions with an unknown number of parameters #
##################################################
def fonction_inconnue(*parametres):
    """Test of a function that can be called with a variable number of parameters"""
    print("I received: {}.".format(parametres))
fonction_inconnue() # I received: ().
fonction_inconnue(33) # I received: (33,).
fonction_inconnue('a', 'e', 'f') # I received: ('a', 'e', 'f').
var = 3.5
fonction_inconnue(var, [4], "...") # I received: (3.5, [4], '...').
###########################################
# Example: a function identical to print #
###########################################
def afficher(*parametres, sep=' ', fin='\n'):
    """Function reproducing the behaviour of print.
    It must end up calling print to display the result,
    but the parameters must already have been formatted.
    We pass print a single string, telling it to add nothing at the end:
    print(chaine, end='')"""
    # The parameters arrive as a tuple,
    # but we need to convert them,
    # and a tuple cannot be modified.
    # Several options exist; here we choose to convert the tuple to a list
    parametres = list(parametres)
    # Start by converting every value to a string,
    # otherwise the join below would fail
    for i, parametre in enumerate(parametres):
        parametres[i] = str(parametre)
    # The parameter list now contains only strings;
    # now build the final string
    chaine = sep.join(parametres)
    # Append the fin parameter to the end of the string
    chaine += fin
    # Display the whole thing
    print(chaine, end='')
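# Example calls (added for illustration), mirroring print's behaviour:
afficher("Hello", "world")   # Hello world
afficher(1, 2, 3, sep=" - ") # 1 - 2 - 3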
############################################
# Turning a list into function arguments   #
############################################
liste_des_parametres = [1, 4, 9, 16, 25, 36]
print(*liste_des_parametres) # 1 4 9 16 25 36
#######################
# List comprehensions #
#######################
# Simple traversal
liste_origine = [0, 1, 2, 3, 4, 5]
[nb * nb for nb in liste_origine] # [0, 1, 4, 9, 16, 25]; nb * nb is the same as nb ** 2
# Filtering with a condition
liste_origine = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
[nb for nb in liste_origine if nb % 2 == 0] # [2, 4, 6, 8, 10]
# Filtering and modifying a list (part 1)
qtt_a_retirer = 7 # Each week we remove 7 fruits of each kind
fruits_stockes = [15, 3, 18, 21] # For example 15 apples, 3 melons...
[nb_fruits - qtt_a_retirer for nb_fruits in fruits_stockes if nb_fruits > qtt_a_retirer] # [8, 11, 14]
# Filtering and modifying a list (part 2)
inventaire = [
("pommes", 22),
("melons", 4),
("poires", 18),
("fraises", 76),
("prunes", 51),
]
# Reverse the inventory entries, putting the quantity before the name
inventaire_inverse = [(qtt, nom_fruit) for nom_fruit, qtt in inventaire]
# Sort the reversed inventory in descending order
inventaire_inverse.sort(reverse=True)
# And rebuild the inventory
inventaire = [(nom_fruit, qtt) for qtt, nom_fruit in inventaire_inverse]
|
#!/usr/bin/env python
# coding: utf-8
import networkx as nx
import matplotlib.pyplot as plt
with open('nodes.txt') as nodesfile:
    # materialise each line as a list of ints so it can be indexed below
    nodes = [list(map(int, l.split(';'))) for l in nodesfile]
nodesdict = {node[0]: (node[1], node[2]) for node in nodes}
with open('edges.txt') as edgesfile:
    # map each node id of an edge to its (lon, lat) coordinates
    edges = [[nodesdict[int(x)] for x in line.split(';')] for line in edgesfile]
graph = nx.Graph()
graph.add_edges_from(edges)
# iterate over a copy of the node list, since remove_node mutates the graph
for node in list(graph.nodes()):
    if nx.degree(graph, node) > 2:
        graph.remove_node(node)
# connected_component_subgraphs was removed in recent networkx releases,
# so build each road subgraph from its connected component instead
for road in (graph.subgraph(c) for c in nx.connected_components(graph)):
endpoints = [n for n in road if nx.degree(graph, n) == 1]
if not endpoints:
continue
path = nx.shortest_path(road, endpoints[0], endpoints[1])
lons, lats = zip(*path)
plt.plot(lons, lats)
plt.axis('equal')
plt.grid()
plt.show()
# plt.figure()
# for edge in edges:
# lons, lats = zip(*edge)
# plt.plot(lons, lats)
# plt.axis('equal')
# plt.grid()
# plt.figure()
# indices, lons, lats = zip(*nodes)
# plt.scatter(lons, lats, color='b', s=5, linewidths=0, alpha=0.5)
# plt.axis('equal')
# plt.grid()
# plt.show()
|
# coding: utf-8
import xlsxwriter
import models
def formatindicator(indicator):
data = indicator
if indicator == 0:
data = '0'
if type(indicator) == str:
if '.' in indicator and '>' not in indicator:
data = float(indicator)
if type(indicator) == float:
data = indicator
return data
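# Example behaviour of formatindicator (added for illustration):
#   formatindicator(0)       -> '0'
#   formatindicator('1.234') -> 1.234
#   formatindicator('>10.0') -> '>10.0'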
# Create the Excel workbook and add a worksheet
workbook = xlsxwriter.Workbook('output/scopus_list_20180406.xlsx')
worksheet = workbook.add_worksheet('Scopus_list')
# HEADER
col = 0
wrap = workbook.add_format({'text_wrap': True})
wrap_blue = workbook.add_format({'text_wrap': True, 'bg_color': '#6495ED'})
wrap_red = workbook.add_format({'text_wrap': True, 'bg_color': '#DC143C'})
wrap_orange = workbook.add_format({'text_wrap': True, 'bg_color': '#FFA500'})
wrap_green = workbook.add_format({'text_wrap': True, 'bg_color': '#99FF99'})
for h in [
'ISSNs',
'Main Title (SciELO, Scopus, JCR or WOS)',
'OECD major categories',
'OECD minor categories',
'Scimago Region',
'Scopus Country',
'SciELO Country',
'JCR country',
'is Scopus',
'is SciELO',
'is JCR',
'is WOS',
'Open Access(Scopus or SciELO)'
]:
worksheet.write(0, col, h, wrap)
col += 1
# Scopus fields
for h in [
'Scopus Title',
'Scopus Publisher',
'Scopus source type',
'Scopus Codes ASJC'
]:
worksheet.write(0, col, h, wrap_blue)
col += 1
# Scopus CiteScore 2016-2011
for y in range(2016, 2010, -1):
worksheet.write(0, col, 'Scopus CiteScore ' + str(y), wrap_blue)
col += 1
# CWTS SNIP 2016-1999
for y in range(2016, 1998, -1):
worksheet.write(0, col, 'CWTS SNIP ' + str(y), wrap)
col += 1
# Scimago fields
worksheet.write(0, col, 'Scimago Title', wrap_orange)
col += 1
# Scimago Indicators 2016-1999
for y in range(2016, 1998, -1):
for h in [
'Scimago SJR',
'Scimago SJR Best Quartile',
'Scimago H Index',
'Scimago Total Docs',
'Scimago Total Docs 3years',
'Scimago Total Refs',
'Scimago Total Cites 3years',
'Scimago Citable Docs 3years',
'Scimago Cites by Doc(2 years)',
'Scimago Ref by Doc'
]:
worksheet.write(0, col, h + ' ' + str(y), wrap_orange)
col += 1
# SciELO fields and Subjects
for h in [
'SciELO Title',
'SciELO Publisher',
'SciELO thematic areas',
'SciELO agricultural sciences',
'SciELO applied social sciences',
'SciELO biological sciences',
'SciELO engineering',
'SciELO exact and earth sciences',
'SciELO health sciences',
'SciELO human sciences',
'SciELO linguistics letters and arts',
'SciELO multidisciplinary'
]:
worksheet.write(0, col, h, wrap_red)
col += 1
# SciELO metrics - 2016-2012
for y in range(2016, 2011, -1):
for h in [
'SciELO Total Docs',
'SciELO Citable Docs'
]:
worksheet.write(0, col, h + ' ' + str(y), wrap_red)
col += 1
# JCR CIs and Thematic Areas
for h in [
'JCR SCIE',
'JCR SSCI',
'JCR Title',
'JCR Publisher',
'JCR Thematic Areas'
]:
worksheet.write(0, col, h, wrap_green)
col += 1
# JCR Indicators 2016-1997
for y in range(2016, 1996, -1):
for h in [
'JCR Total cites',
'JCR Journal Impact Factor',
'JCR Impact Factor without Journal Self Cites',
'JCR 5 years Impact Factor',
'JCR Immediacy Index',
'JCR Citable Items',
'JCR Cited half life',
'JCR Citing half life',
'JCR Eigenfactor Score',
'JCR Article Influence Score',
'JCR % Articles in Citable Items',
'JCR Average Journal Impact Factor Percentile',
'JCR Normalized Eigenfactor'
]:
worksheet.write(0, col, h + ' ' + str(y), wrap_green)
col += 1
row = 1
# SCOPUS
scopus = models.Scopus.objects()
for docscopus in scopus:
print('Scopus : ' + docscopus.title)
col = 0
if hasattr(docscopus, 'issn_list'):
worksheet.write(row, col, '; '.join(docscopus.issn_list))
col += 1
if docscopus.is_scielo == 1:
query = models.Scielo.objects.filter(id=str(docscopus.scielo_id))
worksheet.write(row, col, query[0]['title'])
else:
if hasattr(docscopus, 'title'):
worksheet.write(row, col, docscopus.title)
col += 1
# OECD categories
col = 2
if hasattr(docscopus, 'oecd'):
loecd = sorted(docscopus.oecd, key=lambda k: k['code'])
major = []
minor = []
for d in loecd:
if '.' not in d['code']:
major.append(d['code'] + ' ' + d['description'])
if '.' in d['code']:
minor.append(d['code'] + ' ' + d['description'])
worksheet.write(row, col, '; '.join(major))
col += 1
worksheet.write(row, col, '; '.join(minor))
col += 1
col = 4
if docscopus.is_scielo == 1:
query = models.Scielo.objects.filter(id=str(docscopus.scielo_id))
worksheet.write(row, col, query[0]['region'])
else:
if docscopus.is_scimago == 1:
query = models.Scimago.objects.filter(id=str(docscopus.scimago_id))
worksheet.write(row, col, query[0]['region'])
col += 1
if hasattr(docscopus, 'publishers_country'):
worksheet.write(row, col, docscopus.publishers_country)
col += 1
if hasattr(docscopus, 'country_scielo'):
worksheet.write(row, col, docscopus.country_scielo)
col += 1
if hasattr(docscopus, 'country_jcr'):
worksheet.write(row, col, docscopus.country_jcr)
col += 1
if hasattr(docscopus, 'is_scopus'):
worksheet.write(row, col, docscopus.is_scopus)
col += 1
if hasattr(docscopus, 'is_scielo'):
worksheet.write(row, col, docscopus.is_scielo)
col += 1
if hasattr(docscopus, 'is_jcr'):
worksheet.write(row, col, docscopus.is_jcr)
col += 1
if hasattr(docscopus, 'is_wos'):
worksheet.write(row, col, docscopus.is_wos)
col += 1
# Open Access
if hasattr(docscopus, 'open_access_status'):
worksheet.write(row, col, 1)
elif docscopus.is_scielo == 1:
worksheet.write(row, col, 1)
else:
worksheet.write(row, col, 0)
col += 1
# Scopus fields
if hasattr(docscopus, 'title'):
worksheet.write(row, col, docscopus.title)
col += 1
if hasattr(docscopus, 'publishers_name'):
worksheet.write(row, col, docscopus.publishers_name)
col += 1
if hasattr(docscopus, 'source_type'):
worksheet.write(row, col, docscopus.source_type)
col += 1
if hasattr(docscopus, 'asjc_code_list'):
worksheet.write(row, col, '; '.join(docscopus['asjc_code_list']))
col += 1
col = 17
for year in range(2016, 2010, -1):
if hasattr(docscopus, str(year)):
# print(docscopus[str(year)])
if 'citescore' in docscopus[str(year)]:
worksheet.write(row, col, docscopus[str(year)]['citescore'])
col += 1
# CWTS SNIP
col = 23
if docscopus.is_cwts == 1:
cwts = models.Cwts.objects(id=str(docscopus.cwts_id))[0]
for year in range(2016, 1998, -1):
if hasattr(cwts, str(year)):
if 'snip' in cwts[str(year)]:
worksheet.write(row, col, round(cwts[str(year)]['snip'], 3))
col += 1
# SCIMAGO indicators
col = 41
if docscopus.is_scimago == 1:
scimago = models.Scimago.objects(id=str(docscopus.scimago_id))[0]
if hasattr(scimago, 'title'):
worksheet.write(row, col, scimago['title'])
col += 1
for year in range(2016, 1998, -1):
if hasattr(scimago, str(year)):
if 'sjr' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['sjr'])
col += 1
if 'sjr_best_quartile' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['sjr_best_quartile'])
col += 1
if 'h_index' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['h_index'])
col += 1
if 'total_docs' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['total_docs'])
col += 1
if 'total_docs_3years' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['total_docs_3years'])
col += 1
if 'total_refs' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['total_refs'])
col += 1
if 'total_cites_3years' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['total_cites_3years'])
col += 1
if 'citable_docs_3years' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['citable_docs_3years'])
col += 1
if 'cites_by_doc_2years' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['cites_by_doc_2years'])
col += 1
if 'ref_by_doc' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['ref_by_doc'])
col += 1
else:
col += 10
# SciELO - subjects
col = 222
if docscopus.is_scielo == 1:
scielo = models.Scielo.objects(id=str(docscopus.scielo_id))[0]
worksheet.write(row, col, scielo['title'])
col += 1
worksheet.write(row, col, scielo['publisher_name'])
col += 1
for k in [
'title_thematic_areas',
'title_is_agricultural_sciences',
'title_is_applied_social_sciences',
'title_is_biological_sciences',
'title_is_engineering',
'title_is_exact_and_earth_sciences',
'title_is_health_sciences',
'title_is_human_sciences',
'title_is_linguistics_letters_and_arts',
'title_is_multidisciplinary'
]:
if scielo[k]:
worksheet.write(row, col, scielo[k])
else:
worksheet.write(row, col, 0)
col += 1
for year in range(2016, 2011, -1):
k = 'documents_at_' + str(year)
if hasattr(scielo, k):
worksheet.write(row, col, scielo[k])
col += 1
k = 'citable_documents_at_' + str(year)
if hasattr(scielo, k):
worksheet.write(row, col, scielo[k])
col += 1
else:
col += 2
# JCR Indicators 2016-1997
col = 244
if docscopus.is_jcr == 1:
jcr = models.Jcr.objects(id=str(docscopus.jcr_id))[0]
if hasattr(jcr, 'citation_database'):
if 'SCIE' in jcr['citation_database']:
worksheet.write(row, col, 1)
else:
worksheet.write(row, col, 0)
col += 1
if 'SSCI' in jcr['citation_database']:
worksheet.write(row, col, 1)
else:
worksheet.write(row, col, 0)
col += 1
else:
col += 2
col = 246
worksheet.write(row, col, jcr['title'])
col += 1
if hasattr(jcr, 'publisher'):
worksheet.write(row, col, jcr['publisher'])
col += 1
col = 248
if docscopus.is_scielo == 1:
scielo = models.Scielo.objects(id=str(docscopus.scielo_id))[0]
if hasattr(scielo, 'thematic_areas'):
worksheet.write(row, col, '; '.join(scielo['thematic_areas']))
else:
if hasattr(jcr, 'thematic_areas'):
worksheet.write(row, col, '; '.join(jcr['thematic_areas']))
col = 249
for year in range(2016, 1996, -1):
if hasattr(jcr, str(year)):
for k in [
'total_cites',
'journal_impact_factor',
'impact_factor_without_journal_self_cites',
'five_year_impact_factor',
'immediacy_index',
'citable_items',
'cited_half_life',
'citing_half_life',
'eigenfactor_score',
'article_influence_score',
'percentage_articles_in_citable_items',
'average_journal_impact_factor_percentile',
'normalized_eigenfactor'
]:
if k in jcr[str(year)]:
worksheet.write(
row,
col,
formatindicator(jcr[str(year)][k])
)
col += 1
else:
col += 13
    # Move to the next row for the next document
row += 1
print('last line of Scopus: %s' % row)
# -----------------------
# SciELO - is_scopus = 0
scielo = models.Scielo.objects.filter(is_scopus=0)
for doc in scielo:
print('SciELO : ' + doc.title)
col = 0
if hasattr(doc, 'issn_list'):
worksheet.write(row, col, '; '.join(doc.issn_list))
col += 1
if hasattr(doc, 'title'):
worksheet.write(row, col, doc.title)
col += 1
# OECD categories
col = 2
if hasattr(doc, 'oecd'):
loecd = sorted(doc.oecd, key=lambda k: k['code'])
major = []
minor = []
for d in loecd:
if '.' not in d['code']:
major.append(d['code'] + ' ' + d['description'])
if '.' in d['code']:
minor.append(d['code'] + ' ' + d['description'])
worksheet.write(row, col, '; '.join(major))
col += 1
worksheet.write(row, col, '; '.join(minor))
col += 1
col = 4
if hasattr(doc, 'region'):
worksheet.write(row, col, doc.region)
col += 1
# Scopus country
col += 1
if hasattr(doc, 'country'):
worksheet.write(row, col, doc.country)
col += 1
if hasattr(doc, 'country_jcr'):
worksheet.write(row, col, doc.country_jcr)
col += 1
if hasattr(doc, 'is_scopus'):
worksheet.write(row, col, doc.is_scopus)
col += 1
if hasattr(doc, 'is_scielo'):
worksheet.write(row, col, doc.is_scielo)
col += 1
if hasattr(doc, 'is_jcr'):
worksheet.write(row, col, doc.is_jcr)
col += 1
if hasattr(doc, 'is_wos'):
worksheet.write(row, col, doc.is_wos)
col += 1
# Open Access
worksheet.write(row, col, 1)
# CWTS SNIP
col = 23
if doc.is_cwts == 1:
cwts = models.Cwts.objects(id=str(doc.cwts_id))[0]
for year in range(2016, 1998, -1):
if hasattr(cwts, str(year)):
if 'snip' in cwts[str(year)]:
worksheet.write(row, col, round(cwts[str(year)]['snip'], 3))
col += 1
# SCIMAGO indicators
col = 41
if doc.is_scimago == 1:
scimago = models.Scimago.objects(id=str(doc.scimago_id))[0]
worksheet.write(row, col, scimago['title'])
col += 1
for year in range(2016, 1998, -1):
if hasattr(scimago, str(year)):
if 'sjr' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['sjr'])
col += 1
if 'sjr_best_quartile' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['sjr_best_quartile'])
col += 1
if 'h_index' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['h_index'])
col += 1
if 'total_docs' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['total_docs'])
col += 1
if 'total_docs_3years' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['total_docs_3years'])
col += 1
if 'total_refs' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['total_refs'])
col += 1
if 'total_cites_3years' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['total_cites_3years'])
col += 1
if 'citable_docs_3years' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['citable_docs_3years'])
col += 1
if 'cites_by_doc_2years' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['cites_by_doc_2years'])
col += 1
if 'ref_by_doc' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['ref_by_doc'])
col += 1
else:
col += 10
# SciELO - subjects
col = 222
worksheet.write(row, col, doc['title'])
col += 1
if hasattr(doc, 'publisher_name'):
worksheet.write(row, col, doc.publisher_name)
col += 1
for k in [
'title_thematic_areas',
'title_is_agricultural_sciences',
'title_is_applied_social_sciences',
'title_is_biological_sciences',
'title_is_engineering',
'title_is_exact_and_earth_sciences',
'title_is_health_sciences',
'title_is_human_sciences',
'title_is_linguistics_letters_and_arts',
'title_is_multidisciplinary'
]:
if doc[k]:
worksheet.write(row, col, doc[k])
else:
worksheet.write(row, col, 0)
col += 1
for year in range(2016, 2011, -1):
k = 'documents_at_' + str(year)
if hasattr(doc, k):
worksheet.write(row, col, doc[k])
col += 1
k = 'citable_documents_at_' + str(year)
if hasattr(doc, k):
worksheet.write(row, col, doc[k])
col += 1
else:
col += 2
# JCR Indicators 2016-1997
col = 244
if doc.is_jcr == 1:
jcr = models.Jcr.objects(id=str(doc.jcr_id))[0]
if hasattr(jcr, 'citation_database'):
if 'SCIE' in jcr['citation_database']:
worksheet.write(row, col, 1)
else:
worksheet.write(row, col, 0)
col += 1
if 'SSCI' in jcr['citation_database']:
worksheet.write(row, col, 1)
else:
worksheet.write(row, col, 0)
col += 1
else:
col += 2
col = 246
worksheet.write(row, col, jcr['title'])
col += 1
if hasattr(jcr, 'publisher'):
worksheet.write(row, col, jcr['publisher'])
col += 1
col = 248
if hasattr(jcr, 'thematic_areas'):
worksheet.write(row, col, '; '.join(jcr['thematic_areas']))
col = 249
for year in range(2016, 1996, -1):
if hasattr(jcr, str(year)):
for k in [
'total_cites',
'journal_impact_factor',
'impact_factor_without_journal_self_cites',
'five_year_impact_factor',
'immediacy_index',
'citable_items',
'cited_half_life',
'citing_half_life',
'eigenfactor_score',
'article_influence_score',
'percentage_articles_in_citable_items',
'average_journal_impact_factor_percentile',
'normalized_eigenfactor'
]:
if k in jcr[str(year)]:
worksheet.write(
row,
col,
formatindicator(jcr[str(year)][k])
)
col += 1
else:
col += 13
    # Move to the next row for the next document
row += 1
print('last line of SciELO: %s' % row)
# --------------------------------
# JCR - is_scopus=0, is_scielo = 0
jcr = models.Jcr.objects.filter(is_scopus=0, is_scielo=0)
for doc in jcr:
print('JCR: ' + doc.title)
col = 0
if hasattr(doc, 'issn_list'):
worksheet.write(row, col, '; '.join(doc.issn_list))
col += 1
if hasattr(doc, 'title'):
worksheet.write(row, col, doc.title)
col += 1
# OECD categories
col = 2
if hasattr(doc, 'oecd'):
loecd = sorted(doc.oecd, key=lambda k: k['code'])
major = []
minor = []
for d in loecd:
if '.' not in d['code']:
major.append(d['code'] + ' ' + d['description'])
if '.' in d['code']:
minor.append(d['code'] + ' ' + d['description'])
worksheet.write(row, col, '; '.join(major))
col += 1
worksheet.write(row, col, '; '.join(minor))
col += 1
col = 4
if doc.is_scimago == 1:
query = models.Scimago.objects.filter(id=str(doc.scimago_id))
worksheet.write(row, col, query[0]['region'])
col += 1
# Scopus country
col += 1
# SciELO country
col += 1
if hasattr(doc, 'country'):
worksheet.write(row, col, doc.country)
col += 1
if hasattr(doc, 'is_scopus'):
worksheet.write(row, col, doc.is_scopus)
col += 1
if hasattr(doc, 'is_scielo'):
worksheet.write(row, col, doc.is_scielo)
col += 1
if hasattr(doc, 'is_jcr'):
worksheet.write(row, col, doc.is_jcr)
col += 1
# CWTS SNIP
col = 22
if doc.is_cwts == 1:
cwts = models.Cwts.objects(id=str(doc.cwts_id))[0]
for year in range(2016, 1998, -1):
if hasattr(cwts, str(year)):
if 'snip' in cwts[str(year)]:
worksheet.write(row, col, round(cwts[str(year)]['snip'], 3))
col += 1
# SCIMAGO indicators
col = 41
if doc.is_scimago == 1:
scimago = models.Scimago.objects(id=str(doc.scimago_id))[0]
worksheet.write(row, col, scimago['title'])
col += 1
for year in range(2016, 1998, -1):
if hasattr(scimago, str(year)):
if 'sjr' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['sjr'])
col += 1
if 'sjr_best_quartile' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['sjr_best_quartile'])
col += 1
if 'h_index' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['h_index'])
col += 1
if 'total_docs' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['total_docs'])
col += 1
if 'total_docs_3years' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['total_docs_3years'])
col += 1
if 'total_refs' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['total_refs'])
col += 1
if 'total_cites_3years' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['total_cites_3years'])
col += 1
if 'citable_docs_3years' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['citable_docs_3years'])
col += 1
if 'cites_by_doc_2years' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['cites_by_doc_2years'])
col += 1
if 'ref_by_doc' in scimago[str(year)]:
worksheet.write(row, col, scimago[str(year)]['ref_by_doc'])
col += 1
else:
col += 10
# JCR Indicators 2016-1997
col = 244
if hasattr(doc, 'citation_database'):
if 'SCIE' in doc['citation_database']:
worksheet.write(row, col, 1)
else:
worksheet.write(row, col, 0)
col += 1
if 'SSCI' in doc['citation_database']:
worksheet.write(row, col, 1)
else:
worksheet.write(row, col, 0)
col += 1
else:
col += 2
col = 246
if hasattr(doc, 'title'):
worksheet.write(row, col, doc.title)
col += 1
if hasattr(doc, 'publisher'):
worksheet.write(row, col, doc.publisher)
col += 1
col = 248
if hasattr(doc, 'thematic_areas'):
worksheet.write(row, col, '; '.join(doc['thematic_areas']))
col = 249
for year in range(2016, 1996, -1):
if hasattr(doc, str(year)):
for k in [
'total_cites',
'journal_impact_factor',
'impact_factor_without_journal_self_cites',
'five_year_impact_factor',
'immediacy_index',
'citable_items',
'cited_half_life',
'citing_half_life',
'eigenfactor_score',
'article_influence_score',
'percentage_articles_in_citable_items',
'average_journal_impact_factor_percentile',
'normalized_eigenfactor'
]:
if k in doc[str(year)]:
worksheet.write(
row,
col,
formatindicator(doc[str(year)][k])
)
col += 1
else:
col += 13
    # Advance row - next document
row += 1
print('last line of JCR: %s' % row)
# Save the Excel workbook
workbook.close()
|
import re
from struct import unpack, pack
def parse(buf):
    # Compile a regex that can parse a bytes buffer with an arbitrary number
    # of records, each consisting of a short, a null-terminated string,
    # and two more shorts. Incomplete records at the end of the file
    # will be ignored. re.DOTALL ensures we treat newlines as data.
    r = re.compile(b"(..)(.*?)\0(..)(..)", re.DOTALL)
    # packed will be a list of tuples: (packed short, bytes string, short, short).
    # You can use finditer instead to save memory on a large file, but
    # it will return MatchObjects rather than tuples.
    packed = r.findall(buf)
    # Create an unpacked list of tuples, mirroring the packed list.
    # Perl equivalent: @objlist = unpack("(S Z* S S)*", $buf);
    # Note that we do not need to unpack the string, because its
    # packed and unpacked representations are identical.
    objlist = list(map(lambda x: (short(x[0]), x[1], short(x[2]), short(x[3])), packed))
    # Alternatively, unpack using a list comprehension:
    # objlist = [(short(x[0]), x[1], short(x[2]), short(x[3])) for x in packed]
    # Create a dictionary from the packed list. The records hold object id,
    # description, and x and y coordinates, and we want to index by id.
    # We could also create it from the unpacked list, of course.
    objdict = {}
    for x in packed:
        obj_id = short(x[0])
        objdict[obj_id] = {}
        objdict[obj_id]["desc"] = x[1]
        objdict[obj_id]["x"] = short(x[2])
        objdict[obj_id]["y"] = short(x[3])
    return objlist, objdict
# Converts a 2-byte string to a little-endian short value.
# unpack returns a tuple, so we grab the first (and only) element.
def short(x):
    return unpack("<H", x)[0]
# Packs the arguments into a bytes string that parse() can read,
# for testing.
def packobj(obj_id, desc, x, y):
    return pack("<H", obj_id) + desc + b"\0" + pack("<HH", x, y)
if __name__ == '__main__':
    # Pack test objects into a bytes buffer. Normally, you'd load buf
    # with file data, perhaps with buf = open(filename, "rb").read()
    buf = b""
    buf += packobj(768, b"golden helmet", 3, 4)
    buf += packobj(234, b"windmill", 20, 30)
    # Test inclusion of a newline in the string
    buf += packobj(35, b"pitcher\nand stone", 1, 2)
    # Also add a bit of garbage at the end,
    # which the parser should ignore.
    buf += b"garbage"
    # Parse buffer into a list and a dictionary of objects
    olist, odict = parse(buf)
    print(olist)
    print(odict)
    print(odict[35]["desc"].decode())  # should retain the newline
|
class MoveTarget:
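    """Simple value object describing a move target by identifier, name, and description."""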
def __init__(self, identifier: str, name: str, description: str):
self.identifier = identifier
self.name = name
self.description = description
def __str__(self):
return self.name
|
from google.appengine.ext import ndb
class MyModel(ndb.Model):
@classmethod
def get_create_by_id(cls, *args, **kwargs):
"""
Get an entity or create one if it doesn't exist.
:param args: Arbitrary number of args. These will be used to create a key if 'id' is not in kwargs.
:param kwargs: Arbitrary kwargs. If 'id' is in kwargs, it will be used to look up the entity.
:return: tuple. First element is entity. Second element indicates whether the entity was created.
"""
created = False
if 'id' not in kwargs:
kwargs['id'] = '_'.join(args)
entity = cls.get_by_id(kwargs['id'])
if entity is None:
entity = cls(**kwargs)
created = True
return entity, created
@classmethod
def get_by_id(cls, *args):
return super(MyModel, cls).get_by_id('_'.join(args))
class SheetStock(MyModel):
bound_lower = ndb.FloatProperty('lb')
bound_upper = ndb.FloatProperty('ub')
class User(MyModel):
notify = ndb.BooleanProperty('n', default=False)
email = ndb.StringProperty('e')
sheet_keys = ndb.KeyProperty(kind='Sheet', repeated=True)
credentials = ndb.BlobProperty('c')
class Sheet(MyModel):
title = ndb.StringProperty('t')
last_updated = ndb.DateTimeProperty(auto_now=True)
stock_keys = ndb.KeyProperty(kind='Stock', repeated=True)
user_keys = ndb.KeyProperty(kind='User', repeated=True)
class Stock(MyModel):
price = ndb.FloatProperty('p')
sheet_keys = ndb.KeyProperty(kind='Sheet', repeated=True)
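# A minimal usage sketch (hypothetical values, not part of the models above)
# showing how get_create_by_id joins its positional args into a key id and
# reports whether the entity had to be created:
#
#   sheet, created = Sheet.get_create_by_id("spreadsheet", "abc123")  # id == "spreadsheet_abc123"
#   if created:
#       sheet.title = "New sheet"
#       sheet.put()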
|
#!/usr/bin/env python
import serial
import time
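# Continuously echo everything received on the serial port configured below
# (/dev/ttyUSB0 at 115200 baud, 8N1, no flow control).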
ser=serial.Serial(
port = '/dev/ttyUSB0',
baudrate = 115200,
parity = serial.PARITY_NONE,
bytesize = serial.EIGHTBITS,
stopbits = serial.STOPBITS_ONE,
timeout = None,
xonxoff = 0,
rtscts = 0,
# interCharTimeout = None
)
#ser.open()
while True:
    # readline() returns bytes; decode for display and suppress the extra newline
    print(ser.readline().decode(errors="replace"), end="")
|
__version__ = "1.0.0-beta1"
|
# Thermal Camera Demo
#
# This example shows off how to overlay a heatmap onto your OpenMV Cam's
# live video output from the main camera.
import image, time, fir
drawing_hint = image.BICUBIC # or image.BILINEAR or 0 (nearest neighbor)
# Initialize the thermal sensor
fir.init()
w = fir.width()
h = fir.height()
if fir.type() in (fir.FIR_MLX90621, fir.FIR_MLX90640, fir.FIR_MLX90641):
    w = w * 10
    h = h * 10
elif fir.type() == fir.FIR_AMG8833:
    w = w * 20
    h = h * 20
# FPS clock
clock = time.clock()
while (True):
clock.tick()
try:
img = fir.snapshot(x_size=w, y_size=h,
color_palette=fir.PALETTE_IRONBOW, hint=drawing_hint,
copy_to_fb=True)
except OSError:
continue
# Print FPS.
print(clock.fps())
|
import Estrapy
from Estrapy import OsuClients
import asyncio
client_id = "" # Put your own osu client_id
client_secret = "" # Put your own osu client_secret
Osu = OsuClients(client_id=client_id, client_secret=client_secret)
OsuObject = OsuClients(
client_id=client_id, client_secret=client_secret, output="object"
)
async def beatmap():
data = await Osu.osubeatmap(beatmap_id="2405223")
data_formatter = await Osu.osubeatmap(
beatmap_id="2405223",
        formatter=True,  # Keep in mind: this only makes the output a nicely formatted (pretty-printed) JSON string
)
print(data["beatmapset"]["artist"])
print(data_formatter)
asyncio.run(beatmap())
async def profile():
data = await Osu.osuprofile(username="Stawa")
data_formatter = await Osu.osuprofile(
username="Stawa",
        formatter=True,  # Keep in mind: this only makes the output a nicely formatted (pretty-printed) JSON string
)
print(data["country"]["name"])
print(data_formatter)
asyncio.run(profile())
async def osu_object():
example = await OsuObject.osuprofile(username="Stawa")
print(example.avatar_url)
print(example.discord)
asyncio.run(osu_object())
|
import discord
from discord import Option, OptionChoice
from discord.ext import commands
from discord.ext.commands import BucketType, cooldown
import asyncio
import random
from Utilities import Checks, PlayerObject, Vars
from Utilities.ConfirmationMenu import OneButtonView
from Utilities.AyeshaBot import Ayesha
class Casino(commands.Cog):
"""Casino text"""
def __init__(self, bot : Ayesha):
self.bot = bot
# EVENTS
@commands.Cog.listener()
async def on_ready(self):
print("Casino is ready.")
# COMMANDS
@commands.slash_command()
@commands.check(Checks.is_player)
async def coinflip(self, ctx : discord.ApplicationContext,
call : Option(str,
description="Bet on heads or tails",
choices=[OptionChoice("Heads"), OptionChoice("Tails")],
default="Heads"
),
wager : Option(int,
description="The amount of gold you are betting (up to 25k)",
min_value=1,
max_value=25000,
default=1000)):
"""Wager some money on a coinflip for the classic 50/50 gamble."""
async with self.bot.db.acquire() as conn:
player = await PlayerObject.get_player_by_id(conn, ctx.author.id)
if player.gold < wager:
raise Checks.NotEnoughGold(wager, player.gold)
msg = f"The coin landed on **{call}**!"
if random.choice(["Heads", "Tails"]) == call:
await player.give_gold(conn, wager)
await ctx.respond(f"{msg} You made `{wager}` gold.")
else:
await player.give_gold(conn, -wager)
call = "Tails" if call == "Heads" else "Heads"
await ctx.respond(
f"{msg} You lost your `{wager}` gold wager.")
@commands.slash_command()
@commands.check(Checks.is_player)
async def roulette(self, ctx : discord.ApplicationContext,
bet_type : Option(str,
description="The odds you want to play at",
required=True,
choices=[
OptionChoice(
"Straight-up: Bet on rolling a single number",
"Straight-up"),
OptionChoice(
("Snake: Bet on rolling 1, 5, 9, 12, 14, 16, 19, 23, "
"27, 30, 32, or 34"),
"Snake"),
OptionChoice("Even: Bet on rolling an even number", "Even"),
OptionChoice("Odd: Bet on rolling an odd number", "Odd")]),
bet_number : Option(int,
description=(
"If you bet straight-up, call the number you think the "
"ball will land on"),
default = 7,
min_value=1,
max_value=36),
wager : Option(int,
description="The amount of gold you are betting (up to 50k)",
min_value=1,
max_value=50000,
default=1000)):
"""Play a game of Roulette"""
async with self.bot.db.acquire() as conn:
player = await PlayerObject.get_player_by_id(conn, ctx.author.id)
if player.gold < wager:
raise Checks.NotEnoughGold(wager, player.gold)
# Perform the roulette
SNAKE = [1, 5, 9, 12, 14, 16, 19, 23, 27, 30, 32, 34]
landing = random.randint(0, 36)
msg = f"The ball landed on **{landing}**!"
if landing == 0:
await player.give_gold(conn, -wager//2)
return await ctx.respond(f"{msg} You regained half your wager.")
# Determine outcome based on bet type
# Payouts taken from https://www.onlinegambling.com/casino/roulette/bets-payouts/
if bet_type == "Straight-up" and bet_number == landing:
await player.give_gold(conn, wager*35) # 35:1 stakes
await ctx.respond(f"{msg} You made `{wager*35}` gold!")
elif bet_type == "Snake" and landing in SNAKE:
await player.give_gold(conn, wager*2)
await ctx.respond(f"{msg} You made `{wager*2}` gold!")
elif bet_type == "Even" and landing % 2 == 0:
await player.give_gold(conn, wager)
await ctx.respond(f"{msg} You made `{wager}` gold!")
elif bet_type == "Odd" and landing % 2 == 1:
await player.give_gold(conn, wager)
await ctx.respond(f"{msg} You made `{wager}` gold!")
else:
await player.give_gold(conn, -wager)
await ctx.respond(f"{msg} You lost your bet.")
@commands.slash_command()
@cooldown(1, 10, BucketType.user)
@commands.check(Checks.is_player)
async def craps(self, ctx : discord.ApplicationContext,
wager : Option(int,
description="The amount of gold you are betting (up to 100k)",
min_value=1,
max_value=100000,
default=1000)):
"""Play a game of craps/seven-elevens on Pass Line rules."""
async with self.bot.db.acquire() as conn:
player = await PlayerObject.get_player_by_id(conn, ctx.author.id)
if player.gold < wager:
raise Checks.NotEnoughGold(wager, player.gold)
# Game is printed as an embed
display = discord.Embed(
title=f"Craps: {ctx.author.display_name}", color=Vars.ABLUE)
display.set_footer(text=ctx.author, icon_url=ctx.author.avatar.url)
display.add_field(
name=f"Press Shoot! to roll the die!",
value=(f"Numbers to win: 7, 11\nNumbers to lose: 2, 3, 12"))
# Create a button that will determine dice rolls
view = OneButtonView("Shoot!", ctx.author, True, 15)
interaction = await ctx.respond(embed=display, view=view)
turn_counter = 1
goal_number = 0
die1, die2 = 0, 0
victory, loss = False, False
# Game loops until over
while not victory and not loss:
await view.wait()
if view.value:
die1, die2 = random.randint(1, 6), random.randint(1, 6)
total = die1 + die2
if turn_counter == 1:
if total in (7, 11): # win
victory = True
elif total in (2, 3, 12): # lose
loss = True
else: # goal_number becomes the number to repeat
goal_number = total
else:
if total == goal_number: # win
victory = True
elif total == 7: # lose
loss = True
else: # Player didn't respond
await player.give_gold(conn, -wager)
msg = (
f"You left the game and forfeit your `{wager}` "
f"gold wager.")
await interaction.edit_original_message(view=None)
return await interaction.followup.send(msg) # end game here
# Edit the message to reflect gameplay
display.set_field_at(0,
name=f"Press Shoot! to roll the die!",
value=(
f"You rolled a **{die1}** and a **{die2}**.\n\n"
f"Your Roll: **{total}**\n"
f"Number to Win: **{goal_number}**\n"
f"Number to Lose: **7**"))
# View needs to be reloaded to be interactive again
view = OneButtonView("Shoot!", ctx.author, True, 15)
await interaction.edit_original_message(
embed=display, view=view)
turn_counter += 1
await interaction.edit_original_message(view=None)
if victory:
await player.give_gold(conn, wager)
await interaction.followup.send(
f"You won and received `{wager}` gold!")
else: # then loss
await player.give_gold(conn, -wager)
await interaction.followup.send((
f"You rolled a **{total}** and lost the game and your "
f"`{wager}` gold wager!"))
@commands.slash_command()
@cooldown(1, 10, BucketType.user)
@commands.check(Checks.is_player)
async def race(self, ctx : discord.ApplicationContext,
bet : Option(str,
description="The animal you are betting on winning the race",
required=True,
choices=[
OptionChoice(c) for c in
["Duck", "Swan", "Dog", "Horse", "Turtle", "Rabbit"]]),
wager : Option(int,
description="The amount of gold you are betting (up to 100k)",
default=1000,
min_value=1,
max_value=100000)):
"""Bet on an animal race!"""
names = ["Duck", "Swan", "Dog", "Horse", "Turtle", "Rabbit"]
emojis = ["🦆", "🦢", "🐕", "🐎", "🐢", "🐇"]
racers = {
n : {
"emoji" : e,
"string" : "||" + "."*19 + e,
"score" : 0,
"player" : True if n == bet else False
} for n, e in zip(names, emojis)}
async with self.bot.db.acquire() as conn: # Remember to reconnect later
player = await PlayerObject.get_player_by_id(conn, ctx.author.id)
if player.gold < wager:
raise Checks.NotEnoughGold(wager, player.gold)
# Race is printed on an embed
output = "```" + "\n".join([racers[r]['string'] for r in racers]) +\
"```"
display = discord.Embed(
title="And the race is on!", color=Vars.ABLUE, description=output)
interaction = await ctx.respond(embed=display)
# Game Loop - extraordinarily bad code even for my standards geez
while max([racers[r]['score'] for r in racers]) <= 100:
bet_str = ""
for racer in racers:
# Change their progress and accompanying data
advance = random.randint(3, 14)
racers[racer]['score'] += advance
dots = racers[racer]['score'] // 5
if dots >= 20: # They pass the finish line
racers[racer]['string'] = "|" + racers[racer]['emoji'] +\
"."*20
else:
racers[racer]['string'] = "||" + "."*(19-dots) +\
racers[racer]['emoji'] + "."*(dots)
# If racer is the bet, come up with some string
if racer == bet:
e = racers[racer]['emoji']
if advance < 7:
bet_str = random.choice([
f"{e} falters!",
f"{e} has been outgunned!",
f"{e} trips!",
f"{e} was the sussy-impostor all along!",
f"{e} eats a fly and momentarily loses focus!"])
elif advance < 11:
bet_str = random.choice([
f"{e} keeps pace.",
f"{e} settles into mediocrity.",
f"{e} is showing signs of exhaustion.",
f"{e} passes a checkpoint!"])
else:
bet_str = random.choice([
f"{e} gains a quick burst of speed!",
f"{e} is one with nature.",
f"{e} is showing them who's boss!",
f"{e} leaps over an obstacle."])
output = bet_str +"\n```" +\
"\n".join([racers[r]['string'] for r in racers]) + "```"
display = discord.Embed(
title="And the race is on!", color=Vars.ABLUE,
description=output)
await interaction.edit_original_message(embed=display)
await asyncio.sleep(3)
# Find the winner
winners = [r for r in racers if racers[r]['score'] >= 100]
win_str = ", ".join([racers[r]["emoji"] for r in winners])
message = f"The winner(s) are {win_str}"
async with self.bot.db.acquire() as conn:
if bet in winners: # Then player's choice won
payout = int(wager * 6.25 / len(winners)) # // gave float anyway
await player.give_gold(conn, payout)
message += f"\n You received a payout of `{payout}` gold!"
else:
await player.give_gold(conn, -wager)
message +=f"\n You lost your bet and your `{wager}` gold wager!"
await interaction.followup.send(message)
def setup(bot):
bot.add_cog(Casino(bot))
|
"""
Module: 'random' on esp32_LoBo 3.2.24
"""
# MCU: (sysname='esp32_LoBo', nodename='esp32_LoBo', release='3.2.24', version='ESP32_LoBo_v3.2.24 on 2018-09-06', machine='ESP32 board with ESP32')
# Stubber: 1.2.0
def choice():
pass
def getrandbits():
pass
def randint():
pass
def random():
pass
def randrange():
pass
def seed():
pass
def uniform():
pass
|
from django.urls import path
from .views import (
ProfileRetrieveUpdateAPIView,
ProfileListAPIView
)
urlpatterns = [
path('api/profiles/<username>',
ProfileRetrieveUpdateAPIView.as_view(), name='profile'),
path('api/profiles', ProfileListAPIView.as_view(), name='profile_list'),
]
|
# Generated by Django 2.0.2 on 2019-03-10 09:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('delivery', '0005_delivery_enterprice'),
]
operations = [
migrations.RemoveField(
model_name='delivery',
name='interview',
),
]
|
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import os
import stat
import sys
import time
from typing import Dict # noqa
from typing import List # noqa
import mock
import pytest
from pytest import raises
from paasta_tools import utils
def test_get_git_url_provided_by_serviceyaml():
service = 'giiiiiiiiiiit'
expected = 'git@some_random_host:foobar'
with (
mock.patch('service_configuration_lib.read_service_configuration', autospec=True)
) as mock_read_service_configuration:
mock_read_service_configuration.return_value = {'git_url': expected}
assert utils.get_git_url(service) == expected
mock_read_service_configuration.assert_called_once_with(service, soa_dir=utils.DEFAULT_SOA_DIR)
def test_get_git_url_default():
service = 'giiiiiiiiiiit'
expected = 'git@git.yelpcorp.com:services/%s' % service
with (
mock.patch('service_configuration_lib.read_service_configuration', autospec=True)
) as mock_read_service_configuration:
mock_read_service_configuration.return_value = {}
assert utils.get_git_url(service) == expected
mock_read_service_configuration.assert_called_once_with(service, soa_dir=utils.DEFAULT_SOA_DIR)
def test_format_log_line():
input_line = 'foo'
fake_cluster = 'fake_cluster'
fake_service = 'fake_service'
fake_instance = 'fake_instance'
fake_component = 'build'
fake_level = 'debug'
fake_now = 'fake_now'
expected = json.dumps(
{
'timestamp': fake_now,
'level': fake_level,
'cluster': fake_cluster,
'service': fake_service,
'instance': fake_instance,
'component': fake_component,
'message': input_line,
}, sort_keys=True,
)
with mock.patch('paasta_tools.utils._now', autospec=True) as mock_now:
mock_now.return_value = fake_now
actual = utils.format_log_line(
level=fake_level,
cluster=fake_cluster,
service=fake_service,
instance=fake_instance,
component=fake_component,
line=input_line,
)
assert actual == expected
def test_deploy_whitelist_to_constraints():
fake_whitelist = ('fake_location_type', ['fake_location', 'anotherfake_location'],)
expected_constraints = [['fake_location_type', 'LIKE', 'fake_location|anotherfake_location']]
constraints = utils.deploy_whitelist_to_constraints(fake_whitelist)
assert constraints == expected_constraints
def test_format_log_line_with_timestamp():
input_line = 'foo'
fake_cluster = 'fake_cluster'
fake_service = 'fake_service'
fake_instance = 'fake_instance'
fake_component = 'build'
fake_level = 'debug'
fake_timestamp = 'fake_timestamp'
expected = json.dumps(
{
'timestamp': fake_timestamp,
'level': fake_level,
'cluster': fake_cluster,
'service': fake_service,
'instance': fake_instance,
'component': fake_component,
'message': input_line,
}, sort_keys=True,
)
actual = utils.format_log_line(
fake_level,
fake_cluster,
fake_service,
fake_instance,
fake_component,
input_line,
timestamp=fake_timestamp,
)
assert actual == expected
def test_format_log_line_rejects_invalid_components():
with raises(utils.NoSuchLogComponent):
utils.format_log_line(
level='debug',
cluster='fake_cluster',
service='fake_service',
instance='fake_instance',
line='fake_line',
component='BOGUS_COMPONENT',
)
def test_ScribeLogWriter_log_raise_on_unknown_level():
with raises(utils.NoSuchLogLevel):
utils.ScribeLogWriter().log('fake_service', 'fake_line', 'build', 'BOGUS_LEVEL')
def test_get_log_name_for_service():
service = 'foo'
expected = 'stream_paasta_%s' % service
assert utils.get_log_name_for_service(service) == expected
def test_get_readable_files_in_glob_ignores_unreadable(tmpdir):
tmpdir.join('readable.json').ensure().chmod(0o644)
tmpdir.join('unreadable.json').ensure().chmod(0o000)
ret = utils.get_readable_files_in_glob('*.json', tmpdir.strpath)
assert ret == [tmpdir.join('readable.json').strpath]
def test_get_readable_files_in_glob_is_recursive(tmpdir):
a = tmpdir.join('a.json').ensure()
b = tmpdir.join('b.json').ensure()
c = tmpdir.join('subdir').ensure_dir().join('c.json').ensure()
ret = utils.get_readable_files_in_glob('*.json', tmpdir.strpath)
assert set(ret) == {a.strpath, b.strpath, c.strpath}
def test_load_system_paasta_config():
json_load_return_value: utils.SystemPaastaConfigDict = {'cluster': 'bar'}
expected = utils.SystemPaastaConfig(json_load_return_value, '/some/fake/dir')
file_mock = mock.mock_open()
with mock.patch(
'os.path.isdir', return_value=True, autospec=True,
), mock.patch(
'os.access', return_value=True, autospec=True,
), mock.patch(
'builtins.open', file_mock, autospec=None,
) as open_file_patch, mock.patch(
'paasta_tools.utils.get_readable_files_in_glob', autospec=True,
return_value=['/some/fake/dir/some_file.json'],
), mock.patch(
'paasta_tools.utils.json.load', autospec=True, return_value=json_load_return_value,
) as json_patch, mock.patch(
'paasta_tools.utils.os.stat', autospec=True,
), mock.patch(
'paasta_tools.utils.deep_merge_dictionaries', autospec=True, return_value=json_load_return_value,
) as mock_deep_merge:
actual = utils.load_system_paasta_config(path='/some/fake/dir')
assert actual == expected
# Kinda weird but without this load_system_paasta_config() can (and
# did! during development) return a plain dict without the test
# complaining.
assert actual.__class__ == expected.__class__
open_file_patch.assert_any_call('/some/fake/dir/some_file.json')
json_patch.assert_any_call(file_mock.return_value.__enter__.return_value)
assert json_patch.call_count == 1
mock_deep_merge.assert_called_with(json_load_return_value, {}, allow_duplicate_keys=False)
def test_load_system_paasta_config_file_non_existent_dir():
fake_path = '/var/dir_of_fake'
with mock.patch('os.path.isdir', return_value=False, autospec=True):
with raises(utils.PaastaNotConfiguredError) as excinfo:
utils.load_system_paasta_config(fake_path)
expected = "Could not find system paasta configuration directory: %s" % fake_path
assert str(excinfo.value) == expected
def test_load_system_paasta_config_file_non_readable_dir():
fake_path = '/var/dir_of_fake'
with mock.patch(
'os.path.isdir', return_value=True, autospec=True,
), mock.patch(
'os.access', return_value=False, autospec=True,
):
with raises(utils.PaastaNotConfiguredError) as excinfo:
utils.load_system_paasta_config(fake_path)
expected = "Could not read from system paasta configuration directory: %s" % fake_path
assert str(excinfo.value) == expected
def test_load_system_paasta_config_file_dne():
fake_path = '/var/dir_of_fake'
with mock.patch(
'os.path.isdir', return_value=True, autospec=True,
), mock.patch(
'os.access', return_value=True, autospec=True,
), mock.patch(
'builtins.open', side_effect=IOError(2, 'a', 'b'), autospec=None,
), mock.patch(
'paasta_tools.utils.os.stat', autospec=True,
), mock.patch(
'paasta_tools.utils.get_readable_files_in_glob', autospec=True, return_value=[fake_path],
):
with raises(utils.PaastaNotConfiguredError) as excinfo:
utils.load_system_paasta_config(fake_path)
assert str(excinfo.value) == "Could not load system paasta config file b: a"
def test_load_system_paasta_config_duplicate_keys_errors():
fake_file_a = {'cluster': 'this value will be overriden', 'sensu_host': 'fake_data'}
fake_file_b = {'cluster': 'overriding value'}
file_mock = mock.mock_open()
with mock.patch(
'os.path.isdir', return_value=True, autospec=True,
), mock.patch(
'os.access', return_value=True, autospec=True,
), mock.patch(
'builtins.open', file_mock, autospec=None,
), mock.patch(
'paasta_tools.utils.os.stat', autospec=True,
), mock.patch(
'paasta_tools.utils.get_readable_files_in_glob', autospec=True,
return_value=['a', 'b'],
), mock.patch(
'paasta_tools.utils.json.load', autospec=True, side_effect=[fake_file_a, fake_file_b],
):
with raises(utils.DuplicateKeyError):
utils.load_system_paasta_config(path='/some/fake/dir')
def test_SystemPaastaConfig_get_cluster():
fake_config = utils.SystemPaastaConfig(
{
'cluster': 'peanut',
}, '/some/fake/dir',
)
expected = 'peanut'
actual = fake_config.get_cluster()
assert actual == expected
def test_SystemPaastaConfig_get_cluster_dne():
fake_config = utils.SystemPaastaConfig({}, '/some/fake/dir')
with raises(utils.PaastaNotConfiguredError):
fake_config.get_cluster()
def test_SystemPaastaConfig_get_volumes():
fake_config = utils.SystemPaastaConfig(
{
'volumes': [{'hostPath': "fake_other_path", 'containerPath': '/blurp', 'mode': 'ro'}],
}, '/some/fake/dir',
)
expected = [{'hostPath': "fake_other_path", 'containerPath': '/blurp', 'mode': 'ro'}]
actual = fake_config.get_volumes()
assert actual == expected
def test_SystemPaastaConfig_get_volumes_dne():
fake_config = utils.SystemPaastaConfig({}, '/some/fake/dir')
with raises(utils.PaastaNotConfiguredError):
fake_config.get_volumes()
def test_SystemPaastaConfig_get_zk():
fake_config = utils.SystemPaastaConfig(
{
'zookeeper': 'zk://fake_zookeeper_host',
}, '/some/fake/dir',
)
expected = 'fake_zookeeper_host'
actual = fake_config.get_zk_hosts()
assert actual == expected
def test_SystemPaastaConfig_get_zk_dne():
fake_config = utils.SystemPaastaConfig({}, '/some/fake/dir')
with raises(utils.PaastaNotConfiguredError):
fake_config.get_zk_hosts()
def test_get_service_registry():
fake_registry = 'fake_registry'
fake_service_config = {
"description": "This service is fake",
"external_link": "www.yelp.com",
"git_url": "git@mercurial-scm.org:fake-service",
"docker_registry": fake_registry,
}
with mock.patch(
'service_configuration_lib.read_service_configuration',
return_value=fake_service_config, autospec=True,
):
actual = utils.get_service_docker_registry('fake_service', 'fake_soa_dir')
assert actual == fake_registry
def test_get_service_registry_dne():
fake_registry = 'fake_registry'
fake_service_config = {
"description": "This service is fake",
"external_link": "www.yelp.com",
"git_url": "git@mercurial-scm.org:fake-service",
# no docker_registry configured for this service
}
fake_system_config = utils.SystemPaastaConfig(
{
"docker_registry": fake_registry,
}, '/some/fake/dir',
)
with mock.patch(
'service_configuration_lib.read_service_configuration',
return_value=fake_service_config, autospec=True,
):
with mock.patch(
'paasta_tools.utils.load_system_paasta_config',
return_value=fake_system_config, autospec=True,
):
actual = utils.get_service_docker_registry('fake_service', 'fake_soa_dir')
assert actual == fake_registry
def test_SystemPaastaConfig_get_sensu_host_default():
fake_config = utils.SystemPaastaConfig({}, '/some/fake/dir')
actual = fake_config.get_sensu_host()
expected = 'localhost'
assert actual == expected
def test_SystemPaastaConfig_get_sensu_host():
fake_config = utils.SystemPaastaConfig({"sensu_host": "blurp"}, '/some/fake/dir')
actual = fake_config.get_sensu_host()
expected = 'blurp'
assert actual == expected
def test_SystemPaastaConfig_get_sensu_host_None():
fake_config = utils.SystemPaastaConfig({"sensu_host": None}, '/some/fake/dir')
actual = fake_config.get_sensu_host()
expected = None
assert actual == expected
def test_SystemPaastaConfig_get_sensu_port_default():
fake_config = utils.SystemPaastaConfig({}, '/some/fake/dir')
actual = fake_config.get_sensu_port()
expected = 3030
assert actual == expected
def test_SystemPaastaConfig_get_sensu_port():
fake_config = utils.SystemPaastaConfig({"sensu_port": 4040}, '/some/fake/dir')
actual = fake_config.get_sensu_port()
expected = 4040
assert actual == expected
def test_SystemPaastaConfig_get_metrics_provider():
fake_config = utils.SystemPaastaConfig({"deployd_metrics_provider": 'bar'}, '/some/fake/dir')
actual = fake_config.get_metrics_provider()
expected = 'bar'
assert actual == expected
def test_SystemPaastaConfig_get_cluster_fqdn_format_default():
fake_config = utils.SystemPaastaConfig({}, '/some/fake/dir')
actual = fake_config.get_cluster_fqdn_format()
expected = 'paasta-{cluster:s}.yelp'
assert actual == expected
def test_SystemPaastaConfig_get_cluster_fqdn_format():
fake_config = utils.SystemPaastaConfig({"cluster_fqdn_format": "paasta-{cluster:s}.something"}, '/some/fake/dir')
actual = fake_config.get_cluster_fqdn_format()
expected = 'paasta-{cluster:s}.something'
assert actual == expected
def test_SystemPaastaConfig_get_deployd_number_workers():
fake_config = utils.SystemPaastaConfig({"deployd_number_workers": 3}, '/some/fake/dir')
actual = fake_config.get_deployd_number_workers()
expected = 3
assert actual == expected
def test_SystemPaastaConfig_get_deployd_big_bounce_rate():
fake_config = utils.SystemPaastaConfig({"deployd_big_bounce_rate": 3}, '/some/fake/dir')
actual = fake_config.get_deployd_big_bounce_rate()
expected = 3
assert actual == expected
def test_SystemPaastaConfig_get_deployd_log_level():
fake_config = utils.SystemPaastaConfig({"deployd_log_level": 'DEBUG'}, '/some/fake/dir')
actual = fake_config.get_deployd_log_level()
expected = 'DEBUG'
assert actual == expected
@pytest.yield_fixture
def umask_022():
old_umask = os.umask(0o022)
yield
os.umask(old_umask)
def test_atomic_file_write_itest(umask_022, tmpdir):
target_file_name = tmpdir.join('test_atomic_file_write_itest.txt').strpath
with open(target_file_name, 'w') as f_before:
f_before.write('old content')
with utils.atomic_file_write(target_file_name) as f_new:
f_new.write('new content')
with open(target_file_name) as f_existing:
# While in the middle of an atomic_file_write, the existing
# file should still contain the old content, and should not
# be truncated, etc.
assert f_existing.read() == 'old content'
with open(target_file_name) as f_done:
# once we're done, the content should be in place.
assert f_done.read() == 'new content'
file_stat = os.stat(target_file_name)
assert stat.S_ISREG(file_stat.st_mode)
assert stat.S_IMODE(file_stat.st_mode) == 0o0644
def test_configure_log():
fake_log_writer_config = {'driver': 'fake', 'options': {'fake_arg': 'something'}}
with mock.patch('paasta_tools.utils.load_system_paasta_config', autospec=True) as mock_load_system_paasta_config:
mock_load_system_paasta_config().get_log_writer.return_value = fake_log_writer_config
with mock.patch('paasta_tools.utils.get_log_writer_class', autospec=True) as mock_get_log_writer_class:
utils.configure_log()
mock_get_log_writer_class.assert_called_once_with('fake')
mock_get_log_writer_class('fake').assert_called_once_with(fake_arg='something')
def test_compose_job_id_without_hashes():
fake_service = "my_cool_service"
fake_instance = "main"
expected = "my_cool_service.main"
actual = utils.compose_job_id(fake_service, fake_instance)
assert actual == expected
def test_compose_job_id_with_git_hash():
fake_service = "my_cool_service"
fake_instance = "main"
fake_git_hash = "git123abc"
with raises(utils.InvalidJobNameError):
utils.compose_job_id(fake_service, fake_instance, git_hash=fake_git_hash)
def test_compose_job_id_with_config_hash():
fake_service = "my_cool_service"
fake_instance = "main"
fake_config_hash = "config456def"
with raises(utils.InvalidJobNameError):
utils.compose_job_id(fake_service, fake_instance, config_hash=fake_config_hash)
def test_compose_job_id_with_hashes():
fake_service = "my_cool_service"
fake_instance = "main"
fake_git_hash = "git123abc"
fake_config_hash = "config456def"
expected = "my_cool_service.main.git123abc.config456def"
actual = utils.compose_job_id(fake_service, fake_instance, fake_git_hash, fake_config_hash)
assert actual == expected
def test_decompose_job_id_too_short():
with raises(utils.InvalidJobNameError):
utils.decompose_job_id('foo')
def test_decompose_job_id_without_hashes():
fake_job_id = "my_cool_service.main"
expected = ("my_cool_service", "main", None, None)
actual = utils.decompose_job_id(fake_job_id)
assert actual == expected
def test_decompose_job_id_with_hashes():
fake_job_id = "my_cool_service.main.git123abc.config456def"
expected = ("my_cool_service", "main", "git123abc", "config456def")
actual = utils.decompose_job_id(fake_job_id)
assert actual == expected
def test_build_docker_image_name():
registry_url = "fake_registry"
upstream_job_name = "a_really_neat_service"
expected = f"{registry_url}/services-{upstream_job_name}"
with mock.patch(
'paasta_tools.utils.get_service_docker_registry', autospec=True,
return_value=registry_url,
):
actual = utils.build_docker_image_name(upstream_job_name)
assert actual == expected
@mock.patch('paasta_tools.utils.build_docker_image_name', autospec=True)
def test_build_docker_tag(mock_build_docker_image_name):
upstream_job_name = 'foo'
upstream_git_commit = 'bar'
mock_build_docker_image_name.return_value = 'fake-registry/services-foo'
expected = 'fake-registry/services-foo:paasta-{}'.format(
upstream_git_commit,
)
actual = utils.build_docker_tag(upstream_job_name, upstream_git_commit)
assert actual == expected
@mock.patch('paasta_tools.utils.build_docker_image_name', autospec=True)
def test_check_docker_image_false(mock_build_docker_image_name):
mock_build_docker_image_name.return_value = 'fake-registry/services-foo'
fake_app = 'fake_app'
fake_commit = 'fake_commit'
docker_tag = utils.build_docker_tag(fake_app, fake_commit)
with mock.patch('paasta_tools.utils.get_docker_client', autospec=True) as mock_docker:
docker_client = mock_docker.return_value
docker_client.images.return_value = [
{
'Created': 1425430339,
'VirtualSize': 250344331,
'ParentId': '1111',
'RepoTags': [docker_tag],
'Id': 'ef978820f195dede62e206bbd41568463ab2b79260bc63835a72154fe7e196a2',
'Size': 0,
},
]
assert utils.check_docker_image('test_service', 'tag2') is False
@mock.patch('paasta_tools.utils.build_docker_image_name', autospec=True)
def test_check_docker_image_true(mock_build_docker_image_name):
fake_app = 'fake_app'
fake_commit = 'fake_commit'
mock_build_docker_image_name.return_value = 'fake-registry/services-foo'
docker_tag = utils.build_docker_tag(fake_app, fake_commit)
with mock.patch('paasta_tools.utils.get_docker_client', autospec=True) as mock_docker:
docker_client = mock_docker.return_value
docker_client.images.return_value = [
{
'Created': 1425430339,
'VirtualSize': 250344331,
'ParentId': '1111',
'RepoTags': [docker_tag],
'Id': 'ef978820f195dede62e206bbd41568463ab2b79260bc63835a72154fe7e196a2',
'Size': 0,
},
]
assert utils.check_docker_image(fake_app, fake_commit) is True
def test_remove_ansi_escape_sequences():
plain_string = 'blackandwhite'
colored_string = '\033[34m' + plain_string + '\033[0m'
assert utils.remove_ansi_escape_sequences(colored_string) == plain_string
def test_missing_cluster_configs_are_ignored():
fake_soa_dir = '/nail/etc/services'
fake_cluster_configs = [
'/nail/etc/services/service1/marathon-cluster1.yaml',
'/nail/etc/services/service2/chronos-cluster2.yaml',
]
expected = []
with mock.patch(
'os.path.join', autospec=True, return_value='%s/*' % fake_soa_dir,
) as mock_join_path, mock.patch(
'glob.glob', autospec=True, return_value=fake_cluster_configs,
) as mock_glob:
actual = utils.list_clusters(soa_dir=fake_soa_dir)
assert actual == expected
mock_join_path.assert_called_once_with(fake_soa_dir, '*')
mock_glob.assert_called_once_with('%s/*/*.yaml' % fake_soa_dir)
def test_list_clusters_no_service_given_lists_all_of_them():
fake_soa_dir = '/nail/etc/services'
fake_soa_cluster_configs = [
['cluster1', '/nail/etc/services/service1/marathon-cluster1.yaml'],
['cluster2', '/nail/etc/services/service1/chronos-cluster2.yaml'],
]
expected = ['cluster1', 'cluster2']
with mock.patch(
'paasta_tools.utils.get_soa_cluster_deploy_files', autospec=True, return_value=fake_soa_cluster_configs,
):
actual = utils.list_clusters(soa_dir=fake_soa_dir)
assert actual == expected
def test_list_clusters_with_service():
fake_soa_dir = '/nail/etc/services'
fake_service = 'fake_service'
fake_soa_cluster_configs = [
['cluster1', '/nail/etc/services/service1/marathon-cluster1.yaml'],
['cluster2', '/nail/etc/services/service1/chronos-cluster2.yaml'],
]
expected = ['cluster1', 'cluster2']
with mock.patch(
'paasta_tools.utils.get_soa_cluster_deploy_files', autospec=True, return_value=fake_soa_cluster_configs,
):
actual = utils.list_clusters(fake_service, fake_soa_dir)
assert actual == expected
def test_list_clusters_ignores_bogus_clusters():
fake_soa_dir = '/nail/etc/services'
fake_service = 'fake_service'
fake_cluster_configs = [
'/nail/etc/services/service1/marathon-cluster1.yaml',
'/nail/etc/services/service1/marathon-PROD.yaml',
'/nail/etc/services/service1/chronos-cluster2.yaml',
'/nail/etc/services/service1/chronos-SHARED.yaml',
]
expected = ['cluster1', 'cluster2']
with mock.patch(
'os.path.join', autospec=True, return_value=f'{fake_soa_dir}/{fake_service}',
), mock.patch(
'glob.glob', autospec=True, return_value=fake_cluster_configs,
), mock.patch(
'builtins.open', autospec=None, path=mock.mock_open(read_data="fakedata"),
):
actual = utils.list_clusters(service=fake_service)
assert actual == expected
def test_list_all_instances_for_service():
service = 'fake_service'
clusters = ['fake_cluster']
mock_instances = [(service, 'instance1'), (service, 'instance2')]
expected = {'instance1', 'instance2'}
with mock.patch(
'paasta_tools.utils.list_clusters', autospec=True,
) as mock_list_clusters, mock.patch(
'paasta_tools.utils.get_service_instance_list', autospec=True,
) as mock_service_instance_list:
mock_list_clusters.return_value = clusters
mock_service_instance_list.return_value = mock_instances
actual = utils.list_all_instances_for_service(service)
assert actual == expected
mock_list_clusters.assert_called_once_with(service, soa_dir=mock.ANY)
mock_service_instance_list.assert_called_once_with(service, clusters[0], None, soa_dir=mock.ANY)
def test_get_service_instance_list():
fake_name = 'hint'
fake_instance_1 = 'unsweet'
fake_instance_2 = 'water'
fake_cluster = '16floz'
fake_dir = '/nail/home/hipster'
fake_job_config: Dict[str, Dict] = {
fake_instance_1: {},
fake_instance_2: {},
}
expected = [
(fake_name, fake_instance_1),
(fake_name, fake_instance_1),
(fake_name, fake_instance_1),
(fake_name, fake_instance_1),
(fake_name, fake_instance_2),
(fake_name, fake_instance_2),
(fake_name, fake_instance_2),
(fake_name, fake_instance_2),
]
with mock.patch(
'paasta_tools.utils.service_configuration_lib.read_extra_service_information', autospec=True,
return_value=fake_job_config,
) as read_extra_info_patch:
actual = utils.get_service_instance_list(fake_name, fake_cluster, soa_dir=fake_dir)
read_extra_info_patch.assert_any_call(fake_name, 'marathon-16floz', soa_dir=fake_dir)
read_extra_info_patch.assert_any_call(fake_name, 'chronos-16floz', soa_dir=fake_dir)
read_extra_info_patch.assert_any_call(fake_name, 'paasta_native-16floz', soa_dir=fake_dir)
assert read_extra_info_patch.call_count == 4
assert sorted(expected) == sorted(actual)
def test_get_service_instance_list_ignores_underscore():
fake_name = 'hint'
fake_instance_1 = 'unsweet'
fake_instance_2 = '_ignore_me'
fake_cluster = '16floz'
fake_dir = '/nail/home/hipster'
fake_job_config: Dict[str, Dict] = {
fake_instance_1: {},
fake_instance_2: {},
}
expected = [
(fake_name, fake_instance_1),
(fake_name, fake_instance_1),
(fake_name, fake_instance_1),
(fake_name, fake_instance_1),
]
with mock.patch(
'paasta_tools.utils.service_configuration_lib.read_extra_service_information', autospec=True,
return_value=fake_job_config,
):
actual = utils.get_service_instance_list(service=fake_name, cluster=fake_cluster, soa_dir=fake_dir)
assert sorted(expected) == sorted(actual)
def test_get_services_for_cluster():
cluster = 'honey_bunches_of_oats'
soa_dir = 'completely_wholesome'
instances = [
[
('fake_service1', 'this_is_testing'),
('fake_service1', 'all_the_things'),
],
[
('fake_service2', 'my_nerf_broke'),
],
]
expected = [
('fake_service2', 'my_nerf_broke'),
('fake_service1', 'this_is_testing'),
('fake_service1', 'all_the_things'),
]
with mock.patch(
'os.path.abspath', autospec=True, return_value='chex_mix',
) as abspath_patch, mock.patch(
'os.listdir', autospec=True, return_value=['dir1', 'dir2'],
) as listdir_patch, mock.patch(
'paasta_tools.utils.get_service_instance_list',
side_effect=lambda a, b, c, d: instances.pop(), autospec=True,
) as get_instances_patch:
actual = utils.get_services_for_cluster(cluster, soa_dir=soa_dir)
assert expected == actual
abspath_patch.assert_called_once_with(soa_dir)
listdir_patch.assert_called_once_with('chex_mix')
get_instances_patch.assert_any_call('dir1', cluster, None, soa_dir)
get_instances_patch.assert_any_call('dir2', cluster, None, soa_dir)
assert get_instances_patch.call_count == 2
def test_get_services_for_cluster_ignores_underscore():
cluster = 'honey_bunches_of_oats'
soa_dir = 'completely_wholesome'
instances = [
[
('fake_service1', 'this_is_testing'),
('fake_service1', 'all_the_things'),
('fake_service1', '_ignore_me'),
],
[
('fake_service2', 'my_nerf_broke'),
],
]
expected = [
('fake_service2', 'my_nerf_broke'),
('fake_service1', 'this_is_testing'),
('fake_service1', 'all_the_things'),
]
with mock.patch(
'os.path.abspath', autospec=True, return_value='chex_mix',
), mock.patch(
'os.listdir', autospec=True, return_value=['dir1', 'dir2'],
), mock.patch(
'paasta_tools.utils.get_service_instance_list',
side_effect=lambda a, b, c, d: instances.pop(), autospec=True,
):
actual = utils.get_services_for_cluster(cluster, soa_dir=soa_dir)
assert expected == actual
def test_color_text():
expected = f"{utils.PaastaColors.RED}hi{utils.PaastaColors.DEFAULT}"
actual = utils.PaastaColors.color_text(utils.PaastaColors.RED, "hi")
assert actual == expected
def test_color_text_nested():
expected = "{}red{}blue{}red{}".format(
utils.PaastaColors.RED,
utils.PaastaColors.BLUE,
utils.PaastaColors.DEFAULT + utils.PaastaColors.RED,
utils.PaastaColors.DEFAULT,
)
actual = utils.PaastaColors.color_text(utils.PaastaColors.RED, "red%sred" % utils.PaastaColors.blue("blue"))
assert actual == expected
def test_DeploymentsJson_read():
file_mock = mock.mock_open()
fake_dir = '/var/dir_of_fake'
fake_path = '/var/dir_of_fake/fake_service/deployments.json'
fake_json = {
'v1': {
'no_srv:blaster': {
'docker_image': 'test_rocker:9.9',
'desired_state': 'start',
'force_bounce': None,
},
'dont_care:about': {
'docker_image': 'this:guy',
'desired_state': 'stop',
'force_bounce': '12345',
},
},
}
with mock.patch(
'builtins.open', file_mock, autospec=None,
) as open_patch, mock.patch(
'json.load', autospec=True, return_value=fake_json,
) as json_patch, mock.patch(
'paasta_tools.utils.os.path.isfile', autospec=True, return_value=True,
):
actual = utils.load_deployments_json('fake_service', fake_dir)
open_patch.assert_called_once_with(fake_path)
json_patch.assert_called_once_with(file_mock.return_value.__enter__.return_value)
assert actual == utils.DeploymentsJsonV1(fake_json['v1'])
def test_get_running_mesos_docker_containers():
fake_container_data = [
{
"Status": "Up 2 hours",
"Names": ['/mesos-legit.e1ad42eb-3ed7-4c9b-8711-aff017ef55a5'],
"Id": "05698f4156c4f30c8dcd747f7724b14c9af7771c9a4b96fdd6aa37d6419a12a3",
},
{
"Status": "Up 3 days",
"Names": ['/definitely_not_meeeeesos-.6d2fb3aa-2fef-4f98-8fed-df291481e91f'],
"Id": "ae66e2c3fe3c4b2a7444212592afea5cc6a4d8ca70ee595036b19949e00a257c",
},
]
with mock.patch("paasta_tools.utils.get_docker_client", autospec=True) as mock_docker:
docker_client = mock_docker.return_value
docker_client.containers.return_value = fake_container_data
assert len(utils.get_running_mesos_docker_containers()) == 1
def test_run_cancels_timer_thread_on_keyboard_interrupt():
mock_process = mock.Mock()
mock_timer_object = mock.Mock()
with mock.patch(
'paasta_tools.utils.Popen', autospec=True, return_value=mock_process,
), mock.patch(
'paasta_tools.utils.threading.Timer', autospec=True, return_value=mock_timer_object,
):
mock_process.stdout.readline.side_effect = KeyboardInterrupt
with raises(KeyboardInterrupt):
utils._run('sh echo foo', timeout=10)
assert mock_timer_object.cancel.call_count == 1
def test_run_returns_when_popen_fails():
fake_exception = OSError(1234, 'fake error')
with mock.patch('paasta_tools.utils.Popen', autospec=True, side_effect=fake_exception):
return_code, output = utils._run('nonexistant command', timeout=10)
assert return_code == 1234
assert 'fake error' in output
@pytest.mark.parametrize(
('dcts', 'expected'),
(
(
[{'a': 'b'}, {'c': 'd'}],
[{'a': 'b'}, {'c': 'd'}],
),
(
[{'c': 'd'}, {'a': 'b'}],
[{'a': 'b'}, {'c': 'd'}],
),
(
[{'a': 'b', 'c': 'd'}, {'a': 'b'}],
[{'a': 'b'}, {'a': 'b', 'c': 'd'}],
),
),
)
def test_sort_dcts(dcts, expected):
assert utils.sort_dicts(dcts) == expected
class TestInstanceConfig:
def test_get_monitoring(self):
fake_info = {'fake_key': 'fake_value'}
assert utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'monitoring': fake_info},
branch_dict=None,
).get_monitoring() == fake_info
def test_get_cpus_in_config(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'cpus': -5},
branch_dict=None,
)
assert fake_conf.get_cpus() == -5
def test_get_cpus_in_config_float(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'cpus': .66},
branch_dict=None,
)
assert fake_conf.get_cpus() == .66
def test_get_cpus_default(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_cpus() == .25
def test_get_mem_in_config(self):
fake_conf = utils.InstanceConfig(
service='',
instance='',
cluster='',
config_dict={'mem': -999},
branch_dict=None,
)
assert fake_conf.get_mem() == -999
def test_get_mem_default(self):
fake_conf = utils.InstanceConfig(
service='',
instance='',
cluster='',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_mem() == 1024
def test_zero_cpu_burst(self):
fake_conf = utils.InstanceConfig(
service='fake_name',
cluster='',
instance='fake_instance',
config_dict={'cpu_burst_pct': 0, 'cpus': 1},
branch_dict=None,
)
assert fake_conf.get_cpu_quota() == 100000
def test_format_docker_parameters_default(self):
fake_conf = utils.InstanceConfig(
service='fake_name',
cluster='',
instance='fake_instance',
config_dict={
'cpus': 1,
'mem': 1024,
},
branch_dict=None,
)
assert fake_conf.format_docker_parameters() == [
{"key": "memory-swap", "value": '1088m'},
{"key": "cpu-period", "value": "100000"},
{"key": "cpu-quota", "value": "1000000"},
{"key": "label", "value": "paasta_service=fake_name"},
{"key": "label", "value": "paasta_instance=fake_instance"},
]
def test_format_docker_parameters_non_default(self):
fake_conf = utils.InstanceConfig(
service='fake_name',
cluster='',
instance='fake_instance',
config_dict={
'cpu_burst_pct': 200,
'cfs_period_us': 200000,
'cpus': 1,
'mem': 1024,
'ulimit': {
'nofile': {'soft': 1024, 'hard': 2048},
'nice': {'soft': 20},
},
'cap_add': ['IPC_LOCK', 'SYS_PTRACE'],
},
branch_dict=None,
)
assert fake_conf.format_docker_parameters() == [
{"key": "memory-swap", "value": '1088m'},
{"key": "cpu-period", "value": "200000"},
{"key": "cpu-quota", "value": "600000"},
{"key": "label", "value": "paasta_service=fake_name"},
{"key": "label", "value": "paasta_instance=fake_instance"},
{"key": "ulimit", "value": "nice=20"},
{"key": "ulimit", "value": "nofile=1024:2048"},
{"key": "cap-add", "value": "IPC_LOCK"},
{"key": "cap-add", "value": "SYS_PTRACE"},
]
def test_full_cpu_burst(self):
fake_conf = utils.InstanceConfig(
service='fake_name',
cluster='',
instance='fake_instance',
config_dict={'cpu_burst_pct': 100, 'cpus': 1},
branch_dict=None,
)
assert fake_conf.get_cpu_quota() == 200000
def test_get_mem_swap_int(self):
fake_conf = utils.InstanceConfig(
service='',
instance='',
cluster='',
config_dict={
'mem': 50,
},
branch_dict=None,
)
assert fake_conf.get_mem_swap() == "114m"
def test_get_mem_swap_float_rounds_up(self):
fake_conf = utils.InstanceConfig(
service='',
instance='',
cluster='',
config_dict={
'mem': 50.4,
},
branch_dict=None,
)
assert fake_conf.get_mem_swap() == "115m"
def test_get_disk_in_config(self):
fake_conf = utils.InstanceConfig(
service='',
instance='',
cluster='',
config_dict={'disk': -999},
branch_dict=None,
)
assert fake_conf.get_disk() == -999
def test_get_disk_default(self):
fake_conf = utils.InstanceConfig(
service='',
instance='',
cluster='',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_disk() == 1024
def test_get_gpus_in_config(self):
fake_conf = utils.InstanceConfig(
service='',
instance='',
cluster='',
config_dict={'gpus': -123},
branch_dict=None,
)
assert fake_conf.get_gpus() == -123
def test_get_gpus_default(self):
fake_conf = utils.InstanceConfig(
service='',
instance='',
cluster='',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_gpus() == 0
def test_get_ulimit_in_config(self):
fake_conf = utils.InstanceConfig(
service='',
instance='',
cluster='',
config_dict={
'ulimit': {
'nofile': {'soft': 1024, 'hard': 2048},
'nice': {'soft': 20},
},
},
branch_dict=None,
)
assert list(fake_conf.get_ulimit()) == [
{"key": "ulimit", "value": "nice=20"},
{"key": "ulimit", "value": "nofile=1024:2048"},
]
def test_get_ulimit_default(self):
fake_conf = utils.InstanceConfig(
service='',
instance='',
cluster='',
config_dict={},
branch_dict=None,
)
assert list(fake_conf.get_ulimit()) == []
def test_get_cap_add_in_config(self):
fake_conf = utils.InstanceConfig(
service='',
instance='',
cluster='',
config_dict={
'cap_add': ['IPC_LOCK', 'SYS_PTRACE'],
},
branch_dict=None,
)
assert list(fake_conf.get_cap_add()) == [
{"key": "cap-add", "value": "IPC_LOCK"},
{"key": "cap-add", "value": "SYS_PTRACE"},
]
def test_get_cap_add_default(self):
fake_conf = utils.InstanceConfig(
service='',
instance='',
cluster='',
config_dict={},
branch_dict=None,
)
assert list(fake_conf.get_cap_add()) == []
def test_deploy_group_default(self):
fake_conf = utils.InstanceConfig(
service='',
instance='fake_instance',
cluster='fake_cluster',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_deploy_group() == 'fake_cluster.fake_instance'
def test_deploy_group_if_config(self):
fake_conf = utils.InstanceConfig(
service='',
instance='',
cluster='',
config_dict={'deploy_group': 'fake_deploy_group'},
branch_dict=None,
)
assert fake_conf.get_deploy_group() == 'fake_deploy_group'
def test_deploy_group_string_interpolation(self):
fake_conf = utils.InstanceConfig(
service='',
instance='',
cluster='fake_cluster',
config_dict={'deploy_group': 'cluster_is_{cluster}'},
branch_dict=None,
)
assert fake_conf.get_deploy_group() == 'cluster_is_fake_cluster'
def test_get_cmd_default(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_cmd() is None
def test_get_cmd_in_config(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'cmd': 'FAKECMD'},
branch_dict=None,
)
assert fake_conf.get_cmd() == 'FAKECMD'
def test_get_env_default(self):
fake_conf = utils.InstanceConfig(
service='fake_service',
cluster='fake_cluster',
instance='fake_instance',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_env() == {
'PAASTA_SERVICE': 'fake_service',
'PAASTA_INSTANCE': 'fake_instance',
'PAASTA_CLUSTER': 'fake_cluster',
'PAASTA_DEPLOY_GROUP': 'fake_cluster.fake_instance',
'PAASTA_DOCKER_IMAGE': '',
}
def test_get_env_with_config(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'env': {'SPECIAL_ENV': 'TRUE'},
'deploy_group': 'fake_deploy_group',
'monitoring': {'team': 'generic_team'},
},
branch_dict={'docker_image': 'something'},
)
assert fake_conf.get_env() == {
'SPECIAL_ENV': 'TRUE',
'PAASTA_SERVICE': '',
'PAASTA_INSTANCE': '',
'PAASTA_CLUSTER': '',
'PAASTA_DEPLOY_GROUP': 'fake_deploy_group',
'PAASTA_DOCKER_IMAGE': 'something',
'PAASTA_MONITORING_TEAM': 'generic_team',
}
def test_get_args_default_no_cmd(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_args() == []
def test_get_args_default_with_cmd(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'cmd': 'FAKECMD'},
branch_dict=None,
)
assert fake_conf.get_args() is None
def test_get_args_in_config(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'args': ['arg1', 'arg2']},
branch_dict=None,
)
assert fake_conf.get_args() == ['arg1', 'arg2']
def test_get_args_in_config_with_cmd(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'args': ['A'], 'cmd': 'C'},
branch_dict=None,
)
fake_conf.get_cmd()
with raises(utils.InvalidInstanceConfig):
fake_conf.get_args()
def test_get_force_bounce(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict={'force_bounce': 'blurp'},
)
assert fake_conf.get_force_bounce() == 'blurp'
def test_get_desired_state(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict={'desired_state': 'stop'},
)
assert fake_conf.get_desired_state() == 'stop'
def test_monitoring_blacklist_default(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_monitoring_blacklist(system_deploy_blacklist=[]) == []
def test_monitoring_blacklist_defaults_to_deploy_blacklist(self):
fake_deploy_blacklist = [("region", "fake_region")]
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'deploy_blacklist': fake_deploy_blacklist},
branch_dict=None,
)
assert fake_conf.get_monitoring_blacklist(system_deploy_blacklist=[]) == fake_deploy_blacklist
def test_deploy_blacklist_default(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_deploy_blacklist() == []
def test_deploy_blacklist_reads_blacklist(self):
fake_deploy_blacklist = [("region", "fake_region")]
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'deploy_blacklist': fake_deploy_blacklist},
branch_dict=None,
)
assert fake_conf.get_deploy_blacklist() == fake_deploy_blacklist
def test_extra_volumes_default(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_extra_volumes() == []
def test_extra_volumes_normal(self):
fake_extra_volumes: List[utils.DockerVolume] = [
{
"containerPath": "/etc/a",
"hostPath": "/var/data/a",
"mode": "RO",
},
]
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'extra_volumes': fake_extra_volumes},
branch_dict=None,
)
assert fake_conf.get_extra_volumes() == fake_extra_volumes
def test_get_pool(self):
pool = "poolname"
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'pool': pool},
branch_dict=None,
)
assert fake_conf.get_pool() == pool
def test_get_pool_default(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_pool() == 'default'
def test_get_volumes_dedupes_correctly_when_mode_differs_last_wins(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'extra_volumes': [
{"containerPath": "/a", "hostPath": "/a", "mode": "RW"},
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
],
},
branch_dict=None,
)
system_volumes: List[utils.DockerVolume] = []
assert fake_conf.get_volumes(system_volumes) == [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
]
def test_get_volumes_dedupes_respects_hostpath(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'extra_volumes': [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
{"containerPath": "/a", "hostPath": "/other_a", "mode": "RO"},
],
},
branch_dict=None,
)
system_volumes: List[utils.DockerVolume] = [{"containerPath": "/a", "hostPath": "/a", "mode": "RO"}]
assert fake_conf.get_volumes(system_volumes) == [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
{"containerPath": "/a", "hostPath": "/other_a", "mode": "RO"},
]
def test_get_volumes_handles_dupes_everywhere(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'extra_volumes': [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
{"containerPath": "/b", "hostPath": "/b", "mode": "RO"},
{"containerPath": "/c", "hostPath": "/c", "mode": "RO"},
],
},
branch_dict=None,
)
system_volumes: List[utils.DockerVolume] = [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
{"containerPath": "/b", "hostPath": "/b", "mode": "RO"},
{"containerPath": "/d", "hostPath": "/d", "mode": "RO"},
]
assert fake_conf.get_volumes(system_volumes) == [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
{"containerPath": "/b", "hostPath": "/b", "mode": "RO"},
{"containerPath": "/c", "hostPath": "/c", "mode": "RO"},
{"containerPath": "/d", "hostPath": "/d", "mode": "RO"},
]
def test_get_volumes_prefers_extra_volumes_over_system(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'extra_volumes': [
{"containerPath": "/a", "hostPath": "/a", "mode": "RW"},
],
},
branch_dict=None,
)
system_volumes: List[utils.DockerVolume] = [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
]
assert fake_conf.get_volumes(system_volumes) == [
{"containerPath": "/a", "hostPath": "/a", "mode": "RW"},
]
def test_get_volumes_handles_dupes_with_trailing_slashes(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'extra_volumes': [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
{"containerPath": "/b", "hostPath": "/b", "mode": "RO"},
],
},
branch_dict=None,
)
system_volumes: List[utils.DockerVolume] = [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
{"containerPath": "/b/", "hostPath": "/b/", "mode": "RO"},
]
# note: prefers extra_volumes over system_volumes
assert fake_conf.get_volumes(system_volumes) == [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
{"containerPath": "/b", "hostPath": "/b", "mode": "RO"},
]
def test_get_volumes_preserves_trailing_slash(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'extra_volumes': [
{"containerPath": "/a/", "hostPath": "/a/", "mode": "RW"},
],
},
branch_dict=None,
)
system_volumes: List[utils.DockerVolume] = [
{"containerPath": "/b/", "hostPath": "/b/", "mode": "RW"},
]
assert fake_conf.get_volumes(system_volumes) == [
{"containerPath": "/a/", "hostPath": "/a/", "mode": "RW"},
{"containerPath": "/b/", "hostPath": "/b/", "mode": "RW"},
]
def test_get_docker_url_no_error(self):
fake_registry = "im.a-real.vm"
fake_image = "and-i-can-run:1.0"
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict=None,
)
with mock.patch(
'paasta_tools.utils.InstanceConfig.get_docker_registry', autospec=True,
return_value=fake_registry,
), mock.patch(
'paasta_tools.utils.InstanceConfig.get_docker_image', autospec=True,
return_value=fake_image,
):
expected_url = f"{fake_registry}/{fake_image}"
assert fake_conf.get_docker_url() == expected_url
@pytest.mark.parametrize(
('dependencies_reference', 'dependencies', 'expected'), [
(None, None, None),
('aaa', None, None),
('aaa', {}, None),
('aaa', {"aaa": [{"foo": "bar"}]}, {"foo": "bar"}),
('aaa', {"bbb": [{"foo": "bar"}]}, None),
],
)
def test_get_dependencies(self, dependencies_reference, dependencies, expected):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'dependencies_reference': dependencies_reference,
'dependencies': dependencies,
},
branch_dict=None,
)
        assert fake_conf.get_dependencies() == expected
@pytest.mark.parametrize(
('security', 'expected'), [
({}, None),
(None, None),
({"outbound_firewall": "monitor"}, 'monitor'),
({"outbound_firewall": "foo"}, 'foo'),
],
)
def test_get_outbound_firewall(self, security, expected):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'security': security},
branch_dict=None,
)
        assert fake_conf.get_outbound_firewall() == expected
@pytest.mark.parametrize(
('security', 'expected'), [
({}, (True, '')),
({"outbound_firewall": "monitor"}, (True, '')),
({"outbound_firewall": "block"}, (True, '')),
({"outbound_firewall": "foo"}, (False, 'Unrecognized outbound_firewall value "foo"')),
(
{"outbound_firewall": "monitor", "foo": 1},
(False, 'Unrecognized items in security dict of service config: "foo"'),
),
],
)
def test_check_security(self, security, expected):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'security': security},
branch_dict=None,
)
assert fake_conf.check_security() == expected
@pytest.mark.parametrize(
('dependencies_reference', 'dependencies', 'expected'), [
(None, None, (True, '')),
('aaa', {"aaa": []}, (True, '')),
('aaa', None, (False, 'dependencies_reference "aaa" declared but no dependencies found')),
('aaa', {"bbb": []}, (False, 'dependencies_reference "aaa" not found in dependencies dictionary')),
],
)
def test_check_dependencies_reference(self, dependencies_reference, dependencies, expected):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'dependencies_reference': dependencies_reference,
'dependencies': dependencies,
},
branch_dict=None,
)
assert fake_conf.check_dependencies_reference() == expected
def test_is_under_replicated_ok():
num_available = 1
expected_count = 1
crit_threshold = 50
actual = utils.is_under_replicated(num_available, expected_count, crit_threshold)
assert actual == (False, float(100))
def test_is_under_replicated_zero():
num_available = 1
expected_count = 0
crit_threshold = 50
actual = utils.is_under_replicated(num_available, expected_count, crit_threshold)
assert actual == (False, float(100))
def test_is_under_replicated_critical():
num_available = 0
expected_count = 1
crit_threshold = 50
actual = utils.is_under_replicated(num_available, expected_count, crit_threshold)
assert actual == (True, float(0))
def test_deploy_blacklist_to_constraints():
fake_deploy_blacklist = [("region", "useast1-prod"), ("habitat", "fake_habitat")]
expected_constraints = [["region", "UNLIKE", "useast1-prod"], ["habitat", "UNLIKE", "fake_habitat"]]
actual = utils.deploy_blacklist_to_constraints(fake_deploy_blacklist)
assert actual == expected_constraints
def test_validate_service_instance_valid_marathon():
mock_marathon_services = [('service1', 'main'), ('service2', 'main')]
mock_chronos_services = [('service1', 'worker'), ('service2', 'tailer')]
my_service = 'service1'
my_instance = 'main'
fake_cluster = 'fake_cluster'
fake_soa_dir = 'fake_soa_dir'
with mock.patch(
'paasta_tools.utils.get_services_for_cluster',
autospec=True,
side_effect=[mock_marathon_services, mock_chronos_services],
) as get_services_for_cluster_patch:
assert utils.validate_service_instance(
my_service,
my_instance,
fake_cluster,
fake_soa_dir,
) == 'marathon'
assert mock.call(
cluster=fake_cluster,
instance_type='marathon',
soa_dir=fake_soa_dir,
) in get_services_for_cluster_patch.call_args_list
def test_validate_service_instance_valid_chronos():
mock_marathon_services = [('service1', 'main'), ('service2', 'main')]
mock_chronos_services = [('service1', 'worker'), ('service2', 'tailer')]
my_service = 'service1'
my_instance = 'worker'
fake_cluster = 'fake_cluster'
fake_soa_dir = 'fake_soa_dir'
with mock.patch(
'paasta_tools.utils.get_services_for_cluster',
autospec=True,
side_effect=[mock_marathon_services, mock_chronos_services],
) as get_services_for_cluster_patch:
assert utils.validate_service_instance(
my_service,
my_instance,
fake_cluster,
fake_soa_dir,
) == 'chronos'
assert mock.call(
cluster=fake_cluster,
instance_type='chronos',
soa_dir=fake_soa_dir,
) in get_services_for_cluster_patch.call_args_list
def test_validate_service_instance_invalid():
mock_marathon_services = [('service1', 'main'), ('service2', 'main')]
mock_chronos_services = [('service1', 'worker'), ('service2', 'tailer')]
mock_paasta_native_services = [('service1', 'main2'), ('service2', 'main2')]
mock_adhoc_services = [('service1', 'interactive'), ('service2', 'interactive')]
my_service = 'bad_service'
my_instance = 'main'
fake_cluster = 'fake_cluster'
fake_soa_dir = 'fake_soa_dir'
with mock.patch(
'paasta_tools.utils.get_services_for_cluster',
autospec=True,
side_effect=[
mock_marathon_services, mock_chronos_services,
mock_paasta_native_services, mock_adhoc_services,
],
):
with raises(utils.NoConfigurationForServiceError):
utils.validate_service_instance(
my_service,
my_instance,
fake_cluster,
fake_soa_dir,
)
def test_terminal_len():
assert len('some text') == utils.terminal_len(utils.PaastaColors.red('some text'))
def test_format_table():
actual = utils.format_table(
[
['looooong', 'y', 'z'],
['a', 'looooong', 'c'],
['j', 'k', 'looooong'],
],
)
expected = [
'looooong y z',
'a looooong c',
'j k looooong',
]
assert actual == expected
assert ["a b c"] == utils.format_table([['a', 'b', 'c']], min_spacing=5)
def test_format_table_with_interjected_lines():
actual = utils.format_table(
[
['looooong', 'y', 'z'],
'interjection',
['a', 'looooong', 'c'],
'unicode interjection',
['j', 'k', 'looooong'],
],
)
expected = [
'looooong y z',
'interjection',
'a looooong c',
'unicode interjection',
'j k looooong',
]
assert actual == expected
def test_format_table_all_strings():
actual = utils.format_table(['foo', 'bar', 'baz'])
expected = ['foo', 'bar', 'baz']
assert actual == expected
def test_parse_timestamp():
actual = utils.parse_timestamp('19700101T000000')
expected = datetime.datetime(year=1970, month=1, day=1, hour=0, minute=0, second=0)
assert actual == expected
def test_null_log_writer():
"""Basic smoke test for NullLogWriter"""
lw = utils.NullLogWriter(driver='null')
lw.log('fake_service', 'fake_line', 'build', 'BOGUS_LEVEL')
class TestFileLogWriter:
def test_smoke(self):
"""Smoke test for FileLogWriter"""
fw = utils.FileLogWriter('/dev/null')
fw.log('fake_service', 'fake_line', 'build', 'BOGUS_LEVEL')
def test_format_path(self):
"""Test the path formatting for FileLogWriter"""
fw = utils.FileLogWriter("/logs/{service}/{component}/{level}/{cluster}/{instance}")
expected = "/logs/a/b/c/d/e"
assert expected == fw.format_path("a", "b", "c", "d", "e")
def test_maybe_flock(self):
"""Make sure we flock and unflock when flock=True"""
with mock.patch("paasta_tools.utils.fcntl", autospec=True) as mock_fcntl:
fw = utils.FileLogWriter("/dev/null", flock=True)
mock_file = mock.Mock()
with fw.maybe_flock(mock_file):
mock_fcntl.flock.assert_called_once_with(mock_file.fileno(), mock_fcntl.LOCK_EX)
mock_fcntl.flock.reset_mock()
mock_fcntl.flock.assert_called_once_with(mock_file.fileno(), mock_fcntl.LOCK_UN)
def test_maybe_flock_flock_false(self):
"""Make sure we don't flock/unflock when flock=False"""
with mock.patch("paasta_tools.utils.fcntl", autospec=True) as mock_fcntl:
fw = utils.FileLogWriter("/dev/null", flock=False)
mock_file = mock.Mock()
with fw.maybe_flock(mock_file):
assert mock_fcntl.flock.call_count == 0
assert mock_fcntl.flock.call_count == 0
def test_log_makes_exactly_one_write_call(self):
"""We want to make sure that log() makes exactly one call to write, since that's how we ensure atomicity."""
fake_file = mock.Mock()
fake_contextmgr = mock.Mock(
__enter__=lambda _self: fake_file,
__exit__=lambda _self, t, v, tb: None,
)
fake_line = "text" * 1000000
with mock.patch("paasta_tools.utils.io.FileIO", return_value=fake_contextmgr, autospec=True) as mock_FileIO:
fw = utils.FileLogWriter("/dev/null", flock=False)
with mock.patch("paasta_tools.utils.format_log_line", return_value=fake_line, autospec=True) as fake_fll:
fw.log("service", "line", "component", level="level", cluster="cluster", instance="instance")
fake_fll.assert_called_once_with("level", "cluster", "service", "instance", "component", "line")
mock_FileIO.assert_called_once_with("/dev/null", mode=fw.mode, closefd=True)
fake_file.write.assert_called_once_with(f"{fake_line}\n".encode('UTF-8'))
def test_write_raises_IOError(self):
fake_file = mock.Mock()
fake_file.write.side_effect = IOError("hurp durp")
fake_contextmgr = mock.Mock(
__enter__=lambda _self: fake_file,
__exit__=lambda _self, t, v, tb: None,
)
fake_line = "line"
with mock.patch(
"paasta_tools.utils.io.FileIO", return_value=fake_contextmgr, autospec=True,
), mock.patch(
"paasta_tools.utils.paasta_print", autospec=True,
) as mock_print, mock.patch(
"paasta_tools.utils.format_log_line", return_value=fake_line, autospec=True,
):
fw = utils.FileLogWriter("/dev/null", flock=False)
fw.log(
service="service",
line="line",
component="build",
level="level",
cluster="cluster",
instance="instance",
)
mock_print.assert_called_once_with(
mock.ANY,
file=sys.stderr,
)
# On python3, they merged IOError and OSError. Once paasta is fully py3, replace mock.ANY above with the OSError
# message below.
assert mock_print.call_args[0][0] in {
"Could not log to /dev/null: IOError: hurp durp -- would have logged: line\n",
"Could not log to /dev/null: OSError: hurp durp -- would have logged: line\n",
}
def test_deep_merge_dictionaries():
overrides = {
'common_key': 'value',
'common_dict': {
'subkey1': 1,
'subkey2': 2,
'subkey3': 3,
},
'just_in_overrides': 'value',
'just_in_overrides_dict': {'key': 'value'},
'overwriting_key': 'value',
'overwriting_dict': {'test': 'value'},
}
defaults = {
'common_key': 'overwritten_value',
'common_dict': {
'subkey1': 'overwritten_value',
'subkey4': 4,
'subkey5': 5,
},
'just_in_defaults': 'value',
'just_in_defaults_dict': {'key': 'value'},
'overwriting_key': {'overwritten-key', 'overwritten-value'},
'overwriting_dict': 'overwritten-value',
}
expected = {
'common_key': 'value',
'common_dict': {
'subkey1': 1,
'subkey2': 2,
'subkey3': 3,
'subkey4': 4,
'subkey5': 5,
},
'just_in_overrides': 'value',
'just_in_overrides_dict': {'key': 'value'},
'just_in_defaults': 'value',
'just_in_defaults_dict': {'key': 'value'},
'overwriting_key': 'value',
'overwriting_dict': {'test': 'value'},
}
assert utils.deep_merge_dictionaries(overrides, defaults, allow_duplicate_keys=True) == expected
def test_deep_merge_dictionaries_no_duplicate_keys_allowed():
# Nested dicts should be allowed
overrides = {
"nested": {
"a": "override",
},
}
defaults = {
"nested": {
"b": "default",
},
}
expected = {
"nested": {
"a": "override",
"b": "default",
},
}
assert utils.deep_merge_dictionaries(overrides, defaults, allow_duplicate_keys=True) == expected
del expected
overrides = {
"a": "override",
}
defaults = {
"a": "default",
}
with raises(utils.DuplicateKeyError):
utils.deep_merge_dictionaries(overrides, defaults, allow_duplicate_keys=False)
overrides = {
"nested": {
"a": "override",
},
}
defaults = {
"nested": {
"a": "default",
},
}
with raises(utils.DuplicateKeyError):
utils.deep_merge_dictionaries(overrides, defaults, allow_duplicate_keys=False)
def test_function_composition():
def func_one(count):
return count + 1
def func_two(count):
return count + 1
composed_func = utils.compose(func_one, func_two)
assert composed_func(0) == 2
def test_is_deploy_step():
assert utils.is_deploy_step('prod.main')
assert utils.is_deploy_step('thingy')
assert not utils.is_deploy_step('itest')
assert not utils.is_deploy_step('performance-check')
assert not utils.is_deploy_step('command-thingy')
def test_long_job_id_to_short_job_id():
assert utils.long_job_id_to_short_job_id('service.instance.git.config') == 'service.instance'
def test_mean():
iterable = [1.0, 2.0, 3.0]
assert utils.mean(iterable) == 2.0
def test_prompt_pick_one_happy():
with mock.patch(
'paasta_tools.utils.sys.stdin', autospec=True,
) as mock_stdin, mock.patch(
'paasta_tools.utils.choice.Menu', autospec=True,
) as mock_menu:
mock_stdin.isatty.return_value = True
mock_menu.return_value = mock.Mock(ask=mock.Mock(return_value='choiceA'))
assert utils.prompt_pick_one(['choiceA'], 'test') == 'choiceA'
def test_prompt_pick_one_quit():
with mock.patch(
'paasta_tools.utils.sys.stdin', autospec=True,
) as mock_stdin, mock.patch(
'paasta_tools.utils.choice.Menu', autospec=True,
) as mock_menu:
mock_stdin.isatty.return_value = True
mock_menu.return_value = mock.Mock(ask=mock.Mock(return_value=(None, 'quit')))
with raises(SystemExit):
utils.prompt_pick_one(['choiceA', 'choiceB'], 'test')
def test_prompt_pick_one_keyboard_interrupt():
with mock.patch(
'paasta_tools.utils.sys.stdin', autospec=True,
) as mock_stdin, mock.patch(
'paasta_tools.utils.choice.Menu', autospec=True,
) as mock_menu:
mock_stdin.isatty.return_value = True
mock_menu.return_value = mock.Mock(ask=mock.Mock(side_effect=KeyboardInterrupt))
with raises(SystemExit):
utils.prompt_pick_one(['choiceA', 'choiceB'], 'test')
def test_prompt_pick_one_eoferror():
with mock.patch(
'paasta_tools.utils.sys.stdin', autospec=True,
) as mock_stdin, mock.patch(
'paasta_tools.utils.choice.Menu', autospec=True,
) as mock_menu:
mock_stdin.isatty.return_value = True
mock_menu.return_value = mock.Mock(ask=mock.Mock(side_effect=EOFError))
with raises(SystemExit):
utils.prompt_pick_one(['choiceA', 'choiceB'], 'test')
def test_prompt_pick_one_exits_no_tty():
with mock.patch('paasta_tools.utils.sys.stdin', autospec=True) as mock_stdin:
mock_stdin.isatty.return_value = False
with raises(SystemExit):
utils.prompt_pick_one(['choiceA', 'choiceB'], 'test')
def test_prompt_pick_one_exits_no_choices():
with mock.patch('paasta_tools.utils.sys.stdin', autospec=True) as mock_stdin:
mock_stdin.isatty.return_value = True
with raises(SystemExit):
utils.prompt_pick_one([], 'test')
def test_get_code_sha_from_dockerurl():
fake_docker_url = 'docker-paasta.yelpcorp.com:443/services-cieye:paasta-93340779404579'
actual = utils.get_code_sha_from_dockerurl(fake_docker_url)
assert actual == 'git93340779'
# Useful mostly for integration tests, where we run busybox a lot.
assert utils.get_code_sha_from_dockerurl('docker.io/busybox') == 'gitbusybox'
@mock.patch("paasta_tools.utils.fcntl.flock", autospec=True, wraps=utils.fcntl.flock)
def test_flock(mock_flock, tmpdir):
my_file = tmpdir.join('my-file')
with open(str(my_file), 'w') as f:
with utils.flock(f):
mock_flock.assert_called_once_with(f.fileno(), utils.fcntl.LOCK_EX)
mock_flock.reset_mock()
mock_flock.assert_called_once_with(f.fileno(), utils.fcntl.LOCK_UN)
@mock.patch("paasta_tools.utils.Timeout", autospec=True)
@mock.patch("paasta_tools.utils.fcntl.flock", autospec=True, wraps=utils.fcntl.flock)
def test_timed_flock_ok(mock_flock, mock_timeout, tmpdir):
my_file = tmpdir.join('my-file')
with open(str(my_file), 'w') as f:
with utils.timed_flock(f, seconds=mock.sentinel.seconds):
mock_timeout.assert_called_once_with(seconds=mock.sentinel.seconds)
mock_flock.assert_called_once_with(f.fileno(), utils.fcntl.LOCK_EX)
mock_flock.reset_mock()
mock_flock.assert_called_once_with(f.fileno(), utils.fcntl.LOCK_UN)
@mock.patch("paasta_tools.utils.Timeout", autospec=True, side_effect=utils.TimeoutError('Oh noes'))
@mock.patch("paasta_tools.utils.fcntl.flock", autospec=True, wraps=utils.fcntl.flock)
def test_timed_flock_timeout(mock_flock, mock_timeout, tmpdir):
my_file = tmpdir.join('my-file')
with open(str(my_file), 'w') as f:
with pytest.raises(utils.TimeoutError):
with utils.timed_flock(f):
assert False # pragma: no cover
assert mock_flock.mock_calls == []
@mock.patch("paasta_tools.utils.fcntl.flock", autospec=True, wraps=utils.fcntl.flock)
def test_timed_flock_inner_timeout_ok(mock_flock, tmpdir):
# Doing something slow inside the 'with' context of timed_flock doesn't cause a timeout
# (the timeout should only apply to the flock operation itself)
my_file = tmpdir.join('my-file')
with open(str(my_file), 'w') as f:
with utils.timed_flock(f, seconds=1):
time.true_slow_sleep(0.1)
assert mock_flock.mock_calls == [
mock.call(f.fileno(), utils.fcntl.LOCK_EX),
mock.call(f.fileno(), utils.fcntl.LOCK_UN),
]
|
# s3 production content bucket information
ACCESS_KEY_ID = 'ACCESS_KEY_ID'
ACCESS_KEY_SECRET = 'ACCESS_KEY_SECRET'
# boto.exception.BotoClientError: Bucket names cannot contain upper-case
# characters when using either the sub-domain or virtual hosting calling format,
# so the bucket name below must stay lower-case.
BUCKET_NAME = 'bucket_name-pass-portal-content-prod'
ORIGINAL_DIRECTORY = 'original'
COMPRESS_DIRECTORY = 'compress'
TRIAL_RUN = False
START_FROM = 99765
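# --- Illustrative usage sketch (added): how these constants might be consumed. ---
# boto3 and the object-key layout below are assumptions, not part of this config:
# import boto3
# s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY_ID,
#                   aws_secret_access_key=ACCESS_KEY_SECRET)
# s3.upload_file('example.jpg', BUCKET_NAME, ORIGINAL_DIRECTORY + '/example.jpg')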
|
import time, pyaudio,string,serial
from multiprocessing import Process
import requests
import getmac
import pynmea2
import speech_recognition as sr
import Adafruit_DHT
from gtts import gTTS as s2t
import json,os
# DHT22
SERVER_IP = '192.168.0.104'
DHT_SENSOR = Adafruit_DHT.DHT22
DHT_PIN = 4
API_URL = 'http://' + SERVER_IP
SEND_DATA = '/insertData'
RAISE_ALERT = '/raiseAlert'
GET_ALERTS = '/getalerts'
# NEO 6M GPS
PORT = '/dev/ttyS0'
SERIAL = serial.Serial(PORT,baudrate=9600,timeout=0.5)
# Speech Recognition
RECOGNIZER = sr.Recognizer()
def getData_NEO6M():
serialRow = SERIAL.readline().decode('utf-8')
if (serialRow[0:6] == '$GPRMC'):
parsedSerialRow = pynmea2.parse(serialRow)
return {
'latitude' : parsedSerialRow.latitude,
'longitude': parsedSerialRow.longitude
}
def getData_DHT22():
h,t = Adafruit_DHT.read_retry(DHT_SENSOR, DHT_PIN)
return {'temperature':t,'humidity':h}
def parallelRun(*procs):
    # Start one Process per target function, keep the Process objects
    # (start() returns None, so it must not be what we append), then
    # wait for all of them to finish.
    processes = []
    for process in procs:
        p = Process(target=process)
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
def p1(): # Data acquisition, server communication
idx = 0
postData = {
'device_key' : getmac.get_mac_address(),
'data' : {
'temperature': None,
'humidity': None,
'latitude': None,
'longitude': None
}
}
while True:
print("Data aquire process active !")
tmp = getData_DHT22()
gps = getData_NEO6M()
print(gps)
postData['data']['temperature'] = tmp['temperature']
postData['data']['humidity'] = tmp['humidity']
try:
postData['data']['latitude'] = gps['latitude']
postData['data']['longitude'] = gps['longitude']
        except (TypeError, KeyError):
            # No GPS fix this cycle (getData_NEO6M returned nothing); keep previous coordinates
            pass
requests.post(API_URL+SEND_DATA,json=postData) # Send data and reset for next data set
response = requests.get(API_URL+GET_ALERTS)
content = response.content.decode('utf-8')
print(content)
content = json.loads(content)
for item in content['data']:
f = s2t(item['desc'],lang='ro')
f.save(item['cod']+'.mp3')
os.system('mpg321 -a plughw ' + item['cod'] +'.mp3')
        time.sleep(60) # Acquire and send data every 60 seconds
def p2(): # Voice handling
    try:
        with sr.Microphone() as ainput:
while True:
RECOGNIZER.adjust_for_ambient_noise(ainput,duration=5)
voiceSignal = RECOGNIZER.listen(ainput)
message = RECOGNIZER.recognize_google(voiceSignal,language='ro-RO')
if (message == 'ajutor'):
bodyMessage = {
'device_key': getmac.get_mac_address(),
}
requests.post(API_URL+RAISE_ALERT,json=bodyMessage)
except:
# Raise no mic detected alert
pass
if __name__ == '__main__':
parallelRun(p1,p2)
|
# -*- coding: utf-8 -*-
"""
@Author: Shaoweihua.Liu
@Contact: liushaoweihua@126.com
@Site: github.com/liushaoweihua
@File: __init__.py1.py
@Time: 2020/3/2 10:54 AM
"""
# Codes come from <bert-as-service>:
# Author: Han Xiao
# Github: https://hanxiao.github.io
# Email: artex.xh@gmail.com
# Version: 1.10.0
__all__ = ['_py2', '_str', '_buffer', '_raise']
_py2 = False
_str = str
_buffer = memoryview
def _raise(t_e, _e):
raise t_e from _e
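# Illustrative sketch (added): _raise chains a new exception onto the original one,
# preserving the traceback of the error that triggered it. Hypothetical usage:
# try:
#     int("not-a-number")
# except ValueError as e:
#     _raise(TypeError("bad payload"), e)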
|
"""
Code for initialising and calling functionality for performing cross-validation
"""
import argparse
import train_and_evaluate_model
import active_learning_preannotation
import vectorize_data
import os
import importlib
def do_cross_validation(parser):
properties_main, path_slash_format, path_dot_format = active_learning_preannotation.load_properties(parser)
word2vecwrapper = vectorize_data.Word2vecWrapper(properties_main.model_path, properties_main.semantic_vector_length)
CROSS_VALIDATION_SETTINGS = "cross_validation_settings"
if not os.path.exists(os.path.join(path_slash_format, CROSS_VALIDATION_SETTINGS + ".py")):
print("The directory '" + str(path_slash_format) + "' does not have a " + CROSS_VALIDATION_SETTINGS + ".py file.")
exit(1)
properties_cross_validation = importlib.import_module(path_dot_format + "." + CROSS_VALIDATION_SETTINGS)
properties_container_cross_validation = CrossValidationPropertiesContainer(properties_cross_validation)
for c_value in properties_container_cross_validation.c_values:
for whether_to_use_word2vec in properties_container_cross_validation.whether_to_use_word2vec:
for whether_to_use_clustering in properties_container_cross_validation.whether_to_use_clustering:
properties_main.c_value = c_value
properties_main.whether_to_use_word2vec = whether_to_use_word2vec
properties_main.whether_to_use_clustering = whether_to_use_clustering
train_and_evaluate_model.train_and_evaluate_model_cross_validation(properties_main, path_slash_format, word2vecwrapper, properties_container_cross_validation)
class CrossValidationPropertiesContainer:
"""
CrossValidationPropertiesContainer
A container for properties specific to cross validation
"""
def __init__(self, properties):
"""
        :param properties: a python module containing the properties (retrieved by importlib.import_module(<PATH>))
"""
try:
self.nr_of_cross_validation_splits_for_evaluation = properties.nr_of_cross_validation_splits_for_evaluation
except AttributeError:
print("Cross-validation settings file lacks the property 'nr_of_cross_validation_splits_for_evaluation'.")
exit(1)
try:
self.c_values = properties.c_values
except AttributeError:
print("Cross-validation settings file lacks the property 'c_values'.")
exit(1)
try:
self.whether_to_use_word2vec = properties.whether_to_use_word2vec
except AttributeError:
print("Cross-validation settings file lacks the property 'whether_to_use_word2vec'.")
exit(1)
try:
self.whether_to_use_clustering = properties.whether_to_use_clustering
except AttributeError:
print("Cross-validation settings file lacks the property 'whether_to_use_clustering'.")
exit(1)
try:
self.evaluation_output_dir = properties.evaluation_output_dir
except AttributeError:
print("Cross-validation settings file lacks the property 'evaluation_output_dir'.")
exit(1)
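# Illustrative sketch (added): a minimal cross_validation_settings.py providing the
# attributes this container expects. The concrete values below are placeholders only.
# nr_of_cross_validation_splits_for_evaluation = 5
# c_values = [0.5, 1.0, 2.0]
# whether_to_use_word2vec = [True, False]
# whether_to_use_clustering = [True, False]
# evaluation_output_dir = "evaluation_output"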
if __name__ == "__main__":
parser = argparse.ArgumentParser()
do_cross_validation(parser)
|
# -*- coding: utf-8 -*-
# @Author: 何睿
# @Create Date: 2019-01-12 10:09:56
# @Last Modified by: 何睿
# @Last Modified time: 2019-01-12 12:05:39
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head or not head.next:
return None
count = 1
slow, fast = head, head.next
while fast and fast.next:
            # First meeting point of the slow and fast pointers
if fast == slow:
                # Determine the number of nodes in the cycle
fast = fast.next
while fast and fast.next:
                    # If fast meets slow again, the cycle has been traversed once,
                    # so stop the loop
if fast == slow:
break
                    # Otherwise increment count
count += 1
slow, fast = slow.next, fast.next.next
                # Reset slow and fast to point to the head node
slow, fast = head, head
                # Move fast forward count-1 nodes (one less than the number of nodes in the cycle)
for _ in range(count - 1):
fast = fast.next
while fast:
                    # When fast.next points to slow, slow is the entry node of the cycle
if fast.next == slow:
return slow
fast = fast.next
slow = slow.next
return slow
slow, fast = slow.next, fast.next.next
        # If fast and slow never meet during traversal, there is no cycle
return None
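# Illustrative check (added, not part of the original solution): build a list
# 1 -> 2 -> 3 -> 4 with the tail linked back to the node holding 2, then verify
# that detectCycle() returns that entry node.
if __name__ == "__main__":
    nodes = [ListNode(i) for i in range(1, 5)]
    for first, second in zip(nodes, nodes[1:]):
        first.next = second
    nodes[-1].next = nodes[1]  # close the cycle at the node with value 2
    entry = Solution().detectCycle(nodes[0])
    print(entry.val if entry else None)  # expected output: 2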
|
""" Automated celery tasks that use the NewsAPI to retrive top headline articles """
from __future__ import absolute_import, unicode_literals
from datetime import datetime
from random import shuffle
from celery import shared_task
from django.utils import timezone
from newsapi import NewsApiClient
from env import get_env_value
from .categories import categories
from .models import Article
n_article_request = 100
article_db_limit = 7500
@shared_task
def refresh_articles():
""" Refresh database with newest articles after a datetime cutoff,
tagging each with their category. """
datetime_cutoff = get_datetime_cutoff()
articles = []
newsapi = NewsApiClient(get_env_value('NEWS_API_KEY'))
for category in categories:
response = newsapi.get_top_headlines(
country='us', category=category, page_size=n_article_request)
if response['status'] != 'ok':
print_error(response)
continue
all_response_articles = response['articles']
# Filter out old articles to avoid saving duplicates to database
new_articles = []
for article in all_response_articles:
article_datetime = get_article_datetime(article)
if article_datetime > datetime_cutoff:
article['category'] = category
new_articles.append(article)
articles += new_articles
for article in articles:
save_article(article)
@shared_task
def delete_oldest_articles():
""" Our free hosting plan limits our data storage to 10,000 records
Therefore, we must periodically remove the oldest articles from our database
to accomodate this limitation
"""
articles = Article.objects.all().order_by('-pub_date')[article_db_limit:]
for article in articles:
article.delete()
def print_error(response):
print(response['code'])
print(response['message'])
def get_datetime_cutoff():
""" Get datetime of last published article, so we know which articles are new and which
have already been saved to database. """
ordered_articles = Article.objects.all().order_by('-pub_date')
# If database is empty, start cutoff to this morning
if not ordered_articles:
return datetime.today().replace(hour=0, minute=0, second=0, tzinfo=timezone.utc)
else:
return ordered_articles[0].pub_date.replace(tzinfo=timezone.utc)
def get_article_datetime(article):
datetime_str = article['publishedAt']
return datetime.strptime(datetime_str, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc)
def request_articles(newsapi, category):
response = newsapi.get_top_headlines(
        country='us', category=category, page_size=n_article_request)
if response['status'] != 'ok':
return
return response['articles']
def save_article(article):
article_data = extract_article_data(article)
a = Article(**article_data)
a.save()
def extract_article_data(article):
source = article['source']['name']
title = article['title']
description = article['description']
url = article['url']
url_to_image = article['urlToImage']
pub_date = article['publishedAt']
category = article['category']
return {'source': source, 'title': title, 'description': description,
'url': url, 'url_to_image': url_to_image, 'pub_date': pub_date,
'category': category}
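# Illustrative sketch (added): these shared tasks could be scheduled with celery beat.
# The app instance, task module path, and intervals below are placeholders, not from this project:
# app.conf.beat_schedule = {
#     'refresh-articles-hourly': {'task': 'tasks.refresh_articles', 'schedule': 3600.0},
#     'trim-old-articles-daily': {'task': 'tasks.delete_oldest_articles', 'schedule': 86400.0},
# }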
|
from scvi.data import get_from_registry
from celligner2.metrics.metrics import entropy_batch_mixing, knn_purity, asw, nmi
from celligner2.othermodels import SCVI, SCANVI, TOTALVI
from scipy.sparse import issparse
import numpy as np
import scanpy as sc
import torch
from typing import Union, Optional
from sklearn.metrics import f1_score
import anndata
import matplotlib.pyplot as plt
sc.settings.set_figure_params(dpi=200, frameon=False)
torch.set_printoptions(precision=3, sci_mode=False, edgeitems=7)
np.set_printoptions(precision=2, edgeitems=7)
class SCVI_EVAL:
def __init__(
self,
model: Union[SCVI, SCANVI, TOTALVI],
adata: anndata.AnnData,
trainer: Optional['Trainer'] = None,
cell_type_key: str = None,
batch_key: str = None,
):
self.outer_model = model
self.model = model.model
self.model.eval()
if trainer is None:
self.trainer = model.trainer
else:
self.trainer = trainer
self.adata = adata
self.modified = getattr(model.model, 'encode_covariates', True)
self.annotated = type(model) is SCANVI
self.predictions = None
self.certainty = None
self.prediction_names = None
self.class_check = None
self.post_adata_2 = None
if trainer is not None:
if self.trainer.use_cuda:
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
else:
self.device = next(self.model.parameters()).get_device()
if issparse(self.adata.X):
X = self.adata.X.toarray()
else:
X = self.adata.X
self.x_tensor = torch.tensor(X, device=self.device)
self.labels = None
self.label_tensor = None
if self.annotated:
self.labels = get_from_registry(self.adata, "labels").astype(np.int8)
self.label_tensor = torch.tensor(self.labels, device=self.device)
self.cell_types = self.adata.obs[cell_type_key].tolist()
self.batch_indices = get_from_registry(self.adata, "batch_indices").astype(np.int8)
self.batch_tensor = torch.tensor(self.batch_indices, device=self.device)
self.batch_names = self.adata.obs[batch_key].tolist()
self.celltype_enc = [0]*len(self.adata.obs[cell_type_key].unique().tolist())
for i, cell_type in enumerate(self.adata.obs[cell_type_key].unique().tolist()):
label = self.adata.obs['_scvi_labels'].unique().tolist()[i]
self.celltype_enc[label] = cell_type
self.post_adata = self.latent_as_anndata()
def latent_as_anndata(self):
if type(self.outer_model) is TOTALVI:
latent = self.outer_model.get_latent_representation(self.adata)
else:
if self.modified:
latents = self.model.sample_from_posterior_z(
self.x_tensor,
y=self.label_tensor,
batch_index=self.batch_tensor
)
else:
latents = self.model.sample_from_posterior_z(
self.x_tensor,
y=self.label_tensor,
)
if self.annotated:
latent = latents.cpu().detach().numpy()
latent2, _, _ = self.model.encoder_z2_z1(latents, self.label_tensor)
latent2 = latent2.cpu().detach().numpy()
post_adata_2 = sc.AnnData(latent2)
post_adata_2.obs['cell_type'] = self.cell_types
post_adata_2.obs['batch'] = self.batch_names
self.post_adata_2 = post_adata_2
else:
latent = latents.cpu().detach().numpy()
post_adata = sc.AnnData(latent)
post_adata.obs['cell_type'] = self.cell_types
post_adata.obs['batch'] = self.batch_names
return post_adata
def get_model_arch(self):
for name, p in self.model.named_parameters():
print(name, " - ", p.size(0), p.size(-1))
def plot_latent(self,
show=True,
save=False,
dir_path=None,
n_neighbors=8,
predictions=False,
in_one=False,
colors=None):
"""
if save:
if dir_path is None:
name = 'scanvi_latent.png'
else:
name = f'{dir_path}.png'
else:
name = False
"""
if self.model is None:
print("Not possible if no model is provided")
return
if save:
show = False
if dir_path is None:
dir_path = 'scanvi_latent'
sc.pp.neighbors(self.post_adata, n_neighbors=n_neighbors)
sc.tl.umap(self.post_adata)
if in_one:
color = ['cell_type', 'batch']
if predictions:
                color.extend(['certainty', 'predictions', 'type_check'])
sc.pl.umap(self.post_adata,
color=color,
ncols=2,
frameon=False,
wspace=0.6,
show=show,
save=f'{dir_path}_complete.png' if dir_path else None)
else:
sc.pl.umap(self.post_adata,
color=['cell_type'],
frameon=False,
wspace=0.6,
show=show,
palette=colors,
save=f'{dir_path}_celltypes.png' if dir_path else None)
sc.pl.umap(self.post_adata,
color=['batch'],
frameon=False,
wspace=0.6,
show=show,
save=f'{dir_path}_batch.png' if dir_path else None)
if predictions:
sc.pl.umap(self.post_adata,
color=['predictions'],
frameon=False,
wspace=0.6,
show=show,
save=f'{dir_path}_predictions.png' if dir_path else None)
sc.pl.umap(self.post_adata,
color=['certainty'],
frameon=False,
wspace=0.6,
show=show,
save=f'{dir_path}_certainty.png' if dir_path else None)
sc.pl.umap(self.post_adata,
color=['type_check'],
ncols=2,
frameon=False,
wspace=0.6,
show=show,
save=f'{dir_path}_type_check.png' if dir_path else None)
def plot_history(self, show=True, save=False, dir_path=None):
if self.trainer is None:
print("Not possible if no trainer is provided")
return
if self.annotated:
fig, axs = plt.subplots(2, 1)
elbo_full = self.trainer.history["elbo_full_dataset"]
x_1 = np.linspace(0, len(elbo_full), len(elbo_full))
axs[0].plot(x_1, elbo_full, label="Full")
accuracy_labelled_set = self.trainer.history["accuracy_labelled_set"]
accuracy_unlabelled_set = self.trainer.history["accuracy_unlabelled_set"]
if len(accuracy_labelled_set) != 0:
x_2 = np.linspace(0, len(accuracy_labelled_set), (len(accuracy_labelled_set)))
axs[1].plot(x_2, accuracy_labelled_set, label="accuracy labelled")
if len(accuracy_unlabelled_set) != 0:
x_3 = np.linspace(0, len(accuracy_unlabelled_set), (len(accuracy_unlabelled_set)))
axs[1].plot(x_3, accuracy_unlabelled_set, label="accuracy unlabelled")
axs[0].set_xlabel('Epochs')
axs[0].set_ylabel('ELBO')
axs[1].set_xlabel('Epochs')
axs[1].set_ylabel('Accuracy')
axs[0].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
axs[1].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
if save:
if dir_path is None:
plt.savefig('scanvi_history.png', bbox_inches='tight')
else:
plt.savefig(f'{dir_path}.png', bbox_inches='tight')
if show:
plt.show()
else:
fig = plt.figure()
elbo_train = self.trainer.history["elbo_train_set"]
elbo_test = self.trainer.history["elbo_test_set"]
x = np.linspace(0, len(elbo_train), len(elbo_train))
plt.plot(x, elbo_train, label="train")
plt.plot(x, elbo_test, label="test")
plt.ylim(min(elbo_train) - 50, min(elbo_train) + 1000)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
if save:
if dir_path is None:
plt.savefig('scvi_history.png', bbox_inches='tight')
else:
plt.savefig(f'{dir_path}.png', bbox_inches='tight')
if show:
plt.show()
def get_ebm(self, n_neighbors=50, n_pools=50, n_samples_per_pool=100, verbose=True):
ebm_score = entropy_batch_mixing(
adata=self.post_adata,
label_key='batch',
n_neighbors=n_neighbors,
n_pools=n_pools,
n_samples_per_pool=n_samples_per_pool
)
if verbose:
print("Entropy of Batchmixing-Score:", ebm_score)
return ebm_score
def get_knn_purity(self, n_neighbors=50, verbose=True):
knn_score = knn_purity(
adata=self.post_adata,
label_key='cell_type',
n_neighbors=n_neighbors
)
if verbose:
print("KNN Purity-Score:", knn_score)
return knn_score
def get_asw(self):
asw_score_batch, asw_score_cell_types = asw(adata=self.post_adata, label_key='cell_type',batch_key='batch')
print("ASW on batch:", asw_score_batch)
print("ASW on celltypes:", asw_score_cell_types)
return asw_score_batch, asw_score_cell_types
def get_nmi(self):
nmi_score = nmi(adata=self.post_adata, label_key='cell_type')
print("NMI score:", nmi_score)
return nmi_score
def get_latent_score(self):
ebm = self.get_ebm(verbose=False)
knn = self.get_knn_purity(verbose=False)
score = ebm + knn
print("Latent-Space Score (KNN + EBM):", score)
return score
def get_classification_accuracy(self):
if self.annotated:
if self.modified:
softmax = self.model.classify(self.x_tensor, batch_index=self.batch_tensor)
else:
softmax = self.model.classify(self.x_tensor)
softmax = softmax.cpu().detach().numpy()
self.predictions = np.argmax(softmax, axis=1)
self.certainty = np.max(softmax, axis=1)
self.prediction_names = [0]*self.predictions.shape[0]
for index, label in np.ndenumerate(self.predictions):
self.prediction_names[index[0]] = self.celltype_enc[label]
self.class_check = np.array(np.expand_dims(self.predictions, axis=1) == self.labels)
class_check_labels = [0] * self.class_check.shape[0]
for index, check in np.ndenumerate(self.class_check):
class_check_labels[index[0]] = 'Correct' if check else 'Incorrect'
accuracy = np.sum(self.class_check) / self.class_check.shape[0]
self.post_adata.obs['certainty'] = self.certainty
self.post_adata.obs['type_check'] = class_check_labels
self.post_adata.obs['predictions'] = self.prediction_names
print("Classification Accuracy: %0.2f" % accuracy)
return accuracy
else:
print("Classification ratio not available for scVI models")
def get_f1_score(self):
if self.annotated:
if self.modified:
predictions = self.model.classify(self.x_tensor, batch_index=self.batch_tensor)
else:
predictions = self.model.classify(self.x_tensor)
self.predictions = predictions.cpu().detach().numpy()
self.predictions = np.expand_dims(np.argmax(self.predictions, axis=1), axis=1)
score = f1_score(self.labels, self.predictions, average='macro')
print("F1 Score: %0.2f" % score)
return score
else:
print("F1 Score not available for scVI models")
|
import traci
import subprocess
import sys
import os
from sumolib import checkBinary
import time
if __name__ == '__main__':
try:
sys.path.append(os.path.join(os.path.dirname(
__file__), '..', '..', '..', '..', "tools"))
sys.path.append(os.path.join(os.environ.get("SUMO_HOME", os.path.join(
os.path.dirname(__file__), "..", "..", "..")), "tools")) # tutorial in doc
except ImportError:
sys.exit("please declare environment variable 'SUMO_HOME' as the root directory of your sumo installation (it should contain folders 'bin', 'tools' and 'docs')")
sumoBinary = checkBinary('sumo-gui')
sumoProcess = subprocess.Popen([sumoBinary, "-c", "../prueba.sumocfg", "--remote-port", "2081", "-l", "prueba_log"], stdout=sys.stdout, stderr=sys.stderr)
traci.init(2081)
while traci.simulation.getMinExpectedNumber() > 0:
print ("time: " + str(traci.simulation.getCurrentTime()/1000))
option=raw_input("Opcion: ")
if option=="c":
traci.simulationStep()
elif option=="i":
id=raw_input("ID del vehiculo: ")
print ("Ruta prevista: " + str(traci.vehicle.getRoute(id)) + "\n")
edge=raw_input("edge de la ruta: ")
edge_list=[]
while edge<>"f":
edge_list.append(edge)
edge=raw_input("edge de la ruta: ")
traci.vehicle.setRoute(id, edge_list)
print (traci.vehicle.getRoute(id))
print "Fin de la simulacion"
|
'''
Created on Oct 28, 2020
Define the Config, Parser, and ParserFactory objects for reading and storing
GaMD simulation parameters.
@author: lvotapka
'''
from __future__ import absolute_import
import xml.etree.ElementTree as ET
from xml.dom import minidom
from abc import ABC
from abc import abstractmethod
from simtk import unit
from gamd import config
def strBool(bool_str):
"""
    Takes the string "true" or "false" in any letter case and returns the
    corresponding boolean.
"""
if bool_str.lower() == "true":
return True
elif bool_str.lower() == "false":
return False
else:
raise Exception(
"argument for strBool must be string either 'True' or 'False'.")
class Parser(ABC):
def __init__(self):
self.config = config.Config()
def assign_value(self, value, func, useunit=None):
if value is not None:
if useunit is None:
return func(value)
else:
return unit.Quantity(func(value), useunit)
else:
return None
@abstractmethod
def parse_file(self, filename):
raise NotImplementedError("must implement parse_file")
class XmlParser(Parser):
def __init__(self):
super(XmlParser, self).__init__()
def assign_tag(self, tag, func, useunit=None):
if tag is not None:
return self.assign_value(tag.text, func, useunit)
else:
return None
def parse_file(self, filename):
tree = ET.parse(filename)
root = tree.getroot()
for tag in root:
if tag.tag == "system_files":
xml_system_files_type_text = tag.find("type").text.lower()
if xml_system_files_type_text == "amber":
amber_config = config.AmberConfig()
for amber_tag in tag:
if amber_tag.tag == "prmtop_filename":
amber_config.prmtop_filename = \
self.assign_tag(amber_tag, str)
elif amber_tag.tag == "inpcrd_filename":
amber_config.inpcrd_filename = \
self.assign_tag(amber_tag, str)
elif amber_tag.tag == \
"load_box_vectors_from_coordinates_file":
amber_config.load_box_vecs_from_coords_file = \
self.assign_tag(amber_tag, strBool)
elif amber_tag.tag == "type":
pass
else:
print("Warning: parameter in XML not found in "\
"amber_config. Spelling error?",
amber_tag.tag)
self.config.system_files_config = amber_config
elif xml_system_files_type_text == "charmm":
charmm_config = config.CharmmConfig()
for charmm_tag in tag:
if charmm_tag.tag == "psf_filename":
charmm_config.psf_filename = \
self.assign_tag(charmm_tag, str)
elif charmm_tag.tag == "pdb_filename":
charmm_config.pdb_filename = \
self.assign_tag(charmm_tag, str)
elif charmm_tag.tag == \
"params_filenames":
charmm_config.params_filenames = []
for xml_params_filename in charmm_tag:
charmm_config.params_filenames.append(
self.assign_tag(xml_params_filename, str))
elif charmm_tag.tag == "type":
pass
else:
print("Warning: parameter in XML not found in "\
"charmm_config. Spelling error?",
charmm_tag.tag)
self.config.system_files_config = charmm_config
elif xml_system_files_type_text == "gromacs":
gromacs_config = config.GromacsConfig()
for gro_tag in tag:
if gro_tag.tag == "gro_filename":
gromacs_config.gro_filename = \
self.assign_tag(gro_tag, str)
elif gro_tag.tag == "top_filename":
gromacs_config.top_filename = \
self.assign_tag(gro_tag, str)
elif gro_tag.tag == "include_dir":
gromacs_config.include_dir = \
self.assign_tag(gro_tag, str)
elif gro_tag.tag == "type":
pass
else:
print("Warning: parameter in XML not found in "\
"gromacs_config. Spelling error?",
gro_tag.tag)
self.config.system_files_config = gromacs_config
elif xml_system_files_type_text == "forcefield":
forcefield_config = config.ForceFieldConfig()
for forcefield_tag in tag:
if forcefield_tag.tag == "pdb_filename":
forcefield_config.pdb_filename = \
self.assign_tag(forcefield_tag, str)
elif forcefield_tag.tag == \
"forcefield_filenames":
forcefield_config.forcefield_filenames = []
for forcefield_filename in forcefield_tag:
forcefield_config.forcefield_filenames.append(
self.assign_tag(forcefield_filename, str))
                        elif forcefield_tag.tag == "type":
                            pass
                        else:
                            print("Warning: parameter in XML not found in "\
                                  "forcefield_config. Spelling error?",
                                  forcefield_tag.tag)
self.config.system_files_config = forcefield_config
else:
raise Exception("system_files type not implemented:",
xml_system_files_type_text)
elif tag.tag == "box_vectors":
                self.config.box_vectors = config.deserialize_box_vectors(tag)
elif tag.tag == "output_directory":
self.config.output_directory = self.assign_tag(tag, str)
elif tag.tag == "overwrite_output":
self.config.overwrite_output = self.assign_tag(tag, strBool)
elif tag.tag == "chunk_size":
self.config.chunk_size = self.assign_tag(tag, int)
elif tag.tag == "nonbonded_method":
self.config.nonbonded_method = \
self.assign_tag(tag, str).lower()
elif tag.tag == "nonbonded_cutoff":
self.config.nonbonded_cutoff = self.assign_tag(
tag, float, useunit=unit.nanometer)
elif tag.tag == "constraints":
self.config.constraints = self.assign_tag(tag, str).lower()
elif tag.tag == "integrator_type":
self.config.integrator_type = self.assign_tag(tag, str).lower()
elif tag.tag == "friction_coefficient":
self.config.friction_coefficient = self.assign_tag(
tag, float, useunit=unit.picosecond**-1)
elif tag.tag == "target_temperature":
self.config.target_temperature = self.assign_tag(
tag, float, useunit=unit.kelvin)
elif tag.tag == "random_seed":
self.config.random_seed = self.assign_tag(tag, int)
elif tag.tag == "dt":
self.config.dt = self.assign_tag(
tag, float, useunit=unit.picosecond)
elif tag.tag == "use_barostat":
self.config.use_barostat = self.assign_tag(tag, strBool)
elif tag.tag == "barostat_target_pressure":
self.config.barostat_target_pressure = self.assign_tag(
tag, float, useunit=unit.bar)
elif tag.tag == "barostat_target_temperature":
self.config.barostat_target_temperature = self.assign_tag(
tag, float, useunit=unit.kelvin)
elif tag.tag == "barostat_frequency":
self.config.barostat_frequency = self.assign_tag(tag, int)
elif tag.tag == "run_minimization":
self.config.run_minimization = self.assign_tag(tag, strBool)
elif tag.tag == "initial_temperature":
self.config.initial_temperature = self.assign_tag(
tag, float, useunit=unit.kelvin)
elif tag.tag == "energy_reporter_frequency":
self.config.energy_reporter_frequency = self.assign_tag(
tag, int)
elif tag.tag == "coordinates_reporter_frequency":
self.config.coordinates_reporter_frequency = self.assign_tag(
tag, int)
elif tag.tag == "coordinates_reporter_file_type":
self.config.coordinates_reporter_file_type = self.assign_tag(
tag, str).lower()
elif tag.tag == "gamd_bound":
self.config.gamd_bound = self.assign_tag(tag, str).lower()
elif tag.tag == "total_simulation_length":
self.config.total_simulation_length = self.assign_tag(tag, int)
elif tag.tag == "total_boost":
self.config.total_boost = self.assign_tag(tag, strBool)
elif tag.tag == "total_boost_sigma0":
self.config.total_boost_sigma0 = self.assign_tag(
tag, float, useunit=unit.kilocalories_per_mole)
elif tag.tag == "dihedral_boost":
self.config.dihedral_boost = self.assign_tag(tag, strBool)
elif tag.tag == "dihedral_boost_sigma0":
self.config.dihedral_boost_sigma0 = self.assign_tag(
tag, float, useunit=unit.kilocalories_per_mole)
elif tag.tag == "num_steps_conventional_md":
self.config.num_steps_conventional_md = self.assign_tag(
tag, int)
elif tag.tag == "num_steps_conventional_md_prep":
self.config.num_steps_conventional_md_prep = self.assign_tag(
tag, int)
elif tag.tag == "num_steps_per_averaging":
self.config.num_steps_per_averaging = self.assign_tag(
tag, int)
elif tag.tag == "num_steps_gamd_equilibration":
self.config.num_steps_gamd_equilibration = self.assign_tag(
tag, int)
elif tag.tag == "num_steps_gamd_equilibration_prep":
self.config.num_steps_gamd_equilibration_prep = self.assign_tag(
tag, int)
elif tag.tag == "restart_checkpoint_filename":
self.config.restart_checkpoint_filename = self.assign_tag(
tag, str)
elif tag.tag == "restart_checkpoint_frequency":
self.config.restart_checkpoint_frequency = self.assign_tag(
tag, int)
else:
print("Warning: parameter in XML not found in config. "\
"Spelling error?", tag.tag)
class ParserFactory:
def __init__(self):
return
def parse_file(self, input_file, input_type):
input_type = input_type.lower()
if input_type == "xml":
myparser = XmlParser()
myparser.parse_file(input_file)
config = myparser.config
else:
raise Exception("input type not implemented: %s", input_type)
return config
if __name__ == "__main__":
myparser = XmlParser()
myparser.parse_file("/tmp/gamdconfig.xml")
|
from dataclasses import dataclass
@dataclass
class Options:
"""List of configurable options of the `flake8-too-many` plugin."""
ignore_defaulted_arguments: bool
max_function_arguments: int
max_function_return_stmts: int
max_function_return_values: int
max_unpacking_targets: int
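# Illustrative sketch (added): constructing the plugin options with made-up limits,
# just to show the expected field types. These values are not the plugin's defaults:
# Options(ignore_defaulted_arguments=True, max_function_arguments=6,
#         max_function_return_stmts=3, max_function_return_values=3,
#         max_unpacking_targets=4)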
|
# this file was originally in the exp_3_debiased folder
# plot the predicted dense force map together with the true force points
# show how the learned map changes as training grows from 1 point to 10 points
import numpy as np
import autosklearn.regression
import sklearn.model_selection
import sklearn.datasets
import sklearn.metrics
import scipy
import pickle
import random
import matplotlib.pyplot as plt
from train_config import *
from data_loader import normalize_points
import glob
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 100
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (6,8)
import time
def load_data(point_path, force_path, probe_type='point', datatype='1'):
points=[]
colors=[]
normals=[]
curvatures=[]
dataFile=open(point_path,'r')
for line in dataFile:
line=line.rstrip()
l=[num for num in line.split(' ')]
l2=[float(num) for num in l]
points.append(l2[0:3])
colors.append(l2[0:3])
normals.append(l2[6:9])
curvatures.append(l2[9])
dataFile.close()
# normalize, note colors and normals is 0~1
points = np.array(points)
colors = np.array(colors)
normals = np.array(normals)
curvatures = np.array(curvatures)
max_range = max([ (np.max(points[:,0])-np.min(points[:,0])) , (np.max(points[:,1])-np.min(points[:,1])) , (np.max(points[:,2])-np.min(points[:,2])) ])
for i in range(3):
points[:,i] = (points[:,i]-np.min(points[:,i]))/max_range
num_point = len(points)
print('[*]load %d points, and normalized'%num_point)
'''
X = np.array([[]])
Y = np.array([[]])
insert_i = 0
'''
X=[]
Y=[]
for i in range(num_point):
force_path = './'+probe_type+'/force_'+str(i)+'.txt'
force=[]
force_normal=[]
displacement=[]
theta=[]
dataFile=open(force_path,'r')
for line in dataFile:
line=line.rstrip()
l=[num for num in line.split(' ')]
l2=[float(num) for num in l]
if probe_type == 'point':
force.append(l2[0:3])
force_normal.append(l2[3])
displacement.append(l2[4])
else:
force.append(l2[0:3])
force_normal.append(l2[3])
displacement.append(l2[4])
theta.append(l2[5:7])
dataFile.close()
# clean
#TODO:
# final
if probe_type == 'point':
num_dis = len(displacement)
#print('---load %d displacement'%num_dis)
displacement = np.resize(np.array(displacement),(num_dis,1))
X_i = np.hstack((np.tile(points[i],(num_dis,1)), displacement))
Y_i = np.array(force_normal,ndmin=2).T
'''
if insert_i == 0:
X=X_i
Y=Y_i
else:
X = np.vstack((X,X_i))
Y = np.vstack((Y,Y_i))
insert_i = insert_i + 1
'''
X.append(X_i)
Y.append(Y_i)
return X,Y
def my_train_test_split(X,y,num_point=1,train_size=0.8,select_method='random'):
num_point = len(X)
if select_method=='random':
train_index = random.sample(range(num_point),int(train_size*num_point))
test_index = [x for x in range(num_point) if x not in train_index]
elif select_method=='uniform':
train_index = [int(i*(1.0/train_size)) for i in range(int(train_size*num_point))]
test_index = [x for x in range(num_point) if x not in train_index]
flag = 0
for i in train_index:
if flag==0:
X_train = X[i]
y_train = y[i]
flag = 1
else:
X_train = np.vstack((X_train,X[i]))
y_train = np.vstack((y_train,y[i]))
flag = 0
for i in test_index:
if flag==0:
X_test = X[i]
y_test = y[i]
flag = 1
else:
X_test = np.vstack((X_test,X[i]))
y_test = np.vstack((y_test,y[i]))
return X_train, X_test, y_train, y_test
def my_train_test_split2(X,y,num_point=1,train_size=0.8,use_all=False):
num_point = len(X)
train_index = random.sample(range(num_point),int(train_size*num_point))
test_index = [x for x in range(num_point) if x not in train_index]
flag = 0
for i in train_index:
if flag==0:
X_train = X[i]
y_train = y[i]
flag = 1
else:
X_train = np.vstack((X_train,X[i]))
y_train = np.vstack((y_train,y[i]))
if use_all == False:
flag = 0
for i in test_index:
if flag==0:
X_test = X[i]
y_test = y[i]
flag = 1
else:
X_test = np.vstack((X_test,X[i]))
y_test = np.vstack((y_test,y[i]))
if use_all == False:
return X_train, X_test, y_train, y_test
else:
return X_train, y_train
def load_pcd(path, pcdtype='xyzrgbn'):
points=[]
normals=[]
normal_theta=[]
theta=[]
pt_index=[]
lines=[]
dataFile=open(path,'r')
for line in dataFile:
line=line.rstrip()
l=[num for num in line.split(' ')]
l2=[float(num) for num in l]
lines.append(l2)
points.append(l2[0:3])
normals.append(l2[6:9])
if pcdtype == 'xyzrgbntheta':
normal_theta.append(l2[10:13])
theta.append(l2[13])
pt_index.append(l2[14])
dataFile.close()
print('---------------------pcd loaded -----------------------------')
if pcdtype == 'xyzrgbn':
return points, normals
elif pcdtype == 'xyzrgbntheta':
return points, normals, normal_theta, theta, pt_index
elif pcdtype == 'return_lines':
return lines
def main(point_num):
X_poke,y_poke = load_data('./probePcd.txt','.') #note: is list
X = load_pcd('./originalPcd.txt',pcdtype='return_lines') #note: is list
X = np.array(X)
print(X.shape)
X = X[:,0:3]
set_displacement = -0.002
X = np.hstack((X, np.tile(set_displacement,(X.shape[0],1))))
X = X[X[:,1]>-0.01]
X = normalize_points(X, location_offset[0:3], location_offset[3])
model_path = './models_dense/model_pt'+str(point_num)+'.pkl'
print(model_path)
index_cur = train_indexes_dense[0][point_num-1]
print(index_cur)
with open(model_path, 'rb') as f:
s2 = f.read()
automl = pickle.loads(s2)
predictions = automl.predict(X)
#print('[*]load model and predict at %f s, average: %f s'%(t_e-t_s, (t_e-t_s)/X.shape[0]))
X[:,0] = X[:,0]*location_offset[3]*100 #cm
X[:,1] = X[:,1]*location_offset[3]*100 #cm
cm = plt.cm.get_cmap('jet')
#sc = plt.scatter(Xe[:,0], Xe[:,1], c=Xe[:,4], vmin=0, vmax=err_max, s=20, cmap=cm)
sc = plt.scatter(X[:,0], X[:,1], c=predictions, vmin=0, vmax=1.5, s=4, cmap=cm)
for i in index_cur:
cur_X_array = X_poke[i]
cur_y_array = y_poke[i]
#print(cur_X_array[:,3])
y_true = cur_y_array[cur_X_array[:,3]>set_displacement]
y_true = y_true[0]
loc_x = cur_X_array[0,0]*location_offset[3] * 100
loc_y = cur_X_array[0,1]*location_offset[3]* 100
#colors = plt.cm.jet(y_true)
plt.scatter(loc_x, loc_y, color=plt.cm.jet(y_true/2),s=100,edgecolors='k')#, c=y_true, vmin=0, vmax=3, s=100, cmap=cm)
#plt.text(loc_x,loc_y,str(i))
cbar=plt.colorbar(sc)
cbar.ax.set_ylabel('Force (N)', labelpad=30,rotation=270,fontsize=25)
cbar.ax.tick_params(labelsize=16)
plt.xlabel('x (cm)', fontsize=25)
plt.ylabel('y (cm)', fontsize=25)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.axis('auto')
plt.savefig('./dense_fig/pt_'+str(point_num)+'.png')
plt.show()
if __name__ == "__main__":
for i in range(10):
main(i+1)
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
with open('lah/version.py') as f:
exec(f.read())
tests_require = [
"mock",
"nose",
]
install_requires=[
"biopython>=1.46",
"click==7.0",
"Jinja2>=2.10.1",
"natsort",
"pyyaml==5.1",
"SQLAlchemy>=1.3.10",
"tabulate",
"yoyo-migrations>=6.1.0",
]
setup(
name='lah',
version=__version__,
description='Sequence Transform',
long_description=readme,
author='Eddie Belter',
author_email='ebelter@wustl.edu',
license=license,
url='https://github.com/hall-lab/lah.git',
install_requires=install_requires,
entry_points='''
[console_scripts]
lah=lah.cli:cli
''',
setup_requires=["pytest-runner"],
test_suite="nose.collector",
    tests_require=tests_require,
    packages=find_packages(include=['lah'], exclude=('tests',)),
include_package_data=True,
package_data={"lah": ["db-migrations/*"]},
)
|
'''
Bluetooth socket support
Copyright 2018 Gunnar Bowman, Emily Boyes, Trip Calihan, Simon D. Levy, Shepherd Sims
MIT License
'''
import os
import time
import bluetooth as bt
pathON = '/home/pi/ObjectsNeeded.txt'
pathOP = '/home/pi/ObjectsPresent.txt'
pathBL = '/home/pi/BatteryLevel.txt'
class BluetoothServer(object):
'''
Provides an abstract class for serving sockets over Bluetooth. You call the constructor and the start()
method. You must implement the method handleMessage(self, message) to handle messages from the client.
'''
def __init__(self):
'''
Constructor
'''
# Arbitrary service UUID to advertise
self.uuid = "7be1fcb3-5776-42fb-91fd-2ee7b5bbb86d"
self.client_sock = None
def start(self):
'''
        Serves a socket on the default port, listening for clients. Upon client connection, runs a loop
        that receives period-delimited messages from the client and calls the sub-class's
handleMessage(self, message) method. Sub-class can call send(self, message) to send a
message back to the client. Begins listening again after client disconnects.
'''
# Make device visible
os.system("hciconfig hci0 piscan")
# Create a new server socket using RFCOMM protocol
server_sock = bt.BluetoothSocket(bt.RFCOMM)
# Bind to any port
        server_sock.bind(("", bt.PORT_ANY))
#server_sock.find(("", 1))#
# Start listening
server_sock.listen(1)
        # Get the port the server socket is listening on
port = server_sock.getsockname()[1]
# Start advertising the service
bt.advertise_service(server_sock, "RaspiBtSrv",
service_id=self.uuid,
service_classes=[self.uuid, bt.SERIAL_PORT_CLASS],
profiles=[bt.SERIAL_PORT_PROFILE])
# Outer loop: listen for connections from client
while True:
print("Waiting for connection on RFCOMM channel %d" % port)
try:
# This will block until we get a new connection
self.client_sock, client_info = server_sock.accept()
print("Accepted connection from " + str(client_info))
# Track strings delimited by '.'
s = ''
try:
on = open(pathON, 'r')
neededContents = on.read()
nC = neededContents.split('|')[:-1]
op = open(pathOP, 'r')
presentContents = op.read()
pC = presentContents.split('|')[:-1]
bl = open(pathBL, 'r')
bp = bl.read() # bp - battery percentage
print("Sending List of Objects missing")
for need in nC:
if need not in pC:
print(need)
self.send("OM:"+need)
print("Sending List of Objects present ")
for present in pC:
print(present)
self.send("OP:"+present)
print("Sending battery level of Pi")
print(bp)
self.send("BL:"+bp)
except IOError:
print("Trouble reading file. Trying again after 5 seconds")
time.sleep(5)
#while True:
#self.send("Object Missing");
#self.send("U|0|1")
#c = self.client_sock.recv(1).decode('utf-8')
#if c == '.' and len(s) > 0:
# self.handleMessage(s)
#self.send(s)
#print(s)
# self.handleMessage(s)
# s = ''
#else:
# s += c
except IOError:
pass
except KeyboardInterrupt:
if self.client_sock is not None:
self.client_sock.close()
server_sock.close()
print("Server going down")
break
def send(self, message):
'''
Appends a period to your message and sends the message back to the client.
'''
self.client_sock.send((message+'.').encode('utf-8'))
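# --- Added illustration (not part of the original file) ---
# A minimal sketch of how a sub-class might look, following the class
# docstring above: implement handleMessage() and call start(). The
# EchoServer name is hypothetical and a paired Bluetooth client is assumed,
# so this stays commented out.
#
# class EchoServer(BluetoothServer):
#     def handleMessage(self, message):
#         self.send('echo:' + message)
#
# if __name__ == '__main__':
#     EchoServer().start()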
|
from socket import *
from select import *
import sys
from time import ctime
# Define the socket address, port, and buffer size
HOST = '192.168.0.5' # IP address of this machine on the router's network
PORT = 8000
BUFSIZE = 1024
ADDR = (HOST, PORT)
flag = True
# Initialize the socket server
serv = socket(AF_INET, SOCK_STREAM)
serv.close()
# Create the server socket
server = socket(AF_INET, SOCK_STREAM)
# Bind the address
server.bind(ADDR)
server.listen(10)
# List of connected sockets
connection_list = [server]
print('==============================================')
print('Starting the chat server. Waiting for connections on port %s.' % str(PORT))
print('==============================================')
client_socket, client_address = server.accept()
print('[INFO][%s] A new client (%s) has connected.' % (ctime(), client_address[0]))
connection_list.append(client_socket)
# ========== Currently does not run repeatedly ==========
# Only handles a single exchange at the moment
try:
while flag:
if client_socket != server:
            # Receive data from the client
            data = client_socket.recv(BUFSIZE).decode()
            print('hello : %s' % str(data))
            # If data was received
            if data:
                print('[INFO][%s] Received data from the client.' % ctime())
                msg = 'Server got: ' + data
                # Send the message to all connected sockets
                client_socket.sendall(msg.encode())
            # If no data was received
            else:
                flag = False
                print('[INFO][%s] The connection with the user was closed.' % ctime())
                client_socket.close()
except Exception as e:
    # Close the client socket and the server
client_socket.close()
server.close()
|
from protocol.Radio.RadioProtocol import radioProtocol
import configparser
class protocolManager(object):
radio = None
config = None
def __init__(self, config):
self.config = config
while True:
if self.config['Protocol']['Radio']:
print("oui")
# Modifier quand le catch retournera un bodySensor
#if self.radio is None:
# self.radio = radioProtocol()
#self.radio.catch()
|
import torch
CHUNK_NUM = 32
def partition_without_replication(device, probs, ids):
"""Partition node with given node IDs and node access distribution.
    The result will cause no replication between each partition.
We assume node IDs can be placed in the given device.
Args:
device (int): device which computes the partitioning strategy
probs (torch.Tensor): node access distribution
ids (Optional[torch.Tensor]): specified node IDs
Returns:
[torch.Tensor]: list of IDs for each partition
"""
ranks = len(probs)
if ids is not None:
ids = ids.to(device)
probs = [
prob[ids].to(device) if ids is not None else prob.to(device)
for prob in probs
]
total_size = ids.size(0) if ids is not None else probs[0].size(0)
res = [None] * ranks
for rank in range(ranks):
res[rank] = []
CHUNK_SIZE = (total_size + CHUNK_NUM - 1) // CHUNK_NUM
chunk_beg = 0
beg_rank = 0
for i in range(CHUNK_NUM):
chunk_end = min(total_size, chunk_beg + CHUNK_SIZE)
chunk_size = chunk_end - chunk_beg
chunk = torch.arange(chunk_beg,
chunk_end,
dtype=torch.int64,
device=device)
probs_sum_chunk = [
torch.zeros(chunk_size, device=device) + 1e-6 for i in range(ranks)
]
for rank in range(ranks):
for dst_rank in range(ranks):
if dst_rank == rank:
probs_sum_chunk[rank] += probs[dst_rank][chunk] * ranks
else:
probs_sum_chunk[rank] -= probs[dst_rank][chunk]
acc_size = 0
rank_size = (chunk_size + ranks - 1) // ranks
picked_chunk_parts = torch.LongTensor([]).to(device)
for rank_ in range(beg_rank, beg_rank + ranks):
rank = rank_ % ranks
probs_sum_chunk[rank][picked_chunk_parts] -= 1e6
rank_size = min(rank_size, chunk_size - acc_size)
_, rank_order = torch.sort(probs_sum_chunk[rank], descending=True)
pick_chunk_part = rank_order[:rank_size]
pick_ids = chunk[pick_chunk_part]
picked_chunk_parts = torch.cat(
(picked_chunk_parts, pick_chunk_part))
res[rank].append(pick_ids)
acc_size += rank_size
beg_rank += 1
chunk_beg += chunk_size
for rank in range(ranks):
res[rank] = torch.cat(res[rank])
if ids is not None:
res[rank] = ids[res[rank]]
return res
def partition_with_replication(device, probs, ids, per_rank_size):
"""Partition node with given node IDs and node access distribution.
    The result will cause replication between each partition,
but the size of each partition will not exceed per_rank_size.
"""
partition_res = partition_without_replication(device, probs, ids)
if ids is not None:
ids = ids.to(device)
ranks = len(probs)
total_res = [
torch.empty(per_rank_size, device=device) for i in range(ranks)
]
probs = [prob.clone().to(device) for prob in probs]
for rank in range(ranks):
partition_ids = partition_res[rank]
probs[rank][partition_ids] = -1e6
replication_size = per_rank_size - partition_ids.size(0)
_, prev_order = torch.sort(probs[rank], descending=True)
        if ids is not None:
            replication_ids = ids[prev_order[:replication_size]]
        else:
            replication_ids = prev_order[:replication_size]
total_res[rank] = torch.cat((partition_ids, replication_ids))
return total_res
def select_nodes(device, probs, ids):
nodes = probs[0].size(0)
prob_sum = torch.zeros(nodes, device=device)
for prob in probs:
if ids is None:
prob_sum += prob
else:
prob_sum[ids] += prob[ids]
node_ids = torch.nonzero(prob_sum)
return prob_sum, node_ids
def partition_free(device, probs, ids, per_rank_size):
"""Partition node with given node IDs and node access distribution.
The result will cause either replication or missing nodes across partitions.
The size of each partition is limited by per_rank_size.
"""
prob_sum, node_ids = select_nodes(device, probs, ids)
nodes = node_ids.size(0)
ranks = len(probs)
limit = ranks * per_rank_size
if nodes <= limit:
return partition_with_replication(device, probs, node_ids,
per_rank_size), None
else:
_, prev_order = torch.sort(prob_sum, descending=True)
limit_ids = prev_order[:limit]
return partition_without_replication(device, probs,
node_ids), limit_ids
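if __name__ == "__main__":
    # Added usage sketch (not in the original file): partition 1000 synthetic
    # node-access distributions across 2 ranks on CPU, first without
    # replication and then with an assumed per-rank budget of 600 nodes.
    probs = [torch.rand(1000), torch.rand(1000)]
    parts = partition_without_replication("cpu", probs, None)
    print("no replication:", [p.size(0) for p in parts])
    parts_rep = partition_with_replication("cpu", probs, None, per_rank_size=600)
    print("with replication:", [p.size(0) for p in parts_rep])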
|
import sqlite3
import os
PATH = os.path.join(os.getcwd(), 'resources', 'feitico.db')
conn = sqlite3.connect(PATH)
cursor = conn.cursor()
###################
# CREATING TABLES #
###################
# cursor.execute("""
# CREATE TABLE feitico (
# id_feitico INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
# nome VARCHAR(255) NOT NULL UNIQUE,
# descricao TEXT NOT NULL,
# disponivel BOOLEAN NOT NULL,
# nivel INTEGER NOT NULL,
# dificuldade INTEGER NOT NULL,
# tipo INTEGER NOT NULL,
# ex_solucao TEXT
# );
# """)
# cursor.execute("""
# CREATE TABLE status (
# id_status INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
# id_feitico INTEGER,
# assunto VARCHAR(255) NOT NULL,
# valor INTEGER NOT NULL,
# FOREIGN KEY(id_feitico) REFERENCES feitico(id_feitico)
# );
# """)
# ADDING A COLUMN TO THE TABLE
# cursor.execute("""
# ALTER TABLE feitico ADD COLUMN json TEXT;
# """)
#####################
# INSERTING VALUES  #
#####################
# cursor.execute("""
# INSERT INTO feitico VALUES (
# null,
# 'teste',
# 'feitico que ordena livros',
# 1,
# 0,
# 0,
# 1,
# null
# );
# """)
# cursor.execute("""
# INSERT INTO status VALUES (
# null,
# 4,
# 'operadores',
# 90
# );
# """)
conn.commit()
cursor.execute("""
SELECT * FROM status
WHERE assunto = 'tipos'
and id_feitico > 2
""")
print(cursor.fetchall())
conn.close()
|
from __future__ import annotations
import subprocess
import sys
from urllib.parse import ParseResult
import requests
import murfey
def check(api_base: ParseResult, install: bool = True, force: bool = False):
"""
Verify that the current client version can run against the selected server.
If the version number is outside the allowed range then this can trigger
an update on the client, and in that case will terminate the process.
"""
version_check_url = api_base._replace(
path="/version", query=f"client_version={murfey.__version__}"
)
server_reply = requests.get(version_check_url.geturl())
if server_reply.status_code != 200:
raise ValueError(f"Server unreachable ({server_reply.status_code})")
versions = server_reply.json()
if not install:
return
print(
f"Murfey {murfey.__version__} connected to Murfey server {versions['server']}"
)
if versions["client-needs-update"] or versions["client-needs-downgrade"]:
# Proceed with mandatory installation
if versions["client-needs-update"]:
print("This version of Murfey must be updated before continuing.")
if versions["client-needs-downgrade"]:
print(
"This version of Murfey is too new for the server and must be downgraded before continuing."
)
result = install_murfey(api_base, versions["server"])
if result:
print("\nMurfey has been updated. Please restart Murfey")
exit()
else:
exit("Error occurred while updating Murfey")
if versions["server"] != murfey.__version__:
if force:
result = install_murfey(api_base, versions["server"])
if result:
print("\nMurfey has been updated. Please restart Murfey")
exit()
else:
exit("Error occurred while updating Murfey")
else:
print("An update is available, install with 'murfey update'.")
def install_murfey(api_base: ParseResult, version: str) -> bool:
"""Install a specific version of the Murfey client.
Return 'true' on success and 'false' on error."""
assert api_base.hostname is not None
result = subprocess.run(
[
sys.executable,
"-mpip",
"install",
"--trusted-host",
api_base.hostname,
"-i",
api_base._replace(path="/pypi", query="").geturl(),
f"murfey[client]=={version}",
]
)
return result.returncode == 0
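# --- Added usage sketch (not part of the original file) ---
# check() expects a urllib.parse.ParseResult pointing at a running Murfey
# server; the URL below is a placeholder, so this stays commented out.
#
# from urllib.parse import urlparse
# check(urlparse("http://murfey-server.example:8000"), install=False)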
|
import logging
logger = logging.getLogger('qrhub')
logging_formats = {
'simple': '%(asctime)s: %(levelname)s - %(message)s',
'full': '%(asctime)s [%(name)s]: %(levelname)s - %(message)s (%(filename)s:%(funcName)s#%(lineno)d)'
}
class DBHandler(logging.Handler):
def emit(self, record):
from apps.core.models import Log
payload = {
'filename': record.filename,
'function_name': record.funcName,
'level_name': record.levelname,
'level_number': record.levelno,
'line_number': record.lineno,
'module': record.module,
'message': self.format(record),
'path': record.pathname,
'stack_info': record.stack_info,
'args': record.args
}
Log.objects.create(**payload)
class ColoredFormatter(logging.Formatter):
"""Logging Formatter to add colors"""
reset = "\x1b[0m"
white = "\x1b[97;21m"
red = "\x1b[91;21m"
green = "\x1b[32;21m"
bold_red = "\x1b[31;21m"
blue = "\x1b[36;21m"
yellow = "\x1b[33;21m"
grey = "\x1b[37;21m"
def format(self, record):
FORMATS = {
logging.DEBUG: self.green + self._fmt + self.reset,
logging.INFO: self.blue + self._fmt + self.reset,
logging.WARNING: self.yellow + self._fmt + self.reset,
logging.ERROR: self.red + self._fmt + self.reset,
logging.CRITICAL: self.bold_red + self._fmt + self.reset
}
log_fmt = FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
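if __name__ == '__main__':
    # Added usage sketch (not in the original file): attach ColoredFormatter
    # to the module logger with the 'simple' format defined above. The
    # handler and level choices here are illustrative assumptions.
    handler = logging.StreamHandler()
    handler.setFormatter(ColoredFormatter(logging_formats['simple']))
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    logger.debug('debug message')
    logger.warning('warning message')
    logger.error('error message')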
|
#
# PySNMP MIB module BATTERY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BATTERY-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:18:18 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint")
entPhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "entPhysicalIndex")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Counter32, NotificationType, ModuleIdentity, mib_2, ObjectIdentity, Bits, Counter64, TimeTicks, iso, Gauge32, MibIdentifier, Integer32, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Counter32", "NotificationType", "ModuleIdentity", "mib-2", "ObjectIdentity", "Bits", "Counter64", "TimeTicks", "iso", "Gauge32", "MibIdentifier", "Integer32", "Unsigned32")
DisplayString, DateAndTime, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "DateAndTime", "TextualConvention")
batteryMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 233))
batteryMIB.setRevisions(('2015-06-15 00:00',))
if mibBuilder.loadTexts: batteryMIB.setLastUpdated('201506150000Z')
if mibBuilder.loadTexts: batteryMIB.setOrganization('IETF EMAN Working Group')
batteryNotifications = MibIdentifier((1, 3, 6, 1, 2, 1, 233, 0))
batteryObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 233, 1))
batteryConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 233, 2))
batteryTable = MibTable((1, 3, 6, 1, 2, 1, 233, 1, 1), )
if mibBuilder.loadTexts: batteryTable.setStatus('current')
batteryEntry = MibTableRow((1, 3, 6, 1, 2, 1, 233, 1, 1, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: batteryEntry.setStatus('current')
batteryIdentifier = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 1), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryIdentifier.setStatus('current')
batteryFirmwareVersion = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 2), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryFirmwareVersion.setStatus('current')
batteryType = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("other", 2), ("primary", 3), ("rechargeable", 4), ("capacitor", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryType.setStatus('current')
batteryTechnology = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryTechnology.setStatus('current')
batteryDesignVoltage = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 5), Unsigned32()).setUnits('millivolt').setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryDesignVoltage.setStatus('current')
batteryNumberOfCells = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryNumberOfCells.setStatus('current')
batteryDesignCapacity = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 7), Unsigned32()).setUnits('milliampere hours').setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryDesignCapacity.setStatus('current')
batteryMaxChargingCurrent = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 8), Unsigned32()).setUnits('milliampere').setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryMaxChargingCurrent.setStatus('current')
batteryTrickleChargingCurrent = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 9), Unsigned32()).setUnits('milliampere').setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryTrickleChargingCurrent.setStatus('current')
batteryActualCapacity = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 10), Unsigned32()).setUnits('milliampere hours').setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryActualCapacity.setStatus('current')
batteryChargingCycleCount = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryChargingCycleCount.setStatus('current')
batteryLastChargingCycleTime = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 12), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryLastChargingCycleTime.setStatus('current')
batteryChargingOperState = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("charging", 2), ("maintainingCharge", 3), ("noCharging", 4), ("discharging", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryChargingOperState.setStatus('current')
batteryChargingAdminState = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("notSet", 1), ("charge", 2), ("doNotCharge", 3), ("discharge", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: batteryChargingAdminState.setStatus('current')
batteryActualCharge = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 15), Unsigned32()).setUnits('milliampere hours').setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryActualCharge.setStatus('current')
batteryActualVoltage = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 16), Unsigned32()).setUnits('millivolt').setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryActualVoltage.setStatus('current')
batteryActualCurrent = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 17), Integer32()).setUnits('milliampere').setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryActualCurrent.setStatus('current')
batteryTemperature = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 18), Integer32()).setUnits('deci-degrees Celsius').setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryTemperature.setStatus('current')
batteryAlarmLowCharge = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 19), Unsigned32()).setUnits('milliampere hours').setMaxAccess("readwrite")
if mibBuilder.loadTexts: batteryAlarmLowCharge.setStatus('current')
batteryAlarmLowVoltage = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 20), Unsigned32()).setUnits('millivolt').setMaxAccess("readwrite")
if mibBuilder.loadTexts: batteryAlarmLowVoltage.setStatus('current')
batteryAlarmLowCapacity = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 21), Unsigned32()).setUnits('milliampere hours').setMaxAccess("readwrite")
if mibBuilder.loadTexts: batteryAlarmLowCapacity.setStatus('current')
batteryAlarmHighCycleCount = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 22), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: batteryAlarmHighCycleCount.setStatus('current')
batteryAlarmHighTemperature = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 23), Integer32()).setUnits('deci-degrees Celsius').setMaxAccess("readwrite")
if mibBuilder.loadTexts: batteryAlarmHighTemperature.setStatus('current')
batteryAlarmLowTemperature = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 24), Integer32()).setUnits('deci-degrees Celsius').setMaxAccess("readwrite")
if mibBuilder.loadTexts: batteryAlarmLowTemperature.setStatus('current')
batteryCellIdentifier = MibTableColumn((1, 3, 6, 1, 2, 1, 233, 1, 1, 1, 25), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryCellIdentifier.setStatus('current')
batteryChargingStateNotification = NotificationType((1, 3, 6, 1, 2, 1, 233, 0, 1)).setObjects(("BATTERY-MIB", "batteryChargingOperState"))
if mibBuilder.loadTexts: batteryChargingStateNotification.setStatus('current')
batteryLowNotification = NotificationType((1, 3, 6, 1, 2, 1, 233, 0, 2)).setObjects(("BATTERY-MIB", "batteryActualCharge"), ("BATTERY-MIB", "batteryActualVoltage"), ("BATTERY-MIB", "batteryCellIdentifier"))
if mibBuilder.loadTexts: batteryLowNotification.setStatus('current')
batteryCriticalNotification = NotificationType((1, 3, 6, 1, 2, 1, 233, 0, 3)).setObjects(("BATTERY-MIB", "batteryActualCharge"), ("BATTERY-MIB", "batteryActualVoltage"), ("BATTERY-MIB", "batteryCellIdentifier"))
if mibBuilder.loadTexts: batteryCriticalNotification.setStatus('current')
batteryTemperatureNotification = NotificationType((1, 3, 6, 1, 2, 1, 233, 0, 4)).setObjects(("BATTERY-MIB", "batteryTemperature"), ("BATTERY-MIB", "batteryCellIdentifier"))
if mibBuilder.loadTexts: batteryTemperatureNotification.setStatus('current')
batteryAgingNotification = NotificationType((1, 3, 6, 1, 2, 1, 233, 0, 5)).setObjects(("BATTERY-MIB", "batteryActualCapacity"), ("BATTERY-MIB", "batteryChargingCycleCount"), ("BATTERY-MIB", "batteryCellIdentifier"))
if mibBuilder.loadTexts: batteryAgingNotification.setStatus('current')
batteryConnectedNotification = NotificationType((1, 3, 6, 1, 2, 1, 233, 0, 6)).setObjects(("BATTERY-MIB", "batteryIdentifier"))
if mibBuilder.loadTexts: batteryConnectedNotification.setStatus('current')
batteryDisconnectedNotification = NotificationType((1, 3, 6, 1, 2, 1, 233, 0, 7))
if mibBuilder.loadTexts: batteryDisconnectedNotification.setStatus('current')
batteryCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 233, 2, 1))
batteryGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 233, 2, 2))
batteryCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 233, 2, 1, 1)).setObjects(("BATTERY-MIB", "batteryDescriptionGroup"), ("BATTERY-MIB", "batteryStatusGroup"), ("BATTERY-MIB", "batteryAlarmThresholdsGroup"), ("BATTERY-MIB", "batteryNotificationsGroup"), ("BATTERY-MIB", "batteryPerCellNotificationsGroup"), ("BATTERY-MIB", "batteryAdminGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
batteryCompliance = batteryCompliance.setStatus('current')
batteryDescriptionGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 233, 2, 2, 1)).setObjects(("BATTERY-MIB", "batteryIdentifier"), ("BATTERY-MIB", "batteryFirmwareVersion"), ("BATTERY-MIB", "batteryType"), ("BATTERY-MIB", "batteryTechnology"), ("BATTERY-MIB", "batteryDesignVoltage"), ("BATTERY-MIB", "batteryNumberOfCells"), ("BATTERY-MIB", "batteryDesignCapacity"), ("BATTERY-MIB", "batteryMaxChargingCurrent"), ("BATTERY-MIB", "batteryTrickleChargingCurrent"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
batteryDescriptionGroup = batteryDescriptionGroup.setStatus('current')
batteryStatusGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 233, 2, 2, 2)).setObjects(("BATTERY-MIB", "batteryActualCapacity"), ("BATTERY-MIB", "batteryChargingCycleCount"), ("BATTERY-MIB", "batteryLastChargingCycleTime"), ("BATTERY-MIB", "batteryChargingOperState"), ("BATTERY-MIB", "batteryActualCharge"), ("BATTERY-MIB", "batteryActualVoltage"), ("BATTERY-MIB", "batteryActualCurrent"), ("BATTERY-MIB", "batteryTemperature"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
batteryStatusGroup = batteryStatusGroup.setStatus('current')
batteryAdminGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 233, 2, 2, 3)).setObjects(("BATTERY-MIB", "batteryChargingAdminState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
batteryAdminGroup = batteryAdminGroup.setStatus('current')
batteryAlarmThresholdsGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 233, 2, 2, 4)).setObjects(("BATTERY-MIB", "batteryAlarmLowCharge"), ("BATTERY-MIB", "batteryAlarmLowVoltage"), ("BATTERY-MIB", "batteryAlarmLowCapacity"), ("BATTERY-MIB", "batteryAlarmHighCycleCount"), ("BATTERY-MIB", "batteryAlarmHighTemperature"), ("BATTERY-MIB", "batteryAlarmLowTemperature"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
batteryAlarmThresholdsGroup = batteryAlarmThresholdsGroup.setStatus('current')
batteryNotificationsGroup = NotificationGroup((1, 3, 6, 1, 2, 1, 233, 2, 2, 5)).setObjects(("BATTERY-MIB", "batteryChargingStateNotification"), ("BATTERY-MIB", "batteryLowNotification"), ("BATTERY-MIB", "batteryCriticalNotification"), ("BATTERY-MIB", "batteryAgingNotification"), ("BATTERY-MIB", "batteryTemperatureNotification"), ("BATTERY-MIB", "batteryConnectedNotification"), ("BATTERY-MIB", "batteryDisconnectedNotification"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
batteryNotificationsGroup = batteryNotificationsGroup.setStatus('current')
batteryPerCellNotificationsGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 233, 2, 2, 6)).setObjects(("BATTERY-MIB", "batteryCellIdentifier"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
batteryPerCellNotificationsGroup = batteryPerCellNotificationsGroup.setStatus('current')
mibBuilder.exportSymbols("BATTERY-MIB", batteryLastChargingCycleTime=batteryLastChargingCycleTime, batteryConformance=batteryConformance, batteryAlarmThresholdsGroup=batteryAlarmThresholdsGroup, batteryMIB=batteryMIB, batteryAdminGroup=batteryAdminGroup, batteryNotifications=batteryNotifications, batteryEntry=batteryEntry, batteryTechnology=batteryTechnology, batteryActualCharge=batteryActualCharge, batteryAlarmLowCharge=batteryAlarmLowCharge, batteryAlarmLowTemperature=batteryAlarmLowTemperature, batteryDesignCapacity=batteryDesignCapacity, batteryCompliances=batteryCompliances, batteryAlarmHighCycleCount=batteryAlarmHighCycleCount, batteryAlarmLowCapacity=batteryAlarmLowCapacity, batteryCompliance=batteryCompliance, batteryActualVoltage=batteryActualVoltage, batteryActualCapacity=batteryActualCapacity, batteryTemperature=batteryTemperature, batteryChargingOperState=batteryChargingOperState, batteryStatusGroup=batteryStatusGroup, batteryObjects=batteryObjects, PYSNMP_MODULE_ID=batteryMIB, batteryAlarmLowVoltage=batteryAlarmLowVoltage, batteryDisconnectedNotification=batteryDisconnectedNotification, batteryGroups=batteryGroups, batteryNotificationsGroup=batteryNotificationsGroup, batteryDescriptionGroup=batteryDescriptionGroup, batteryChargingCycleCount=batteryChargingCycleCount, batteryFirmwareVersion=batteryFirmwareVersion, batteryCriticalNotification=batteryCriticalNotification, batteryConnectedNotification=batteryConnectedNotification, batteryDesignVoltage=batteryDesignVoltage, batteryMaxChargingCurrent=batteryMaxChargingCurrent, batteryLowNotification=batteryLowNotification, batteryIdentifier=batteryIdentifier, batteryTemperatureNotification=batteryTemperatureNotification, batteryNumberOfCells=batteryNumberOfCells, batteryAlarmHighTemperature=batteryAlarmHighTemperature, batteryChargingAdminState=batteryChargingAdminState, batteryTable=batteryTable, batteryChargingStateNotification=batteryChargingStateNotification, batteryPerCellNotificationsGroup=batteryPerCellNotificationsGroup, batteryCellIdentifier=batteryCellIdentifier, batteryAgingNotification=batteryAgingNotification, batteryType=batteryType, batteryTrickleChargingCurrent=batteryTrickleChargingCurrent, batteryActualCurrent=batteryActualCurrent)
|
# Shared vision model
from keras.layers import Conv2D, MaxPooling2D, Input, Dense, Flatten, concatenate
from keras.models import Model
# First, define the vision modules
digit_input = Input(shape=(27, 27, 1))
x = Conv2D(64, (3,3))(digit_input)
x = Conv2D(64, (3,3))(x)
x = MaxPooling2D((2, 2))(x)
out = Flatten()(x)
vision_model = Model(digit_input, out)
# Then define the tell-digits-apart model
digit_a = Input(shape=(27, 27, 1))
digit_b = Input(shape=(27, 27, 1))
# The vision model will be shared, weights and all
out_a = vision_model(digit_a)
out_b = vision_model(digit_b)
concatenated = concatenate([out_a, out_b])
out = Dense(1, activation='sigmoid')(concatenated)
classification_model = Model([digit_a, digit_b], out)
|
from flask import Flask, jsonify, request
import requests
from blockchain import Blockchain
from uuid import uuid4
# Creating a Web App
app = Flask(__name__)
# Creating a Blockchain
blockchain = Blockchain()
# Creating an address for the node
node_address = str(uuid4()).replace('-', '')
# Mining a new block
@app.route('/mine_block', methods=['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
blockchain.add_transaction(sender=node_address, receiver='Jokers', amount=10)
block = blockchain.create_block(proof, previous_hash)
response = {'message': 'Congratulations, you just mined a block!',
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
'transactions': block['transactions']}
return jsonify(response), 200
# Getting the full Blockchain
@app.route('/get_chain', methods=['GET'])
def get_chain():
response = {'chain': blockchain.chain,
'length': len(blockchain.chain)}
return jsonify(response), 200
# Checking if the Blockchain is valid
@app.route('/is_valid', methods=['GET'])
def is_valid():
is_valid = blockchain.is_chain_valid(blockchain.chain)
if is_valid:
response = {'message': 'All good. The Blockchain is valid.'}
else:
response = {
'message': '''Houston, we have a problem.
The Blockchain is not valid.'''}
return jsonify(response), 200
# Add a new transaction to the Blockchain
@app.route('/add_transaction', methods=['POST'])
def add_transaction():
    json = request.get_json()
transaction_keys = ['sender', 'receiver', 'amount']
if not all (key in json for key in transaction_keys):
return 'Some elements of the transaction are missing', 400
index = blockchain.add_transaction(json['sender'], json['receiver'], json['amount'])
response = {'message': f'This transaction will be added in Block {index}'}
return jsonify(response), 201
# Connecting new nodes
@app.route('/connect_node', methods=['POST'])
def connect_node():
    json = request.get_json()
nodes = json.get('nodes')
    if nodes is None:
return "No node", 400
for node in nodes:
blockchain.add_node(node)
response = {'message': f'All the nodes are now connected.',
'total_nodes': list(blockchain.nodes)}
return jsonify(response), 201
# Checking if the Blockchain is valid
@app.route('/replace_chain', methods=['GET'])
def replace_chain():
is_chain_replaced = blockchain.replace_chain()
if is_chain_replaced:
response = {'message': 'The chain was replaced by the longest one.'}
else:
response = {'message': 'The chain is the longest one'}
return jsonify(response), 200
# Running the app
app.run(host='0.0.0.0', port=5000)
|
import torch
import torch.nn as nn
import numpy as np
from data_info.data_info import DataInfo
class ACloss(nn.Module):
def __init__(self):
super(ACloss, self).__init__()
self.height = DataInfo.resized_image_size[1]
self.width = DataInfo.resized_image_size[0]
self.num_landmark_class = DataInfo.num_landmark_class
def get_angle_matrix(self, inp_mat):
np_array1 = inp_mat - [self.height / 2, self.width / 2]
np_array2 = inp_mat - [self.height / 2, self.width / 2]
arccos_val = np.dot(np_array1, np_array2.transpose()) / (
np.linalg.norm(np_array1, axis=1).reshape(np_array1.shape[0], 1) *
np.linalg.norm(np_array2, axis=1))
arccos_val = np.where(arccos_val < -1, -1, arccos_val)
arccos_val = np.where(arccos_val > 1, 1, arccos_val)
angle_matrix = np.arccos(arccos_val)
angle_matrix[np.isnan(angle_matrix)] = 0
return angle_matrix
@staticmethod
def get_dist_matrix(inp_mat):
y_meshgrid1, y_meshgrid2 = np.meshgrid(inp_mat[:, 0], inp_mat[:, 0])
x_meshgrid1, x_meshgrid2 = np.meshgrid(inp_mat[:, 1], inp_mat[:, 1])
dist = np.sqrt((y_meshgrid1 - y_meshgrid2) ** 2 + (x_meshgrid1 - x_meshgrid2) ** 2)
return dist
def get_angle_and_dist_loss(self, output, target): # tensor(1,landmark_num,h,w)
angle_loss = 0.0
dist_loss = 0.0
for batch in range(target.size(0)):
output_matrix = np.zeros((self.num_landmark_class, 2))
target_matrix = np.zeros((self.num_landmark_class, 2))
for landmark_num in range(0, self.num_landmark_class):
output_image = output[batch][landmark_num]
output_image = output_image.cpu()
target_image = target[batch][landmark_num]
target_image = target_image.cpu()
output_max_point_np_array = np.array(np.where(output_image == output_image.max()))
target_max_point_np_array = np.array(np.where(target_image == target_image.max()))
output_matrix[landmark_num] = output_max_point_np_array[:, 0]
target_matrix[landmark_num] = target_max_point_np_array[:, 0]
output_angle = self.get_angle_matrix(output_matrix)
target_angle = self.get_angle_matrix(target_matrix)
angle_loss += np.mean(np.abs(output_angle - target_angle))
output_dist = self.get_dist_matrix(output_matrix)
target_dist = self.get_dist_matrix(target_matrix)
dist_loss += np.mean(np.abs(output_dist - target_dist))
return angle_loss, dist_loss
def forward(self, output, target):
l2_loss = torch.mean(torch.pow((output - target), 2))
angle_loss, dist_loss = self.get_angle_and_dist_loss(output, target)
w_loss = (1 + angle_loss) + np.log(dist_loss + 1e-10)
loss = torch.mul(l2_loss, w_loss)
return loss, l2_loss, w_loss, angle_loss, dist_loss
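if __name__ == "__main__":
    # Added usage sketch (not in the original file): random heatmaps shaped
    # by the DataInfo configuration, only to illustrate the call signature
    # and the five values the loss returns.
    h, w = DataInfo.resized_image_size[1], DataInfo.resized_image_size[0]
    n = DataInfo.num_landmark_class
    criterion = ACloss()
    pred = torch.rand(1, n, h, w)
    gt = torch.rand(1, n, h, w)
    loss, l2_loss, w_loss, angle_loss, dist_loss = criterion(pred, gt)
    print(float(loss), float(l2_loss), angle_loss, dist_loss)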
|
from django.conf import settings
from mongoengine import Document, fields
from mongodbforms import DocumentForm
import unittest
class MyDocument(Document):
mystring = fields.StringField()
myverbosestring = fields.StringField(verbose_name="Foobar")
myrequiredstring = fields.StringField(required=True)
list_of_strings = fields.ListField(fields.StringField())
class MyForm(DocumentForm):
class Meta:
document = MyDocument
class SimpleDocumentTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
settings.configure()
def test_form(self):
form = MyForm()
self.assertEquals(len(form.fields), 4)
self.assertFalse(form.fields['mystring'].required)
self.assertEquals(form.fields['myverbosestring'].label, "Foobar")
self.assertTrue(form.fields['myrequiredstring'].required)
self.assertEqual(form.fields['list_of_strings'].label, "List of strings")
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\objects\components\stored_sim_info_component.py
# Compiled at: 2018-09-12 23:39:12
# Size of source mod 2**32: 10090 bytes
from protocolbuffers import SimObjectAttributes_pb2 as protocols
from interactions import ParticipantType
from interactions.utils.interaction_elements import XevtTriggeredElement
from interactions.utils.loot_basic_op import BaseTargetedLootOperation, BaseLootOperation
from objects.components import Component, types, componentmethod_with_fallback
from sims.sim_info_name_data import SimInfoNameData
from sims4.tuning.tunable import AutoFactoryInit, HasTunableFactory, TunableEnumEntry, OptionalTunable, Tunable
import services, sims4, zone_types
logger = sims4.log.Logger('Stored Sim Info Component', default_owner='shipark')
class TransferStoredSimInfo(BaseTargetedLootOperation):
FACTORY_TUNABLES = {'clear_stored_sim_on_subject': Tunable(description='\n If set to False, the Stored Sim will remain on the subject object. If\n set to True, the Store Sim will be removed from the subject object.\n ',
tunable_type=bool,
default=False)}
def __init__(self, *args, clear_stored_sim_on_subject=None, **kwargs):
(super().__init__)(*args, **kwargs)
self._clear_stored_sim_on_subject = clear_stored_sim_on_subject
def _apply_to_subject_and_target(self, subject, target, resolver):
if subject is None:
logger.error("The Transfer Stored Sim Info loot tuned on: '{}' has a subject participant of None value.", self)
return
stored_sim_info = subject.get_component(types.STORED_SIM_INFO_COMPONENT)
if stored_sim_info is None:
logger.error("The Transfer Stored Sim Info loot tuned on interaction: '{}' has a subject with no Stored Sim Info Component.", self)
return
if target is None:
logger.error("The Transfer Stored Sim Info loot tuned on interaction: '{}' has a target participant of None value.", self)
return
if target.has_component(types.STORED_SIM_INFO_COMPONENT):
target.remove_component(types.STORED_SIM_INFO_COMPONENT)
target.add_dynamic_component((types.STORED_SIM_INFO_COMPONENT), sim_id=(stored_sim_info.get_stored_sim_id()))
if self._clear_stored_sim_on_subject:
subject.remove_component(types.STORED_SIM_INFO_COMPONENT)
class StoreSimInfoLootOp(BaseTargetedLootOperation):
def _apply_to_subject_and_target(self, subject, target, resolver):
if subject is None or target is None:
logger.error('Trying to run Store Sim Info loot action with a None Subject and/or Target. subject:{}, target:{}', subject, target)
return
        if not target.is_sim:
            logger.error('Trying to run Store Sim Info loot action on Subject {} with a non Sim Target {}', subject, target)
            return
if subject.has_component(types.STORED_SIM_INFO_COMPONENT):
subject.remove_component(types.STORED_SIM_INFO_COMPONENT)
subject.add_dynamic_component((types.STORED_SIM_INFO_COMPONENT), sim_id=(target.sim_id))
class RemoveSimInfoLootOp(BaseLootOperation):
def _apply_to_subject_and_target(self, subject, target, resolver):
if subject is None:
logger.error('Trying to run Remove Stored Sim Info loot action with a None Subject')
return
if subject.has_component(types.STORED_SIM_INFO_COMPONENT):
subject.remove_component(types.STORED_SIM_INFO_COMPONENT)
class StoreSimElement(XevtTriggeredElement, HasTunableFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'description':'\n An element that retrieves an interaction participant and attaches\n its information to another interaction participant using a dynamic\n StoredSimInfoComponent.\n ',
'source_participant':OptionalTunable(description='\n Specify what participant to store on the destination participant.\n ',
tunable=TunableEnumEntry(description='\n The participant of this interaction whose Sim Info is retrieved\n to be stored as a component.\n ',
tunable_type=ParticipantType,
default=(ParticipantType.PickedObject)),
enabled_name='specific_participant',
disabled_name='no_participant'),
'destination_participant':TunableEnumEntry(description='\n The participant of this interaction to which a\n StoredSimInfoComponent is added, with the Sim Info of\n source_participant.\n ',
tunable_type=ParticipantType,
default=ParticipantType.Object)}
def _do_behavior(self):
source = self.interaction.get_participant(participant_type=(self.source_participant)) if self.source_participant is not None else None
destination = self.interaction.get_participant(participant_type=(self.destination_participant))
if destination.has_component(types.STORED_SIM_INFO_COMPONENT):
destination.remove_component(types.STORED_SIM_INFO_COMPONENT)
if source is not None:
destination.add_dynamic_component((types.STORED_SIM_INFO_COMPONENT), sim_id=(source.id))
class StoredSimInfoComponent(Component, component_name=types.STORED_SIM_INFO_COMPONENT, allow_dynamic=True, persistence_key=protocols.PersistenceMaster.PersistableData.StoredSimInfoComponent):
def __init__(self, *args, sim_id=None, **kwargs):
(super().__init__)(*args, **kwargs)
self._sim_id = sim_id
self._sim_info_name_data = None
def save(self, persistence_master_message):
persistable_data = protocols.PersistenceMaster.PersistableData()
persistable_data.type = protocols.PersistenceMaster.PersistableData.StoredSimInfoComponent
stored_sim_info_component_data = persistable_data.Extensions[protocols.PersistableStoredSimInfoComponent.persistable_data]
stored_sim_info_component_data.sim_id = self._sim_id
if self._sim_info_name_data is not None:
stored_sim_info_component_data.sim_info_name_data = SimInfoNameData.generate_sim_info_name_data_msg((self._sim_info_name_data), use_profanity_filter=False)
persistence_master_message.data.extend([persistable_data])
def load(self, persistable_data):
stored_sim_info_component_data = persistable_data.Extensions[protocols.PersistableStoredSimInfoComponent.persistable_data]
self._sim_id = stored_sim_info_component_data.sim_id
if stored_sim_info_component_data.sim_info_name_data:
sim_info_data = stored_sim_info_component_data.sim_info_name_data
self._sim_info_name_data = SimInfoNameData(sim_info_data.gender, sim_info_data.first_name, sim_info_data.last_name, sim_info_data.full_name_key)
def on_add(self, *_, **__):
services.current_zone().register_callback(zone_types.ZoneState.HOUSEHOLDS_AND_SIM_INFOS_LOADED, self._on_households_loaded)
def _on_households_loaded(self, *_, **__):
if self._sim_info_name_data is None:
sim_info = services.sim_info_manager().get(self._sim_id)
if sim_info is not None:
self._sim_info_name_data = sim_info.get_name_data()
self.owner.update_object_tooltip()
@componentmethod_with_fallback(lambda : None)
def get_stored_sim_id(self):
return self._sim_id
@componentmethod_with_fallback(lambda : None)
def get_stored_sim_info(self):
return services.sim_info_manager().get(self._sim_id)
@componentmethod_with_fallback(lambda : None)
def get_stored_sim_info_or_name_data(self):
sim_info = services.sim_info_manager().get(self._sim_id)
if sim_info is not None:
return sim_info
return self._sim_info_name_data
def has_stored_data(self):
return self._sim_info_name_data is not None
def component_interactable_gen(self):
yield self
|
import setuptools
import os
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
requirements = []
else:
requirements = [
'pandas>=0.25.1',
'numpy>=1.16.5',
'scipy>=1.3.1',
'gpflow==1.5.1',
'tensorflow==1.15.2',
'goatools>=1.0.2',
'scikit-learn>=0.21.3',
'statsmodels>=0.10.1',
'matplotlib>=3.1.1',
'seaborn>=0.9.0',
'pytest'
]
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="TRANSPIRE",
version="0.1.1.dev1",
author="Michelle A. Kennedy",
author_email="mak4515@gmail.com",
description="A Python package for TRanslocation ANalysis of SPatIal pRotEomics data",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mak4515/TRANSPIRE",
packages=setuptools.find_packages(),
classifiers=[
'Development Status :: 3 - Alpha',
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires = requirements,
include_package_data = True,
package_data = {
        '': ['*.csv', '*.txt', '*.xlsx']
}
)
|
#!/usr/bin/env python
from setuptools import setup
VERSION = '1.0.0'
DESCRIPTION = "mutual-followers: Find out mutual friends of twitter users"
LONG_DESCRIPTION = """
mutual-followers finds out the mutual friends for given screen names.
I use this to discover people who are tweeting about software development and also being followed by common reputable people.
"""
CLASSIFIERS = list(filter(None, map(str.strip,
"""
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Programming Language :: Python :: 3.7
Operating System :: OS Independent
Topic :: Utilities
Topic :: Database :: Database Engines/Servers
Topic :: Software Development :: Libraries :: Python Modules
""".splitlines()))
setup(
name="mutual-followers",
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
keywords=('twitter', 'tweepy', 'friends', 'followers',
'python', 'set'),
author="Berkay Dincer",
author_email="dincerbberkay@gmail.com",
url="https://github.com/berkay-dincer/mutual-followers",
license="MIT License",
platforms=['any'],
zip_safe=True,
install_requires=['tweepy>=1.1.0', 'argparse>=1.4.0'],
packages=['mutual-followers']
)
|
from .Assets import Assets
from .BaseClient import BaseClient
from .DataViews import DataViews
from .Streams import Streams
from .Types import Types
class OCSClient:
"""
A client that handles communication with OCS
"""
def __init__(self, api_version: str, tenant: str, url: str, client_id: str,
client_secret: str = None, accept_verbosity: bool = False):
"""
        Use this to help in communication with OCS
:param api_version: Version of the api you are communicating with
:param tenant: Your tenant ID
:param url: The base URL for your OCS instance
:param client_id: Your client ID
:param client_secret: Your client Secret or Key
        :param accept_verbosity: Sets whether value calls return all values or just
            non-default values
"""
self.__base_client = BaseClient(api_version, tenant, url, client_id,
client_secret, accept_verbosity)
self.__assets = Assets(self.__base_client)
self.__data_views = DataViews(self.__base_client)
self.__streams = Streams(self.__base_client)
self.__types = Types(self.__base_client)
@property
def uri(self) -> str:
"""
:return: The uri of this OCS client as a string
"""
return self.__base_client.uri
@property
def tenant(self) -> str:
"""
:return: The tenant of this OCS client as a string
"""
return self.__base_client.tenant
@property
def acceptverbosity(self) -> bool:
"""
:return: Whether this will include the accept verbosity header
"""
return self.__base_client.AcceptVerbosity
@acceptverbosity.setter
def acceptverbosity(self, value: bool):
self.__base_client.AcceptVerbosity = value
@property
def request_timeout(self) -> int:
"""
:return: Request timeout in seconds (default 30 secs)
"""
return self.__base_client.RequestTimeout
@request_timeout.setter
def request_timeout(self, value: int):
self.__base_client.RequestTimeout = value
@property
def Assets(self) -> Assets:
"""
:return: A client for interacting with Assets
"""
return self.__assets
@property
def DataViews(self) -> DataViews:
"""
:return: A client for interacting with Data Views
"""
return self.__data_views
@property
def Streams(self) -> Streams:
"""
:return: A client for interacting with Streams
"""
return self.__streams
@property
def Types(self) -> Types:
"""
:return: A client for interacting with Types
"""
return self.__types
@property
def baseClient(self) -> BaseClient:
"""
:return: A client for interacting with the baseclient directly
"""
return self.__base_client
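# --- Added usage sketch (not part of the original file) ---
# Constructing the client and reading a few properties; the endpoint,
# tenant, and credential values below are placeholders rather than real
# configuration, so this stays commented out.
#
# client = OCSClient(api_version='v1', tenant='<tenant-id>',
#                    url='https://example.com', client_id='<client-id>',
#                    client_secret='<client-secret>')
# print(client.uri, client.tenant)
# client.request_timeout = 60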
|
"""
Main BZT classes
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import codecs
import copy
import datetime
import json
import logging
import os
import pkgutil
import shutil
import sys
import threading
import time
import traceback
import uuid
from distutils.version import LooseVersion
from urllib import parse
from bzt import ManualShutdown, get_configs_dir, TaurusConfigError, TaurusInternalException
from bzt.utils import reraise, load_class, BetterDict, ensure_is_dict, dehumanize_time, is_windows, is_linux
from bzt.utils import shell_exec, get_full_path, ExceptionalDownloader, get_uniq_name, HTTPClient, Environment
from bzt.utils import NETWORK_PROBLEMS
from .dicts import Configuration
from .modules import Provisioning, Reporter, Service, Aggregator, EngineModule
from .names import EXEC, TAURUS_ARTIFACTS_DIR, SETTINGS
from .templates import Singletone
from ..environment_helpers import expand_variable_with_os, custom_expandvars, expand_envs_with_os
from bzt.resources.version import VERSION, DEV_VERSION
class Engine(object):
"""
Core entity of the technology, used to coordinate whole process
:type reporters: list[Reporter]
    :type services: list[Service]
:type log: logging.Logger
:type aggregator: bzt.modules.aggregator.ConsolidatingAggregator
:type stopping_reason: BaseException
"""
ARTIFACTS_DIR = "%Y-%m-%d_%H-%M-%S.%f"
def __init__(self, parent_logger):
"""
:type parent_logger: logging.Logger
"""
self.file_search_paths = []
self.services = []
self.__artifacts = []
self.reporters = []
self.artifacts_dir = None
self.log = parent_logger.getChild(self.__class__.__name__)
self.env = Environment(self.log) # backward compatibility
self.shared_env = Environment(self.log) # backward compatibility
self.config = Configuration()
self.config.log = self.log.getChild(Configuration.__name__)
self.modules = {} # available modules
self.provisioning = Provisioning()
self.aggregator = Aggregator(is_functional=False)
self.aggregator.engine = self
self.interrupted = False
self.check_interval = 1
self.stopping_reason = None
self.engine_loop_utilization = 0
self.prepared = []
self.started = []
self.default_cwd = None
self.logging_level_down = lambda: None
self.logging_level_up = lambda: None
self.user_pythonpath = None
self.temp_pythonpath = None
self._http_client = None
self.graceful_tmp = None
def set_pythonpath(self):
version = sys.version.split(' ')[0]
path_suffix = os.path.join('python-packages', version)
self.user_pythonpath = get_full_path(os.path.join("~", ".bzt", path_suffix))
self.temp_pythonpath = get_full_path(os.path.join(self.artifacts_dir, path_suffix))
current_pythonpath = os.environ.get('PYTHONPATH', '')
paths = self.temp_pythonpath, self.user_pythonpath, current_pythonpath
self.log.debug("Set PYTHONPATH to :\n\tUSER: '{}' +\n\tTEMP: '{}' +\n\tCURRENT: '{}'".format(*paths))
try:
user_packages = os.listdir(self.user_pythonpath)
except:
user_packages = []
self.log.debug("Content of user packages dir: {}".format(user_packages))
os.environ['PYTHONPATH'] = os.pathsep.join(paths)
def configure(self, user_configs, read_config_files=True):
"""
Load configuration files
:type user_configs: list[str]
:type read_config_files: bool
"""
self.log.info("Configuring...")
if read_config_files:
self._load_base_configs()
merged_config = self._load_user_configs(user_configs)
all_includes = []
while "included-configs" in self.config:
includes = self.config.pop("included-configs")
included_configs = [self.find_file(conf) for conf in includes if conf not in all_includes + user_configs]
all_includes += includes
self.config.load(included_configs)
self.config['included-configs'] = all_includes
self.config.merge({"version": VERSION})
self.get_http_client()
if self.config.get(SETTINGS).get("check-updates", True):
install_id = self.config.get("install-id", self._generate_id())
def wrapper():
return self._check_updates(install_id)
thread = threading.Thread(target=wrapper) # intentionally non-daemon thread
thread.start()
return merged_config
def unify_config(self):
executions = self.config.get(EXEC, [])
if isinstance(executions, dict):
executions = [executions]
self.config[EXEC] = executions
settings = self.config.get(SETTINGS)
default_executor = settings.get("default-executor", None)
prov_type = self.config.get(Provisioning.PROV)
for execution in executions: # type: BetterDict
executor = execution.get("executor", default_executor, force_set=True)
if not executor:
msg = "Cannot determine executor type and no default executor in %s"
raise TaurusConfigError(msg % execution)
reporting = self.config.get(Reporter.REP, [])
for index in range(len(reporting)):
ensure_is_dict(reporting, index, "module")
services = self.config.get(Service.SERV, [])
for index in range(len(services)):
ensure_is_dict(services, index, "module")
modules = self.config.get("modules")
for module in modules:
ensure_is_dict(modules, module, "class")
@staticmethod
def _generate_id():
if os.getenv("JENKINS_HOME"):
prefix = "jenkins"
elif os.getenv("TRAVIS"):
prefix = "travis"
elif any([key.startswith("bamboo") for key in os.environ.keys()]):
prefix = "bamboo"
elif os.getenv("TEAMCITY_VERSION"):
prefix = "teamcity"
elif os.getenv("DOCKER_HOST"):
prefix = "docker"
elif os.getenv("AWS_"):
prefix = "amazon"
elif os.getenv("GOOGLE_APPLICATION_CREDENTIALS") or os.getenv("CLOUDSDK_CONFIG"):
prefix = "google_cloud"
elif os.getenv("WEBJOBS_NAME"):
prefix = "azure"
elif is_linux():
prefix = 'linux'
elif is_windows():
prefix = 'windows'
else:
prefix = 'macos'
return "%s-%x" % (prefix, uuid.getnode())
def prepare(self):
"""
Prepare engine for work, will call preparing of Provisioning and add
downstream EngineModule instances
"""
self.log.info("Preparing...")
self.unify_config()
interval = self.config.get(SETTINGS).get("check-interval", self.check_interval)
self.check_interval = dehumanize_time(interval)
try:
self.__prepare_aggregator()
self.__prepare_services()
self.__prepare_provisioning()
self.__prepare_reporters()
self.config.dump()
except BaseException as exc:
self.stopping_reason = exc
raise
def _startup(self):
modules = self.services + [self.aggregator] + self.reporters + [self.provisioning] # order matters
for module in modules:
self.log.debug("Startup %s", module)
self.started.append(module)
module.startup()
self.config.dump()
def start_subprocess(self, args, env, cwd=None, **kwargs):
if cwd is None:
cwd = self.default_cwd
self.graceful_tmp = self.create_artifact(prefix="GRACEFUL", suffix="")
env = env.get()
env['GRACEFUL'] = self.graceful_tmp
return shell_exec(args, cwd=cwd, env=env, **kwargs)
def run(self):
"""
Run the job. Calls `startup`, does periodic `check`,
calls `shutdown` in any case
"""
self.log.info("Starting...")
exc_info = exc_value = None
try:
self._startup()
self.logging_level_down()
self._wait()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
exc_value = exc
exc_info = sys.exc_info()
finally:
self.log.warning("Please wait for graceful shutdown...")
try:
self.logging_level_up()
self._shutdown()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
if not exc_value:
exc_value = exc
exc_info = sys.exc_info()
if exc_value:
reraise(exc_info, exc_value)
def _check_modules_list(self):
stop = False
modules = [self.provisioning, self.aggregator] + self.services + self.reporters # order matters
for module in modules:
if module in self.started:
self.log.debug("Checking %s", module)
finished = bool(module.check())
if finished:
self.log.debug("%s finished", module)
stop = finished
return stop
def _wait(self):
"""
Wait modules for finish
:return:
"""
prev = time.time()
while not self._check_modules_list():
now = time.time()
diff = now - prev
delay = self.check_interval - diff
self.engine_loop_utilization = diff / self.check_interval
self.log.debug("Iteration took %.3f sec, sleeping for %.3f sec...", diff, delay)
if delay > 0:
time.sleep(delay)
prev = time.time()
if self.interrupted:
raise ManualShutdown()
self.config.dump()
def _shutdown(self):
"""
Shutdown modules
:return:
"""
self.log.info("Shutting down...")
self.log.debug("Current stop reason: %s", self.stopping_reason)
if self.graceful_tmp and not os.path.exists(self.graceful_tmp):
open(self.graceful_tmp, 'x').close()
exc_info = exc_value = None
modules = [self.provisioning, self.aggregator] + self.reporters + self.services # order matters
for module in modules:
try:
if module in self.started:
module.shutdown()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
if not exc_value:
exc_value = exc
exc_info = sys.exc_info()
if self.graceful_tmp and os.path.exists(self.graceful_tmp):
os.remove(self.graceful_tmp)
self.config.dump()
if exc_value:
reraise(exc_info, exc_value)
def post_process(self):
"""
Do post-run analysis and processing for the results.
"""
self.log.info("Post-processing...")
# :type exception: BaseException
exc_info = exc_value = None
modules = [self.provisioning, self.aggregator] + self.reporters + self.services # order matters
# services are last because of shellexec which is "final-final" action
for module in modules:
if module in self.prepared:
try:
module.post_process()
except BaseException as exc:
if isinstance(exc, KeyboardInterrupt):
self.log.debug("post_process: %s", exc)
else:
self.log.debug("post_process: %s\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
if not exc_value:
exc_value = exc
exc_info = sys.exc_info()
self.config.dump()
if exc_info:
reraise(exc_info, exc_value)
def create_artifact(self, prefix, suffix):
"""
Create new artifact in artifacts dir with given prefix and suffix
:type prefix: str
:type suffix: str
:return: Path to created file
:rtype: str
:raise TaurusInternalException: if no artifacts dir set
"""
if not self.artifacts_dir:
raise TaurusInternalException("Cannot create artifact: no artifacts_dir set up")
filename = get_uniq_name(self.artifacts_dir, prefix, suffix, self.__artifacts)
self.__artifacts.append(filename)
self.log.debug("New artifact filename: %s", filename)
return filename
def existing_artifact(self, filename, move=False, target_filename=None):
"""
Add an existing artifact; it will be collected into artifacts_dir. If
move=True, the original file is moved instead of copied
:type filename: str
:type move: bool
:type target_filename: str
"""
self.log.debug("Add existing artifact (move=%s): %s", move, filename)
if self.artifacts_dir is None:
self.log.warning("Artifacts dir has not been set, will not copy %s", filename)
return
new_filename = os.path.basename(filename) if target_filename is None else target_filename
new_name = os.path.join(self.artifacts_dir, new_filename)
self.__artifacts.append(new_name)
if get_full_path(filename) == get_full_path(new_name):
self.log.debug("No need to copy %s", filename)
return
if not os.path.exists(filename):
self.log.warning("Artifact file not exists: %s", filename)
return
if move:
self.log.debug("Moving %s to %s", filename, new_name)
shutil.move(filename, new_name)
else:
self.log.debug("Copying %s to %s", filename, new_name)
shutil.copy(filename, new_name)
def create_artifacts_dir(self, existing_artifacts=(), merged_config=None):
"""
Create directory for artifacts, directory name based on datetime.now()
"""
if not self.artifacts_dir:
artifacts_dir = self.config.get(SETTINGS, force_set=True).get("artifacts-dir", self.ARTIFACTS_DIR)
self.artifacts_dir = datetime.datetime.now().strftime(artifacts_dir)
self.artifacts_dir = self.__expand_artifacts_dir()
self.log.info("Artifacts dir: %s", self.artifacts_dir)
os.environ[TAURUS_ARTIFACTS_DIR] = self.artifacts_dir
if not os.path.isdir(self.artifacts_dir):
os.makedirs(self.artifacts_dir)
# dump current effective configuration
dump = self.create_artifact("effective", "") # TODO: not good since this file not exists
self.config.set_dump_file(dump)
self.config.dump()
# dump merged configuration
if merged_config:
merged_config.dump(self.create_artifact("merged", ".yml"), Configuration.YAML)
merged_config.dump(self.create_artifact("merged", ".json"), Configuration.JSON)
for artifact in existing_artifacts:
self.existing_artifact(artifact)
def __expand_artifacts_dir(self):
envs = self.__get_envs_from_config()
artifacts_dir = custom_expandvars(self.artifacts_dir, envs)
artifacts_dir = expand_variable_with_os(artifacts_dir)
artifacts_dir = get_full_path(artifacts_dir)
return artifacts_dir
def is_functional_mode(self):
return self.aggregator is not None and self.aggregator.is_functional
def __load_module(self, alias):
"""
Load module class by alias
:param alias: str
:return: class
"""
if alias in self.modules:
return self.modules[alias]
mod_conf = self.config.get('modules')
if alias not in mod_conf:
msg = "Module '%s' not found in list of available aliases %s" % (alias, sorted(mod_conf.keys()))
raise TaurusConfigError(msg)
settings = ensure_is_dict(mod_conf, alias, "class")
acopy = copy.deepcopy(settings)
BetterDict.traverse(acopy, Configuration.masq_sensitive)
self.log.debug("Module config: %s %s", alias, acopy)
err = TaurusConfigError("Class name for alias '%s' is not found in module settings: %s" % (alias, settings))
clsname = settings.get('class', err)
self.modules[alias] = load_class(clsname)
if not issubclass(self.modules[alias], EngineModule):
raise TaurusInternalException("Module class does not inherit from EngineModule: %s" % clsname)
return self.modules[alias]
def instantiate_module(self, alias):
"""
Create a new instance of a module using its alias from the module settings
section of the config. Thus, to be instantiated, a module must be mentioned
in settings.
:type alias: str
:rtype: EngineModule
"""
classobj = self.__load_module(alias)
instance = classobj()
assert isinstance(instance, EngineModule)
instance.log = self.log.getChild(alias)
instance.engine = self
settings = self.config.get("modules")
instance.settings = settings.get(alias)
return instance
def find_file(self, filename):
"""
Try to find a file or dir in the search path, if one was specified. Helps find files
in non-CLI environments or relative to the config path.
The returned path is absolute and needs no further abspath() handling.
:param filename: file basename to find
:type filename: str
"""
if not filename:
return filename
if filename.lower().startswith("http://") or filename.lower().startswith("https://"):
parsed_url = parse.urlparse(filename)
downloader = ExceptionalDownloader(self.get_http_client())
self.log.info("Downloading %s", filename)
tmp_f_name, headers = downloader.get(filename)
cd_header = headers.get('Content-Disposition', '')
dest = cd_header.split('filename=')[-1] if cd_header and 'filename=' in cd_header else ''
if dest.startswith('"') and dest.endswith('"') or dest.startswith("'") and dest.endswith("'"):
dest = dest[1:-1]
elif not dest:
dest = os.path.basename(parsed_url.path)
fname, ext = os.path.splitext(dest) if dest else (parsed_url.hostname.replace(".", "_"), '.file')
dest = self.create_artifact(fname, ext)
self.log.debug("Moving %s to %s", tmp_f_name, dest)
shutil.move(tmp_f_name, dest)
return dest
else:
filename = os.path.expanduser(filename) # expanding of '~' is required for check of existence
# check filename 'as is' and all combinations of file_search_path/filename
for dirname in [""] + self.file_search_paths:
location = os.path.join(dirname, filename)
if os.path.exists(location):
if dirname:
self.log.warning("Guessed location from search paths for %s: %s", filename, location)
return get_full_path(location)
self.log.warning("Could not find location at path: %s", filename)
return filename
def _load_base_configs(self):
configs = []
try:
sys.path.insert(0, os.path.curdir) # necessary for development mode (running bzt from curdir)
configs.extend(self._scan_system_configs())
configs.extend(self._scan_package_configs())
finally:
sys.path.pop(0)
configs.sort(key=os.path.basename)
self.log.debug("Base configs list: %s", configs)
if not configs:
self.log.warning("No base configs were discovered")
self.config.load(configs)
def _scan_package_configs(self):
configs = []
for importer, modname, ispkg in pkgutil.iter_modules(path=None):
try:
if not ispkg:
continue
package_path = getattr(importer, 'path', None)
if package_path is None:
continue
index_path = os.path.join(package_path, modname, 'bzt-configs.json')
if not os.path.exists(index_path):
continue
try:
with codecs.open(index_path, 'rb', encoding='utf-8') as fds:
index_configs = json.load(fds)
except (OSError, IOError, ValueError) as exc:
self.log.debug("Can't load package-specific bzt config %s: %s", index_path, exc)
continue
if not isinstance(index_configs, list):
self.log.debug("Error: value of bzt-configs.json should be a list (%s)" % index_path)
continue
for config_name in index_configs:
configs.append(os.path.join(importer.path, modname, config_name))
except BaseException as exc:
self.log.warning("Can't look for package configs in package %r: %s", modname, str(exc))
self.log.debug("Traceback: %s", traceback.format_exc())
return configs
def _scan_system_configs(self):
configs = []
machine_dir = get_configs_dir() # can't refactor machine_dir out - see setup.py
if os.path.isdir(machine_dir):
self.log.debug("Reading system configs from: %s", machine_dir)
for cfile in sorted(os.listdir(machine_dir)):
fname = os.path.join(machine_dir, cfile)
if os.path.isfile(fname):
configs.append(fname)
return configs
def _load_user_configs(self, user_configs):
"""
:type user_configs: list[str]
:rtype: Configuration
"""
# "tab-replacement-spaces" is not documented 'cause it loads only from base configs
# so it's sort of half-working last resort
self.config.tab_replacement_spaces = self.config.get(SETTINGS).get("tab-replacement-spaces", 4)
self.log.debug("User configs list: %s", user_configs)
self.config.load(user_configs)
user_config = Configuration()
user_config.log = self.log.getChild(Configuration.__name__)
user_config.tab_replacement_spaces = self.config.tab_replacement_spaces
user_config.warn_on_tab_replacement = False
user_config.load(user_configs, self.__config_loaded)
return user_config
def __config_loaded(self, config):
self.file_search_paths.append(get_full_path(config, step_up=1))
def __prepare_provisioning(self):
"""
Instantiate provisioning class
"""
err = TaurusConfigError("Please check global config availability or configure provisioning settings")
cls = self.config.get(Provisioning.PROV, err)
self.provisioning = self.instantiate_module(cls)
self.prepared.append(self.provisioning)
self.provisioning.prepare()
def __prepare_reporters(self):
"""
Instantiate reporters, then prepare them in case they would like to interact
"""
reporting = self.config.get(Reporter.REP, [])
for index, reporter in enumerate(reporting):
msg = "reporter 'module' field isn't recognized: %s"
cls = reporter.get('module', TaurusConfigError(msg % reporter))
instance = self.instantiate_module(cls)
instance.parameters = reporter
if self.__singletone_exists(instance, self.reporters):
continue
assert isinstance(instance, Reporter)
self.reporters.append(instance)
for reporter in self.reporters[:]:
if not reporter.should_run():
self.reporters.remove(reporter)
# prepare reporters
for module in self.reporters:
self.prepared.append(module)
module.prepare()
def __prepare_services(self):
"""
Instantiate service modules, then prepare them
"""
srv_config = self.config.get(Service.SERV, [])
services = []
for index, config in enumerate(srv_config):
cls = config.get('module', '')
instance = self.instantiate_module(cls)
instance.parameters = config
if self.__singletone_exists(instance, services):
continue
assert isinstance(instance, Service)
services.append(instance)
for service in services[:]:
if not service.should_run():
services.remove(service)
self.services.extend(services)
for module in self.services:
self.prepared.append(module)
module.prepare()
def __singletone_exists(self, instance, mods_list):
"""
:type instance: EngineModule
:type mods_list: list[EngineModule]
:rtype: bool
"""
if not isinstance(instance, Singletone):
return False
for mod in mods_list:
if mod.parameters.get("module") == instance.parameters.get("module"):
msg = "Module '%s' can be only used once, will merge all new instances into single"
self.log.warning(msg % mod.parameters.get("module"))
mod.parameters.merge(instance.parameters)
return True
return False
def __prepare_aggregator(self):
"""
Instantiate the aggregator
:return:
"""
cls = self.config.get(SETTINGS).get("aggregator", "")
if not cls:
self.log.warning("Proceeding without aggregator, no results analysis")
else:
self.aggregator = self.instantiate_module(cls)
self.prepared.append(self.aggregator)
self.aggregator.prepare()
def get_http_client(self):
if self._http_client is None:
self._http_client = HTTPClient()
self._http_client.add_proxy_settings(self.config.get("settings").get("proxy"))
return self._http_client
def _check_updates(self, install_id):
if VERSION == DEV_VERSION:
return
params = (VERSION, install_id)
addr = "https://gettaurus.org/updates/?version=%s&installID=%s" % params
self.log.debug("Requesting updates info: %s", addr)
client = self.get_http_client()
try:
response = client.request('GET', addr, timeout=10)
except NETWORK_PROBLEMS:
self.log.debug("Failed to check for updates: %s", traceback.format_exc())
self.log.warning("Failed to check for updates")
return
data = response.json()
latest = data.get('latest')
needs_upgrade = data.get('needsUpgrade')
if latest is None or needs_upgrade is None:
self.log.warning(f'Wrong updates info: "{data}"')
else:
self.log.debug(f'Taurus updates info: "{data}"')
mine = LooseVersion(VERSION)
if (mine < latest) or needs_upgrade:
msg = "There is newer version of Taurus %s available, consider upgrading. " \
"What's new: http://gettaurus.org/docs/Changelog/"
self.log.warning(msg, latest)
else:
self.log.debug("Installation is up-to-date")
def eval_env(self):
"""
Should be done after `configure`
"""
envs = self.__get_envs_from_config()
envs = expand_envs_with_os(envs)
def apply_env(value, key, container):
if isinstance(value, str):
container[key] = custom_expandvars(value, envs)
BetterDict.traverse(self.config, apply_env)
self.__export_variables_to_os()
def __export_variables_to_os(self):
"""
Export all user-defined environment variables to the system.
Example:
settings:
env:
FOO: bbb/ccc
BAR: aaa
"""
envs = self.__get_envs_from_config()
for var_name in envs:
if envs[var_name] is None:
if var_name in os.environ:
os.environ.pop(var_name)
else:
os.environ[var_name] = envs[var_name]
self.log.debug("OS env: %s=%s", var_name, envs[var_name])
def __get_envs_from_config(self):
envs = self.config.get(SETTINGS, force_set=True).get("env", force_set=True)
envs[TAURUS_ARTIFACTS_DIR] = self.artifacts_dir
return envs
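# --- Hedged usage sketch (added by the editor; not part of the original module) ---
# A minimal sketch of the lifecycle implemented above: prepare() wires the
# aggregator, services, provisioning and reporters; run() starts them and polls
# check() every check-interval; post_process() always runs last. The constructor
# and configure() signatures below are assumptions, not verified against this code.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    import logging
    engine = Engine(logging.getLogger(''))  # assumed: takes a parent logger
    engine.configure(["execution.yml"])  # assumed: list of config file paths
    engine.create_artifacts_dir()
    engine.prepare()
    try:
        engine.run()
    finally:
        engine.post_process()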
|
from .boxes import nms, box_iou
from .new_empty_tensor import _new_empty_tensor
from .deform_conv import deform_conv2d, DeformConv2d
from .roi_align import roi_align, RoIAlign
from .roi_pool import roi_pool, RoIPool
from .ps_roi_align import ps_roi_align, PSRoIAlign
from .ps_roi_pool import ps_roi_pool, PSRoIPool
from .poolers import MultiScaleRoIAlign
from .feature_pyramid_network import FeaturePyramidNetwork
from ._register_onnx_ops import _register_custom_op
_register_custom_op()
__all__ = [
'deform_conv2d', 'DeformConv2d', 'nms', 'roi_align', 'RoIAlign', 'roi_pool',
'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool',
'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork'
]
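# --- Hedged usage sketch (added by the editor; not part of the original module) ---
# Minimal example of two commonly used ops exported above; the shapes, boxes and
# thresholds are illustrative only.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    import torch
    boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.], [50., 50., 60., 60.]])
    scores = torch.tensor([0.9, 0.8, 0.7])
    keep = nms(boxes, scores, iou_threshold=0.5)  # indices of boxes surviving NMS
    features = torch.rand(1, 3, 32, 32)
    rois = torch.tensor([[0., 0., 0., 16., 16.]])  # (batch_index, x1, y1, x2, y2)
    pooled = roi_align(features, rois, output_size=(7, 7), spatial_scale=1.0)
    print(keep.tolist(), pooled.shape)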
|
from common import *
from trezor.ui import display
class TestDisplay(unittest.TestCase):
def test_clear(self):
display.clear()
def test_refresh(self):
display.refresh()
def test_bar(self):
display.bar(0, 0, 10, 10, 0xFFFF)
def test_bar_radius(self):
display.bar_radius(0, 0, 10, 10, 0xFFFF, 0x0000, 16)
def test_image(self):
pass
def test_icon(self):
pass
def test_text(self):
display.text(120, 120, 'Test', 0, 0xFFFF, 0x0000)
def test_text_center(self):
display.text_center(120, 120, 'Test', 0, 0xFFFF, 0x0000)
def test_text_right(self):
display.text_right(120, 120, 'Test', 0, 0xFFFF, 0x0000)
def test_text_width(self):
display.text_width('Test', 0)
def test_qrcode(self):
display.qrcode(0, 0, 'Test', 4)
def test_loader(self):
display.loader(333, 0, 0xFFFF, 0x0000)
def test_orientation(self):
for o in [0, 90, 180, 270]:
display.orientation(o)
def test_backlight(self):
for b in range(256):
display.backlight(b)
def test_offset(self):
for x in range(-4, 5):
for y in range(-4, 5):
o = (x * 57, y * 57)
display.offset(o)
o2 = display.offset()
self.assertEqual(o, o2)
def test_raw(self):
pass
def test_save(self):
pass
if __name__ == '__main__':
unittest.main()
|
# Pandas
import pandas as pd
# utils
import joblib
class Utils:
"""Handles the utils for data"""
def load_from_csv(self, path):
"""Load to a csv"""
return pd.read_csv(path)
def features_target(self, dataset, dropcols, y):
"""Slplit features and targets"""
X = dataset.drop(dropcols, axis=1)
y = dataset[y]
return X, y
def model_export(self, clf, score):
"""Export best model."""
joblib.dump(clf, './models/best_model.pkl')
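# --- Hedged usage sketch (added by the editor; not part of the original module) ---
# The CSV path and column names below are illustrative assumptions, not files
# shipped with this project.
if __name__ == "__main__":
    utils = Utils()
    data = utils.load_from_csv('./in/data.csv')
    X, y = utils.features_target(data, dropcols=['target'], y='target')
    print(X.shape, y.shape)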
|
from django.contrib.auth.models import User
from django.db import models
from main.models import PhotoItem
from psycho.settings import st_password
class Person(PhotoItem):
"""
Stores all information about a psychologist
"""
id = models.AutoField(primary_key=True)
full_name = models.CharField('Имя', max_length=100)
name = models.CharField('Логин', max_length=50, unique=True,
help_text='Уникальный логин. При входе в систему его нужно будет указывать. ')
birth_date = models.DateField('Дата рождения')
email = models.EmailField()
info = models.TextField('Основная информация', blank=True,
help_text='Хорошее краткое описание своей деятельности')
bio = models.TextField('Биография', blank=True)
contacts = models.TextField('Другие контактные данные: ', blank=True)
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile', null=True, blank=True)
def save(self, *args, **kwargs):
if self.user:
user = User.objects.get(id=self.user.id)
user.username, user.email = self.name, self.email
user.save()
else:
user = User.objects.create_user(self.name, self.email, st_password)
user.save()
self.user = user
user.groups.add(1)
user.is_staff = True
user.save()
self.save_photo()
super().save(*args, **kwargs)
def delete(self, *args, **kwargs):
user = User.objects.get(id=self.user.id)
if not user.is_superuser:
user.delete()
super().delete(*args, **kwargs)
def __str__(self):
return self.full_name
class Meta:
"""
Person model settings
"""
db_table = 'people'
verbose_name = 'Специалист'
verbose_name_plural = 'Специалисты'
ordering = ['-user__is_superuser']
class HelpItem(models.Model):
"""
Stores information about the type of help
"""
id = models.AutoField(primary_key=True)
name = models.CharField("Название", max_length=50, help_text='С чем работает эксперт?')
description = models.TextField("Описание", null=True, blank=True, max_length=200)
expert = models.ForeignKey(Person, on_delete=models.CASCADE, verbose_name='Эксперт', related_name='help_items')
def __str__(self):
return self.name
class Meta:
"""
HelpItem model settings
"""
ordering = ['-id']
db_table = 'help'
verbose_name = 'Пункт помощи'
verbose_name_plural = 'Пункты помощи'
class Achievement(PhotoItem):
"""
Stores information about a single certificate or other proof of competence
"""
priority = models.IntegerField("Приоритет", default=2,
help_text='Чем больше приоритет, тем выше в списке будет достижение')
expert = models.ForeignKey(Person, on_delete=models.CASCADE, verbose_name='Эксперт', related_name='achievements')
def __str__(self):
return self.alt
class Meta:
"""
Achievement model settings
"""
db_table = 'achievements'
verbose_name = 'Достижение'
ordering = ['-priority']
verbose_name_plural = 'Достижения'
|
"""
flightAPI
AirMapSDK
Created by AirMap Team on 6/28/16.
Copyright (c) 2016 AirMap, Inc. All rights reserved.
"""
# flightAPI.py -- Flight API functions
import traceback
import httplib
import urllib
import json
import ssl
import time
import datetime
import socket
from airdefs import Advisory, Advisories, Properties, Globals
import os
import subprocess
class Flight:
os = __import__('os')
connection = None
headers = None
thisGlobals = Globals()
def __init__(self):
pass
def get_FlightList(self, pilotID):
connection = httplib.HTTPSConnection(Globals.httpsAddr, Globals.httpsPort, timeout=Globals.timeOut)
headers = Globals.xapikey
try:
connection.request('GET', '/flight/v2/?pilot_id='+str(pilotID)+'&enhance=true', '', headers)
result = connection.getresponse().read()
parsed_json = json.loads(result)
flight_collection = parsed_json['data']['results']
return flight_collection
except:
traceback.print_exc()
def cmd_KillFlights(self, pilotID):
connection = httplib.HTTPSConnection(Globals.httpsAddr, Globals.httpsPort, timeout=Globals.timeOut)
headers = Globals.xapikey
try:
connection.request('GET', '/flight/v2/?pilot_id='+str(pilotID)+'&enhance=true', '', headers)
result = connection.getresponse().read()
parsed_json = json.loads(result)
flight_collection = parsed_json['data']['results']
for flights in flight_collection:
endFlight = flights['id']
#destroy flight
print "deleting {}".format(endFlight)
try:
connectFlight = httplib.HTTPSConnection(Globals.httpsAddr, Globals.httpsPort, timeout=Globals.timeOut)
headers = Globals.xapikey
headers['Authorization'] = "Bearer {}".format(Globals.myToken)
connectFlight.request('POST', '/flight/v2/{}/delete'.format(endFlight), '', headers)
result = connectFlight.getresponse().read()
#print(result)
except:
print "Kill Flights Error..."
traceback.print_exc()
except:
traceback.print_exc()
def get_PilotID(self):
if Globals.pilotIDValid == True:
return Globals.pilot_id
else:
return False
def create_FlightPoint(self, time, lat, lon, public, notify):
startTime = datetime.datetime.utcnow()
endTime = startTime + datetime.timedelta(0,(time*60))
startTime = startTime.isoformat() + "-00:00"
endTime = endTime.isoformat() + "-00:00"
print startTime
print endTime
try:
connectFlight = httplib.HTTPSConnection(Globals.httpsAddr, Globals.httpsPort, timeout=Globals.timeOut)
headers = Globals.xapikey
headers['Authorization'] = "Bearer {}".format(Globals.myToken)
connectFlight.request('POST', '/flight/v2/point', json.dumps({"latitude":float(lat),"longitude":float(lon),"max_altitude":100,"start_time":"{}".format(startTime),"end_time":"" + endTime + "","public":bool(public),"notify":bool(notify)}), headers)
result = connectFlight.getresponse().read()
#Globals.strPrint(self.thisGlobals,result)
try:
parsed_json = json.loads(result)
parsed_status = parsed_json['status']
print parsed_status
Globals.pilot_id = parsed_json['data']['pilot_id']
Globals.pilotIDValid = True
#Globals.strPrint (self.thisGlobals,Globals.pilot_id)
except:
Globals.strPrint (self.thisGlobals,"Pilot ID not found...Retry!")
Globals.strPrint (self.thisGlobals,result)
return False
if parsed_status != "success":
return False
Globals.myFlightID = parsed_json['data']['id']
except:
print "Create Flight Error..."
traceback.print_exc()
return Globals.myFlightID
def create_FlightPolygon(self, time, lat, lon,thisBounds, public, notify):
startTime = datetime.datetime.utcnow()
endTime = startTime + datetime.timedelta(0,(time*60))
startTime = startTime.isoformat() + "-00:00"
endTime = endTime.isoformat() + "-00:00"
thisBounds = json.loads(thisBounds)
try:
connectFlight = httplib.HTTPSConnection(Globals.httpsAddr, Globals.httpsPort, timeout=Globals.timeOut)
headers = Globals.xapikey
headers['Authorization'] = "Bearer {}".format(Globals.myToken)
connectFlight.request('POST', '/flight/v2/polygon', json.dumps({"latitude":float(lat),"longitude":float(lon),"max_altitude":100,"start_time":"{}".format(startTime),"end_time":"" + endTime + "","public":bool(public),"notify":bool(notify),"geometry":{"type":"Polygon","coordinates": thisBounds}}), headers)
result = connectFlight.getresponse().read()
#Globals.strPrint(self.thisGlobals,result)
try:
parsed_json = json.loads(result)
parsed_status = parsed_json['status']
print parsed_status
Globals.pilot_id = parsed_json['data']['pilot_id']
Globals.pilotIDValid = True
#Globals.strPrint (self.thisGlobals,Globals.pilot_id)
except:
Globals.strPrint (self.thisGlobals,"Pilot ID not found...Retry!")
Globals.strPrint (self.thisGlobals,result)
return False
if parsed_status != "success":
return False
Globals.myFlightID = parsed_json['data']['id']
except:
print "Create Flight Error..."
traceback.print_exc()
return Globals.myFlightID
def end_Flight(self, flightID):
try:
connectFlight = httplib.HTTPSConnection(Globals.httpsAddr, Globals.httpsPort, timeout=Globals.timeOut)
headers = Globals.xapikey
headers['Authorization'] = "Bearer {}".format(Globals.myToken)
connectFlight.request('POST', '/flight/v2/{}/end'.format(flightID), '', headers)
result = connectFlight.getresponse().read()
parsed_json = json.loads(result)
parsed_status = parsed_json['status']
if parsed_status != "success":
return False
else:
return True
except:
print "End Flight Error..."
traceback.print_exc()
def delete_Flight(self, flightID):
try:
connectFlight = httplib.HTTPSConnection(Globals.httpsAddr, Globals.httpsPort, timeout=Globals.timeOut)
headers = Globals.xapikey
headers['Authorization'] = "Bearer {}".format(Globals.myToken)
connectFlight.request('POST', '/flight/v2/{}/delete'.format(flightID), '', headers)
result = connectFlight.getresponse().read()
parsed_json = json.loads(result)
parsed_status = parsed_json['status']
if parsed_status != "success":
return False
else:
return True
except:
print "End Flight Error..."
traceback.print_exc()
def start_comm(self, flightID):
try:
connectFlight = httplib.HTTPSConnection(Globals.httpsAddr, Globals.httpsPort, timeout=Globals.timeOut)
headers = Globals.xapikey
headers['Authorization'] = "Bearer {}".format(Globals.myToken)
connectFlight.request('POST', '/flight/v2/{}/start-comm'.format(flightID), '', headers)
result = connectFlight.getresponse().read()
parsed_json = json.loads(result)
print parsed_json
#parsed_status = parsed_json['data']['key']['data']
parsed_status = parsed_json['data']['key']
print "H:" + parsed_status
#thisKey = (''.join(str(hex(i)[2:].zfill(2)) for i in parsed_status)).decode('hex')
thisKey = parsed_status.decode('base64')
return thisKey
except:
print "Could Not Start Comms..."
traceback.print_exc()
def end_comm(self, flightID):
try:
connectFlight = httplib.HTTPSConnection(Globals.httpsAddr, Globals.httpsPort, timeout=Globals.timeOut)
headers = Globals.xapikey
headers['Authorization'] = "Bearer {}".format(Globals.myToken)
connectFlight.request('POST', '/flight/v2/{}/end-comm'.format(flightID), '', headers)  # assumed end-comm endpoint, mirroring start_comm above
result = connectFlight.getresponse().read()
parsed_json = json.loads(result)
parsed_status = parsed_json['status']
if parsed_status != "success":
return False
else:
return True
except:
print "Could Not End Comms..."
traceback.print_exc()
def recover_Pilot(self):
try:
connectFlight = httplib.HTTPSConnection(Globals.httpsAddr, Globals.httpsPort, timeout=Globals.timeOut)
headers = Globals.xapikey
headers['Authorization'] = "Bearer {}".format(Globals.myToken)
connectFlight.request('GET', '/pilot/v2/profile', "", headers)
result = connectFlight.getresponse().read()
try:
parsed_json = json.loads(result)
parsed_status = parsed_json['status']
print parsed_status
Globals.pilot_id = parsed_json['data']['id']
Globals.pilotIDValid = True
except:
Globals.strPrint (self.thisGlobals,"Pilot Recover ID not found...Retry!")
Globals.strPrint (self.thisGlobals,result)
return False
if parsed_status != "success":
return False
except:
print "Create Flight Error..."
traceback.print_exc()
return Globals.pilot_id
|
from webdnn.backend.webgpu.optimize_rules.concat_lstm_input_and_hidden import ConcatLSTMInputAndHidden
from webdnn.backend.webgpu.optimize_rules.insert_transpose import InsertTranspose
from webdnn.graph.optimize_rule import OptimizeRuleGroup
from webdnn.optimizer.sub_rules.constant_folding import ConstantFolding
from webdnn.optimizer.sub_rules.elementwise_kernel_fusion import ElementwiseKernelFusion
from webdnn.optimizer.sub_rules.merge_sgemm_and_elementwise_mul import MergeSgemmAndElementwiseMul
from webdnn.optimizer.sub_rules.remove_no_effect_operator import RemoveNoEffectOperator
from webdnn.optimizer.sub_rules.remove_redundant_operator import RemoveRedundantOperator
from webdnn.optimizer.sub_rules.replace_convolution_by_im2col import ReplaceConvolutionByIm2Col
from webdnn.optimizer.sub_rules.replace_deconvolution_by_col2im import ReplaceDeconvolutionByCol2Im
from webdnn.optimizer.sub_rules.replace_linear_by_sgemm import ReplaceLinearBySgemm
from webdnn.optimizer.sub_rules.update_inplace_attribute import UpdateInplaceAttribute
class WebGPUOptimizeRule(OptimizeRuleGroup):
def __init__(self):
super(WebGPUOptimizeRule, self).__init__([
OptimizeRuleGroup([
InsertTranspose(),
ReplaceConvolutionByIm2Col(),
MergeSgemmAndElementwiseMul(),
ConstantFolding(),
ReplaceDeconvolutionByCol2Im(),
MergeSgemmAndElementwiseMul(),
ConstantFolding(),
ReplaceLinearBySgemm(),
MergeSgemmAndElementwiseMul(),
ConstantFolding(),
ConcatLSTMInputAndHidden(),
RemoveRedundantOperator(),
RemoveNoEffectOperator(),
UpdateInplaceAttribute()
]),
ElementwiseKernelFusion()
])
|
import json
import logging
from django.http import (
HttpResponse,
HttpResponseBadRequest
)
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.shortcuts import get_object_or_404
from papermerge.core.models import BaseTreeNode, Document, Folder
from papermerge.core.models.utils import recursive_delete
logger = logging.getLogger(__name__)
@login_required
def browse_view(request, parent_id=None):
nodes = BaseTreeNode.objects.filter(parent_id=parent_id)
nodes_list = []
parent_kv = []
if parent_id:
parent_node = get_object_or_404(
BaseTreeNode, id=parent_id
)
for item in parent_node.kv.all():
parent_kv.append(item.to_dict())
for node in nodes:
node_dict = node.to_dict()
if node.is_document():
node_dict['img_src'] = reverse(
'core:preview',
args=(node.id, 4, 1)
)
node_dict['document_url'] = reverse(
'core:document',
args=(node.id,)
)
nodes_list.append(node_dict)
return HttpResponse(
json.dumps(
{
'nodes': nodes_list,
'parent_id': parent_id,
'parent_kv': parent_kv
}
),
content_type="application/json"
)
@login_required
def breadcrumb_view(request, parent_id=None):
nodes = []
node = None
try:
node = BaseTreeNode.objects.get(id=parent_id)
except BaseTreeNode.DoesNotExist:
pass
if node:
nodes = [
item.to_dict() for item in node.get_ancestors(include_self=True)
]
return HttpResponse(
json.dumps({
'nodes': nodes,
}),
content_type="application/json"
)
@login_required
def node_view(request, node_id):
try:
node = BaseTreeNode.objects.get(id=node_id)
except BaseTreeNode.DoesNotExist:
return HttpResponseBadRequest(
json.dumps({
'msg': 'Node id=%s not found' % node_id
}),
content_type="application/json"
)
if request.method == "DELETE":
node.delete()
return HttpResponse(
json.dumps({
'msg': 'OK'
}),
content_type="application/json"
)
return HttpResponse(
json.dumps({
'node': node.to_dict()
}),
content_type="application/json"
)
@login_required
def nodes_view(request):
if request.method == "POST":
data = json.loads(request.body)
node_ids = [item['id'] for item in data]
queryset = BaseTreeNode.objects.filter(id__in=node_ids)
recursive_delete(queryset)
return HttpResponse(
json.dumps({
'msg': 'OK'
}),
content_type="application/json"
)
return HttpResponse(
json.dumps({
'msg': 'OK'
}),
content_type="application/json"
)
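# --- Hedged wiring sketch (added by the editor; not part of the original module) ---
# Illustrative urls.py entries for the views above; the route names and prefixes
# are assumptions, not the project's actual URLconf.
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('browse/', views.browse_view, name='browse'),
#     path('browse/<int:parent_id>/', views.browse_view, name='browse_children'),
#     path('breadcrumb/<int:parent_id>/', views.breadcrumb_view, name='breadcrumb'),
#     path('node/<int:node_id>/', views.node_view, name='node'),
#     path('nodes/', views.nodes_view, name='nodes'),
# ]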
|
import os
import requests
from gdelt20utils.common import utils_worker
from gdelt20utils.common.gd_logger import logger_obj
HEARTBEAT_LOG_NUM = 10
WORKER_ERROR_MAX = 100
class ExtractorError(Exception):
pass
class FileExtractWorker():
def __init__(
self,
language,
timestamps=None,
path_gen=None,
check_point=None,
api_client=None,
obj_types=None):
self.language = language
self.obj_types = obj_types
self.api_client = api_client
self.path_gen = path_gen
self.check_point = check_point
self.jobq = utils_worker.JobQueue()
skip_processed = check_point.get_cnt()
for ts in timestamps[skip_processed:]:
self.jobq.put(ts)
self.api_request_cnt = 0
self.logger = logger_obj.logger
def extract_data_worker(self, queue=None, worker_num=None):
heartbeat_cnt = 0
worker_error = 0
self.logger.info(f"Worker {worker_num} started")
while True:
ts = queue.get()
for obj_type in self.obj_types:
save_path = self.path_gen.get_data_file_path(
self.language, obj_type, ts)
try:
if not os.path.exists(save_path):
res_code, url = self.api_client.save_file(self.language, ts, obj_type, save_path)
if res_code != requests.codes.ok:
if res_code == requests.codes.not_found:
self.logger.warning(f"Worker {worker_num}: requests error - {url} code 404")
else:
raise ExtractorError(
f"Worker {worker_num}: requests error - url {url}, code {res_code}")
else:
self.logger.info(f"Worker {worker_num}: {save_path} exists, skip")
except Exception as exp:
self.logger.error(f"Worker {worker_num} error: {exp}")
worker_error += 1
queue.task_done()
self.check_point.update_checkpoint(ts)
if worker_error >= WORKER_ERROR_MAX:
self.logger.info(f"Worker {worker_num}: max error acceded {worker_error}")
queue.lock_flush()
heartbeat_cnt += 1
if not heartbeat_cnt % HEARTBEAT_LOG_NUM:
self.logger.info(f"Worker {worker_num}: heartbeat {heartbeat_cnt}, last ts {ts}")
def run(self):
self.logger.info(f"Start extraction, concurrency {utils_worker.CONCURENCY_NUM}, stats {self.check_point}")
self.jobq.start(
concurrency=utils_worker.CONCURENCY_NUM,
worker=self.extract_data_worker
)
self.logger.info(f"Finish extraction, stats {self.check_point}")
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from rtabmap_ros/NodeData.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import rtabmap_ros.msg
import geometry_msgs.msg
import std_msgs.msg
import sensor_msgs.msg
class NodeData(genpy.Message):
_md5sum = "9ea28782bcb5ac28722f26aaaa7acb18"
_type = "rtabmap_ros/NodeData"
_has_header = False #flag to mark the presence of a Header object
_full_text = """
int32 id
int32 mapId
int32 weight
float64 stamp
string label
# Pose from odometry not corrected
geometry_msgs/Pose pose
# Ground truth (optional)
geometry_msgs/Pose groundTruthPose
# GPS (optional)
GPS gps
# compressed image in /camera_link frame
# use rtabmap::util3d::uncompressImage() from "rtabmap/core/util3d.h"
uint8[] image
# compressed depth image in /camera_link frame
# use rtabmap::util3d::uncompressImage() from "rtabmap/core/util3d.h"
uint8[] depth
# Camera models
float32[] fx
float32[] fy
float32[] cx
float32[] cy
float32[] width
float32[] height
float32 baseline
# local transform (/base_link -> /camera_link)
geometry_msgs/Transform[] localTransform
# compressed 2D laser scan in /base_link frame
# use rtabmap::util3d::uncompressData() from "rtabmap/core/util3d.h"
uint8[] laserScan
int32 laserScanMaxPts
float32 laserScanMaxRange
int32 laserScanFormat
geometry_msgs/Transform laserScanLocalTransform
# compressed user data
# use rtabmap::util3d::uncompressData() from "rtabmap/core/util3d.h"
uint8[] userData
# compressed occupancy grid
# use rtabmap::util3d::uncompressData() from "rtabmap/core/util3d.h"
uint8[] grid_ground
uint8[] grid_obstacles
uint8[] grid_empty_cells
float32 grid_cell_size
Point3f grid_view_point
# std::multimap<wordId, cv::Keypoint>
# std::multimap<wordId, pcl::PointXYZ>
int32[] wordIds
KeyPoint[] wordKpts
sensor_msgs/PointCloud2 wordPts
# compressed descriptors
# use rtabmap::util3d::uncompressData() from "rtabmap/core/util3d.h"
uint8[] descriptors
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
================================================================================
MSG: rtabmap_ros/GPS
float64 stamp # in seconds
float64 longitude # DD format
float64 latitude # DD format
float64 altitude # in meters
float64 error # in meters
float64 bearing # North 0->360 deg
================================================================================
MSG: geometry_msgs/Transform
# This represents the transform between two coordinate frames in free space.
Vector3 translation
Quaternion rotation
================================================================================
MSG: geometry_msgs/Vector3
# This represents a vector in free space.
# It is only meant to represent a direction. Therefore, it does not
# make sense to apply a translation to it (e.g., when applying a
# generic rigid transformation to a Vector3, tf2 will only apply the
# rotation). If you want your data to be translatable too, use the
# geometry_msgs/Point message instead.
float64 x
float64 y
float64 z
================================================================================
MSG: rtabmap_ros/Point3f
#class cv::Point3f
#{
# float x;
# float y;
# float z;
#}
float32 x
float32 y
float32 z
================================================================================
MSG: rtabmap_ros/KeyPoint
#class cv::KeyPoint
#{
# Point2f pt;
# float size;
# float angle;
# float response;
# int octave;
# int class_id;
#}
Point2f pt
float32 size
float32 angle
float32 response
int32 octave
int32 class_id
================================================================================
MSG: rtabmap_ros/Point2f
#class cv::Point2f
#{
# float x;
# float y;
#}
float32 x
float32 y
================================================================================
MSG: sensor_msgs/PointCloud2
# This message holds a collection of N-dimensional points, which may
# contain additional information such as normals, intensity, etc. The
# point data is stored as a binary blob, its layout described by the
# contents of the "fields" array.
# The point cloud data may be organized 2d (image-like) or 1d
# (unordered). Point clouds organized as 2d images may be produced by
# camera depth sensors such as stereo or time-of-flight.
# Time of sensor data acquisition, and the coordinate frame ID (for 3d
# points).
Header header
# 2D structure of the point cloud. If the cloud is unordered, height is
# 1 and width is the length of the point cloud.
uint32 height
uint32 width
# Describes the channels and their layout in the binary data blob.
PointField[] fields
bool is_bigendian # Is this data bigendian?
uint32 point_step # Length of a point in bytes
uint32 row_step # Length of a row in bytes
uint8[] data # Actual point data, size is (row_step*height)
bool is_dense # True if there are no invalid points
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: sensor_msgs/PointField
# This message holds the description of one point entry in the
# PointCloud2 message format.
uint8 INT8 = 1
uint8 UINT8 = 2
uint8 INT16 = 3
uint8 UINT16 = 4
uint8 INT32 = 5
uint8 UINT32 = 6
uint8 FLOAT32 = 7
uint8 FLOAT64 = 8
string name # Name of field
uint32 offset # Offset from start of point struct
uint8 datatype # Datatype enumeration, see above
uint32 count # How many elements in the field
"""
__slots__ = ['id','mapId','weight','stamp','label','pose','groundTruthPose','gps','image','depth','fx','fy','cx','cy','width','height','baseline','localTransform','laserScan','laserScanMaxPts','laserScanMaxRange','laserScanFormat','laserScanLocalTransform','userData','grid_ground','grid_obstacles','grid_empty_cells','grid_cell_size','grid_view_point','wordIds','wordKpts','wordPts','descriptors']
_slot_types = ['int32','int32','int32','float64','string','geometry_msgs/Pose','geometry_msgs/Pose','rtabmap_ros/GPS','uint8[]','uint8[]','float32[]','float32[]','float32[]','float32[]','float32[]','float32[]','float32','geometry_msgs/Transform[]','uint8[]','int32','float32','int32','geometry_msgs/Transform','uint8[]','uint8[]','uint8[]','uint8[]','float32','rtabmap_ros/Point3f','int32[]','rtabmap_ros/KeyPoint[]','sensor_msgs/PointCloud2','uint8[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
id,mapId,weight,stamp,label,pose,groundTruthPose,gps,image,depth,fx,fy,cx,cy,width,height,baseline,localTransform,laserScan,laserScanMaxPts,laserScanMaxRange,laserScanFormat,laserScanLocalTransform,userData,grid_ground,grid_obstacles,grid_empty_cells,grid_cell_size,grid_view_point,wordIds,wordKpts,wordPts,descriptors
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(NodeData, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.id is None:
self.id = 0
if self.mapId is None:
self.mapId = 0
if self.weight is None:
self.weight = 0
if self.stamp is None:
self.stamp = 0.
if self.label is None:
self.label = ''
if self.pose is None:
self.pose = geometry_msgs.msg.Pose()
if self.groundTruthPose is None:
self.groundTruthPose = geometry_msgs.msg.Pose()
if self.gps is None:
self.gps = rtabmap_ros.msg.GPS()
if self.image is None:
self.image = b''
if self.depth is None:
self.depth = b''
if self.fx is None:
self.fx = []
if self.fy is None:
self.fy = []
if self.cx is None:
self.cx = []
if self.cy is None:
self.cy = []
if self.width is None:
self.width = []
if self.height is None:
self.height = []
if self.baseline is None:
self.baseline = 0.
if self.localTransform is None:
self.localTransform = []
if self.laserScan is None:
self.laserScan = b''
if self.laserScanMaxPts is None:
self.laserScanMaxPts = 0
if self.laserScanMaxRange is None:
self.laserScanMaxRange = 0.
if self.laserScanFormat is None:
self.laserScanFormat = 0
if self.laserScanLocalTransform is None:
self.laserScanLocalTransform = geometry_msgs.msg.Transform()
if self.userData is None:
self.userData = b''
if self.grid_ground is None:
self.grid_ground = b''
if self.grid_obstacles is None:
self.grid_obstacles = b''
if self.grid_empty_cells is None:
self.grid_empty_cells = b''
if self.grid_cell_size is None:
self.grid_cell_size = 0.
if self.grid_view_point is None:
self.grid_view_point = rtabmap_ros.msg.Point3f()
if self.wordIds is None:
self.wordIds = []
if self.wordKpts is None:
self.wordKpts = []
if self.wordPts is None:
self.wordPts = sensor_msgs.msg.PointCloud2()
if self.descriptors is None:
self.descriptors = b''
else:
self.id = 0
self.mapId = 0
self.weight = 0
self.stamp = 0.
self.label = ''
self.pose = geometry_msgs.msg.Pose()
self.groundTruthPose = geometry_msgs.msg.Pose()
self.gps = rtabmap_ros.msg.GPS()
self.image = b''
self.depth = b''
self.fx = []
self.fy = []
self.cx = []
self.cy = []
self.width = []
self.height = []
self.baseline = 0.
self.localTransform = []
self.laserScan = b''
self.laserScanMaxPts = 0
self.laserScanMaxRange = 0.
self.laserScanFormat = 0
self.laserScanLocalTransform = geometry_msgs.msg.Transform()
self.userData = b''
self.grid_ground = b''
self.grid_obstacles = b''
self.grid_empty_cells = b''
self.grid_cell_size = 0.
self.grid_view_point = rtabmap_ros.msg.Point3f()
self.wordIds = []
self.wordKpts = []
self.wordPts = sensor_msgs.msg.PointCloud2()
self.descriptors = b''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3id().pack(_x.id, _x.mapId, _x.weight, _x.stamp))
_x = self.label
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_20d().pack(_x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w, _x.groundTruthPose.position.x, _x.groundTruthPose.position.y, _x.groundTruthPose.position.z, _x.groundTruthPose.orientation.x, _x.groundTruthPose.orientation.y, _x.groundTruthPose.orientation.z, _x.groundTruthPose.orientation.w, _x.gps.stamp, _x.gps.longitude, _x.gps.latitude, _x.gps.altitude, _x.gps.error, _x.gps.bearing))
_x = self.image
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.depth
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.fx)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.fx))
length = len(self.fy)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.fy))
length = len(self.cx)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.cx))
length = len(self.cy)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.cy))
length = len(self.width)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.width))
length = len(self.height)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.height))
buff.write(_get_struct_f().pack(self.baseline))
length = len(self.localTransform)
buff.write(_struct_I.pack(length))
for val1 in self.localTransform:
_v1 = val1.translation
_x = _v1
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v2 = val1.rotation
_x = _v2
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
_x = self.laserScan
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_ifi7d().pack(_x.laserScanMaxPts, _x.laserScanMaxRange, _x.laserScanFormat, _x.laserScanLocalTransform.translation.x, _x.laserScanLocalTransform.translation.y, _x.laserScanLocalTransform.translation.z, _x.laserScanLocalTransform.rotation.x, _x.laserScanLocalTransform.rotation.y, _x.laserScanLocalTransform.rotation.z, _x.laserScanLocalTransform.rotation.w))
_x = self.userData
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.grid_ground
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.grid_obstacles
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.grid_empty_cells
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_4f().pack(_x.grid_cell_size, _x.grid_view_point.x, _x.grid_view_point.y, _x.grid_view_point.z))
length = len(self.wordIds)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(struct.pack(pattern, *self.wordIds))
length = len(self.wordKpts)
buff.write(_struct_I.pack(length))
for val1 in self.wordKpts:
_v3 = val1.pt
_x = _v3
buff.write(_get_struct_2f().pack(_x.x, _x.y))
_x = val1
buff.write(_get_struct_3f2i().pack(_x.size, _x.angle, _x.response, _x.octave, _x.class_id))
_x = self
buff.write(_get_struct_3I().pack(_x.wordPts.header.seq, _x.wordPts.header.stamp.secs, _x.wordPts.header.stamp.nsecs))
_x = self.wordPts.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.wordPts.height, _x.wordPts.width))
length = len(self.wordPts.fields)
buff.write(_struct_I.pack(length))
for val1 in self.wordPts.fields:
_x = val1.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_get_struct_IBI().pack(_x.offset, _x.datatype, _x.count))
_x = self
buff.write(_get_struct_B2I().pack(_x.wordPts.is_bigendian, _x.wordPts.point_step, _x.wordPts.row_step))
_x = self.wordPts.data
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(self.wordPts.is_dense))
_x = self.descriptors
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.pose is None:
self.pose = geometry_msgs.msg.Pose()
if self.groundTruthPose is None:
self.groundTruthPose = geometry_msgs.msg.Pose()
if self.gps is None:
self.gps = rtabmap_ros.msg.GPS()
if self.localTransform is None:
self.localTransform = None
if self.laserScanLocalTransform is None:
self.laserScanLocalTransform = geometry_msgs.msg.Transform()
if self.grid_view_point is None:
self.grid_view_point = rtabmap_ros.msg.Point3f()
if self.wordKpts is None:
self.wordKpts = None
if self.wordPts is None:
self.wordPts = sensor_msgs.msg.PointCloud2()
end = 0
_x = self
start = end
end += 20
(_x.id, _x.mapId, _x.weight, _x.stamp,) = _get_struct_3id().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.label = str[start:end].decode('utf-8')
else:
self.label = str[start:end]
_x = self
start = end
end += 160
(_x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w, _x.groundTruthPose.position.x, _x.groundTruthPose.position.y, _x.groundTruthPose.position.z, _x.groundTruthPose.orientation.x, _x.groundTruthPose.orientation.y, _x.groundTruthPose.orientation.z, _x.groundTruthPose.orientation.w, _x.gps.stamp, _x.gps.longitude, _x.gps.latitude, _x.gps.altitude, _x.gps.error, _x.gps.bearing,) = _get_struct_20d().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.image = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.depth = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.fx = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.fy = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.cx = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.cy = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.width = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.height = struct.unpack(pattern, str[start:end])
start = end
end += 4
(self.baseline,) = _get_struct_f().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.localTransform = []
for i in range(0, length):
val1 = geometry_msgs.msg.Transform()
_v4 = val1.translation
_x = _v4
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v5 = val1.rotation
_x = _v5
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
self.localTransform.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.laserScan = str[start:end]
_x = self
start = end
end += 68
(_x.laserScanMaxPts, _x.laserScanMaxRange, _x.laserScanFormat, _x.laserScanLocalTransform.translation.x, _x.laserScanLocalTransform.translation.y, _x.laserScanLocalTransform.translation.z, _x.laserScanLocalTransform.rotation.x, _x.laserScanLocalTransform.rotation.y, _x.laserScanLocalTransform.rotation.z, _x.laserScanLocalTransform.rotation.w,) = _get_struct_ifi7d().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.userData = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.grid_ground = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.grid_obstacles = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.grid_empty_cells = str[start:end]
_x = self
start = end
end += 16
(_x.grid_cell_size, _x.grid_view_point.x, _x.grid_view_point.y, _x.grid_view_point.z,) = _get_struct_4f().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.wordIds = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.wordKpts = []
for i in range(0, length):
val1 = rtabmap_ros.msg.KeyPoint()
_v6 = val1.pt
_x = _v6
start = end
end += 8
(_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])
_x = val1
start = end
end += 20
(_x.size, _x.angle, _x.response, _x.octave, _x.class_id,) = _get_struct_3f2i().unpack(str[start:end])
self.wordKpts.append(val1)
_x = self
start = end
end += 12
(_x.wordPts.header.seq, _x.wordPts.header.stamp.secs, _x.wordPts.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.wordPts.header.frame_id = str[start:end].decode('utf-8')
else:
self.wordPts.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.wordPts.height, _x.wordPts.width,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.wordPts.fields = []
for i in range(0, length):
val1 = sensor_msgs.msg.PointField()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8')
else:
val1.name = str[start:end]
_x = val1
start = end
end += 9
(_x.offset, _x.datatype, _x.count,) = _get_struct_IBI().unpack(str[start:end])
self.wordPts.fields.append(val1)
_x = self
start = end
end += 9
(_x.wordPts.is_bigendian, _x.wordPts.point_step, _x.wordPts.row_step,) = _get_struct_B2I().unpack(str[start:end])
self.wordPts.is_bigendian = bool(self.wordPts.is_bigendian)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.wordPts.data = str[start:end]
start = end
end += 1
(self.wordPts.is_dense,) = _get_struct_B().unpack(str[start:end])
self.wordPts.is_dense = bool(self.wordPts.is_dense)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.descriptors = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3id().pack(_x.id, _x.mapId, _x.weight, _x.stamp))
_x = self.label
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_20d().pack(_x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w, _x.groundTruthPose.position.x, _x.groundTruthPose.position.y, _x.groundTruthPose.position.z, _x.groundTruthPose.orientation.x, _x.groundTruthPose.orientation.y, _x.groundTruthPose.orientation.z, _x.groundTruthPose.orientation.w, _x.gps.stamp, _x.gps.longitude, _x.gps.latitude, _x.gps.altitude, _x.gps.error, _x.gps.bearing))
_x = self.image
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.depth
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.fx)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.fx.tostring())
length = len(self.fy)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.fy.tostring())
length = len(self.cx)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.cx.tostring())
length = len(self.cy)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.cy.tostring())
length = len(self.width)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.width.tostring())
length = len(self.height)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.height.tostring())
buff.write(_get_struct_f().pack(self.baseline))
length = len(self.localTransform)
buff.write(_struct_I.pack(length))
for val1 in self.localTransform:
_v7 = val1.translation
_x = _v7
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v8 = val1.rotation
_x = _v8
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
_x = self.laserScan
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_ifi7d().pack(_x.laserScanMaxPts, _x.laserScanMaxRange, _x.laserScanFormat, _x.laserScanLocalTransform.translation.x, _x.laserScanLocalTransform.translation.y, _x.laserScanLocalTransform.translation.z, _x.laserScanLocalTransform.rotation.x, _x.laserScanLocalTransform.rotation.y, _x.laserScanLocalTransform.rotation.z, _x.laserScanLocalTransform.rotation.w))
_x = self.userData
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.grid_ground
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.grid_obstacles
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.grid_empty_cells
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_4f().pack(_x.grid_cell_size, _x.grid_view_point.x, _x.grid_view_point.y, _x.grid_view_point.z))
length = len(self.wordIds)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(self.wordIds.tostring())
length = len(self.wordKpts)
buff.write(_struct_I.pack(length))
for val1 in self.wordKpts:
_v9 = val1.pt
_x = _v9
buff.write(_get_struct_2f().pack(_x.x, _x.y))
_x = val1
buff.write(_get_struct_3f2i().pack(_x.size, _x.angle, _x.response, _x.octave, _x.class_id))
_x = self
buff.write(_get_struct_3I().pack(_x.wordPts.header.seq, _x.wordPts.header.stamp.secs, _x.wordPts.header.stamp.nsecs))
_x = self.wordPts.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.wordPts.height, _x.wordPts.width))
length = len(self.wordPts.fields)
buff.write(_struct_I.pack(length))
for val1 in self.wordPts.fields:
_x = val1.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_get_struct_IBI().pack(_x.offset, _x.datatype, _x.count))
_x = self
buff.write(_get_struct_B2I().pack(_x.wordPts.is_bigendian, _x.wordPts.point_step, _x.wordPts.row_step))
_x = self.wordPts.data
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(self.wordPts.is_dense))
_x = self.descriptors
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.pose is None:
self.pose = geometry_msgs.msg.Pose()
if self.groundTruthPose is None:
self.groundTruthPose = geometry_msgs.msg.Pose()
if self.gps is None:
self.gps = rtabmap_ros.msg.GPS()
if self.localTransform is None:
self.localTransform = None
if self.laserScanLocalTransform is None:
self.laserScanLocalTransform = geometry_msgs.msg.Transform()
if self.grid_view_point is None:
self.grid_view_point = rtabmap_ros.msg.Point3f()
if self.wordKpts is None:
self.wordKpts = None
if self.wordPts is None:
self.wordPts = sensor_msgs.msg.PointCloud2()
end = 0
_x = self
start = end
end += 20
(_x.id, _x.mapId, _x.weight, _x.stamp,) = _get_struct_3id().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.label = str[start:end].decode('utf-8')
else:
self.label = str[start:end]
_x = self
start = end
end += 160
(_x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w, _x.groundTruthPose.position.x, _x.groundTruthPose.position.y, _x.groundTruthPose.position.z, _x.groundTruthPose.orientation.x, _x.groundTruthPose.orientation.y, _x.groundTruthPose.orientation.z, _x.groundTruthPose.orientation.w, _x.gps.stamp, _x.gps.longitude, _x.gps.latitude, _x.gps.altitude, _x.gps.error, _x.gps.bearing,) = _get_struct_20d().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.image = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.depth = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.fx = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.fy = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.cx = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.cy = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.width = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.height = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
start = end
end += 4
(self.baseline,) = _get_struct_f().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.localTransform = []
for i in range(0, length):
val1 = geometry_msgs.msg.Transform()
_v10 = val1.translation
_x = _v10
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v11 = val1.rotation
_x = _v11
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
self.localTransform.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.laserScan = str[start:end]
_x = self
start = end
end += 68
(_x.laserScanMaxPts, _x.laserScanMaxRange, _x.laserScanFormat, _x.laserScanLocalTransform.translation.x, _x.laserScanLocalTransform.translation.y, _x.laserScanLocalTransform.translation.z, _x.laserScanLocalTransform.rotation.x, _x.laserScanLocalTransform.rotation.y, _x.laserScanLocalTransform.rotation.z, _x.laserScanLocalTransform.rotation.w,) = _get_struct_ifi7d().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.userData = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.grid_ground = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.grid_obstacles = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.grid_empty_cells = str[start:end]
_x = self
start = end
end += 16
(_x.grid_cell_size, _x.grid_view_point.x, _x.grid_view_point.y, _x.grid_view_point.z,) = _get_struct_4f().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.wordIds = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.wordKpts = []
for i in range(0, length):
val1 = rtabmap_ros.msg.KeyPoint()
_v12 = val1.pt
_x = _v12
start = end
end += 8
(_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])
_x = val1
start = end
end += 20
(_x.size, _x.angle, _x.response, _x.octave, _x.class_id,) = _get_struct_3f2i().unpack(str[start:end])
self.wordKpts.append(val1)
_x = self
start = end
end += 12
(_x.wordPts.header.seq, _x.wordPts.header.stamp.secs, _x.wordPts.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.wordPts.header.frame_id = str[start:end].decode('utf-8')
else:
self.wordPts.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.wordPts.height, _x.wordPts.width,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.wordPts.fields = []
for i in range(0, length):
val1 = sensor_msgs.msg.PointField()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8')
else:
val1.name = str[start:end]
_x = val1
start = end
end += 9
(_x.offset, _x.datatype, _x.count,) = _get_struct_IBI().unpack(str[start:end])
self.wordPts.fields.append(val1)
_x = self
start = end
end += 9
(_x.wordPts.is_bigendian, _x.wordPts.point_step, _x.wordPts.row_step,) = _get_struct_B2I().unpack(str[start:end])
self.wordPts.is_bigendian = bool(self.wordPts.is_bigendian)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.wordPts.data = str[start:end]
start = end
end += 1
(self.wordPts.is_dense,) = _get_struct_B().unpack(str[start:end])
self.wordPts.is_dense = bool(self.wordPts.is_dense)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.descriptors = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3f2i = None
def _get_struct_3f2i():
global _struct_3f2i
if _struct_3f2i is None:
_struct_3f2i = struct.Struct("<3f2i")
return _struct_3f2i
_struct_IBI = None
def _get_struct_IBI():
global _struct_IBI
if _struct_IBI is None:
_struct_IBI = struct.Struct("<IBI")
return _struct_IBI
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
_struct_ifi7d = None
def _get_struct_ifi7d():
global _struct_ifi7d
if _struct_ifi7d is None:
_struct_ifi7d = struct.Struct("<ifi7d")
return _struct_ifi7d
_struct_f = None
def _get_struct_f():
global _struct_f
if _struct_f is None:
_struct_f = struct.Struct("<f")
return _struct_f
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_2f = None
def _get_struct_2f():
global _struct_2f
if _struct_2f is None:
_struct_2f = struct.Struct("<2f")
return _struct_2f
_struct_20d = None
def _get_struct_20d():
global _struct_20d
if _struct_20d is None:
_struct_20d = struct.Struct("<20d")
return _struct_20d
_struct_B2I = None
def _get_struct_B2I():
global _struct_B2I
if _struct_B2I is None:
_struct_B2I = struct.Struct("<B2I")
return _struct_B2I
_struct_3id = None
def _get_struct_3id():
global _struct_3id
if _struct_3id is None:
_struct_3id = struct.Struct("<3id")
return _struct_3id
_struct_4f = None
def _get_struct_4f():
global _struct_4f
if _struct_4f is None:
_struct_4f = struct.Struct("<4f")
return _struct_4f
_struct_4d = None
def _get_struct_4d():
global _struct_4d
if _struct_4d is None:
_struct_4d = struct.Struct("<4d")
return _struct_4d
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
_struct_3d = None
def _get_struct_3d():
global _struct_3d
if _struct_3d is None:
_struct_3d = struct.Struct("<3d")
return _struct_3d
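# Hedged usage sketch (illustrative only): the generated message class defined above is
# assumed, based on its fields, to be rtabmap_ros/NodeData; buffer handling follows the
# usual genpy round-trip pattern.
#
#     from io import BytesIO
#     import rtabmap_ros.msg
#
#     msg = rtabmap_ros.msg.NodeData()
#     buf = BytesIO()
#     msg.serialize(buf)                     # uses the pack helpers defined above
#     restored = rtabmap_ros.msg.NodeData()
#     restored.deserialize(buf.getvalue())   # raises genpy.DeserializationError on a short buffer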
|
import unittest
from support import lib,ffi
from qcgc_test import QCGCTest
class FitAllocatorTest(QCGCTest):
def test_initialization(self):
# self.assertEqual( <config_value> ,lib.arenas().size)
self.assertEqual(1, lib.arenas().count)
self.assertNotEqual(ffi.NULL, lib.arenas().items)
# self.assertEqual( <config_value> ,lib.free_arenas().size)
self.assertEqual(0, lib.free_arenas().count)
self.assertNotEqual(ffi.NULL, lib.free_arenas().items)
self.assertEqual(ffi.addressof(lib.arena_cells(lib.arenas().items[0])[lib.qcgc_arena_first_cell_index]), lib._qcgc_bump_allocator.ptr)
self.assertEqual(lib.qcgc_arena_cells_count - lib.qcgc_arena_first_cell_index, self.bump_remaining_cells())
for i in range(lib.qcgc_small_free_lists):
self.assertEqual(lib.QCGC_SMALL_FREE_LIST_INIT_SIZE, lib.small_free_list(i).size)
self.assertEqual(0, lib.small_free_list(i).count)
self.assertNotEqual(ffi.NULL, lib.small_free_list(i).items)
for i in range(lib.qcgc_large_free_lists):
self.assertEqual(lib.QCGC_LARGE_FREE_LIST_INIT_SIZE, lib.large_free_list(i).size)
self.assertEqual(0, lib.large_free_list(i).count)
self.assertNotEqual(ffi.NULL, lib.large_free_list(i).items)
def test_macro_consistency(self):
self.assertEqual(2**lib.QCGC_LARGE_FREE_LIST_FIRST_EXP, lib.qcgc_small_free_lists + 1)
last_exp = lib.QCGC_LARGE_FREE_LIST_FIRST_EXP + lib.qcgc_large_free_lists - 1
self.assertEqual(2**last_exp * 16, 2**lib.QCGC_LARGE_ALLOC_THRESHOLD_EXP)
def test_small_free_list_index(self):
for i in range(1, lib.qcgc_small_free_lists + 1):
self.assertTrue(lib.is_small(i))
self.assertEqual(lib.small_index(i), i - 1)
self.assertEqual(lib.small_index_to_cells(i - 1), i)
self.assertLess(lib.small_index(i), lib.qcgc_small_free_lists)
def test_large_free_list_index(self):
index = -1
for i in range(2**lib.QCGC_LARGE_FREE_LIST_FIRST_EXP, 2**lib.QCGC_LARGE_ALLOC_THRESHOLD_EXP // 16):
if (i & (i - 1) == 0):
# Check for power of two
index = index + 1
self.assertFalse(lib.is_small(i))
self.assertEqual(index, lib.large_index(i))
self.assertLess(lib.large_index(i), lib.qcgc_large_free_lists)
def test_block_validity_check(self):
arena = lib.qcgc_arena_create()
first = ffi.addressof(lib.arena_cells(arena)[lib.qcgc_arena_first_cell_index])
self.assertTrue(lib.valid_block(first, lib.qcgc_arena_cells_count - lib.qcgc_arena_first_cell_index))
lib.qcgc_arena_mark_allocated(first, 10)
self.assertFalse(lib.valid_block(first, 10))
self.set_blocktype(first, lib.BLOCK_FREE)
self.assertTrue(lib.valid_block(first, 10))
self.assertFalse(lib.valid_block(first, 8))
self.assertFalse(lib.valid_block(first + 1, 9))
self.assertFalse(lib.valid_block(first + 1, 8))
def test_add_small(self):
blocks = list()
for i in range(1, lib.qcgc_small_free_lists + 1):
p = self.bump_allocate_cells(i)
lib.qcgc_arena_mark_free(p)
blocks.append(p)
lib.qcgc_fit_allocator_add(p, i)
for i in range(lib.qcgc_small_free_lists):
l = lib.small_free_list(i)
self.assertEqual(l.count, 1)
self.assertEqual(blocks[i], l.items[0])
def test_add_large(self):
blocks = list()
for i in range(lib.qcgc_large_free_lists):
size = 2**(i + lib.QCGC_LARGE_FREE_LIST_FIRST_EXP)
p = self.bump_allocate_cells(size)
lib.qcgc_arena_mark_free(p)
blocks.append(p)
lib.qcgc_fit_allocator_add(p, size)
for i in range(lib.qcgc_large_free_lists):
size = 2**(i + lib.QCGC_LARGE_FREE_LIST_FIRST_EXP)
l = lib.large_free_list(i)
self.assertEqual(l.count, 1)
self.assertEqual(blocks[i], l.items[0].ptr)
self.assertEqual(size, l.items[0].size)
def test_allocate_exact(self):
"Test allocate when there is always exactly the size needed"
# Small first fit
for i in range(1, lib.qcgc_small_free_lists + 1):
p = self.bump_allocate_cells(i)
lib.qcgc_arena_mark_free(p)
lib.qcgc_fit_allocator_add(p, i)
q = self.fit_allocate(i)
self.assertEqual(p, q)
q = self.fit_allocate(i)
self.assertNotEqual(p, q)
# Large first fit
for i in range(lib.qcgc_large_free_lists):
size = 2**(i + lib.QCGC_LARGE_FREE_LIST_FIRST_EXP)
p = self.bump_allocate_cells(size)
lib.qcgc_arena_mark_free(p)
lib.qcgc_fit_allocator_add(p, size)
q = self.fit_allocate(size)
self.assertEqual(p, q)
q = self.fit_allocate(size)
self.assertNotEqual(p, q)
def test_allocate_no_block(self):
"Test allocate when no block is available"
p = self.bump_allocate_cells(1)
lib.qcgc_arena_mark_free(p)
lib.qcgc_fit_allocator_add(p, 1)
q = self.fit_allocate(2)
self.assertEqual(ffi.NULL, q)
def test_allocate_block_splitting(self):
"Test allocation when blocks have to be split"
# Small block
size = lib.qcgc_small_free_lists
p = self.bump_allocate_cells(size)
lib.qcgc_arena_mark_free(p)
lib.qcgc_fit_allocator_add(p, size)
q = self.fit_allocate(1)
self.assertEqual(q, p)
q = self.fit_allocate(size - 1)
self.assertEqual(q, p + 1)
# Large block
size = 2**(1 + lib.QCGC_LARGE_FREE_LIST_FIRST_EXP)
p = self.bump_allocate_cells(size)
lib.qcgc_arena_mark_free(p)
lib.qcgc_fit_allocator_add(p, size)
q = self.fit_allocate(1)
self.assertEqual(q, p)
q = self.fit_allocate(size - 2)
self.assertEqual(q, p + 1)
q = self.fit_allocate(1)
self.assertEqual(q, p + size - 1)
@unittest.skip("Free lists do not contain invalid blocks")
def test_allocate_coalesced_block(self):
"Test allocation when there are invalid blocks in the free lists"
# Small block
# coalesced area no 1
# ATOMIC! Invalidates internal invariant for short time
x = self.bump_allocate(16)
y = self.bump_allocate(16)
self.bump_allocate(16) # Prevent non-coalesced arena
lib.qcgc_arena_mark_free(ffi.cast("cell_t *",x))
lib.qcgc_arena_mark_free(ffi.cast("cell_t *",y))
lib.qcgc_fit_allocator_add(ffi.cast("cell_t *", x), 1)
lib.qcgc_fit_allocator_add(ffi.cast("cell_t *", y), 1)
self.set_blocktype(ffi.cast("cell_t *", y), lib.BLOCK_EXTENT)
# only valid block
p = self.bump_allocate_cells(1)
lib.qcgc_arena_mark_free(p)
lib.qcgc_fit_allocator_add(p, 1)
# coalesced area no 2
# ATOMIC! Invalidates internal invariant for short time
x = self.bump_allocate(16)
y = self.bump_allocate(16)
self.bump_allocate(16) # Prevent non-coalesced arena
lib.qcgc_arena_mark_free(ffi.cast("cell_t *",x))
lib.qcgc_arena_mark_free(ffi.cast("cell_t *",y))
lib.qcgc_fit_allocator_add(ffi.cast("cell_t *", x), 1)
lib.qcgc_fit_allocator_add(ffi.cast("cell_t *", y), 1)
self.set_blocktype(ffi.cast("cell_t *", y), lib.BLOCK_EXTENT)
q = self.fit_allocate(1)
self.assertEqual(p, q)
# Large block
# coalesced area no 1
# ATOMIC! Invalidates internal invariant for short time
x = self.bump_allocate(16 * 2**lib.QCGC_LARGE_FREE_LIST_FIRST_EXP)
y = self.bump_allocate(16 * 2**lib.QCGC_LARGE_FREE_LIST_FIRST_EXP)
self.bump_allocate(16) # Prevent non-coalesced arena
lib.qcgc_arena_mark_free(ffi.cast("cell_t *",x))
lib.qcgc_arena_mark_free(ffi.cast("cell_t *",y))
lib.qcgc_fit_allocator_add(ffi.cast("cell_t *", x), 2**lib.QCGC_LARGE_FREE_LIST_FIRST_EXP)
lib.qcgc_fit_allocator_add(ffi.cast("cell_t *", y), 2**lib.QCGC_LARGE_FREE_LIST_FIRST_EXP)
self.set_blocktype(ffi.cast("cell_t *", y), lib.BLOCK_EXTENT)
# only valid block
p = self.bump_allocate_cells(2**lib.QCGC_LARGE_FREE_LIST_FIRST_EXP)
lib.qcgc_arena_mark_free(p)
lib.qcgc_fit_allocator_add(p, 2**lib.QCGC_LARGE_FREE_LIST_FIRST_EXP)
# coalesced area no 2
# ATOMIC! Invalidates internal invariant for short time
x = self.bump_allocate(16 * 2**lib.QCGC_LARGE_FREE_LIST_FIRST_EXP)
y = self.bump_allocate(16 * 2**lib.QCGC_LARGE_FREE_LIST_FIRST_EXP)
self.bump_allocate(16) # Prevent non-coalesced arena
lib.qcgc_arena_mark_free(ffi.cast("cell_t *",x))
lib.qcgc_arena_mark_free(ffi.cast("cell_t *",y))
lib.qcgc_fit_allocator_add(ffi.cast("cell_t *", x), 2**lib.QCGC_LARGE_FREE_LIST_FIRST_EXP)
lib.qcgc_fit_allocator_add(ffi.cast("cell_t *", y), 2**lib.QCGC_LARGE_FREE_LIST_FIRST_EXP)
self.set_blocktype(ffi.cast("cell_t *", y), lib.BLOCK_EXTENT)
q = self.fit_allocate(2**lib.QCGC_LARGE_FREE_LIST_FIRST_EXP)
self.assertEqual(p, q)
def test_fit_allocate_no_double_entry(self):
roots = list()
x = self.bump_allocate(16 * 3)
roots.append(x)
self.bump_allocate(16 * 1)
roots.append(self.bump_allocate(16 * 1))
#
for r in roots:
self.push_root(r)
lib.bump_ptr_reset()
lib.qcgc_collect()
for _ in roots:
self.pop_root()
#
self.assertEqual(lib.small_free_list(0).count, 1)
#
del roots[0]
for r in roots:
self.push_root(r)
lib.qcgc_collect()
for _ in roots:
self.pop_root()
#
self.assertEqual(lib.small_free_list(3).count, 1)
#
y = lib.qcgc_fit_allocate(16 * 3) # Create double entry
self.assertEqual(lib.small_free_list(3).count, 0)
self.assertEqual(lib.small_free_list(0).count, 1)
self.assertEqual(x, y)
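# The helpers below convert cell counts into byte sizes for the allocator calls;
# the factor of 16 reflects the cell size assumed throughout these tests.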
def fit_allocate(self, cells):
p = lib.qcgc_fit_allocate(cells * 16)
return ffi.cast("cell_t *", p)
def bump_allocate_cells(self, cells):
p = self.bump_allocate(cells * 16)
self.bump_allocate(16) # Prevent non-coalesced arena
return ffi.cast("cell_t *", p)
if __name__ == "__main__":
unittest.main()
|
__all__ = [
'app',
'clear_cache'
]
from . import *
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.command_modules.storage._command_type import cli_storage_data_plane_command
from azure.cli.command_modules.storage._factory import \
(storage_client_factory, blob_data_service_factory, file_data_service_factory,
table_data_service_factory, queue_data_service_factory, cloud_storage_account_service_factory)
from azure.cli.command_modules.storage._format import \
(transform_container_list, transform_container_show,
transform_blob_output,
transform_share_list,
transform_file_output,
transform_entity_show,
transform_message_show,
transform_boolean_for_table,
transform_file_directory_result)
from azure.cli.command_modules.storage._validators import \
(transform_acl_list_output, transform_cors_list_output, transform_entity_query_output,
transform_logging_list_output, transform_metrics_list_output,
transform_url, transform_storage_list_output, transform_container_permission_output,
create_boolean_result_output_transformer)
from azure.cli.core.commands import cli_command
# storage account commands
factory = lambda kwargs: storage_client_factory().storage_accounts # noqa: E731 lambda vs def
cli_command(__name__, 'storage account check-name', 'azure.mgmt.storage.operations.storage_accounts_operations#StorageAccountsOperations.check_name_availability', factory)
cli_command(__name__, 'storage account delete', 'azure.mgmt.storage.operations.storage_accounts_operations#StorageAccountsOperations.delete', factory, confirmation=True)
cli_command(__name__, 'storage account show', 'azure.mgmt.storage.operations.storage_accounts_operations#StorageAccountsOperations.get_properties', factory)
cli_command(__name__, 'storage account create', 'azure.cli.command_modules.storage.custom#create_storage_account')
cli_command(__name__, 'storage account list', 'azure.cli.command_modules.storage.custom#list_storage_accounts')
cli_command(__name__, 'storage account show-usage', 'azure.cli.command_modules.storage.custom#show_storage_account_usage')
cli_command(__name__, 'storage account update', 'azure.cli.command_modules.storage.custom#set_storage_account_properties')
cli_command(__name__, 'storage account show-connection-string', 'azure.cli.command_modules.storage.custom#show_storage_account_connection_string')
cli_command(__name__, 'storage account keys renew', 'azure.mgmt.storage.operations.storage_accounts_operations#StorageAccountsOperations.regenerate_key', factory, transform=lambda x: x.keys)
cli_command(__name__, 'storage account keys list', 'azure.mgmt.storage.operations.storage_accounts_operations#StorageAccountsOperations.list_keys', factory, transform=lambda x: x.keys)
cli_storage_data_plane_command('storage account generate-sas', 'azure.storage.cloudstorageaccount#CloudStorageAccount.generate_shared_access_signature', cloud_storage_account_service_factory)
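# Example: the registrations above surface CLI commands such as
#   az storage account list
#   az storage account keys list
# where the second argument of each registration names the SDK or custom operation to invoke.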
# container commands
factory = blob_data_service_factory
cli_storage_data_plane_command('storage container list', 'azure.storage.blob.blockblobservice#BlockBlobService.list_containers', factory, transform=transform_storage_list_output, table_transformer=transform_container_list)
cli_storage_data_plane_command('storage container delete', 'azure.storage.blob.blockblobservice#BlockBlobService.delete_container', factory, transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table)
cli_storage_data_plane_command('storage container show', 'azure.storage.blob.blockblobservice#BlockBlobService.get_container_properties', factory, table_transformer=transform_container_show)
cli_storage_data_plane_command('storage container create', 'azure.storage.blob.blockblobservice#BlockBlobService.create_container', factory, transform=create_boolean_result_output_transformer('created'), table_transformer=transform_boolean_for_table)
cli_storage_data_plane_command('storage container generate-sas', 'azure.storage.blob.blockblobservice#BlockBlobService.generate_container_shared_access_signature', factory)
cli_storage_data_plane_command('storage container metadata update', 'azure.storage.blob.blockblobservice#BlockBlobService.set_container_metadata', factory)
cli_storage_data_plane_command('storage container metadata show', 'azure.storage.blob.blockblobservice#BlockBlobService.get_container_metadata', factory)
cli_storage_data_plane_command('storage container lease acquire', 'azure.storage.blob.blockblobservice#BlockBlobService.acquire_container_lease', factory)
cli_storage_data_plane_command('storage container lease renew', 'azure.storage.blob.blockblobservice#BlockBlobService.renew_container_lease', factory)
cli_storage_data_plane_command('storage container lease release', 'azure.storage.blob.blockblobservice#BlockBlobService.release_container_lease', factory)
cli_storage_data_plane_command('storage container lease change', 'azure.storage.blob.blockblobservice#BlockBlobService.change_container_lease', factory)
cli_storage_data_plane_command('storage container lease break', 'azure.storage.blob.blockblobservice#BlockBlobService.break_container_lease', factory)
cli_storage_data_plane_command('storage container exists', 'azure.storage.blob.baseblobservice#BaseBlobService.exists', factory, transform=create_boolean_result_output_transformer('exists'), table_transformer=transform_boolean_for_table)
cli_storage_data_plane_command('storage container set-permission', 'azure.storage.blob.baseblobservice#BaseBlobService.set_container_acl', factory)
cli_storage_data_plane_command('storage container show-permission', 'azure.storage.blob.baseblobservice#BaseBlobService.get_container_acl', factory, transform=transform_container_permission_output)
cli_storage_data_plane_command('storage container policy create', 'azure.cli.command_modules.storage.custom#create_acl_policy', factory)
cli_storage_data_plane_command('storage container policy delete', 'azure.cli.command_modules.storage.custom#delete_acl_policy', factory)
cli_storage_data_plane_command('storage container policy show', 'azure.cli.command_modules.storage.custom#get_acl_policy', factory)
cli_storage_data_plane_command('storage container policy list', 'azure.cli.command_modules.storage.custom#list_acl_policies', factory, table_transformer=transform_acl_list_output)
cli_storage_data_plane_command('storage container policy update', 'azure.cli.command_modules.storage.custom#set_acl_policy', factory)
# blob commands
cli_storage_data_plane_command('storage blob list', 'azure.storage.blob.blockblobservice#BlockBlobService.list_blobs', factory, transform=transform_storage_list_output, table_transformer=transform_blob_output)
cli_storage_data_plane_command('storage blob delete', 'azure.storage.blob.blockblobservice#BlockBlobService.delete_blob', factory, transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table)
cli_storage_data_plane_command('storage blob generate-sas', 'azure.storage.blob.blockblobservice#BlockBlobService.generate_blob_shared_access_signature', factory)
cli_storage_data_plane_command('storage blob url', 'azure.storage.blob.blockblobservice#BlockBlobService.make_blob_url', factory, transform=transform_url)
cli_storage_data_plane_command('storage blob snapshot', 'azure.storage.blob.blockblobservice#BlockBlobService.snapshot_blob', factory)
cli_storage_data_plane_command('storage blob show', 'azure.storage.blob.blockblobservice#BlockBlobService.get_blob_properties', factory, table_transformer=transform_blob_output)
cli_storage_data_plane_command('storage blob update', 'azure.storage.blob.blockblobservice#BlockBlobService.set_blob_properties', factory)
cli_storage_data_plane_command('storage blob exists', 'azure.storage.blob.baseblobservice#BaseBlobService.exists', factory, transform=create_boolean_result_output_transformer('exists'))
cli_storage_data_plane_command('storage blob download', 'azure.storage.blob.baseblobservice#BaseBlobService.get_blob_to_path', factory)
cli_storage_data_plane_command('storage blob upload', 'azure.cli.command_modules.storage.custom#upload_blob', factory)
cli_storage_data_plane_command('storage blob metadata show', 'azure.storage.blob.blockblobservice#BlockBlobService.get_blob_metadata', factory)
cli_storage_data_plane_command('storage blob metadata update', 'azure.storage.blob.blockblobservice#BlockBlobService.set_blob_metadata', factory)
cli_storage_data_plane_command('storage blob service-properties show', 'azure.storage.blob.baseblobservice#BaseBlobService.get_blob_service_properties', factory)
cli_storage_data_plane_command('storage blob lease acquire', 'azure.storage.blob.blockblobservice#BlockBlobService.acquire_blob_lease', factory)
cli_storage_data_plane_command('storage blob lease renew', 'azure.storage.blob.blockblobservice#BlockBlobService.renew_blob_lease', factory)
cli_storage_data_plane_command('storage blob lease release', 'azure.storage.blob.blockblobservice#BlockBlobService.release_blob_lease', factory)
cli_storage_data_plane_command('storage blob lease change', 'azure.storage.blob.blockblobservice#BlockBlobService.change_blob_lease', factory)
cli_storage_data_plane_command('storage blob lease break', 'azure.storage.blob.blockblobservice#BlockBlobService.break_blob_lease', factory)
cli_storage_data_plane_command('storage blob copy start', 'azure.storage.blob.blockblobservice#BlockBlobService.copy_blob', factory)
cli_storage_data_plane_command('storage blob copy start-batch', 'azure.cli.command_modules.storage.blob#storage_blob_copy_batch', factory)
cli_storage_data_plane_command('storage blob copy cancel', 'azure.storage.blob.blockblobservice#BlockBlobService.abort_copy_blob', factory)
cli_storage_data_plane_command('storage blob upload-batch',
'azure.cli.command_modules.storage.blob#storage_blob_upload_batch',
factory)
cli_storage_data_plane_command('storage blob download-batch',
'azure.cli.command_modules.storage.blob#storage_blob_download_batch',
factory)
# share commands
factory = file_data_service_factory
cli_storage_data_plane_command('storage share list', 'azure.storage.file.fileservice#FileService.list_shares', factory, transform=transform_storage_list_output, table_transformer=transform_share_list)
cli_storage_data_plane_command('storage share create', 'azure.storage.file.fileservice#FileService.create_share', factory, transform=create_boolean_result_output_transformer('created'), table_transformer=transform_boolean_for_table)
cli_storage_data_plane_command('storage share delete', 'azure.storage.file.fileservice#FileService.delete_share', factory, transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table)
cli_storage_data_plane_command('storage share generate-sas', 'azure.storage.file.fileservice#FileService.generate_share_shared_access_signature', factory)
cli_storage_data_plane_command('storage share stats', 'azure.storage.file.fileservice#FileService.get_share_stats', factory)
cli_storage_data_plane_command('storage share show', 'azure.storage.file.fileservice#FileService.get_share_properties', factory)
cli_storage_data_plane_command('storage share update', 'azure.storage.file.fileservice#FileService.set_share_properties', factory)
cli_storage_data_plane_command('storage share metadata show', 'azure.storage.file.fileservice#FileService.get_share_metadata', factory)
cli_storage_data_plane_command('storage share metadata update', 'azure.storage.file.fileservice#FileService.set_share_metadata', factory)
cli_storage_data_plane_command('storage share exists', 'azure.storage.file.fileservice#FileService.exists', factory, transform=create_boolean_result_output_transformer('exists'))
cli_storage_data_plane_command('storage share policy create', 'azure.cli.command_modules.storage.custom#create_acl_policy', factory)
cli_storage_data_plane_command('storage share policy delete', 'azure.cli.command_modules.storage.custom#delete_acl_policy', factory)
cli_storage_data_plane_command('storage share policy show', 'azure.cli.command_modules.storage.custom#get_acl_policy', factory)
cli_storage_data_plane_command('storage share policy list', 'azure.cli.command_modules.storage.custom#list_acl_policies', factory, table_transformer=transform_acl_list_output)
cli_storage_data_plane_command('storage share policy update', 'azure.cli.command_modules.storage.custom#set_acl_policy', factory)
# directory commands
cli_storage_data_plane_command('storage directory create', 'azure.storage.file.fileservice#FileService.create_directory', factory, transform=create_boolean_result_output_transformer('created'), table_transformer=transform_boolean_for_table)
cli_storage_data_plane_command('storage directory delete', 'azure.storage.file.fileservice#FileService.delete_directory', factory, transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table)
cli_storage_data_plane_command('storage directory show', 'azure.storage.file.fileservice#FileService.get_directory_properties', factory, table_transformer=transform_file_output)
cli_storage_data_plane_command('storage directory list', 'azure.cli.command_modules.storage.custom#list_share_directories', factory, transform=transform_file_directory_result, table_transformer=transform_file_output)
cli_storage_data_plane_command('storage directory exists', 'azure.storage.file.fileservice#FileService.exists', factory, transform=create_boolean_result_output_transformer('exists'))
cli_storage_data_plane_command('storage directory metadata show', 'azure.storage.file.fileservice#FileService.get_directory_metadata', factory)
cli_storage_data_plane_command('storage directory metadata update', 'azure.storage.file.fileservice#FileService.set_directory_metadata', factory)
# file commands
cli_storage_data_plane_command('storage file list', 'azure.cli.command_modules.storage.custom#list_share_files', factory, transform=transform_file_directory_result, table_transformer=transform_file_output)
cli_storage_data_plane_command('storage file delete', 'azure.storage.file.fileservice#FileService.delete_file', factory, transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table)
cli_storage_data_plane_command('storage file resize', 'azure.storage.file.fileservice#FileService.resize_file', factory)
cli_storage_data_plane_command('storage file url', 'azure.storage.file.fileservice#FileService.make_file_url', factory, transform=transform_url)
cli_storage_data_plane_command('storage file generate-sas', 'azure.storage.file.fileservice#FileService.generate_file_shared_access_signature', factory)
cli_storage_data_plane_command('storage file show', 'azure.storage.file.fileservice#FileService.get_file_properties', factory, table_transformer=transform_file_output)
cli_storage_data_plane_command('storage file update', 'azure.storage.file.fileservice#FileService.set_file_properties', factory)
cli_storage_data_plane_command('storage file exists', 'azure.storage.file.fileservice#FileService.exists', factory, transform=create_boolean_result_output_transformer('exists'))
cli_storage_data_plane_command('storage file download', 'azure.storage.file.fileservice#FileService.get_file_to_path', factory)
cli_storage_data_plane_command('storage file upload', 'azure.storage.file.fileservice#FileService.create_file_from_path', factory)
cli_storage_data_plane_command('storage file metadata show', 'azure.storage.file.fileservice#FileService.get_file_metadata', factory)
cli_storage_data_plane_command('storage file metadata update', 'azure.storage.file.fileservice#FileService.set_file_metadata', factory)
cli_storage_data_plane_command('storage file copy start', 'azure.storage.file.fileservice#FileService.copy_file', factory)
cli_storage_data_plane_command('storage file copy cancel', 'azure.storage.file.fileservice#FileService.abort_copy_file', factory)
cli_storage_data_plane_command('storage file upload-batch',
'azure.cli.command_modules.storage.file#storage_file_upload_batch',
factory)
cli_storage_data_plane_command('storage file download-batch',
'azure.cli.command_modules.storage.file#storage_file_download_batch',
factory)
cli_storage_data_plane_command('storage file copy start-batch',
'azure.cli.command_modules.storage.file#storage_file_copy_batch',
factory)
# table commands
factory = table_data_service_factory
cli_storage_data_plane_command('storage table generate-sas', 'azure.storage.table.tableservice#TableService.generate_table_shared_access_signature', factory)
cli_storage_data_plane_command('storage table stats', 'azure.storage.table.tableservice#TableService.get_table_service_stats', factory)
cli_storage_data_plane_command('storage table list', 'azure.storage.table.tableservice#TableService.list_tables', factory, transform=transform_storage_list_output)
cli_storage_data_plane_command('storage table create', 'azure.storage.table.tableservice#TableService.create_table', factory, transform=create_boolean_result_output_transformer('created'), table_transformer=transform_boolean_for_table)
cli_storage_data_plane_command('storage table exists', 'azure.storage.table.tableservice#TableService.exists', factory, transform=create_boolean_result_output_transformer('exists'))
cli_storage_data_plane_command('storage table delete', 'azure.storage.table.tableservice#TableService.delete_table', factory, transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table)
cli_storage_data_plane_command('storage table policy create', 'azure.cli.command_modules.storage.custom#create_acl_policy', factory)
cli_storage_data_plane_command('storage table policy delete', 'azure.cli.command_modules.storage.custom#delete_acl_policy', factory)
cli_storage_data_plane_command('storage table policy show', 'azure.cli.command_modules.storage.custom#get_acl_policy', factory)
cli_storage_data_plane_command('storage table policy list', 'azure.cli.command_modules.storage.custom#list_acl_policies', factory, table_transformer=transform_acl_list_output)
cli_storage_data_plane_command('storage table policy update', 'azure.cli.command_modules.storage.custom#set_acl_policy', factory)
# table entity commands
cli_storage_data_plane_command('storage entity query', 'azure.storage.table.tableservice#TableService.query_entities', factory, table_transformer=transform_entity_query_output)
cli_storage_data_plane_command('storage entity show', 'azure.storage.table.tableservice#TableService.get_entity', factory, table_transformer=transform_entity_show)
cli_storage_data_plane_command('storage entity insert', 'azure.cli.command_modules.storage.custom#insert_table_entity', factory)
cli_storage_data_plane_command('storage entity replace', 'azure.storage.table.tableservice#TableService.update_entity', factory)
cli_storage_data_plane_command('storage entity merge', 'azure.storage.table.tableservice#TableService.merge_entity', factory)
cli_storage_data_plane_command('storage entity delete', 'azure.storage.table.tableservice#TableService.delete_entity', factory, transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table)
# queue commands
factory = queue_data_service_factory
cli_storage_data_plane_command('storage queue generate-sas', 'azure.storage.queue.queueservice#QueueService.generate_queue_shared_access_signature', factory)
cli_storage_data_plane_command('storage queue stats', 'azure.storage.queue.queueservice#QueueService.get_queue_service_stats', factory)
cli_storage_data_plane_command('storage queue list', 'azure.storage.queue.queueservice#QueueService.list_queues', factory, transform=transform_storage_list_output)
cli_storage_data_plane_command('storage queue create', 'azure.storage.queue.queueservice#QueueService.create_queue', factory, transform=create_boolean_result_output_transformer('created'), table_transformer=transform_boolean_for_table)
cli_storage_data_plane_command('storage queue delete', 'azure.storage.queue.queueservice#QueueService.delete_queue', factory, transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table)
cli_storage_data_plane_command('storage queue metadata show', 'azure.storage.queue.queueservice#QueueService.get_queue_metadata', factory)
cli_storage_data_plane_command('storage queue metadata update', 'azure.storage.queue.queueservice#QueueService.set_queue_metadata', factory)
cli_storage_data_plane_command('storage queue exists', 'azure.storage.queue.queueservice#QueueService.exists', factory, transform=create_boolean_result_output_transformer('exists'))
cli_storage_data_plane_command('storage queue policy create', 'azure.cli.command_modules.storage.custom#create_acl_policy', factory)
cli_storage_data_plane_command('storage queue policy delete', 'azure.cli.command_modules.storage.custom#delete_acl_policy', factory)
cli_storage_data_plane_command('storage queue policy show', 'azure.cli.command_modules.storage.custom#get_acl_policy', factory)
cli_storage_data_plane_command('storage queue policy list', 'azure.cli.command_modules.storage.custom#list_acl_policies', factory, table_transformer=transform_acl_list_output)
cli_storage_data_plane_command('storage queue policy update', 'azure.cli.command_modules.storage.custom#set_acl_policy', factory)
# queue message commands
cli_storage_data_plane_command('storage message put', 'azure.storage.queue.queueservice#QueueService.put_message', factory)
cli_storage_data_plane_command('storage message get', 'azure.storage.queue.queueservice#QueueService.get_messages', factory, table_transformer=transform_message_show)
cli_storage_data_plane_command('storage message peek', 'azure.storage.queue.queueservice#QueueService.peek_messages', factory, table_transformer=transform_message_show)
cli_storage_data_plane_command('storage message delete', 'azure.storage.queue.queueservice#QueueService.delete_message', factory, transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table)
cli_storage_data_plane_command('storage message clear', 'azure.storage.queue.queueservice#QueueService.clear_messages', factory)
cli_storage_data_plane_command('storage message update', 'azure.storage.queue.queueservice#QueueService.update_message', factory)
# cors commands
cli_storage_data_plane_command('storage cors list', 'azure.cli.command_modules.storage.custom#list_cors', None, transform=transform_cors_list_output)
cli_storage_data_plane_command('storage cors add', 'azure.cli.command_modules.storage.custom#add_cors', None)
cli_storage_data_plane_command('storage cors clear', 'azure.cli.command_modules.storage.custom#clear_cors', None)
# logging commands
cli_storage_data_plane_command('storage logging show', 'azure.cli.command_modules.storage.custom#get_logging', None, table_transformer=transform_logging_list_output)
cli_storage_data_plane_command('storage logging update', 'azure.cli.command_modules.storage.custom#set_logging', None)
# metrics commands
cli_storage_data_plane_command('storage metrics show', 'azure.cli.command_modules.storage.custom#get_metrics', None, table_transformer=transform_metrics_list_output)
cli_storage_data_plane_command('storage metrics update', 'azure.cli.command_modules.storage.custom#set_metrics', None)
|
import pickle
import random
import numpy as np
def load_counts(dataset):
fname = "{}_counts".format(dataset)
with open(fname, 'rb') as f:
loaded_cls_counts = pickle.load(f)
return loaded_cls_counts
def avg_cls_weights(dataset, num_classes):
all_class_freq = load_counts(dataset)
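# J is the assumed number of workers/clients whose per-class counts are averaged below.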
J = 40
keys = list(all_class_freq)
random.shuffle(keys)
averaging_weights = np.zeros((J, num_classes), dtype=np.float32)
for i in range(num_classes):
total_num_counts = 0
worker_class_counts = [0] * J
for j in range(J):
w = keys[j]
if i in all_class_freq[w].keys():
total_num_counts += all_class_freq[w][i]
worker_class_counts[j] = all_class_freq[w][i]
else:
total_num_counts += 0
worker_class_counts[j] = 0
averaging_weights[:, i] = np.array(worker_class_counts, dtype=np.float32) / total_num_counts
return averaging_weights, all_class_freq
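# Minimal usage sketch (illustrative; assumes a pickle file named "<dataset>_counts"
# exists and maps each worker key to a {class_id: count} dict):
#
#     weights, class_freq = avg_cls_weights("cifar10", num_classes=10)
#     # weights has shape (40, num_classes): one row of per-class averaging weights per worker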
|
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright 2013-2021 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
import functools
from hypothesis import strategies as hyst
from hypothesis import assume, given
from hypothesis.extra import numpy as hynp
import numpy as np
from scipy.stats._multivariate import random_correlation_gen
from pymor.analyticalproblems.functions import Function, ExpressionFunction, ConstantFunction
from pymor.core.config import config
from pymor.parameters.base import Mu
from pymor.vectorarrays.list import NumpyListVectorSpace
from pymor.vectorarrays.block import BlockVectorSpace
from pymor.vectorarrays.numpy import NumpyVectorSpace
if config.HAVE_FENICS:
import dolfin as df
from pymor.bindings.fenics import FenicsVectorSpace
if config.HAVE_DEALII:
from pymor_dealii.pymor.vectorarray import DealIIVectorSpace
if config.HAVE_DUNEGDT:
from pymor.bindings.dunegdt import DuneXTVectorSpace
if config.HAVE_NGSOLVE:
import ngsolve as ngs
import netgen.meshing as ngmsh
from pymor.bindings.ngsolve import NGSolveVectorSpace
# hypothesis will gladly fill all our RAM with vector arrays if it's not restricted.
MAX_VECTORARRAY_LENGTH = 102
hy_lengths = hyst.integers(min_value=0, max_value=MAX_VECTORARRAY_LENGTH)
# this is a legacy restriction, some tests will not work as expected when this is changed/unset
MAX_ARRAY_ELEMENT_ABSVALUE = 1
hy_float_array_elements = hyst.floats(allow_nan=False, allow_infinity=False,
min_value=-MAX_ARRAY_ELEMENT_ABSVALUE, max_value=MAX_ARRAY_ELEMENT_ABSVALUE)
# the magnitude restriction is also a legacy one
MAX_COMPLEX_MAGNITUDE = 2
hy_complex_array_elements = hyst.complex_numbers(allow_nan=False, allow_infinity=False,
max_magnitude=MAX_COMPLEX_MAGNITUDE)
hy_dtypes = hyst.sampled_from([np.float64, np.complex128])
@hyst.composite
def _hy_dims(draw, count, compatible):
dims = hyst.integers(min_value=0, max_value=34)
if compatible:
return draw(equal_tuples(dims, count))
dim_tuple = draw(hyst.tuples(*[dims for _ in range(count)]))
for d in range(1, count):
assume(dim_tuple[d] != dim_tuple[0])
return dim_tuple
def nothing(*args, **kwargs):
return hyst.nothing()
def _np_arrays(length, dim, dtype=None):
if dtype is None:
return hynp.arrays(dtype=np.float64, shape=(length, dim), elements=hy_float_array_elements) | \
hynp.arrays(dtype=np.complex128, shape=(length, dim), elements=hy_complex_array_elements)
if dtype is np.complex128:
return hynp.arrays(dtype=dtype, shape=(length, dim), elements=hy_complex_array_elements)
if dtype is np.float64:
return hynp.arrays(dtype=dtype, shape=(length, dim), elements=hy_float_array_elements)
raise RuntimeError(f'unsupported dtype={dtype}')
def _numpy_vector_spaces(draw, np_data_list, compatible, count, dims):
return [(NumpyVectorSpace(d), ar) for d, ar in zip(dims, np_data_list)]
def _numpy_list_vector_spaces(draw, np_data_list, compatible, count, dims):
return [(NumpyListVectorSpace(d), ar) for d, ar in zip(dims, np_data_list)]
def _block_vector_spaces(draw, np_data_list, compatible, count, dims):
ret = []
rr = draw(hyst.randoms())
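# _block_dims splits a total dimension d into a random list of positive block sizes that
# sum to d; these become the dimensions of the constituent NumpyVectorSpaces below.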
def _block_dims(d):
bd = []
while d > 1:
block_size = rr.randint(1, d)
bd.append(block_size)
d -= block_size
if d > 0:
bd.append(d)
return bd
for c, (d, ar) in enumerate(zip(dims, np_data_list)):
# only redraw after initial for (potentially) incompatible arrays
if c == 0 or (not compatible and c > 0):
block_dims = _block_dims(d)
constituent_spaces = [NumpyVectorSpace(dim) for dim in block_dims]
# TODO this needs to be relaxed again
assume(len(constituent_spaces))
ret.append((BlockVectorSpace(constituent_spaces), ar))
return ret
_other_vector_space_types = []
if config.HAVE_FENICS:
_FENICS_spaces = {}
def _fenics_vector_spaces(draw, np_data_list, compatible, count, dims):
ret = []
for d, ar in zip(dims, np_data_list):
assume(d > 1)
if d not in _FENICS_spaces:
_FENICS_spaces[d] = FenicsVectorSpace(df.FunctionSpace(df.UnitIntervalMesh(d - 1), 'Lagrange', 1))
ret.append((_FENICS_spaces[d], ar))
return ret
_other_vector_space_types.append('fenics')
if config.HAVE_NGSOLVE:
_NGSOLVE_spaces = {}
def _create_ngsolve_space(dim):
if dim not in _NGSOLVE_spaces:
mesh = ngmsh.Mesh(dim=1)
if dim > 0:
pids = []
for i in range(dim + 1):
pids.append(mesh.Add(ngmsh.MeshPoint(ngmsh.Pnt(i / dim, 0, 0))))
for i in range(dim):
mesh.Add(ngmsh.Element1D([pids[i], pids[i + 1]], index=1))
_NGSOLVE_spaces[dim] = NGSolveVectorSpace(ngs.L2(ngs.Mesh(mesh), order=0))
return _NGSOLVE_spaces[dim]
def _ngsolve_vector_spaces(draw, np_data_list, compatible, count, dims):
return [(_create_ngsolve_space(d), ar) for d, ar in zip(dims, np_data_list)]
_other_vector_space_types.append('ngsolve')
if config.HAVE_DEALII:
def _dealii_vector_spaces(draw, np_data_list, compatible, count, dims):
return [(DealIIVectorSpace(d), ar) for d, ar in zip(dims, np_data_list)]
_other_vector_space_types.append('dealii')
if config.HAVE_DUNEGDT:
def _dunegdt_vector_spaces(draw, np_data_list, compatible, count, dims):
return [(DuneXTVectorSpace(d), ar) for d, ar in zip(dims, np_data_list)]
_other_vector_space_types.append('dunegdt')
_picklable_vector_space_types = ['numpy', 'numpy_list', 'block']
@hyst.composite
def vector_arrays(draw, space_types, count=1, dtype=None, length=None, compatible=True):
dims = draw(_hy_dims(count, compatible))
dtype = dtype or draw(hy_dtypes)
lngs = draw(length or hyst.tuples(*[hy_lengths for _ in range(count)]))
np_data_list = [draw(_np_arrays(l, dim, dtype=dtype)) for l, dim in zip(lngs, dims)]
space_type = draw(hyst.sampled_from(space_types))
space_data = globals()[f'_{space_type}_vector_spaces'](draw, np_data_list, compatible, count, dims)
ret = [sp.from_numpy(d) for sp, d in space_data]
assume(len(ret))
if len(ret) == 1:
assert count == 1
# in test funcs where we only need one array this saves a line to access the single list
# element
return ret[0]
assert count > 1
return ret
def given_vector_arrays(which='all', count=1, dtype=None, length=None, compatible=True, index_strategy=None, **kwargs):
"""This decorator hides the combination details of given
the decorated function will be first wrapped in a |hypothesis.given| (with expanded `given_args`
and then in |pytest.mark.parametrize| with selected implementation names. The decorated test
function must still draw (which a vector_arrays or similar strategy) from the `data` argument in
the default case.
Parameters
----------
which
A list of implementation shortnames, or either of the special values "all" and "picklable".
kwargs
passed to `given` decorator as is, use for additional strategies
count
how many vector arrays to return (in a list), count=1 is special cased to just return the
array
dtype
dtype of the foundational numpy data the vector array is constructed from
length
a hypothesis.strategy how many vectors to generate in each vector array
compatible
if count > 1, this switch toggles generation of vector_arrays with compatible `dim`,
`length` and `dtype`
"""
@functools.wraps(given)
def inner_backend_decorator(func):
try:
use_imps = {'all': _picklable_vector_space_types + _other_vector_space_types,
'picklable': _picklable_vector_space_types}[which]
except KeyError:
use_imps = which
first_args = {}
if index_strategy:
arr_ind_strategy = index_strategy(vector_arrays(
count=count, dtype=dtype, length=length, compatible=compatible, space_types=use_imps))
first_args['vectors_and_indices'] = arr_ind_strategy
else:
arr_strategy = vector_arrays(count=count, dtype=dtype, length=length, compatible=compatible,
space_types=use_imps)
if count > 1:
first_args['vector_arrays'] = arr_strategy
else:
first_args['vector_array'] = arr_strategy
return given(**first_args, **kwargs)(func)
return inner_backend_decorator
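# Hedged usage sketch (test and variable names below are illustrative, not part of the suite):
#
#     @given_vector_arrays(count=2)
#     def test_pairwise(vector_arrays):
#         v1, v2 = vector_arrays
#         assert v1.dim == v2.dim  # compatible=True (the default) yields matching dims
#
#     @given_vector_arrays()  # count=1 injects a single array as `vector_array`
#     def test_single(vector_array):
#         assert len(vector_array) <= MAX_VECTORARRAY_LENGTH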
# TODO match st_valid_inds results to this
def valid_inds(v, length=None, random_module=None):
if length is None:
yield []
yield slice(None)
yield slice(0, len(v))
yield slice(0, 0)
yield slice(-3)
yield slice(0, len(v), 3)
yield slice(0, len(v)//2, 2)
yield list(range(-len(v), len(v)))
yield list(range(int(len(v)/2)))
yield list(range(len(v))) * 2
# TODO what's with the magic number here?
length = 32
if len(v) > 0:
for ind in [-len(v), 0, len(v) - 1]:
yield ind
if len(v) == length:
yield slice(None)
# this avoids managing random state "against" hypothesis when this function is used in a
# strategy
if random_module is None:
np.random.seed(len(v) * length)
yield list(np.random.randint(-len(v), len(v), size=length))
else:
if len(v) == 0:
yield slice(0, 0)
yield []
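# Example (illustrative): for a vector array of length 4 and length=None, valid_inds yields
# the empty list, several slices over the full range, a few index lists, and the scalar
# indices -4, 0 and 3, plus one random integer index list.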
@hyst.composite
def valid_indices(draw, array_strategy, length=None):
v = draw(array_strategy)
ints = hyst.integers(min_value=-len(v), max_value=max(len(v)-1, 0))
indices = hyst.nothing()
if length is None:
indices = indices | hyst.just([]) | hyst.lists(ints, max_size=2*len(v))
else:
indices = indices | hyst.slices(length)
if len(v) > 0:
inds = [-len(v), 0, len(v) - 1]
if len(v) == length:
inds.append(slice(None))
indices = indices | hyst.lists(ints, max_size=length)
else:
inds = []
if len(v) == 0:
inds.append(slice(0, 0))
indices = indices | hyst.sampled_from(inds)
return v, draw(indices)
# TODO match st_valid_inds_of_same_length results to this
def valid_inds_of_same_length(v1, v2, random_module=None):
if len(v1) == len(v2):
yield slice(None), slice(None)
yield list(range(len(v1))), list(range(len(v1)))
yield (slice(0, len(v1)),) * 2
yield (slice(0, 0),) * 2
yield (slice(-3),) * 2
yield (slice(0, len(v1), 3),) * 2
yield (slice(0, len(v1)//2, 2),) * 2
yield [], []
if len(v1) > 0 and len(v2) > 0:
yield 0, 0
yield len(v1) - 1, len(v2) - 1
yield -len(v1), -len(v2)
yield [0], 0
yield (list(range(min(len(v1), len(v2))//2)),) * 2
# this avoids managing random state "against" hypothesis when this function is used in a
# strategy
if random_module is None:
np.random.seed(len(v1) * len(v2))
for count in np.linspace(0, min(len(v1), len(v2)), 3).astype(int):
yield (list(np.random.randint(-len(v1), len(v1), size=count)),
list(np.random.randint(-len(v2), len(v2), size=count)))
yield slice(None), np.random.randint(-len(v2), len(v2), size=len(v1))
yield np.random.randint(-len(v1), len(v1), size=len(v2)), slice(None)
@hyst.composite
def st_valid_inds_of_same_length(draw, v1, v2):
len1, len2 = len(v1), len(v2)
ret = hyst.just(([], []))
# TODO we should include integer arrays here by chaining
# `| hynp.integer_array_indices(shape=(LEN_X,))`
if len1 == len2:
ints = hyst.integers(min_value=-len1, max_value=max(len1 - 1, 0))
slicer = hyst.slices(len1) | hyst.lists(ints, max_size=len1)
ret = ret | hyst.tuples(hyst.shared(slicer, key="st_valid_inds_of_same_length"),
hyst.shared(slicer, key="st_valid_inds_of_same_length"))
if len1 > 0 and len2 > 0:
mlen = min(len1, len2)
ints = hyst.integers(min_value=-mlen, max_value=max(mlen - 1, 0))
slicer = hyst.slices(mlen) | ints | hyst.lists(ints, max_size=mlen)
ret = ret | hyst.tuples(hyst.shared(slicer, key="st_valid_inds_of_same_length_uneven"),
hyst.shared(slicer, key="st_valid_inds_of_same_length_uneven"))
return draw(ret)
@hyst.composite
def st_scaling_value(draw, v1, v2=None):
v1 = draw(v1)
ints = hyst.integers(min_value=-1, max_value=23)
r1 = draw(ints | hyst.just(np.arange(len(v1))))
if v2:
v2 = draw(v2)
return v1, v2, r1, draw(ints | hyst.just(np.arange(len(v2))))
return v1, r1
# TODO match st_valid_inds_of_different_length results to this
def valid_inds_of_different_length(v1, v2, random_module):
    # note: this potentially yields no result at all if both inputs have length 0
if len(v1) != len(v2):
yield slice(None), slice(None)
yield list(range(len(v1))), list(range(len(v2)))
if len(v1) > 0 and len(v2) > 0:
if len(v1) > 1:
yield [0, 1], 0
yield [0, 1], [0]
yield [-1, 0, 1], [0]
yield slice(0, -1), []
if len(v2) > 1:
yield 0, [0, 1]
yield [0], [0, 1]
# this avoids managing random state "against" hypothesis when this function is used in a
# strategy
if random_module is None:
np.random.seed(len(v1) * len(v2))
for count1 in np.linspace(0, len(v1), 3).astype(int):
count2 = np.random.randint(0, len(v2))
if count2 == count1:
count2 += 1
if count2 == len(v2):
count2 -= 2
if count2 >= 0:
yield (list(np.random.randint(-len(v1), len(v1), size=count1)),
list(np.random.randint(-len(v2), len(v2), size=count2)))
@hyst.composite
def st_valid_inds_of_different_length(draw, v1, v2):
def _filter(x):
a, b = x
a_type, b_type = type(a), type(b)
if a_type != b_type:
return True # tuple + scalar index
if a_type == tuple:
return len(a) != len(b)
return False # both scalars => not of different length
len1, len2 = len(v1), len(v2)
# TODO we should include integer arrays here
val1 = hyst.slices(len1) # | hynp.integer_array_indices(shape=(len1,))
val2 = hyst.slices(len2) # | hynp.integer_array_indices(shape=(len1,))
ret = hyst.tuples(val1, val2).filter(_filter)
return draw(ret)
@hyst.composite
def st_valid_inds_of_same_or_different_length(draw, v1, v2):
return draw(st_valid_inds_of_same_length(v1, v2) | st_valid_inds_of_different_length(v1, v2))
@hyst.composite
def same_and_different_length(draw, array_strategy):
v = draw(array_strategy)
if isinstance(v, list):
return v, draw(st_valid_inds_of_same_or_different_length(*v))
return v, draw(st_valid_inds_of_same_or_different_length(v, v))
@hyst.composite
def pairs_same_length(draw, array_strategy):
v = draw(array_strategy)
if isinstance(v, list):
return v, draw(st_valid_inds_of_same_length(*v))
return v, draw(st_valid_inds_of_same_length(v, v))
@hyst.composite
def pairs_diff_length(draw, array_strategy):
v = draw(array_strategy)
if isinstance(v, list):
ind_list = draw(st_valid_inds_of_different_length(*v))
else:
ind_list = draw(st_valid_inds_of_different_length(v, v))
# the consuming tests do not work for None as index
assume(len(ind_list))
return v, ind_list
@hyst.composite
def pairs_both_lengths(draw, array_strategy):
return draw(hyst.one_of(pairs_same_length(array_strategy), pairs_diff_length(array_strategy)))
@hyst.composite
def invalid_indices(draw, array_strategy):
length = 42
v = draw(array_strategy)
assert not isinstance(v, list)
invalid_inds = (None, len(v), [len(v)], -len(v) - 1, [-len(v) - 1], [0, len(v)],
[-len(v) - 1] + [0, ] * (length - 1), list(range(length - 1)) + [len(v)])
return v, draw(hyst.sampled_from(invalid_inds))
@hyst.composite
def base_vector_arrays(draw, count=1, dtype=None, max_dim=100):
"""Strategy to generate linear independent |VectorArray| inputs for test functions
Parameters
----------
draw hypothesis control function object
count how many bases do you want
dtype dtype for the generated bases, defaults to `np.float_`
max_dim size limit for the generated
Returns
-------
a list of |VectorArray| linear-independent objects of same dim and length
"""
dtype = dtype or np.float_
# simplest way currently of getting a |VectorSpace| to construct our new arrays from
space_types = _picklable_vector_space_types + _other_vector_space_types
space = draw(vector_arrays(count=1, dtype=dtype, length=hyst.just((1,)), compatible=True, space_types=space_types)
.filter(lambda x: x[0].space.dim > 0 and x[0].space.dim < max_dim)).space
length = space.dim
# this lets hypothesis control np's random state too
random = draw(hyst.random_module())
# scipy performs this check although technically numpy accepts a different range
assume(0 <= random.seed < 2**32 - 1)
random_correlation = random_correlation_gen(random.seed)
def _eigs():
"""Sum must equal to `length` for the scipy construct method"""
min_eig, max_eig = 0.001, 1.
eigs = np.asarray((max_eig-min_eig)*np.random.random(length-1) + min_eig, dtype=float)
return np.append(eigs, [length - np.sum(eigs)])
if length > 1:
mat = [random_correlation.rvs(_eigs(), tol=1e-12) for _ in range(count)]
return [space.from_numpy(m) for m in mat]
else:
scalar = 4*np.random.random((1, 1))+0.1
return [space.from_numpy(scalar) for _ in range(count)]
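# A usage sketch (not part of the original module): drawing two linearly
# independent bases inside a hypothesis test; the test body is illustrative only.
#
#     @given(base_vector_arrays(count=2))
#     def test_bases_span_their_space(bases):
#         b1, b2 = bases
#         assert len(b1) == b1.space.dim and len(b2) == b2.space.dim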
@hyst.composite
def equal_tuples(draw, strategy, count):
val = draw(strategy)
return draw(hyst.tuples(*[hyst.just(val) for _ in range(count)]))
# stick to a few representative examples to avoid only seeing degenerate cases
# in the selected examples
mus = hyst.dictionaries(
keys=hyst.sampled_from(['t', 'foo', 'bar']),
values=hyst.sampled_from([
np.array([1.]),
np.array([1., 32., 3]),
ExpressionFunction('x+1', 1),
ExpressionFunction('[1., 0] * x + [0, 1.] * x**2', 1),
ConstantFunction(np.array([1., 2, 3]))
])
).filter(lambda mu: 't' not in mu or (not isinstance(mu['t'], Function) and len(mu['t']) == 1)).map(Mu)
|
########################################################################
# Author: NhaiHai Phan, Han Hu
# License: Apache 2.0
# source code snippets from: Tensorflow
########################################################################
'''
Loss function of SecureSGD
'''
import tensorflow as tf
def lossDPSGD(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). One-hot tensor
        of shape [batch_size, num_classes] (reduced to class indices internally via tf.argmax).
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.argmax(tf.cast(labels, tf.int64), 1)
print(labels)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
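# A usage sketch (assumes a TF1-style graph; shapes and the class count are
# illustrative only). The labels are expected one-hot and are converted to class
# indices inside lossDPSGD via tf.argmax:
#
#     logits = tf.placeholder(tf.float32, [None, 10])
#     labels = tf.placeholder(tf.float32, [None, 10])  # one-hot labels
#     total_loss = lossDPSGD(logits, labels)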
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Intent']
class Intent(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[str]] = None,
default_response_platforms: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
events: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
input_context_names: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
is_fallback: Optional[pulumi.Input[bool]] = None,
ml_disabled: Optional[pulumi.Input[bool]] = None,
parent_followup_intent_name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[float]] = None,
project: Optional[pulumi.Input[str]] = None,
reset_contexts: Optional[pulumi.Input[bool]] = None,
webhook_state: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Represents a Dialogflow intent. Intents convert a number of user expressions or patterns into an action. An action
is an extraction of a user command or sentence semantics.
To get more information about Intent, see:
* [API documentation](https://cloud.google.com/dialogflow/docs/reference/rest/v2/projects.agent.intents)
* How-to Guides
* [Official Documentation](https://cloud.google.com/dialogflow/docs/)
## Example Usage
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] action: The name of the action associated with the intent.
Note: The action name must not contain whitespaces.
:param pulumi.Input[List[pulumi.Input[str]]] default_response_platforms: The list of platforms for which the first responses will be copied from the messages in PLATFORM_UNSPECIFIED
(i.e. default platform).
Each value may be one of `FACEBOOK`, `SLACK`, `TELEGRAM`, `KIK`, `SKYPE`, `LINE`, `VIBER`, `ACTIONS_ON_GOOGLE`, and `GOOGLE_HANGOUTS`.
:param pulumi.Input[str] display_name: The name of this intent to be displayed on the console.
:param pulumi.Input[List[pulumi.Input[str]]] events: The collection of event names that trigger the intent. If the collection of input contexts is not empty, all of
the contexts must be present in the active user session for an event to trigger this intent. See the
[events reference](https://cloud.google.com/dialogflow/docs/events-overview) for more details.
:param pulumi.Input[List[pulumi.Input[str]]] input_context_names: The list of context names required for this intent to be triggered.
Format: projects/<Project ID>/agent/sessions/-/contexts/<Context ID>.
:param pulumi.Input[bool] is_fallback: Indicates whether this is a fallback intent.
:param pulumi.Input[bool] ml_disabled: Indicates whether Machine Learning is disabled for the intent.
Note: If mlDisabled setting is set to true, then this intent is not taken into account during inference in ML
ONLY match mode. Also, auto-markup in the UI is turned off.
:param pulumi.Input[str] parent_followup_intent_name: The unique identifier of the parent intent in the chain of followup intents.
Format: projects/<Project ID>/agent/intents/<Intent ID>.
:param pulumi.Input[float] priority: The priority of this intent. Higher numbers represent higher priorities.
- If the supplied value is unspecified or 0, the service translates the value to 500,000, which corresponds
to the Normal priority in the console.
- If the supplied value is negative, the intent is ignored in runtime detect intent requests.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[bool] reset_contexts: Indicates whether to delete all contexts in the current session when this intent is matched.
:param pulumi.Input[str] webhook_state: Indicates whether webhooks are enabled for the intent.
* WEBHOOK_STATE_ENABLED: Webhook is enabled in the agent and in the intent.
* WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING: Webhook is enabled in the agent and in the intent. Also, each slot
filling prompt is forwarded to the webhook.
Possible values are `WEBHOOK_STATE_ENABLED` and `WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING`.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['action'] = action
__props__['default_response_platforms'] = default_response_platforms
if display_name is None:
raise TypeError("Missing required property 'display_name'")
__props__['display_name'] = display_name
__props__['events'] = events
__props__['input_context_names'] = input_context_names
__props__['is_fallback'] = is_fallback
__props__['ml_disabled'] = ml_disabled
__props__['parent_followup_intent_name'] = parent_followup_intent_name
__props__['priority'] = priority
__props__['project'] = project
__props__['reset_contexts'] = reset_contexts
__props__['webhook_state'] = webhook_state
__props__['followup_intent_infos'] = None
__props__['name'] = None
__props__['root_followup_intent_name'] = None
super(Intent, __self__).__init__(
'gcp:diagflow/intent:Intent',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[str]] = None,
default_response_platforms: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
events: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
followup_intent_infos: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['IntentFollowupIntentInfoArgs']]]]] = None,
input_context_names: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
is_fallback: Optional[pulumi.Input[bool]] = None,
ml_disabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
parent_followup_intent_name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[float]] = None,
project: Optional[pulumi.Input[str]] = None,
reset_contexts: Optional[pulumi.Input[bool]] = None,
root_followup_intent_name: Optional[pulumi.Input[str]] = None,
webhook_state: Optional[pulumi.Input[str]] = None) -> 'Intent':
"""
Get an existing Intent resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] action: The name of the action associated with the intent.
Note: The action name must not contain whitespaces.
:param pulumi.Input[List[pulumi.Input[str]]] default_response_platforms: The list of platforms for which the first responses will be copied from the messages in PLATFORM_UNSPECIFIED
(i.e. default platform).
Each value may be one of `FACEBOOK`, `SLACK`, `TELEGRAM`, `KIK`, `SKYPE`, `LINE`, `VIBER`, `ACTIONS_ON_GOOGLE`, and `GOOGLE_HANGOUTS`.
:param pulumi.Input[str] display_name: The name of this intent to be displayed on the console.
:param pulumi.Input[List[pulumi.Input[str]]] events: The collection of event names that trigger the intent. If the collection of input contexts is not empty, all of
the contexts must be present in the active user session for an event to trigger this intent. See the
[events reference](https://cloud.google.com/dialogflow/docs/events-overview) for more details.
:param pulumi.Input[List[pulumi.Input[pulumi.InputType['IntentFollowupIntentInfoArgs']]]] followup_intent_infos: Information about all followup intents that have this intent as a direct or indirect parent. We populate this field only
in the output.
:param pulumi.Input[List[pulumi.Input[str]]] input_context_names: The list of context names required for this intent to be triggered.
Format: projects/<Project ID>/agent/sessions/-/contexts/<Context ID>.
:param pulumi.Input[bool] is_fallback: Indicates whether this is a fallback intent.
:param pulumi.Input[bool] ml_disabled: Indicates whether Machine Learning is disabled for the intent.
Note: If mlDisabled setting is set to true, then this intent is not taken into account during inference in ML
ONLY match mode. Also, auto-markup in the UI is turned off.
:param pulumi.Input[str] name: The unique identifier of this intent. Format: projects/<Project ID>/agent/intents/<Intent ID>.
:param pulumi.Input[str] parent_followup_intent_name: The unique identifier of the parent intent in the chain of followup intents.
Format: projects/<Project ID>/agent/intents/<Intent ID>.
:param pulumi.Input[float] priority: The priority of this intent. Higher numbers represent higher priorities.
- If the supplied value is unspecified or 0, the service translates the value to 500,000, which corresponds
to the Normal priority in the console.
- If the supplied value is negative, the intent is ignored in runtime detect intent requests.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[bool] reset_contexts: Indicates whether to delete all contexts in the current session when this intent is matched.
:param pulumi.Input[str] root_followup_intent_name: The unique identifier of the root intent in the chain of followup intents. It identifies the correct followup intents
chain for this intent. Format: projects/<Project ID>/agent/intents/<Intent ID>.
:param pulumi.Input[str] webhook_state: Indicates whether webhooks are enabled for the intent.
* WEBHOOK_STATE_ENABLED: Webhook is enabled in the agent and in the intent.
* WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING: Webhook is enabled in the agent and in the intent. Also, each slot
filling prompt is forwarded to the webhook.
Possible values are `WEBHOOK_STATE_ENABLED` and `WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["action"] = action
__props__["default_response_platforms"] = default_response_platforms
__props__["display_name"] = display_name
__props__["events"] = events
__props__["followup_intent_infos"] = followup_intent_infos
__props__["input_context_names"] = input_context_names
__props__["is_fallback"] = is_fallback
__props__["ml_disabled"] = ml_disabled
__props__["name"] = name
__props__["parent_followup_intent_name"] = parent_followup_intent_name
__props__["priority"] = priority
__props__["project"] = project
__props__["reset_contexts"] = reset_contexts
__props__["root_followup_intent_name"] = root_followup_intent_name
__props__["webhook_state"] = webhook_state
return Intent(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def action(self) -> pulumi.Output[str]:
"""
The name of the action associated with the intent.
Note: The action name must not contain whitespaces.
"""
return pulumi.get(self, "action")
@property
@pulumi.getter(name="defaultResponsePlatforms")
def default_response_platforms(self) -> pulumi.Output[Optional[List[str]]]:
"""
The list of platforms for which the first responses will be copied from the messages in PLATFORM_UNSPECIFIED
(i.e. default platform).
Each value may be one of `FACEBOOK`, `SLACK`, `TELEGRAM`, `KIK`, `SKYPE`, `LINE`, `VIBER`, `ACTIONS_ON_GOOGLE`, and `GOOGLE_HANGOUTS`.
"""
return pulumi.get(self, "default_response_platforms")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
The name of this intent to be displayed on the console.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def events(self) -> pulumi.Output[Optional[List[str]]]:
"""
The collection of event names that trigger the intent. If the collection of input contexts is not empty, all of
the contexts must be present in the active user session for an event to trigger this intent. See the
[events reference](https://cloud.google.com/dialogflow/docs/events-overview) for more details.
"""
return pulumi.get(self, "events")
@property
@pulumi.getter(name="followupIntentInfos")
def followup_intent_infos(self) -> pulumi.Output[List['outputs.IntentFollowupIntentInfo']]:
"""
Information about all followup intents that have this intent as a direct or indirect parent. We populate this field only
in the output.
"""
return pulumi.get(self, "followup_intent_infos")
@property
@pulumi.getter(name="inputContextNames")
def input_context_names(self) -> pulumi.Output[Optional[List[str]]]:
"""
The list of context names required for this intent to be triggered.
Format: projects/<Project ID>/agent/sessions/-/contexts/<Context ID>.
"""
return pulumi.get(self, "input_context_names")
@property
@pulumi.getter(name="isFallback")
def is_fallback(self) -> pulumi.Output[bool]:
"""
Indicates whether this is a fallback intent.
"""
return pulumi.get(self, "is_fallback")
@property
@pulumi.getter(name="mlDisabled")
def ml_disabled(self) -> pulumi.Output[bool]:
"""
Indicates whether Machine Learning is disabled for the intent.
Note: If mlDisabled setting is set to true, then this intent is not taken into account during inference in ML
ONLY match mode. Also, auto-markup in the UI is turned off.
"""
return pulumi.get(self, "ml_disabled")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The unique identifier of this intent. Format: projects/<Project ID>/agent/intents/<Intent ID>.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="parentFollowupIntentName")
def parent_followup_intent_name(self) -> pulumi.Output[str]:
"""
The unique identifier of the parent intent in the chain of followup intents.
Format: projects/<Project ID>/agent/intents/<Intent ID>.
"""
return pulumi.get(self, "parent_followup_intent_name")
@property
@pulumi.getter
def priority(self) -> pulumi.Output[float]:
"""
The priority of this intent. Higher numbers represent higher priorities.
- If the supplied value is unspecified or 0, the service translates the value to 500,000, which corresponds
to the Normal priority in the console.
- If the supplied value is negative, the intent is ignored in runtime detect intent requests.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter(name="resetContexts")
def reset_contexts(self) -> pulumi.Output[bool]:
"""
Indicates whether to delete all contexts in the current session when this intent is matched.
"""
return pulumi.get(self, "reset_contexts")
@property
@pulumi.getter(name="rootFollowupIntentName")
def root_followup_intent_name(self) -> pulumi.Output[str]:
"""
The unique identifier of the root intent in the chain of followup intents. It identifies the correct followup intents
chain for this intent. Format: projects/<Project ID>/agent/intents/<Intent ID>.
"""
return pulumi.get(self, "root_followup_intent_name")
@property
@pulumi.getter(name="webhookState")
def webhook_state(self) -> pulumi.Output[str]:
"""
Indicates whether webhooks are enabled for the intent.
* WEBHOOK_STATE_ENABLED: Webhook is enabled in the agent and in the intent.
* WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING: Webhook is enabled in the agent and in the intent. Also, each slot
filling prompt is forwarded to the webhook.
Possible values are `WEBHOOK_STATE_ENABLED` and `WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING`.
"""
return pulumi.get(self, "webhook_state")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
from django.apps import AppConfig
class DeviceConfig(AppConfig):
name = 'backend.device'
def ready(self):
import backend.device.signal_handlers # noqa
|
#!/usr/bin/env python3
##########################################################################################
# Author: Jared L. Ostmeyer
# Date Started: 2017-01-01 (This is my new year's resolution)
# Purpose: Train recurrent neural network
# License: For legal information see LICENSE in the home directory.
##########################################################################################
##########################################################################################
# Libraries
##########################################################################################
import os
import numpy as np
import tensorflow as tf
import dataplumbing as dp
##########################################################################################
# Settings
##########################################################################################
# Model settings
#
num_features = dp.train.num_features
max_steps = dp.train.max_length
num_cells = 250
num_classes = dp.train.num_classes
activation = tf.nn.tanh
initialization_factor = 1.0
# Training parameters
#
num_iterations = 20000
batch_size = 100
learning_rate = 0.001
##########################################################################################
# Model
##########################################################################################
# Inputs
#
x = tf.placeholder(tf.float32, [batch_size, max_steps, num_features]) # Features
l = tf.placeholder(tf.int32, [batch_size]) # Sequence length
y = tf.placeholder(tf.float32, [batch_size]) # Labels
# Trainable parameters
#
s = tf.Variable(tf.random_normal([num_cells], stddev=np.sqrt(initialization_factor))) # Determines initial state
W_g = tf.Variable(
tf.random_uniform(
[num_features+num_cells, num_cells],
minval=-np.sqrt(6.0*initialization_factor/(num_features+2.0*num_cells)),
maxval=np.sqrt(6.0*initialization_factor/(num_features+2.0*num_cells))
)
)
b_g = tf.Variable(tf.zeros([num_cells]))
W_u = tf.Variable(
tf.random_uniform(
[num_features, num_cells],
minval=-np.sqrt(6.0*initialization_factor/(num_features+num_cells)),
maxval=np.sqrt(6.0*initialization_factor/(num_features+num_cells))
)
)
b_u = tf.Variable(tf.zeros([num_cells]))
W_a = tf.Variable(
tf.random_uniform(
[num_features+num_cells, num_cells],
minval=-np.sqrt(6.0*initialization_factor/(num_features+2.0*num_cells)),
maxval=np.sqrt(6.0*initialization_factor/(num_features+2.0*num_cells))
)
)
W_o = tf.Variable(
tf.random_uniform(
[num_cells, num_classes],
minval=-np.sqrt(6.0*initialization_factor/(num_cells+num_classes)),
maxval=np.sqrt(6.0*initialization_factor/(num_cells+num_classes))
)
)
b_o = tf.Variable(tf.zeros([num_classes]))
# Internal states
#
n = tf.zeros([batch_size, num_cells])
d = tf.zeros([batch_size, num_cells])
h = tf.zeros([batch_size, num_cells])
a_max = tf.fill([batch_size, num_cells], -1E38) # Start off with lowest number possible
# Define model
#
h += activation(tf.expand_dims(s, 0))
for i in range(max_steps):
x_step = x[:,i,:]
xh_join = tf.concat(axis=1, values=[x_step, h]) # Combine the features and hidden state into one tensor
u = tf.matmul(x_step, W_u)+b_u
g = tf.matmul(xh_join, W_g)+b_g
a = tf.matmul(xh_join, W_a) # The bias term when factored out of the numerator and denominator cancels and is unnecessary
z = tf.multiply(u, tf.nn.tanh(g))
a_newmax = tf.maximum(a_max, a)
exp_diff = tf.exp(a_max-a_newmax)
exp_scaled = tf.exp(a-a_newmax)
n = tf.multiply(n, exp_diff)+tf.multiply(z, exp_scaled) # Numerically stable update of numerator
d = tf.multiply(d, exp_diff)+exp_scaled # Numerically stable update of denominator
h_new = activation(tf.div(n, d))
a_max = a_newmax
h = tf.where(tf.greater(l, i), h_new, h) # Use new hidden state only if the sequence length has not been exceeded
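# Note on the loop above (comment added for clarity; it restates what the code
# already does): the hidden state is a recurrent weighted average
#   h_t = f( sum_i exp(a_i) * z_i / sum_i exp(a_i) )
# maintained incrementally. Rescaling the running numerator n and denominator d
# by exp(a_max - a_newmax) whenever a larger attention value appears keeps the
# exponentials bounded, which makes the update numerically stable for long sequences.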
ly = tf.matmul(h, W_o)+b_o
ly_flat = tf.reshape(ly, [batch_size])
py = tf.nn.sigmoid(ly_flat)
##########################################################################################
# Optimizer/Analyzer
##########################################################################################
# Cost function and optimizer
#
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=ly_flat, labels=y)) # Cross-entropy cost function
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Evaluate performance
#
correct = tf.equal(tf.round(py), tf.round(y))
accuracy = 100.0*tf.reduce_mean(tf.cast(correct, tf.float32))
##########################################################################################
# Train
##########################################################################################
# Operation to initialize session
#
initializer = tf.global_variables_initializer()
# Open session
#
with tf.Session() as session:
# Initialize variables
#
session.run(initializer)
    # Each training iteration processes one batch
#
for iteration in range(num_iterations):
# Grab a batch of training data
#
xs, ls, ys = dp.train.batch(batch_size)
feed = {x: xs, l: ls, y: ys}
# Update parameters
#
out = session.run((cost, accuracy, optimizer), feed_dict=feed)
print('Iteration:', iteration, 'Dataset:', 'train', 'Cost:', out[0]/np.log(2.0), 'Accuracy:', out[1])
# Periodically run model on test data
#
if iteration%100 == 0:
# Grab a batch of test data
#
xs, ls, ys = dp.test.batch(batch_size)
feed = {x: xs, l: ls, y: ys}
# Run model
#
out = session.run((cost, accuracy), feed_dict=feed)
print('Iteration:', iteration, 'Dataset:', 'test', 'Cost:', out[0]/np.log(2.0), 'Accuracy:', out[1])
# Save the trained model
#
os.makedirs('bin', exist_ok=True)
saver = tf.train.Saver()
saver.save(session, 'bin/train.ckpt')
|
import os
import re
import string
import sys
import numpy as np
import nltk
import xml.etree.ElementTree as ET
from nltk.tokenize import TweetTokenizer
tknzr = TweetTokenizer()
from nltk.stem import PorterStemmer
ps = PorterStemmer()
from nltk.corpus import stopwords
from collections import Counter
import collections
import time
from time import mktime
from datetime import datetime
from collections import defaultdict
def list_duplicates(seq):
tally = defaultdict(list)
for i,item in enumerate(seq):
tally[item].append(i)
return ((key,locs) for key,locs in tally.items()
if len(locs)>1)
def add_id_hours(IDs,timezone):
print (IDs[0])
sumTimezone=int()
for j in range(1, len(IDs), 1):
sumTimezone = sumTimezone + timezone[int(IDs[j])]
return sumTimezone
def check_id(IDs,dataset1):
print (IDs[0])
sumbags=Counter()
for j in range(1, len(IDs), 1):
sumbags = sumbags+dataset1[int(IDs[j])]
return sumbags
def depression_diagnosed(ID, risk_train):
for i in range (0,len(risk_train),1):
if ID==risk_train[i][0]:
a=risk_train[i][1]
return ID,a
def total_posts(ID, Var_train):
for i in range (0,len(Var_train),1):
if ID==Var_train[i][0]:
a=Var_train[i][1]
return ID,a
def convAlph2Num(sent):
alphArray = list(string.ascii_lowercase)
alphSet = set(alphArray)
sentArray = list(sent.lower())
x = ''
for u in sentArray:
if u in alphSet:
u = alphArray.index(u) + 1
u=str(u)
x=x+u
return x
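# Illustration (hypothetical input): convAlph2Num("abc") returns "123", i.e.
# each lowercase letter is replaced by its 1-based position in the alphabet and
# the results are concatenated into one string.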
import time
from time import mktime
from datetime import datetime
def getTimeCat(time1):
# extract time categories
xml_str = ET.tostring(time1).decode()
b=xml_str[18]+xml_str[19];
b=int(b)
#print (b)
if (b>=0 and b<8):
timecat = 1 #night
elif (b>=8 and b<16):
timecat = 2 #working hours
elif (b>=16 and b<24):
timecat = 3 #evening
return timecat
|
#!/usr/bin/python3
import _mysql
import db_config
try:
conn = _mysql.connect(db_config.db_host,db_config.db_user,db_config.db_password,db_config.db_name)
except Exception as e:
print(e)
#-------------------------------------------------DROP------------------------------------------------------------
print("\n--------------------------------------DROP-payee_list---------------------------------------")
query_drop_table = "DROP TABLE payee_list"
try:
print("Query: ",query_drop_table)
conn.query(query_drop_table)
print("Result: Successfully dropped table.")
except Exception as e:
print(e)
#-------------------------------------------------CREATE------------------------------------------------------------
print("\n--------------------------------------CREATE-payee_list---------------------------------------")
query_create_payee_list = '''CREATE TABLE payee_list (
id INT NOT NULL AUTO_INCREMENT,
payee_name VARCHAR(45) NOT NULL,
owner_account INT NOT NULL,
payee_account INT NOT NULL,
payee_bank VARCHAR(45) NOT NULL,
payee_branch VARCHAR(45) NOT NULL,
payee_ifsc_code VARCHAR(10) NOT NULL,
PRIMARY KEY (id));'''
try:
print("Query: ",query_create_payee_list)
conn.query(query_create_payee_list)
print("Result: Successfully executed.")
except Exception as e:
print("Error occured!");
print(e)
#---------------------------------------------------INSERT----------------------------------------------------------
print("\n--------------------------------------INSERT-payee_list---------------------------------------")
try:
sql_insert1 = "INSERT INTO payee_list(payee_name,owner_account,payee_account,payee_bank,payee_branch,payee_ifsc_code)"
sql_insert1 += " VALUES('Anthony', 20001, 20002, 'PNB', 'Panjab','PNB123456')"
conn.query(sql_insert1)
sql_insert2 = "INSERT INTO payee_list(payee_name,owner_account,payee_account,payee_bank,payee_branch,payee_ifsc_code)"
sql_insert2 += " VALUES('Chandan', 20002, 20001, 'SBI', 'Bangalore','SBI123456')"
conn.query(sql_insert2)
print("Result: Successfully inserted.")
except Exception as e:
print("Insert error!")
print(e)
print("\n--------------------------------------DONE---------------------------------------")
conn.close()
|
from django.shortcuts import render
from .models import Student
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def student_view(request, id):
print(type(id))
obj = Student.objects.get(StudentID=id)
context = {
"object" : obj
}
return render(request, "student/detstu.html", context)
|
import json
import argparse
from detectron2.data.build import (
get_detection_dataset_dicts,
)
def generate(args):
datasets = []
for d in args.datasets.split(','):
if d == '':
continue
datasets.append(d)
dataset_dicts = get_detection_dataset_dicts(datasets)
    print('len of dataset:', len(dataset_dicts))
try:
with open(args.random_file,'r') as f:
dic=json.load(f)
except:
dic={}
dic[str(args.random_percent)] = {}
seeds = [int(i) for i in args.random_seeds.split(',')]
for i in range(10):
arr = generate_supervised_seed(
dataset_dicts,
args.random_percent,
seeds[i]
)
print(len(arr))
dic[str(args.random_percent)][str(i)] = arr
with open(args.random_file,'w') as f:
f.write(json.dumps(dic))
def generate_supervised_seed(
dataset_dicts, SupPercent, seed
):
num_all = len(dataset_dicts)
num_label = int(SupPercent / 100.0 * num_all)
arr = range(num_all)
import random
random.seed(seed)
return random.sample(arr,num_label)
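# Illustration (numbers are hypothetical): with 100,000 images in the combined
# dataset and SupPercent == 10.0, num_label is 10,000 and the call returns
# 10,000 distinct image indices sampled reproducibly from the given seed.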
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Generate random data partitions')
parser.add_argument("--random-file",type=str,default='dataseed/COCO_supervision.txt')
parser.add_argument("--random-percent",type=float,default=10.0)
parser.add_argument("--datasets",type=str,default='coco_2017_train,') # default='voc_2007_trainval,voc_2012_trainval,'
parser.add_argument("--random-seeds",type=str,default="0,1,2,3,4,5,6,7,8,9") # Need to set 10 random number seeds for experiments, divided by ','
args = parser.parse_args()
generate(args)
|
import logging
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.views.generic import ListView, TemplateView
from rdmo.core.exports import XMLResponse
from rdmo.core.utils import get_model_field_meta, render_to_format
from rdmo.core.views import CSRFViewMixin, ModelPermissionMixin
from .models import Condition
from .renderers import ConditionRenderer
from .serializers.export import ConditionExportSerializer
log = logging.getLogger(__name__)
class ConditionsView(ModelPermissionMixin, CSRFViewMixin, TemplateView):
template_name = 'conditions/conditions.html'
permission_required = 'conditions.view_condition'
def get_context_data(self, **kwargs):
context = super(ConditionsView, self).get_context_data(**kwargs)
context['export_formats'] = settings.EXPORT_FORMATS
context['meta'] = {
'Condition': get_model_field_meta(Condition)
}
return context
class ConditionsExportView(ModelPermissionMixin, ListView):
model = Condition
context_object_name = 'conditions'
permission_required = 'conditions.view_condition'
def render_to_response(self, context, **response_kwargs):
format = self.kwargs.get('format')
if format == 'xml':
serializer = ConditionExportSerializer(context['conditions'], many=True)
xml = ConditionRenderer().render(serializer.data)
return XMLResponse(xml, name='conditions')
else:
return render_to_format(self.request, format, _('Conditions'), 'conditions/conditions_export.html', context)
|
from ads import Ads, Project, Service, ServiceSet, Profile, BadSelectorException
|
import json
from matchbook.utils import check_status_code, check_call_complete
class BaseEndpoint(object):
def __init__(self, parent):
"""
:param parent: API client.
"""
self.client = parent
def request(self, request_method, urn, method, params={}, data={}, target=None, session=None):
"""
:param request_method: type of request to be sent.
:param urn: matchbook urn to append to url specified.
:param method: Matchbook method to be used.
:param params: Params to be used in request.
:param data: data to be sent in request body.
:param target: target to get from returned data, if none returns full response.
:param session: Requests session to be used, reduces latency.
"""
session = session or self.client.session
data['session-token'] = self.client.session_token
data['user-id'] = self.client.user_id
request_url = '%s%s%s' % (self.client.url, urn, method)
response = session.request(
request_method, request_url, params=params, data=json.dumps(data), headers=self.client.headers
)
check_status_code(response)
if ('per-page' in params.keys()) and target:
resp_data = response.json().get(target, [])
while not check_call_complete(response.json()):
params['offset'] += response.json().get('total', 0) + 1
response = session.request(
request_method, request_url, params=params, data=json.dumps(data), headers=self.client.headers
)
resp_data += response.json().get(target, [])
return resp_data
else:
return response
@staticmethod
def process_response(response_json, resource, date_time_sent, date_time_received=None):
"""
:param response_json: Response in json format
:param resource: Resource data structure
:param date_time_sent: Date time sent
:param date_time_received: Date time received response from request
"""
if isinstance(response_json, list):
return [
resource(date_time_sent=date_time_sent, TIMESTAMP=date_time_received.strftime('%Y-%m-%d %H:%M:%S.%f'),
**x).json() for x in response_json]
else:
response_result = response_json.get('result', response_json)
if isinstance(response_result, list):
return [resource(date_time_sent=date_time_sent,
TIMESTAMP=date_time_received.strftime('%Y-%m-%d %H:%M:%S.%f'),
**x).json() for x in response_result]
else:
return resource(date_time_sent=date_time_sent,
TIMESTAMP=date_time_received.strftime('%Y-%m-%d %H:%M:%S.%f'),
**response_result).json()
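# A usage sketch (not part of the original module): how a subclass endpoint might
# call BaseEndpoint.request(). The urn, method, params and target below are
# illustrative only, not a documented Matchbook API path.
#
#     events = self.request('GET', 'edge/rest/', 'events',
#                           params={'per-page': 20, 'offset': 0},
#                           target='events')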
|
import numpy as np
import open3d as o3d
from .line_mesh import LineMesh
from polylidar.polylidarutil import COLOR_PALETTE
from polylidar import MatrixDouble, MatrixInt, create_tri_mesh_copy
EXTRINSICS = None
MAX_POLYS = 10
ORANGE = (255 / 255, 188 / 255, 0)
GREEN = (0, 255 / 255, 0)
def open_3d_mesh_to_trimesh(mesh: o3d.geometry.TriangleMesh):
triangles = np.asarray(mesh.triangles)
vertices = np.asarray(mesh.vertices)
triangles = np.ascontiguousarray(triangles)
vertices_mat = MatrixDouble(vertices)
triangles_mat = MatrixInt(triangles)
triangles_mat_np = np.asarray(triangles_mat)
tri_mesh = create_tri_mesh_copy(vertices_mat, triangles_mat)
return tri_mesh
def create_open_3d_mesh_from_tri_mesh(tri_mesh):
"""Create an Open3D Mesh given a Polylidar TriMesh"""
triangles = np.asarray(tri_mesh.triangles)
vertices = np.asarray(tri_mesh.vertices)
triangle_normals = np.asarray(tri_mesh.triangle_normals)
return create_open_3d_mesh(triangles, vertices, triangle_normals, counter_clock_wise=tri_mesh.counter_clock_wise)
def create_open_3d_mesh(triangles, points, triangle_normals=None, color=COLOR_PALETTE[0], counter_clock_wise=True):
"""Create an Open3D Mesh given triangles vertices
Arguments:
triangles {ndarray} -- Triangles array
points {ndarray} -- Points array
Keyword Arguments:
        color {list} -- RGB Color (default: {COLOR_PALETTE[0]})
Returns:
mesh -- Open3D Mesh
"""
mesh_o3d = o3d.geometry.TriangleMesh()
if points.ndim == 1:
points = points.reshape((int(points.shape[0] / 3), 3))
if triangles.ndim == 1:
triangles = triangles.reshape((int(triangles.shape[0] / 3), 3))
# Open 3D expects triangles to be counter clockwise
if not counter_clock_wise:
triangles = np.ascontiguousarray(np.flip(triangles, 1))
mesh_o3d.triangles = o3d.utility.Vector3iVector(triangles)
mask = np.isnan(points).any(axis=1) # I think that we need this with open3d 0.10.0
points[mask, :] = [0,0,0]
mesh_o3d.vertices = o3d.utility.Vector3dVector(points)
if triangle_normals is None:
mesh_o3d.compute_vertex_normals()
mesh_o3d.compute_triangle_normals()
elif triangle_normals.ndim == 1:
triangle_normals_ = triangle_normals.reshape((int(triangle_normals.shape[0] / 3), 3))
mesh_o3d.triangle_normals = o3d.utility.Vector3dVector(triangle_normals_)
else:
mesh_o3d.triangle_normals = o3d.utility.Vector3dVector(triangle_normals)
mesh_o3d.paint_uniform_color(color)
mesh_o3d.compute_vertex_normals()
return mesh_o3d
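# A usage sketch (illustrative values): build an Open3D mesh for a single triangle
# using the helper above; normals are computed automatically when none are given.
#
#     pts = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
#     tris = np.array([[0, 1, 2]])
#     mesh = create_open_3d_mesh(tris, pts)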
def flatten(l): return [item for sublist in l for item in sublist]
def update_points(pcd, pc):
pcd.points = o3d.utility.Vector3dVector(pc)
def set_line(line_set, points, lines, colors):
line_set.points = o3d.utility.Vector3dVector(points)
line_set.lines = o3d.utility.Vector2iVector(lines)
line_set.colors = o3d.utility.Vector3dVector(colors)
def construct_grid(size=10, n=10, color=[0.5, 0.5, 0.5], plane='xy', plane_offset=-1, translate=[0, 0, 0]):
grid_ls = o3d.geometry.LineSet()
my_grid = make_grid(size=size, n=n, color=color, plane=plane, plane_offset=plane_offset, translate=translate)
set_line(grid_ls, *my_grid)
return grid_ls
def make_grid(size=10, n=10, color=[0.5, 0.5, 0.5], plane='xy', plane_offset=-1, translate=[0, 0, 0]):
"""draw a grid as a line set"""
# lineset = o3d.geometry.LineSet()
s = size / float(n)
s2 = 0.5 * size
points = []
for i in range(0, n + 1):
x = -s2 + i * s
points.append([x, -s2, plane_offset])
points.append([x, s2, plane_offset])
for i in range(0, n + 1):
z = -s2 + i * s
points.append([-s2, z, plane_offset])
points.append([s2, z, plane_offset])
points = np.array(points)
if plane == 'xz':
points[:, [2, 1]] = points[:, [1, 2]]
points = points + translate
n_points = points.shape[0]
lines = [[i, i + 1] for i in range(0, n_points - 1, 2)]
colors = [list(color)] * (n_points - 1)
return points, lines, colors
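# A usage sketch (illustrative values): a 20x20 grid in the xz-plane at y = 0,
# returned as an Open3D LineSet via construct_grid() above.
#
#     grid = construct_grid(size=20, n=20, plane='xz', plane_offset=0.0)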
def clear_polys(all_polys, vis):
for line_mesh in all_polys:
line_mesh.remove_line(vis)
return []
def handle_shapes(vis, planes, obstacles, all_polys, line_radius=0.15):
all_polys = clear_polys(all_polys, vis)
for plane, _ in planes:
points = np.array(plane.exterior)
line_mesh = LineMesh(points, colors=GREEN, radius=line_radius)
line_mesh.add_line(vis)
all_polys.append(line_mesh)
for plane, _ in obstacles:
points = np.array(plane.exterior)
line_mesh = LineMesh(points, colors=ORANGE, radius=line_radius)
line_mesh.add_line(vis)
all_polys.append(line_mesh)
return all_polys
def create_lines(planes, obstacles, line_radius=0.15, rotate_func=None):
all_polys = []
for plane, _ in planes:
points = np.array(plane.exterior)
if rotate_func:
points = rotate_func(points)
line_mesh = LineMesh(points, colors=GREEN, radius=line_radius)
all_polys.append(line_mesh)
for plane, _ in obstacles:
points = np.array(plane.exterior)
if rotate_func:
points = rotate_func(points)
line_mesh = LineMesh(points, colors=ORANGE, radius=line_radius)
all_polys.append(line_mesh)
return all_polys
def get_extrinsics(vis):
ctr = vis.get_view_control()
camera_params = ctr.convert_to_pinhole_camera_parameters()
return camera_params.extrinsic
def set_initial_view(vis, extrinsics=[EXTRINSICS]):
ctr = vis.get_view_control()
camera_params = ctr.convert_to_pinhole_camera_parameters()
camera_params.extrinsic = extrinsics
ctr.convert_from_pinhole_camera_parameters(camera_params)
|
import csv
import os
import sys
def make_rename_dict(rewrite_path):
rename_mapping = {}
for line in open(rewrite_path, encoding='utf-16'):
try:
k, v = line.split(" was replaced by ")
rename_mapping[k[5:]] = v.strip()
except ValueError:
continue
print('{} roles renamed'.format(len(rename_mapping)))
return rename_mapping
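# Illustration (role names are hypothetical): make_rename_dict expects lines that
# look roughly like
#   Role OldRoleName was replaced by NewRoleName
# It strips the first five characters of the left-hand side (e.g. a "Role " prefix)
# and yields {"OldRoleName": "NewRoleName"}; lines without " was replaced by " are skipped.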
def update_roles(role_path, rename_dict, new_path=None):
if not new_path:
new_path = role_path
updated = 0
tmp = 'roles.tmp'
reader = csv.DictReader(open(role_path), dialect='excel-tab')
writer = csv.DictWriter(open(tmp, 'w'), reader.fieldnames, dialect='excel-tab')
writer.writeheader()
for row in reader:
if row['name'] in rename_dict:
updated += 1
if row['aliases'] == 'null':
row['aliases'] = rename_dict[row['name']]
else:
row['aliases'] += ";" + rename_dict[row['name']]
writer.writerow(row)
print("{} role aliases updated".format(updated))
os.rename(tmp, new_path)
if __name__ == "__main__":
roles_path = os.path.dirname(sys.argv[0])+"/../../Annotations/Roles.tsv"
role_rewrite_path = sys.argv[1]
rename_dict = make_rename_dict(role_rewrite_path)
update_roles(roles_path, rename_dict)
|
"""
This script wraps the execution of a console program so that a prompt appears
after the console program ends (so that the console does not close as soon as
the user program finishes).
Usage:
heconsole program [options]
Example:
$ heconsole python /path/to/a/script.py --spam eggs
$ ...
    $ Process terminated with exit code 0
    $ Press ENTER to close this window...
$
"""
import sys
import subprocess
def main():
""" heconsole main entrypoint """
ret = 0
if '--help' in sys.argv or '-h' in sys.argv or len(sys.argv) == 1:
print(__doc__)
else:
program = sys.argv[1]
args = sys.argv[2:]
try:
if args:
ret = subprocess.call([program] + args)
else:
ret = subprocess.call([program])
except (OSError, subprocess.CalledProcessError) as e:
print('failed to start process: program = %r, args = %r. '
'Error=%s' % (program, args, str(e)))
print('\nProcess terminated with exit code %d' % ret)
prompt = 'Press ENTER to close this window...'
input(prompt)
sys.exit(ret)
if __name__ == '__main__':
main()
|
import pytz
import dateutil.relativedelta as relativedelta
import dateutil.rrule as rrule
from datetime import datetime, timedelta
from django.utils import timezone
from django.utils.text import slugify
from django.core.management.base import BaseCommand
from events.models import Event, Location
class Command(BaseCommand):
    help = 'Create all PYATL events for a calendar year.'
TWITCH_LOCATION_NAME = 'PyATL Official Twitch Channel'
TWITCH_LOCATION_DESCRIPTION_HTML = '''<p>
The event will be live streamed through the official
<a href="https://twitch.com/pyatl">PyATL Twitch Channel</a>.
</p>
'''
DISCORD_LOCATION_NAME = 'PyATL Official Discord'
DISCORD_LOCATION_DESCRIPTION_HTML = '''<p>
The event will be live on our Jam Session Discord channel. Join us at the <a href="https://discord.gg/5UBnR3P">PyATL Discord here.</a>
</p>
'''
TWITCH_EVENT_NAME_CODING_LIVE_STREAM = 'Live coding stream on PyATL\'s official Twitch channel'
    TWITCH_EVENT_SHORT_DESCRIPTION = 'Twitch streaming session on the official PyATL Twitch channel.'
TWITCH_EVENT_DESCRIPTION_HTML = '''
<p>
Coding live streams about Python and related technologies.
You can also watch the event after it is finished by visiting
our official Twitch PyATL channel.
</p>
<p>
The event will be live streamed through the official
<a href="https://twitch.com/pyatl">PyATL Twitch Channel</a>.
</p>
<p>
<iframe
src="https://player.twitch.tv/?channel=pyatl&parent=pyatl.dev"
height="360"
width="640"
frameborder="0"
scrolling="no"
allowfullscreen="true">
</iframe>
</p>
'''
JAM_SESSION_EVENT_NAME = 'PyATL Jam Session'
JAM_SESSION_EVENT_SHORT_DESCRIPTION = 'Jam Session meetings are intended to be hands-on, collaborative experiences with your fellow PyATL members.'
JAM_SESSION_EVENT_DESCRIPTION_HTML = '''
<p>
Note: all Jam Sessions will be held online on our <a
href="https://discord.gg/5UBnR3P">Discord instance</a> until
further notice. We hope that will be able to meet all of you face
to face later! The event will be hosted on the PyATL Discord
channel; the link will be displayed after you RSVP.
<p>
<p>
What is a Jam Session?
https://github.com/pyatl/jam-sessions/wiki
</p>
<p>
Jam Session meetings are intended to be hands-on, collaborative
experiences with your fellow PyATL members. Programmers of all
experience levels are welcome! We will be meeting from 7pm to 10pm
to work together on a coding challenge or on any project you
want. We recommend bringing a laptop with you, optionally with
Python installed (but the coding challenge does not require it).
</p>
<p>
Each month we provide a coding puzzle or exercise that is designed
to be accessible to novices to Python, but that also provide
additional challenges for more advanced users. We use the online
platform at https://www.cyber-dojo.org/ to run the challenge and
share our solutions with each other.
</p>
<p>
The Jam session is also a safe space to work and ask for help on
any Python project. Bring your own hobby projects, a work project,
your new startup idea - whatever you like. Or come along and see
others are working on, and sit down with them! The organizers will
be there a bit early to help introduce people, organize, and make
sure everyone is comfortable.
</p>'''
MEETUP_EVENT_NAME = 'Python Atlanta Meetup'
MEETUP_EVENT_SHORT_DESCRIPTION = 'Python Atlanta Meetup Monthly Event'
MEETUP_EVENT_DESCRIPTION_HTML = '''
<p>All in-person events are on hold until further notice.</p>
<p>We will be holding events live on our Twitch channel.</p>
<p>
<iframe
src="https://player.twitch.tv/?channel=pyatl&parent=pyatl.dev"
height="360"
width="640"
frameborder="0"
scrolling="no"
allowfullscreen="true">
</iframe>
</p>
<p>The meeting starts at 7:30PM in the North Avenue room. Some of us get together before the meeting between 6:00 and 6:30 to have dinner and talk.</p>
<p>Excellent Talks Scheduled: To be announced</p>
'''
def create_event(self, name, short_description, description, start, end, location):
event, created = Event.objects.get_or_create(
name=name,
slug=slugify(name),
short_description=short_description,
description=description,
start=start,
end=end,
published=True,
location=location)
return event
def handle(self, *args, **options):
# get the current year
        # start date - January 1st of the current year (at 19:00 UTC)
# end date - last day of the year
# rules = every tuesday and friday. Every 1st and 2nd thursday
# for each of the rules, we need to create a specific event.
now = timezone.now()
start_date = datetime(now.year, 1, 1, 19, 0, 0).replace(tzinfo=pytz.utc)
end_date = datetime(now.year, 12, 31, 19, 0, 0).replace(tzinfo=pytz.utc)
# location for twitch related events
twitch_location = Location.objects.create(
name=self.TWITCH_LOCATION_NAME,
slug=slugify(self.TWITCH_LOCATION_NAME),
description=self.TWITCH_LOCATION_DESCRIPTION_HTML,)
discord_location = Location.objects.create(
name=self.DISCORD_LOCATION_NAME,
slug=slugify(self.DISCORD_LOCATION_NAME),
description=self.DISCORD_LOCATION_DESCRIPTION_HTML,)
tuesdays = rrule.rrule(
rrule.WEEKLY,
byweekday=relativedelta.TU,
dtstart=start_date)
fridays = rrule.rrule(
rrule.WEEKLY,
byweekday=relativedelta.FR,
dtstart=start_date)
thursdays = rrule.rrule(
rrule.WEEKLY,
byweekday=relativedelta.TH,
dtstart=start_date)
tuesdays_days = tuesdays.between(
start_date,
end_date,
inc=True)
fridays_days = fridays.between(
start_date,
end_date,
inc=True)
thursdays_days = thursdays.between(
start_date,
end_date,
inc=True)
# first get the first and second thursdays of the month
first_and_second_thursdays = []
for thursday in thursdays_days:
if thursday.day <= 14:
first_and_second_thursdays.append(thursday)
        # then get the first thursdays
first_thursdays = []
for thursday in first_and_second_thursdays:
if thursday.day <= 7:
first_thursdays.append(thursday)
                # remove the first thursday so we only end up
                # with second thursdays on this list
first_and_second_thursdays.remove(thursday)
        # reassign for readability
second_thursdays = first_and_second_thursdays
# streaming on tuesdays
for day in tuesdays_days:
self.create_event(
self.TWITCH_EVENT_NAME_CODING_LIVE_STREAM,
self.TWITCH_EVENT_SHORT_DESCRIPTION,
self.TWITCH_EVENT_DESCRIPTION_HTML,
day,
day + timedelta(hours=2),
twitch_location)
# streaming on fridays
for day in fridays_days:
self.create_event(
self.TWITCH_EVENT_NAME_CODING_LIVE_STREAM,
self.TWITCH_EVENT_SHORT_DESCRIPTION,
self.TWITCH_EVENT_DESCRIPTION_HTML,
day,
day + timedelta(hours=2),
twitch_location)
# jam session
for day in first_thursdays:
self.create_event(
self.JAM_SESSION_EVENT_NAME,
self.JAM_SESSION_EVENT_SHORT_DESCRIPTION,
self.JAM_SESSION_EVENT_DESCRIPTION_HTML,
day,
day + timedelta(hours=2),
discord_location)
# meetup
for day in second_thursdays:
self.create_event(
self.MEETUP_EVENT_NAME,
self.MEETUP_EVENT_SHORT_DESCRIPTION,
self.MEETUP_EVENT_DESCRIPTION_HTML,
day,
day + timedelta(hours=2),
discord_location)
self.stdout.write(self.style.SUCCESS('Events Created'))
|
"""
Return the min/max of the input list of arrays
"""
from typing import List
import numpy
def minmax(*xs):
# type: (List) -> List
"""Return the min/max of the input list of arrays
Example:
minmax(X) returns [min(X), max(X)]
minmax(X, Y) returns [[min(X), max(X)], [min(Y), max(Y)]]
Args:
xs (list): variable number of inputs
Returns:
a list of min/max values
"""
def tail(xs): return xs[1:]
def aux(xs, accum):
if len(xs) == 0:
if len(accum) == 1:
return accum[0]
else:
return accum
else:
return aux(
tail(xs),
accum + [[numpy.amin(xs[0]), numpy.amax(xs[0])]])
return aux(xs, [])
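if __name__ == "__main__":
    # Tiny self-check of the documented behaviour (illustrative values only):
    # a single input yields [min, max]; several inputs yield one [min, max] pair per input.
    print(minmax([3, 1, 2]))
    print(minmax([3, 1, 2], [5, 4]))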
|
"""Push Repository Layer"""
import multiprocessing
import base64
from PiCN.Layers.ICNLayer.ContentStore import BaseContentStore
from PiCN.Packets import Packet, Interest, Nack, NackReason, Content
from PiCN.Processes import LayerProcess
from PiCN.Layers.NFNLayer.Parser import DefaultNFNParser
from PiCN.Layers.NFNLayer.Parser.AST import *
def is_publish_expression(ast) -> bool:
if ast.type == AST_FuncCall:
return ast._element == "/remote/publish" and len(ast.params) == 2 and isinstance(ast.params[0], AST_Name) \
and isinstance(ast.params[1], AST_String)
return False
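# Illustration (the data name and payload are hypothetical): an NFN expression
# such as /remote/publish(/some/data/name,"base64:...") parses to an AST_FuncCall
# whose element is "/remote/publish" with an AST_Name and an AST_String parameter,
# which is exactly the shape this predicate accepts.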
class PushRepositoryLayer(LayerProcess):
"""Push Repository Layer"""
def __init__(self, cs: BaseContentStore = None, log_level=255):
super().__init__(logger_name="PushRepoLyr", log_level=log_level)
self.cs = cs
def data_from_higher(self, to_lower: multiprocessing.Queue, to_higher: multiprocessing.Queue, data):
pass # this is already the highest layer
def data_from_lower(self, to_lower: multiprocessing.Queue, to_higher: multiprocessing.Queue, data):
if len(data) != 2:
self.logger.warning("PushRepo Layer expects to receive [face_id, Interest] from lower layer")
return
if type(data[0]) != int:
self.logger.warning("PushRepo Layer expects to receive [face_id, Interest] from lower layer")
return
if not isinstance(data[1], Packet):
self.logger.warning("PushRepo Layer expects to receive [face_id, Interest] from lower layer. Drop.")
return
face_id = data[0]
interest = data[1]
self.handle_interest_from_lower(face_id, interest, to_lower)
def handle_interest_from_lower(self, face_id: int, interest: Interest, to_lower: multiprocessing.Queue):
self.logger.info("Incoming interest: " + interest.name.to_string())
# incoming interest is nfn expression
if interest.name.string_components[-1] == "NFN":
try:
parser = DefaultNFNParser()
nfn_str, prepended_name = parser.network_name_to_nfn_str(interest.name)
ast = parser.parse(nfn_str)
# assert that valid publish expression
if is_publish_expression(ast):
# store to database
data_name = ast.params[0]._element
payload = ast.params[1]._element
try:
payload = base64.b64decode(payload[7:])
self.logger.info("Payload is base64 encoded. Decoded.")
except:
self.logger.info("Invalid publish expression. The payload could not be decoded.")
nack = Nack(interest.name, reason=NackReason.COMP_NOT_PARSED, interest=interest)
self.queue_to_lower.put([face_id, nack])
return
self.cs.add_content_object(Content(data_name, payload))
self.logger.info("Add to database: " + data_name)
# reply confirmation
confirmation = Content(interest.name, "ok")
to_lower.put([face_id, confirmation])
else:
self.logger.info("Invalid publish expression. Wrong format.")
nack = Nack(interest.name, reason=NackReason.COMP_NOT_PARSED, interest=interest)
self.queue_to_lower.put([face_id, nack])
except:
self.logger.info("Invalid publish expression.")
nack = Nack(interest.name, reason=NackReason.COMP_NOT_PARSED, interest=interest)
self.queue_to_lower.put([face_id, nack])
# incoming interest is data request
else:
db_entry = self.cs.find_content_object(interest.name)
if db_entry is not None:
self.logger.info("Found in database")
to_lower.put([face_id, db_entry.content])
return
else:
self.logger.info("Not found in database")
nack = Nack(interest.name, NackReason.NO_CONTENT, interest)
to_lower.put([face_id, nack])
return
|
'''App.py - front end using dash bootstrap components
July 2, 2020 Peter Koppelman'''
import sqlite3
import dash as dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
import sys
from apps import layout, callbacks, reference
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP], \
suppress_callback_exceptions=True)
# the style arguments for the sidebar. We use position:fixed and a fixed width
SIDEBAR_STYLE = {
"position": "fixed",
"top": 0,
"left": 0,
"bottom": 0,
"width": "18rem",
"padding": "2rem 1rem",
"background-color": "#f8f9fa",
}
# the styles for the main content position it to the right of the sidebar and
# add some padding.
CONTENT_STYLE = {
"margin-left": "18rem",
"margin-right": "2rem",
"padding": "2rem 1rem",
}
TEXT_ALIGN_CENTER = {
'text-align': 'center'
}
HEADER_STYLE = {
'justify-content': 'center',
'display': 'block',
'text-align': 'center',
'align-items': 'center',
'margin-left': '18rem',
'background': 'rgb(0,191,255,.6)'
}
sidebar = html.Div([
html.Div([
html.H4("Robo Investing Tool", className="display-5"),
html.Hr(),
dbc.Nav([
dbc.NavLink("Welcome", href="/page-1", id="page-1-link"),
dbc.NavLink("Login", href="/page-2", id="page-2-link"),
dbc.NavLink("Open an account", href="/page-3", id="page-3-link"),
dbc.NavLink("Create a sample portfolio", href="/page-4", id="page-4-link"),
dbc.NavLink("Contact Us", href="/page-5", id="page-5-link"),
dbc.NavLink("FAQ", href="/page-6", id="page-6-link"),
dbc.NavLink('-------------------------------'),
dbc.NavLink("Employee Transaction", href="/page-7", id="page-7-link"),
],
vertical=True,
pills=True),
], style=SIDEBAR_STYLE,
),
html.Div([
html.H2('The Shore-Koppelman Group'),
html.H3('Robo-Investing Tool'),
], style = HEADER_STYLE
),
]
)
content = html.Div(id="page-content", style=CONTENT_STYLE)
column_names = []
df_portfolio = pd.DataFrame(columns = column_names)
callbacks.sample_portfolio(app, df_portfolio)
# record = pd.DataFrame(columns = column_names)
callbacks.login(app)
callbacks.open_account(app)
callbacks.contact_layout(app)
try:
connect = sqlite3.connect(reference.database)
cursor = connect.cursor()
except sqlite3.OperationalError:
print('There was a problem opening up the roboinvest database')
sys.exit('There was a problem opening up the roboinvest database')
### get info for dropdowns used in journal_data_entry
# Customer name and customer id
cursor.execute( \
'SELECT \
CASE \
when Middle_initial is Null \
then First_name || " " || Last_name \
else First_name || " " || Middle_initial || " " || Last_name \
END name, \
Cust_Id \
FROM \
Customer_Master')
cust = cursor.fetchall()
cust_info = []
for i in enumerate(cust):
my_dict = dict()
my_dict['label'] = i[1][0]
my_dict['value'] = i[1][1]
cust_info.append(my_dict)
## Employee last name and id number
cursor.execute( \
'SELECT \
Last_name, \
ee_id \
FROM \
Employee_Master')
ee = cursor.fetchall()
ee_info = []
for i in enumerate(ee):
my_dict = dict()
my_dict['label'] = i[1][0]
my_dict['value'] = i[1][1]
ee_info.append(my_dict)
# get valid account numbers for each customer
cursor.execute( \
'SELECT \
customer_master.cust_id, \
account_master.account_number \
FROM \
customer_master \
Inner Join account_master On account_master.cust_id = Customer_Master.cust_id')
acct = cursor.fetchall()
acct_info = []
for i in enumerate(acct):
my_dict = dict()
my_dict['label'] = i[1][0]
my_dict['value'] = i[1][1]
acct_info.append(my_dict)
# Close the cursor. Send data to the journal_data_entry callback and screen.
cursor.close()
callbacks.journal_data_entry(app, cust_info, ee_info, acct_info)
content = html.Div(id="page-content", style=CONTENT_STYLE)
app.layout = html.Div([
dcc.Location(id="url", refresh = False), sidebar, content
])
# this callback uses the current pathname to set the active state of the
# corresponding nav link to true, allowing users to see which page they are on
@app.callback(
[Output(f"page-{i}-link", "active") for i in range(1, 8)],
[Input("url", "pathname")])
def toggle_active_links(pathname):
'''Treat page 1 as the homepage / index'''
if pathname == "/":
return True, False, False, False, False, False, False
return [pathname == f"/page-{i}" for i in range(1, 8)]
@app.callback(Output("page-content", "children"),
[Input("url", "pathname")])
def render_page_content(pathname):
'''call layouts from sidebar'''
if pathname in ['/', '/page-1']:
return layout.welcome()
elif pathname == "/page-2":
return layout.login()
elif pathname == "/page-3":
return layout.open_account()
elif pathname == "/page-4":
return layout.sample_portfolio_layout(app, df_portfolio)
# return layout.sample_portfolio_layout()
elif pathname == "/page-5":
return layout.contact_layout()
elif pathname == '/page-6':
return layout.faq()
elif pathname == '/page-7':
# return layout.journal_data_entry(app, cust_info, ee_info, acct_info)
return layout.journal_data_entry(cust_info, ee_info, acct_info)
else:
return dbc.Jumbotron([
html.H1("404: Not found", className="text-danger"),
html.Hr(),
html.P(f"The pathname {pathname} was not recognized...")]
)
if __name__ == "__main__":
app.run_server(debug=True)
|
import pygame
import requests
import openpyxl
book = openpyxl.load_workbook('Canteen-Copy.xlsx', data_only=True) # get data from excel file
from mergesort import *
from html.parser import HTMLParser
#get data from each sheet
worksheet_A = book.get_sheet_by_name('Canteen_A')
worksheet_B = book.get_sheet_by_name('Canteen_B')
worksheet_1 = book.get_sheet_by_name('Canteen_1')
worksheet_2 = book.get_sheet_by_name('Canteen_2')
worksheet_4 = book.get_sheet_by_name('Canteen_4')
worksheet_9 = book.get_sheet_by_name('Canteen_9')
worksheet_11 = book.get_sheet_by_name('Canteen_11')
worksheet_13 = book.get_sheet_by_name('Canteen_13')
worksheet_14 = book.get_sheet_by_name('Canteen_14')
worksheet_16 = book.get_sheet_by_name('Canteen_16')
worksheet_NIE = book.get_sheet_by_name('Canteen_NIE')
worksheet_NorthHill = book.get_sheet_by_name('Canteen_NorthHill')
class Canteen():
def __init__(self,name, location_on_map, real_location, seat_capacity, time_open, phone):
self.name = name
self.stalls = [] # list of Stall objects
self.location_on_map = location_on_map
self.real_location = real_location
self.seat_capacity = seat_capacity
self.time_open = time_open
self.phone = phone
type_stall_name = ['Others', 'Western Cuisine', 'Mixed Rice', 'Indian Cuisine','Beverages & Desserts',\
'Chinese Cuisine', 'Vegetarian Food', 'Mala']
class Stall():
def __init__(self, name, type_stall):
self.name = name
self.menu = []
self.type_stall = type_stall
Canteen_A = Canteen("Canteen_A", (1.347016,103.680244),"North Spine Plaza, 76 Nanyang Drive", 1838, "7am-3pm", 64658588)
Canteen_B = Canteen("Canteen_B", (1.342447,103.682390)," South Spine, 50 Nanyang Avenue", 1050,"7am-3pm", 67900355)
Canteen_1 = Canteen("Canteen_1", (1.346571,103.686025),"Hall 1, 21 Nanyang Circle", 310, "7am-9pm", 63343033)
Canteen_2 = Canteen("Canteen_2", (1.348347,103.685456), "Hall 2, 35 Students Walk", 446, "7am-9pm", 63343033)
Canteen_4 = Canteen("Canteen_4", (1.344263,103.685321), "10 Nanyang Drive NTU Hall 4", 303, "7am-9pm", 68998600)
Canteen_9 = Canteen("Canteen_9", (1.352256,103.685257), "Hall 9, 50 Nanyang Avenue", 293,"7am-9pm",96923456)
Canteen_11 = Canteen("Canteen_11", (1.354904,103.686476), "Hall 11, 21 Nanyang Avenue", 210,"7am-9pm", 97866726)
Canteen_13 = Canteen("Canteen_13", (1.351716,103.681076), "Hall 13, 32 Nanyang Crescent", 210,"7am-9pm", 98510908)
Canteen_14 = Canteen("Canteen_14", (1.352718,103.682166), "Hall 14, 34 Nanyang Crescent", 270, "7am-9pm", 81127239)
Canteen_16 = Canteen("Canteen_16", (1.350296,103.680923), "Hall 16, 50 Nanyang Walk", 304, "7am-9pm",94505893)
Canteen_NIE = Canteen("Canteen_NIE", (1.348749,103.677622),"1 Nanyang Walk", 405, "7am-9pm", 67903888)
Canteen_NorthHill = Canteen("Canteen_NorthHill", (1.354422,103.688176), "NorthHill, 60 Nanyang Crescent", 440,"7am-9pm", 85080232)
list_worksheet = [worksheet_1, worksheet_11, worksheet_13, worksheet_14, worksheet_16, worksheet_2, worksheet_4,\
worksheet_9, worksheet_A, worksheet_B, worksheet_NIE, worksheet_NorthHill]
list_Canteen = [Canteen_1, Canteen_11, Canteen_13, Canteen_14, Canteen_16, Canteen_2, Canteen_4,\
Canteen_9, Canteen_A, Canteen_B, Canteen_NIE, Canteen_NorthHill]
for k in range(len(list_Canteen)):
row = row_stall = 3
try:
while True:
name_stall = list_worksheet[k].cell(row = row_stall, column = 3).value
type_stall = list_worksheet[k].cell(row = row_stall, column = 2).value
if name_stall != None:
list_Canteen[k].stalls.append(Stall(name_stall, type_stall))
row_stall += 1
if row_stall == list_worksheet[k].max_row:
break
except IndexError:
pass
for i in list_Canteen[k].stalls:
try:
menu = []
while list_worksheet[k].cell(row = row, column = 4).value != None:
menu.append((list_worksheet[k].cell(row =row, column = 4).value, list_worksheet[k].cell(row = row,column = 5).value, list_worksheet[k].cell(row = row, column = 8).value))
row += 1
i.menu = menu
row += 1
if list_worksheet[k].max_row == row:
break
except IndexError:
pass
def reload_data(canteen_name, stall_name):
global list_worksheet, list_Canteen
book = openpyxl.load_workbook('Canteen-Copy.xlsx', data_only=True)
for i in range(len(list_worksheet)):
if list_worksheet[i].title == canteen_name:
list_worksheet[i] = book.get_sheet_by_name(str(canteen_name))
row = 3
for k in list_Canteen[i].stalls:
if k.name == stall_name:
try:
menu = []
while list_worksheet[i].cell(row = row, column = 3).value != stall_name:
row += 1
while list_worksheet[i].cell(row = row, column = 4).value != None:
menu.append((list_worksheet[i].cell(row =row, column = 4).value, list_worksheet[i].cell(row = row,column = 5).value, list_worksheet[i].cell(row = row, column = 8).value))
row += 1
k.menu = menu
row += 1
except Exception:
pass
break
break
'''for i in range(len(list_Canteen)): #use bubblesort to sort the canteen by name
swap = 0
for k in range(len(list_Canteen)-1-i):
if list_Canteen[k].name > list_Canteen[k+1].name:
list_Canteen[k] ,list_Canteen[k+1] = list_Canteen[k+1], list_Canteen[k]
list_worksheet[k], list_worksheet[k+1] = list_worksheet[k+1], list_worksheet[k]
swap =1
if swap == 0:
break'''
for i in list_Canteen: #use bubble sort to sort the food stalls by name
for k in range(len(i.stalls)):
swap = 0
for j in range(len(i.stalls) - 1 - k):
if i.stalls[j].name > i.stalls[j+1].name:
i.stalls[j] ,i.stalls[j+1] = i.stalls[j+1], i.stalls[j]
swap = 1
if swap == 0:
break
for i in list_Canteen: # use bubble sort to sort the dishes in each food stall by name
for k in i.stalls:
for j in range(len(k.menu)):
swap = 0
for t in range(len(k.menu) - 1 - j):
if k.menu[t][0] > k.menu[t+1][0]:
k.menu[t] ,k.menu[t+1] = k.menu[t+1], k.menu[t]
swap = 1
if swap == 0:
break
def search_food_by_name(food_name, price_sort, rank_sort): #find the food by searching name
global list_Canteen
information = []
for i in list_Canteen:
for k in i.stalls:
for j in k.menu:
if j[0] == food_name:
information.append((i,k.name, j[1], j[2]))
# i is the Canteen object, k.name is the stall name, j[1] is the price of the food, j[2] is the rank of the food
if price_sort == 1:
for i in range(len(information) - 1):
swap = 0
for k in range(len(information)-1-i):
if information[k][2] > information[k+1][2]:
information[k], information[k+1] = information[k+1], information[k]
swap =1
elif rank_sort == 1:
for i in range(len(information) - 1):
swap = 0
for k in range(len(information)-1-i):
if information[k][3] > information[k+1][3]:
information[k], information[k+1] = information[k+1], information[k]
swap =1
return information
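# Illustrative call (the dish name is made up): search_food_by_name("Chicken Rice", 1, 0)
# would return (canteen object, stall name, price, rank) tuples for every stall selling
# that dish, ordered by ascending price because price_sort is 1.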
#this part is to search food by some initial characters input from user
# this list contains all the name of food
all_food_name = []
for i in list_Canteen:
for k in i.stalls:
for j in k.menu:
if j[0] not in all_food_name:
all_food_name.append(j[0])
all_food_name = mergesort(all_food_name)
def search_foodname_by_characters(characters):
global all_food_name
available_food = []
characters = characters.lower()
t = k =0
crashed = False
while len(available_food) < 11 and not crashed:
for i in range(t, len(all_food_name)):
k = i
if characters == all_food_name[i].lower()[:len(characters)] and all_food_name[i] not in available_food:
t = i + 1
available_food.append(all_food_name[i])
break
if k == len(all_food_name) - 1:
crashed = True
t = 0
crashed = False
while len(available_food) < 11 and not crashed:
for i in range(t, len(all_food_name)):
k = i
if characters in all_food_name[i].lower() and all_food_name[i] not in available_food:
t = i + 1
available_food.append(all_food_name[i])
break
if k == len(all_food_name) - 1:
crashed = True
return available_food[:10]
#this function searches the given stall's menu for food within the provided price range and, optionally, with a given star rating
def search_food_by_price(price_of_food, name_of_stall_to_search, number_of_star):
# price_of_food is a tuple containing the minimum and maximum food price
global list_Canteen
list_of_food_in_stall = name_of_stall_to_search.menu # this list contains the menu of stall
food_satisfy_price = []
if number_of_star >0:
for i in list_of_food_in_stall:
if price_of_food[0] < float(i[1]) < price_of_food[1] and number_of_star == i[2]:
food_satisfy_price.append(i)
else:
for i in list_of_food_in_stall:
try:
if price_of_food[0] < float(i[1]) < price_of_food[1]:
food_satisfy_price.append(i)
except Exception:
continue
return food_satisfy_price
def search_stalls_by_name(stall_name):
global list_Canteen
canteen_have_this_stall = []
for i in list_Canteen:
for k in i.stalls:
if k.name == stall_name:
canteen_have_this_stall.append(i.name)
break
return canteen_have_this_stall
def search_by_type_stall(type_stall_input):
global location_type_stall, list_Canteen
for i in location_type_stall:
if i.name == type_stall_input:
return i.location
def check_character(a,b):
a = a.lower()
letter_check = [chr(97 + i) for i in range(26)] + [str(i) for i in range(10)]
letter_a = [ i for i in a if i in letter_check]
letter_b = [ i for i in b.lower() if i in letter_check]
if letter_a == letter_b:
return True
return False
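# Illustrative checks (names are made up): check_character("Mc Donald's", "mcdonalds")
# returns True because both sides reduce to the same lowercase letters and digits,
# while check_character("Canteen 1", "Canteen 2") returns False.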
def search_for_stall_name(stall_name, min_value, max_value, rank):
result =[]
for i in list_Canteen:
for k in i.stalls:
if k.type_stall == stall_name:
for j in k.menu:
try:
if min_value<= j[1] <= max_value and j[2] == rank:
result.append((i, k.name,j[0],j[1] ))
except Exception:
continue
return result
type_of_food_image = []
for i in range(1,9):
try:
image = pygame.image.load("icon %s.png"%(i))
image = pygame.transform.smoothscale(image, (198,114))
type_of_food_image.append(image)
except Exception:
image = pygame.image.load("food.jpg")
image = pygame.transform.scale(image, (198,114))
type_of_food_image.append(image)
# this class is used to extract data from html instructions of google map
class MyHTMLParser(HTMLParser):
def __init__(self):
super().__init__()
self.my_data = []
def handle_data(self, data):
self.my_data.append(data)
def return_data(self):
return self.my_data
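# Minimal usage sketch (the HTML snippet is made up): feeding markup collects only
# the text nodes, which is how tag-laden instructions can be reduced to plain text.
#   demo_parser = MyHTMLParser()
#   demo_parser.feed("<b>Turn left</b> onto Nanyang Drive")
#   demo_parser.return_data()  # ['Turn left', ' onto Nanyang Drive']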
|
import os
import sys
def get_screen_count_gnome():
import gi
gi.require_version('Gdk','3.0')
from gi.repository import Gdk
d = Gdk.Display()
return d.get_n_monitors()
def get_screen_count_x():
try:
# Try python-xlib
from Xlib import X, display
except ImportError:
# Try xrandr app
bash = 'xrandr -q | grep Screen | wc -l'
return int(os.popen(bash).read())
d = display.Display()
return d.screen_count()
def get_screen_count_win32():
import win32api
return len(win32api.EnumDisplayMonitors())
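# A possible dispatcher tying the three helpers together. This is a sketch only and
# assumes a sys.platform check is enough to pick the right backend on the target machines.
def get_screen_count():
    if sys.platform.startswith("win"):
        return get_screen_count_win32()
    try:
        return get_screen_count_gnome()
    except Exception:
        return get_screen_count_x()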
|
import bcrypt
from .string_encoding import encode_string
def hash_password(password):
"""Hash a password.
Parameters
----------
password : str or bytes
Human readable password.
Returns
-------
hashed_password : bytes
The hashed password.
"""
return bcrypt.hashpw(encode_string(password), bcrypt.gensalt())
def check_password(password, hashed_password):
"""Check if a password is the same than the hashed password.
Parameters
----------
password : str or bytes
Human readable password.
hashed_password : str or bytes
The hashed password.
Returns
-------
is_same_password : bool
Return True if the two passwords are identical.
"""
return bcrypt.checkpw(encode_string(password),
encode_string(hashed_password))
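# Usage sketch (the password literal is an arbitrary example, not a real credential):
#   hashed = hash_password("correct horse battery staple")
#   check_password("correct horse battery staple", hashed)  # -> True
#   check_password("wrong guess", hashed)                    # -> False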
|
import pytest
import tensorflow as tf
from tefla.core.layers import fully_connected
@pytest.fixture(autouse=True)
def clean_graph():
tf.reset_default_graph()
def test_trainable_true():
x = tf.placeholder(tf.float32, [1, 10, 10, 3])
x = fully_connected(x, 15, is_training=True, reuse=False, name='fc1', trainable=True)
trainable_vars = [v.name for v in tf.trainable_variables()]
assert 'fc1/W:0' in trainable_vars
assert 'fc1/b:0' in trainable_vars
def test_trainable_false():
x = tf.placeholder(tf.float32, [1, 10, 10, 3])
x = fully_connected(x, 15, is_training=True, reuse=False, name='fc1', trainable=False)
trainable_vars = [v.name for v in tf.trainable_variables()]
assert 'fc1/W:0' not in trainable_vars
assert 'fc1/b:0' not in trainable_vars
if __name__ == '__main__':
pytest.main([__file__])
|
from time import sleep
from random import randint
from math import hypot
from os import system
winsoundimport = False
try:
from winsound import Beep
winsoundimport = True
except ImportError:
pass
def ConfigureCmd():
system("echo off")
system("color 0a")
system("cls")
ConfigureCmd()
#Agents&Target setup
AgentSmith = [randint(1, 100), randint(1, 100)]
Agent1 = [randint(1, 100), randint(1, 100)]
Agent2 = [randint(1, 100), randint(1, 100)]
Agent3 = [randint(1, 100), randint(1, 100)]
Agent4 = [randint(1, 100), randint(1, 100)]
Target = [randint(1, 100), randint(1, 100)]
def transmit(subject1: list, subject2: list) -> None:
sleep(hypot(abs(subject1[0]-subject2[0]), abs(subject1[1]-subject2[1]))/10)
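# Example (illustrative coordinates): transmit([0, 0], [30, 40]) sleeps
# hypot(30, 40) / 10 = 5 seconds, so more distant agents take proportionally
# longer to "receive" the location.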
def Setup():
print(f"Intel is searching the Target{Target}")
sleep(randint(2, 5))
print()
print(f"Intel found the Target{Target}")
sleep(2)
print()
print(f"Intel is now transmitting the location of the Target{Target} to AgentSmith{AgentSmith}")
sleep(randint(1, 5))
print()
print(f"AgentSmith{AgentSmith} received the location of the Target{Target}")
sleep(2)
print()
print(f"AgentSmith{AgentSmith} is now Transmitting the location of the Target{Target} to Agent1{Agent1}")
transmit(AgentSmith, Agent1)
print()
print(f"AgentSmith{AgentSmith} is now Transmitting the location of the Target{Target} to Agent2{Agent2}")
transmit(AgentSmith, Agent2)
print()
print(f"AgentSmith{AgentSmith} is now Transmitting the location of the Target{Target} to Agent3{Agent3}")
transmit(AgentSmith, Agent3)
print()
print(f"AgentSmith{AgentSmith} is now Transmitting the location of the Target{Target} to Agent4{Agent4}")
transmit(AgentSmith, Agent4)
print()
print("All Agents Are Good To Go")
def Hunt():
print()
print("Agents started the Hunt")
print()
while True:
print(f"AgentSmith: {AgentSmith}")
print(f"Agent1 : {Agent1}")
print(f"Agent2 : {Agent2}")
print(f"Agent3 : {Agent3}")
print(f"Agent4 : {Agent4}")
print(f"Target : {Target}")
print()
if AgentSmith == Target:
print(f"AgentSmith({AgentSmith[0]}, {AgentSmith[1]}) got the Target({Target[0]}, {Target[1]})")
break
elif Agent1 == Target:
print(f"Agent1({Agent1[0]}, {Agent1[1]}) got the Target({Target[0]}, {Target[1]})")
break
elif Agent2 == Target:
print(f"Agent2({Agent2[0]}, {Agent2[1]}) got the Target({Target[0]}, {Target[1]})")
break
elif Agent3 == Target:
print(f"Agent3({Agent3[0]}, {Agent3[1]}) got the Target({Target[0]}, {Target[1]})")
break
elif Agent4 == Target:
print(f"Agent4({Agent4[0]}, {Agent4[1]}) got the Target({Target[0]}, {Target[1]})")
break
else:
for i in range(0, 2):
if AgentSmith[0] < Target[0]:
AgentSmith[0] += 1
if AgentSmith[0] > Target[0]:
AgentSmith[0] -= 1
if AgentSmith[1] < Target[1]:
AgentSmith[1] += 1
if AgentSmith[1] > Target[1]:
AgentSmith[1] -= 1
if Agent1[0] < Target[0]:
Agent1[0] += 1
if Agent1[0] > Target[0]:
Agent1[0] -= 1
if Agent1[1] < Target[1]:
Agent1[1] += 1
if Agent1[1] > Target[1]:
Agent1[1] -= 1
if Agent2[0] < Target[0]:
Agent2[0] += 1
if Agent2[0] > Target[0]:
Agent2[0] -= 1
if Agent2[1] < Target[1]:
Agent2[1] += 1
if Agent2[1] > Target[1]:
Agent2[1] -= 1
if Agent3[0] < Target[0]:
Agent3[0] += 1
if Agent3[0] > Target[0]:
Agent3[0] -= 1
if Agent3[1] < Target[1]:
Agent3[1] += 1
if Agent3[1] > Target[1]:
Agent3[1] -= 1
if Agent4[0] < Target[0]:
Agent4[0] += 1
if Agent4[0] > Target[0]:
Agent4[0] -= 1
if Agent4[1] < Target[1]:
Agent4[1] += 1
if Agent4[1] > Target[1]:
Agent4[1] -= 1
if randint(0, 1000) == randint(0, 1000):
print(f"Target Jumped")
print()
sleep(1)
if winsoundimport:
Beep(1000, 1000)
Setup()
Hunt()
|