from moai.data.datasets.common import load_color_image
from moai.utils.arguments import ensure_string_list
import torch
import glob
import os
import typing
import logging
__all__ = ["StructuredImages"]
log = logging.getLogger(__name__)
class StructuredImages(torch.utils.data.Dataset):
def __init__(self,
root: str='',
**kwargs: typing.Mapping[str, typing.Mapping[str, typing.Any]],
):
self.key_to_list = {}
self.key_to_xform = {}
for k, m in kwargs.items():
glob_list = ensure_string_list(m['glob'])
files = []
for g in glob_list:
files += glob.glob(os.path.join(root, g))
            self.key_to_list[k] = files  # glob already returns paths rooted at `root`
self.key_to_xform[k] = m['output_space']
def __len__(self) -> int:
return len(next(iter(self.key_to_list.values())))
def __getitem__(self, index: int) -> typing.Dict[str, torch.Tensor]:
ret = { }
for k, l in self.key_to_list.items():
ret[k] = load_color_image(l[index], self.key_to_xform[k])
return ret
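# A minimal usage sketch (the root path, glob patterns, and 'output_space' values below
# are illustrative assumptions, not part of this module):
#
#   dataset = StructuredImages(
#       root='/data/scenes',
#       color={'glob': 'color/*.png', 'output_space': 'rgb'},
#       albedo={'glob': 'albedo/*.png', 'output_space': 'rgb'},
#   )
#   sample = dataset[0]   # {'color': tensor, 'albedo': tensor}
#   loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True)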
|
class change_constraint:
""" Generates a partial SQL command to change a foreign key in a table """
def __init__(self, constraint):
self.constraint = constraint
def apply_to_table(self, table):
""" Changes the constraint in the table
:param table: The table to change the constraint on
:param type: mygrations.formats.mysql.definitions.table
"""
table.change_constraint(self.constraint)
def __str__(self):
return 'DROP FOREIGN KEY `%s`, ADD %s' % (self.constraint.name, str(self.constraint))
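# A rough usage sketch (the constraint and table objects are assumed to come from
# mygrations' own parsers; the names here are illustrative only):
#
#   op = change_constraint(new_fk_constraint)
#   op.apply_to_table(users_table)   # mutates the in-memory table definition
#   print(op)                        # DROP FOREIGN KEY `<name>`, ADD <constraint DDL>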
|
import matplotlib.pyplot as plt
from hyperparameters import *
import numpy as np
from tqdm import tqdm
from imutils import paths
import tensorflow as tf
from tensorflow import keras
import tensorflow_datasets as tfds
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
class RandomColorAffine(layers.Layer):
def __init__(self, brightness=0, jitter=0, **kwargs):
super().__init__(**kwargs)
self.brightness = brightness
self.jitter = jitter
def call(self, images, training=True):
if training:
batch_size = tf.shape(images)[0]
brightness_scales = 1 + tf.random.uniform(
(batch_size, 1, 1, 1), minval=-self.brightness, maxval=self.brightness
)
jitter_matrices = tf.random.uniform(
(batch_size, 1, 3, 3), minval=-self.jitter, maxval=self.jitter
)
            color_transforms = (
                tf.eye(3, batch_shape=[batch_size, 1]) * brightness_scales
                + jitter_matrices
            )
images = tf.clip_by_value(tf.matmul(images, color_transforms), 0, 1)
return images
def get_augmenter(min_area, brightness, jitter):
zoom_factor = 1.0 - tf.sqrt(min_area)
return keras.Sequential(
[
keras.Input(shape=(para_SSL['image_size'], para_SSL['image_size'], para_SSL['image_channels'])),
preprocessing.RandomFlip("horizontal"),
preprocessing.RandomTranslation(zoom_factor / 2, zoom_factor / 2),
preprocessing.RandomZoom((-zoom_factor, 0.0), (-zoom_factor, 0.0)),
RandomColorAffine(brightness, jitter),
]
)
class ContrastiveModel(keras.Model):
def __init__(self):
super().__init__()
self.temperature = para_SSL['temperature']
self.contrastive_augmenter = get_augmenter(**contrastive_augmentation)
self.classification_augmenter = get_augmenter(**classification_augmentation)
self.encoder = tf.keras.applications.ResNet50(include_top=True,weights=None, classes=1024, classifier_activation=None)
self.encoder.trainable = True
# Non-linear MLP as projection head
self.projection_head = keras.Sequential(
[
keras.Input(shape=(1024,)),
layers.Dense(para_SSL['width'], activation="relu"),
layers.Dense(para_SSL['width']),
],
name="projection_head",
)
def call(self, x, training=True):
if training:
augmented_images_1 = self.contrastive_augmenter(x)
augmented_images_2 = self.contrastive_augmenter(x)
features_1 = self.encoder(augmented_images_1)
features_2 = self.encoder(augmented_images_2)
project_1 = self.projection_head(features_1)
project_2 = self.projection_head(features_2)
return project_1, project_2
else:
return self.encoder(x)
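# A minimal usage sketch (hyperparameters come from the imported `para_SSL` dict and the
# `contrastive_augmentation`/`classification_augmentation` dicts, which are assumed to be
# defined in hyperparameters.py):
#
#   model = ContrastiveModel()
#   proj_1, proj_2 = model(image_batch, training=True)   # two augmented views, projected
#   features = model(image_batch, training=False)        # encoder features only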
|
#This file handles the statistics portion of the game
titles = []
stats_array = []
def save_stats():
    with open("statistics.csv", "w") as f:
        f.write(",".join(str(t) for t in titles) + "\n")
        for player in stats_array:
            f.write(",".join(str(s) for s in player) + "\n")
def load_stats_file():
    global titles, stats_array
    with open("statistics.csv", "r") as f:
        titles = f.readline().strip().split(',')
        for line in f:
            stats_array.append(line.strip().split(','))
    # Write a backup copy of the freshly loaded stats
    with open("stats_old.csv", "w") as f:
        f.write(",".join(str(t) for t in titles) + "\n")
        for player in stats_array:
            f.write(",".join(str(s) for s in player) + "\n")
    print("Loaded the following stats: ")
    print(titles)
def register_statistics(discord_handle):
for player in stats_array:
if player[0] == discord_handle:
return True
blank_stats = []
for _ in titles:
blank_stats.append(0)
blank_stats[0] = discord_handle
stats_array.append(blank_stats)
save_stats()
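# Typical call order (assumes a statistics.csv file with a header row already exists):
#
#   load_stats_file()                       # populates titles / stats_array, writes the stats_old.csv backup
#   register_statistics("SomePlayer#1234")  # adds a zeroed row for a new player and saves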
|
from django.db import models
# Create your models here.
class Item(models.Model):
name = models.CharField(max_length=50, unique=True, null=False, blank=False)
price = models.IntegerField(null=False, blank=False)
def __str__(self):
return self.name
|
import argparse
import json
import requests
import grpc
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
def read_text(text_file: str = "./text.txt") -> str:
with open(text_file, "r") as f:
text = f.read()
return text
def request_grpc(
text: str,
model_spec_name: str = "text",
signature_name: str = "serving_default",
address: str = "localhost",
port: int = 8500,
timeout_second: int = 5,
) -> str:
serving_address = f"{address}:{port}"
channel = grpc.insecure_channel(serving_address)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
request = predict_pb2.PredictRequest()
request.model_spec.name = model_spec_name
request.model_spec.signature_name = signature_name
request.inputs["text"].CopyFrom(tf.make_tensor_proto([text]))
response = stub.Predict(request, timeout_second)
prediction = response.outputs["output_0"].string_val[0].decode("utf-8")
return prediction
def request_rest(
text: str,
model_spec_name: str = "text",
signature_name: str = "serving_default",
address: str = "localhost",
port: int = 8501,
timeout_second: int = 5,
):
serving_address = f"http://{address}:{port}/v1/models/{model_spec_name}:predict"
headers = {"Content-Type": "application/json"}
request_dict = {"inputs": {"text": [text]}}
response = requests.post(
serving_address,
json.dumps(request_dict),
headers=headers,
)
return dict(response.json())["outputs"][0]
def main():
    parser = argparse.ArgumentParser(description="request text model")
parser.add_argument(
"-f", "--format", default="GRPC", type=str, help="GRPC or REST request"
)
parser.add_argument(
"-i",
"--text_file",
default="./text.txt",
type=str,
help="input text file path",
)
parser.add_argument(
"-t", "--target", default="localhost", type=str, help="target address"
)
parser.add_argument(
"-s", "--timeout_second", default=5, type=int, help="timeout in second"
)
parser.add_argument(
"-m",
"--model_spec_name",
default="text",
type=str,
help="model spec name",
)
parser.add_argument(
"-n",
"--signature_name",
default="serving_default",
type=str,
help="model signature name",
)
args = parser.parse_args()
text = read_text(text_file=args.text_file)
if args.format.upper() == "GRPC":
prediction = request_grpc(
text=text,
model_spec_name=args.model_spec_name,
signature_name=args.signature_name,
address=args.target,
port=8500,
timeout_second=args.timeout_second,
)
elif args.format.upper() == "REST":
prediction = request_rest(
text=text,
model_spec_name=args.model_spec_name,
signature_name=args.signature_name,
address=args.target,
port=8501,
timeout_second=args.timeout_second,
)
else:
raise ValueError("Undefined format; should be GRPC or REST")
print(prediction)
if __name__ == "__main__":
main()
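# Example invocations (the script file name is a placeholder; a TensorFlow Serving
# instance is assumed to be running on the target host):
#
#   python client.py -f GRPC -i ./text.txt -t localhost
#   python client.py -f REST -i ./text.txt -t localhost -m text -n serving_default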
|
"""
zamień element pierwszy i ostatni w tablicy [1,2,3, 'wiadomo']
"""
t = [1,2,3, 'wiadomo']
buff = t[0]
t[0] = t[-1]
t[-1] = buff
print(t)
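# The same swap can be written with tuple unpacking, without a temporary variable:
t2 = [1, 2, 3, 'wiadomo']
t2[0], t2[-1] = t2[-1], t2[0]
print(t2)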
|
#!/usr/bin/python3
"""
Test the main module.
"""
import pytest
import ising.__main__
__ARGS = ["--length", "8"]
def test_cback():
"""
Test the C backend.
"""
assert all(
map(
lambda x: x is not None,
ising.__main__.main(pass_args=__ARGS + ["--backend", "c"], test=True),
)
)
def test_mcback():
"""
Test the Monte-Carlo backend.
"""
assert all(
map(
lambda x: x is not None,
ising.__main__.main(
pass_args=__ARGS
+ ["--backend", "monte-carlo", "--depth", "10", "--mc-points", "100"],
test=True,
),
)
)
def test_fullback():
"""
Test the Python backend.
"""
assert all(
map(
lambda x: x is not None,
ising.__main__.main(pass_args=__ARGS + ["--backend", "python"], test=True),
)
)
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from ngraph.frontends.neon.aeon_shim import AeonDataLoader
from ngraph.util.persist import get_data_cache_or_nothing
'''
Contains the helper functions for video_c3d.py
'''
def common_config(manifest_file, manifest_root, batch_size):
'''
    Common configuration for the aeon loader
manifest_file(str): Name of the manifest file
manifest_root(str): Path for the manifest file
batch_size(int): Batch size used for training
'''
cache_root = get_data_cache_or_nothing('ucf-cache/')
video_config = {'type': "video",
'max_frame_count': 16,
'frame': {'height': 112, 'width': 112}}
label_config = {'type': "label",
'binary': True}
augmentation_config = {'type': 'image',
'scale': [0.875, 0.875]}
configs = {'manifest_filename': manifest_file,
'manifest_root': manifest_root,
'batch_size': batch_size,
'block_size': 5000,
'augmentation': [augmentation_config],
'cache_directory': cache_root,
'etl': [video_config, label_config]}
return configs
def make_validation_loader(manifest_file, manifest_root, batch_size, subset_pct=100):
'''
    Validation data configuration for aeon loader. Returns the object to be used for getting
    validation data.
manifest_file(str): Name of the manifest file
manifest_root(str): Path for the manifest file
batch_size(int): Batch size used for training
subset_pct(int): Percent data to be used for validation
'''
aeon_config = common_config(manifest_file, manifest_root, batch_size)
aeon_config['subset_fraction'] = float(subset_pct / 100.0)
dl = AeonDataLoader(aeon_config)
return dl
def make_train_loader(manifest_file, manifest_root, batch_size, subset_pct=100, random_seed=0):
'''
Training data configuration for aeon loader. Returns the object to be used for getting
training data.
manifest_file(str): Name of the manifest file
manifest_root(str): Path for the manifest file
batch_size(int): Batch size used for training
subset_pct(int): Percent data to be used for training
random_seed(int): Random number generator seed
'''
aeon_config = common_config(manifest_file, manifest_root, batch_size)
aeon_config['subset_fraction'] = float(subset_pct / 100.0)
aeon_config['shuffle_manifest'] = True
aeon_config['shuffle_enable'] = True
aeon_config['random_seed'] = random_seed
aeon_config['augmentation'][0]['center'] = False
aeon_config['augmentation'][0]['flip_enable'] = True
dl = AeonDataLoader(aeon_config)
return dl
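# A minimal usage sketch (manifest paths and batch size are illustrative assumptions):
#
#   train_set = make_train_loader('train-index.csv', '/data/ucf101', batch_size=32,
#                                 subset_pct=100, random_seed=0)
#   valid_set = make_validation_loader('val-index.csv', '/data/ucf101', batch_size=32,
#                                      subset_pct=20)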
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ## ###############################################
# Author: Santillan Garcia Josue
# Code modified from blink.py
# Future imports (Python 2.7 compatibility)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import Raspberry Pi's GPIO control library
import RPi.GPIO as GPIO
# Initializes virtual board (comment out for hardware deploy)
import virtualboard
# Import sleep
from time import sleep
# Disable warnings
# GPIO.setwarnings(False)
# Set up Rpi.GPIO library to use physical pin numbers
GPIO.setmode(GPIO.BOARD)
# Set up pin no. 32 as output and default it to low
GPIO.setup(32, GPIO.OUT, initial=GPIO.LOW)
# Configure the rest of the pins to be used
GPIO.setup(26, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(24, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(22, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(18, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(16, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(12, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(10, GPIO.OUT, initial=GPIO.LOW)
# Initialize the array of pins
pines = [32, 26, 24, 22, 18, 16, 12, 10]
i = 0  # Index used to select a pin
direccionIzquierda = True  # Flag that tracks the direction of the LED sweep
while True:
    sleep(0.5)  # Wait 500 ms
    GPIO.output(pines[i], GPIO.HIGH)  # Drive the pin selected by the index high
    sleep(0.5)  # Wait another 500 ms
    GPIO.output(pines[i], GPIO.LOW)  # Drive the pin selected by the index low
    # Check whether the index has reached the limits of the pin array
    if i >= 7:
        direccionIzquierda = False
    elif i <= 0:
        direccionIzquierda = True
    # Decide whether the index should count up or down
if direccionIzquierda:
i += 1
else:
i -= 1
|
from . import tic_tac_toe
from . import connect_4
game_types = { str(i) : game_type for (i, game_type) in enumerate((
tic_tac_toe,
connect_4,
))}
STATE_PROPOSED = 0
STATE_ACTIVE = 1
STATE_COMPLETE = 2
STATE_FORFEIT = 3
STATE_CANCELLED = 4
STATES = [
STATE_PROPOSED,
STATE_ACTIVE,
STATE_COMPLETE,
STATE_FORFEIT,
STATE_CANCELLED,
]
|
import matplotlib
matplotlib.use('Agg')
import os
import sys
import numpy as np
import json
import matplotlib.pyplot as plt
import caffe
from caffe import layers as L
from caffe import params as P
from activity_data_provider_layer import ActivityDataProvider
from build_val_model import act_proto, exp_proto
import config
def learning_params(param_list):
param_dicts = []
for pl in param_list:
param_dict = {}
param_dict['lr_mult'] = pl[0]
if len(pl) > 1:
            param_dict['decay_mult'] = pl[1]
param_dicts.append(param_dict)
return param_dicts
fixed_weights = learning_params([[0, 0], [0, 0]])
fixed_weights_lstm = learning_params([[0, 0], [0, 0], [0, 0]])
def pj_x(mode, batchsize, exp_T, exp_vocab_size):
n = caffe.NetSpec()
mode_str = json.dumps({'mode':mode, 'batchsize':batchsize})
n.img_feature, n.label, n.exp, n.exp_out, n.exp_cont_1, n.exp_cont_2 = \
L.Python(module='activity_data_provider_layer',
layer='ActivityDataProviderLayer',
param_str=mode_str, ntop=6)
# Attention
n.att_conv1 = L.Convolution(n.img_feature, kernel_size=1, stride=1, num_output=512, pad=0, weight_filler=dict(type='xavier'))
n.att_conv1_relu = L.ReLU(n.att_conv1)
n.att_conv2 = L.Convolution(n.att_conv1_relu, kernel_size=1, stride=1, num_output=1, pad=0, weight_filler=dict(type='xavier'))
n.att_reshaped = L.Reshape(n.att_conv2,reshape_param=dict(shape=dict(dim=[-1,1,14*14])))
n.att_softmax = L.Softmax(n.att_reshaped, axis=2)
n.att_map = L.Reshape(n.att_softmax,reshape_param=dict(shape=dict(dim=[-1,1,14,14])))
dummy = L.DummyData(shape=dict(dim=[batchsize, 1]), data_filler=dict(type='constant', value=1), ntop=1)
n.att_feature = L.SoftAttention(n.img_feature, n.att_map, dummy)
n.att_feature_resh = L.Reshape(n.att_feature, reshape_param=dict(shape=dict(dim=[-1,2048])))
# Prediction
n.prediction = L.InnerProduct(n.att_feature_resh, num_output=config.NUM_OUTPUT_UNITS, weight_filler=dict(type='xavier'))
n.loss = L.SoftmaxWithLoss(n.prediction, n.label)
n.accuracy = L.Accuracy(n.prediction, n.label)
# Embed Activity GT answer during training
n.exp_emb_ans = L.Embed(n.label, input_dim=config.NUM_OUTPUT_UNITS, num_output=300, \
weight_filler=dict(type='uniform', min=-0.08, max=0.08))
n.exp_emb_ans_tanh = L.TanH(n.exp_emb_ans)
n.exp_emb_ans2 = L.InnerProduct(n.exp_emb_ans_tanh, num_output=2048, weight_filler=dict(type='xavier'))
# merge activity answer and visual feature
n.exp_emb_resh = L.Reshape(n.exp_emb_ans2, reshape_param=dict(shape=dict(dim=[-1,2048,1,1])))
n.exp_emb_tiled_1 = L.Tile(n.exp_emb_resh, axis=2, tiles=14)
n.exp_emb_tiled = L.Tile(n.exp_emb_tiled_1, axis=3, tiles=14)
n.img_embed = L.Convolution(n.img_feature, kernel_size=1, stride=1, num_output=2048, pad=0, weight_filler=dict(type='xavier'))
n.exp_eltwise = L.Eltwise(n.img_embed, n.exp_emb_tiled, eltwise_param={'operation': P.Eltwise.PROD})
n.exp_eltwise_sqrt = L.SignedSqrt(n.exp_eltwise)
n.exp_eltwise_l2 = L.L2Normalize(n.exp_eltwise_sqrt)
n.exp_eltwise_drop = L.Dropout(n.exp_eltwise_l2, dropout_param={'dropout_ratio': 0.3})
# Attention for Explanation
n.exp_att_conv1 = L.Convolution(n.exp_eltwise_drop, kernel_size=1,
stride=1, num_output=512, pad=0, weight_filler=dict(type='xavier'))
n.exp_att_conv1_relu = L.ReLU(n.exp_att_conv1)
n.exp_att_conv2 = L.Convolution(n.exp_att_conv1_relu, kernel_size=1,
stride=1, num_output=1, pad=0, weight_filler=dict(type='xavier'))
n.exp_att_reshaped = L.Reshape(n.exp_att_conv2,reshape_param=dict(shape=dict(dim=[-1,1,14*14])))
n.exp_att_softmax = L.Softmax(n.exp_att_reshaped, axis=2)
n.exp_att_map = L.Reshape(n.exp_att_softmax,reshape_param=dict(shape=dict(dim=[-1,1,14,14])))
exp_dummy = L.DummyData(shape=dict(dim=[batchsize, 1]), data_filler=dict(type='constant', value=1), ntop=1)
n.exp_att_feature_prev = L.SoftAttention(n.img_feature, n.exp_att_map, exp_dummy)
n.exp_att_feature_resh = L.Reshape(n.exp_att_feature_prev, reshape_param=dict(shape=dict(dim=[-1, 2048])))
n.exp_att_feature_embed = L.InnerProduct(n.exp_att_feature_resh, num_output=2048, weight_filler=dict(type='xavier'))
n.exp_att_feature = L.Eltwise(n.exp_emb_ans2, n.exp_att_feature_embed, eltwise_param={'operation': P.Eltwise.PROD})
# Embed explanation
n.exp_embed_ba = L.Embed(n.exp, input_dim=exp_vocab_size, num_output=300, \
weight_filler=dict(type='uniform', min=-0.08, max=0.08))
n.exp_embed = L.TanH(n.exp_embed_ba)
# LSTM1 for Explanation
n.exp_lstm1 = L.LSTM(\
n.exp_embed, n.exp_cont_1,\
recurrent_param=dict(\
num_output=2048,\
weight_filler=dict(type='uniform',min=-0.08,max=0.08),\
bias_filler=dict(type='constant',value=0)))
n.exp_lstm1_dropped = L.Dropout(n.exp_lstm1,dropout_param={'dropout_ratio':0.3})
# merge with LSTM1 for explanation
n.exp_att_resh = L.Reshape(n.exp_att_feature, reshape_param=dict(shape=dict(dim=[1, -1, 2048])))
n.exp_att_tiled = L.Tile(n.exp_att_resh, axis=0, tiles=exp_T)
n.exp_eltwise_all = L.Eltwise(n.exp_lstm1_dropped, n.exp_att_tiled, eltwise_param={'operation': P.Eltwise.PROD})
n.exp_eltwise_all_l2 = L.L2Normalize(n.exp_eltwise_all)
n.exp_eltwise_all_drop = L.Dropout(n.exp_eltwise_all_l2, dropout_param={'dropout_ratio': 0.3})
# LSTM2 for Explanation
n.exp_lstm2 = L.LSTM(\
n.exp_eltwise_all_drop, n.exp_cont_2,\
recurrent_param=dict(\
num_output=1024,\
weight_filler=dict(type='uniform',min=-0.08,max=0.08),\
bias_filler=dict(type='constant',value=0)))
n.exp_lstm2_dropped = L.Dropout(n.exp_lstm2,dropout_param={'dropout_ratio':0.3})
n.exp_prediction = L.InnerProduct(n.exp_lstm2_dropped, num_output=exp_vocab_size, weight_filler=dict(type='xavier'), axis=2)
n.exp_loss = L.SoftmaxWithLoss(n.exp_prediction, n.exp_out,
loss_param=dict(ignore_label=-1),
softmax_param=dict(axis=2))
n.exp_accuracy = L.Accuracy(n.exp_prediction, n.exp_out, axis=2, ignore_label=-1)
return n.to_proto()
def make_answer_vocab(adic, vocab_size):
"""
Returns a dictionary that maps words to indices.
"""
adict = {}
id = 0
for qid in adic.keys():
answer = adic[qid]
if answer in adict:
continue
else:
adict[answer] = id
id +=1
return adict
def make_exp_vocab(exp_dic):
"""
Returns a dictionary that maps words to indices.
"""
exp_vdict = {'<EOS>': 0}
exp_vdict[''] = 1
exp_id = 2
for qid in exp_dic.keys():
exp_strings = exp_dic[qid]
for exp_str in exp_strings:
exp_list = ActivityDataProvider.seq_to_list(exp_str)
for w in exp_list:
if w not in exp_vdict:
exp_vdict[w] = exp_id
exp_id += 1
return exp_vdict
def make_vocab_files():
"""
Produce the answer and explanation vocabulary files.
"""
print('making answer vocab...', config.ANSWER_VOCAB_SPACE)
_, adic, _ = ActivityDataProvider.load_data(config.ANSWER_VOCAB_SPACE)
answer_vocab = make_answer_vocab(adic, config.NUM_OUTPUT_UNITS)
print('making explanation vocab...', config.EXP_VOCAB_SPACE)
_, _, expdic = ActivityDataProvider.load_data(config.EXP_VOCAB_SPACE)
explanation_vocab = make_exp_vocab(expdic)
return answer_vocab, explanation_vocab
def reverse(d):
    rev_dict = {}
    for k, v in d.items():
        rev_dict[v] = k
    return rev_dict
def to_str(type, idxs, cont, r_adict, r_exp_vdict):
if type == 'a':
return r_adict[idxs]
elif type == 'exp':
words = []
for idx in idxs:
if idx == 0:
break
words.append(r_exp_vdict[idx])
return ' '.join(words)
def batch_to_str(type, batch_idx, batch_cont, r_adict, r_exp_vdict):
converted = []
for idxs, cont in zip(batch_idx, batch_cont):
converted.append(to_str(type, idxs, cont, r_adict, r_exp_vdict))
return converted
def main():
if not os.path.exists('./model'):
os.makedirs('./model')
    answer_vocab, exp_vocab = {}, {}
if os.path.exists('./model/adict.json') and os.path.exists('./model/exp_vdict.json'):
print('restoring vocab')
with open('./model/adict.json','r') as f:
answer_vocab = json.load(f)
with open('./model/exp_vdict.json','r') as f:
exp_vocab = json.load(f)
else:
answer_vocab, exp_vocab = make_vocab_files()
with open('./model/adict.json','w') as f:
json.dump(answer_vocab, f)
with open('./model/exp_vdict.json','w') as f:
json.dump(exp_vocab, f)
r_adict = reverse(answer_vocab)
r_exp_vdict = reverse(exp_vocab)
print('answer vocab size:', len(answer_vocab))
print('exp vocab size:', len(exp_vocab))
with open('./model/proto_train.prototxt', 'w') as f:
f.write(str(pj_x(config.TRAIN_DATA_SPLITS, config.BATCH_SIZE, \
config.MAX_WORDS_IN_EXP, len(exp_vocab))))
with open('./model/act_proto_test_gt.prototxt', 'w') as f:
f.write(str(act_proto('val', config.VAL_BATCH_SIZE, len(exp_vocab), use_gt=True)))
with open('./model/act_proto_test_pred.prototxt', 'w') as f:
f.write(str(act_proto('val', config.VAL_BATCH_SIZE, len(exp_vocab), use_gt=False)))
with open('./model/exp_proto_test.prototxt', 'w') as f:
f.write(str(exp_proto('val', config.VAL_BATCH_SIZE, 1, len(exp_vocab))))
caffe.set_device(config.GPU_ID)
caffe.set_mode_gpu()
solver = caffe.get_solver('./pj_x_solver.prototxt')
train_loss = np.zeros(config.MAX_ITERATIONS)
train_acc = np.zeros(config.MAX_ITERATIONS)
train_loss_exp = np.zeros(config.MAX_ITERATIONS)
train_acc_exp = np.zeros(config.MAX_ITERATIONS)
results = []
for it in range(config.MAX_ITERATIONS):
solver.step(1)
# store the train loss
train_loss[it] = solver.net.blobs['loss'].data
train_acc[it] = solver.net.blobs['accuracy'].data
train_loss_exp[it] = solver.net.blobs['exp_loss'].data
train_acc_exp[it] = solver.net.blobs['exp_accuracy'].data
if it != 0 and it % config.PRINT_INTERVAL == 0:
print('Iteration:', it)
c_mean_loss = train_loss[it-config.PRINT_INTERVAL:it].mean()
c_mean_acc = train_acc[it-config.PRINT_INTERVAL:it].mean()
c_mean_loss_exp = train_loss_exp[it-config.PRINT_INTERVAL:it].mean()
c_mean_acc_exp = train_acc_exp[it-config.PRINT_INTERVAL:it].mean()
print('Train loss for classification:', c_mean_loss)
print('Train accuracy for classification:', c_mean_acc)
print('Train loss for exp:', c_mean_loss_exp)
print('Train accuracy for exp:', c_mean_acc_exp)
predicted_ans = solver.net.blobs['prediction'].data
predicted_ans = predicted_ans.argmax(axis=1)
answers = solver.net.blobs['label'].data
generated_exp = solver.net.blobs['exp_prediction'].data
generated_exp = generated_exp.argmax(axis=2).transpose()
target_exp = solver.net.blobs['exp_out'].data.transpose()
exp_out_cont = solver.net.blobs['exp_cont_2'].data.transpose()
predict_str = batch_to_str('a', predicted_ans, np.ones_like(predicted_ans),
r_adict, r_exp_vdict)
answers_str = batch_to_str('a', answers, np.ones_like(answers),
r_adict, r_exp_vdict)
generated_str = batch_to_str('exp', generated_exp, exp_out_cont,
r_adict, r_exp_vdict)
target_str = batch_to_str('exp', target_exp, exp_out_cont,
r_adict, r_exp_vdict)
count = 0
for pred, ans, exp, target in zip(predict_str, answers_str, generated_str, target_str):
if count == 10:
break
print('Pred:', pred)
print('A:', ans)
print('Because...')
print('\tgenerated:', exp)
print('\ttarget:', target)
count += 1
if __name__ == '__main__':
main()
|
from unittest import TestCase
from unittest.mock import patch
from dd_import.environment import Environment
class TestEnvironment(TestCase):
def test_check_environment_reimport_findings_empty(self):
with self.assertRaises(Exception) as cm:
environment = Environment()
environment.check_environment_reimport_findings()
self.assertEqual('DD_URL is missing / DD_API_KEY is missing / DD_PRODUCT_TYPE_NAME is missing / DD_PRODUCT_NAME is missing / DD_ENGAGEMENT_NAME is missing / DD_TEST_NAME is missing / DD_TEST_TYPE_NAME is missing', str(cm.exception))
self.assertTrue(environment.active)
self.assertTrue(environment.verified)
self.assertFalse(environment.push_to_jira)
self.assertTrue(environment.close_old_findings)
@patch.dict('os.environ', {'DD_URL': 'url',
'DD_API_KEY': 'api_key',
'DD_PRODUCT_TYPE_NAME': 'product_type',
'DD_PRODUCT_NAME': 'product',
'DD_ENGAGEMENT_NAME': 'engagement',
'DD_TEST_NAME': 'test',
'DD_TEST_TYPE_NAME': 'test_type',
'DD_FILE_NAME': 'file_name',
'DD_ACTIVE': 'False',
'DD_VERIFIED': 'False',
'DD_MINIMUM_SEVERITY': 'minimum_severity',
'DD_PUSH_TO_JIRA': 'True',
'DD_CLOSE_OLD_FINDINGS': 'False',
'DD_VERSION': 'version',
'DD_ENDPOINT_ID': 'endpoint_id',
'DD_SERVICE': 'service',
'DD_BUILD_ID': 'build_id',
'DD_COMMIT_HASH': 'commit_hash',
'DD_BRANCH_TAG': 'branch_tag'})
def test_check_environment_reimport_findings_complete(self):
environment = Environment()
environment.check_environment_reimport_findings()
self.assertEqual(environment.url, 'url')
self.assertEqual(environment.api_key, 'api_key')
self.assertEqual(environment.product_type_name, 'product_type')
self.assertEqual(environment.product_name, 'product')
self.assertEqual(environment.engagement_name, 'engagement')
self.assertEqual(environment.test_name, 'test')
self.assertEqual(environment.test_type_name, 'test_type')
self.assertEqual(environment.file_name, 'file_name')
self.assertEqual(environment.url, 'url')
self.assertFalse(environment.active)
self.assertFalse(environment.verified)
self.assertEqual(environment.minimum_severity, 'minimum_severity')
self.assertTrue(environment.push_to_jira)
self.assertFalse(environment.close_old_findings)
self.assertEqual(environment.version, 'version')
self.assertEqual(environment.endpoint_id, 'endpoint_id')
self.assertEqual(environment.service, 'service')
self.assertEqual(environment.build_id, 'build_id')
self.assertEqual(environment.commit_hash, 'commit_hash')
self.assertEqual(environment.branch_tag, 'branch_tag')
def test_check_environment_languages_empty(self):
with self.assertRaises(Exception) as cm:
environment = Environment()
environment.check_environment_languages()
self.assertEqual('DD_URL is missing / DD_API_KEY is missing / DD_PRODUCT_TYPE_NAME is missing / DD_PRODUCT_NAME is missing / DD_FILE_NAME is missing', str(cm.exception))
self.assertTrue(environment.active)
self.assertTrue(environment.verified)
self.assertFalse(environment.push_to_jira)
self.assertTrue(environment.close_old_findings)
@patch.dict('os.environ', {'DD_URL': 'url',
'DD_API_KEY': 'api_key',
'DD_PRODUCT_TYPE_NAME': 'product_type',
'DD_PRODUCT_NAME': 'product',
'DD_FILE_NAME': 'file_name'})
def test_check_environment_languages_complete(self):
environment = Environment()
environment.check_environment_languages()
self.assertEqual(environment.url, 'url')
self.assertEqual(environment.api_key, 'api_key')
self.assertEqual(environment.product_type_name, 'product_type')
self.assertEqual(environment.product_name, 'product')
self.assertEqual(environment.file_name, 'file_name')
|
"""
An interface to the python-swiftclient api through Django.
"""
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
import json
from difflib import get_close_matches
dataset = json.load(open("data.json"))
word = input("Enter the word: ")
def translate(word):
    word = word.lower()  # .lower() converts the string to lowercase.
if word in dataset:
print("Meaning - ")
meanings = dataset[word]
for meaning in meanings:
print("-> ",meaning)
else:
        replacement = get_close_matches(word, dataset.keys(), n=1, cutoff=0.75)
        # get_close_matches (from difflib) returns the closest match available for a string,
        # subject to the given cutoff value (match probability).
        if not replacement:
            print("Sorry! The word doesn't exist in the dictionary.")
        else:
            response = input("Did you mean %s? If yes enter Y else N\n" % replacement[0])
            if response.lower() == "y":
                translate(replacement[0])
            else:
                print("Sorry! The word doesn't exist in the dictionary.")
translate(word)
|
carro = int(input('Quantos anos tem o seu carro?'))
if carro <=5:
print('Seu carro é novo!')
else:
print('Seu carro é velho!')
print('--FIM--')
# WE CAN SHORTEN THE CODE LIKE THIS!!!
#carro = int(input('Quantos anos tem o seu carro?'))
#print('Carro novo' if carro<=5 else 'Carro velho')
#print('--FIM--')
|
#!/usr/bin/env python
import os
import sys
import json
import re
from DIRAC import gLogger
from DIRAC import S_OK, S_ERROR, exit
from DIRAC.Core.Base import Script
Script.setUsageMessage( """
Get and merge output files of the task
Usage:
%(script)s [options] TaskID
Examples:
# Download all output files in task 165 to the current directory
%(script)s 165
# Download all output files in task 165 and merge all the files into larger ones, where the merged file size will not exceed 1GB
%(script)s -g 1G 165
# Download all dst files in task 237 to directory "/some/dir"
%(script)s -f '*.dst' -D /some/dir 237
# List all root file names in task 46. Do NOT download
%(script)s -l -f '*.root' 46
# Download all root files in task 329 to directory "output" and merge to files smaller than 800MB, reserve the root files after merge
%(script)s -g 800M -k -f '*.root' -D output 329
""" % {'script': Script.scriptName} )
Script.registerSwitch("m:", "method=", "Downloading method: local_rsync, dfc, cp, daemon_rsync")
Script.registerSwitch("l", "list", "List all output files. Do NOT download")
Script.registerSwitch("D:", "dir=", "Output directory")
Script.registerSwitch("f:", "filter=", "Name pattern for filtering output file")
Script.registerSwitch("r", "reload", "Redownload every file each time")
Script.registerSwitch("u", "checksum", "Use checksum for file validation. Could be slow for some special situations")
Script.registerSwitch("g:", "merge=", "Set max size for merged destination file (e.g., 500000, 2G, 700M)")
Script.registerSwitch("k", "keep", "Keep downloaded output files after merge. Use with -g option")
Script.parseCommandLine( ignoreErrors = False )
options = Script.getUnprocessedSwitches()
args = Script.getPositionalArgs()
from IHEPDIRAC.WorkloadManagementSystem.Client.TaskClient import TaskClient
taskClient = TaskClient()
from IHEPDIRAC.Badger.private.output.GetOutputHandler import GetOutputHandler
downloadCounter = {'total': 0, 'ok': 0, 'error': 0, 'skip': 0, 'notexist': 0}
downloadSpeed = {'size': 0, 'span': 0}
mergeCounter = {'total': 0, 'ok': 0, 'error': 0}
mergeSpeed = {'size': 0, 'span': 0}
removeCounter = {'ok': 0, 'error': 0}
def sizeConvert(sizeString):
unitDict = {'K': 1024, 'M': 1024**2, 'G': 1024**3, 'T': 1024**4, 'P': 1024**5}
size = -1
try:
unit = sizeString[-1]
if unit in unitDict:
size = int(sizeString[:-1]) * unitDict[unit]
else:
size = int(sizeString)
except:
pass
return size
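# For example: sizeConvert('2G') -> 2147483648, sizeConvert('700M') -> 734003200,
# sizeConvert('500000') -> 500000, and any unparsable string -> -1.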
def getTaskOutput(taskID):
lfnList = []
result = taskClient.getTaskJobs(taskID)
if not result['OK']:
gLogger.error(result['Message'])
return lfnList
jobIDs = result['Value']
outFields = ['Info']
result = taskClient.getJobs(jobIDs, outFields)
if not result['OK']:
gLogger.error(result['Message'])
return lfnList
jobsInfo = [json.loads(info[0]) for info in result['Value']]
for jobInfo in jobsInfo:
if 'OutputFileName' in jobInfo:
lfnList += jobInfo['OutputFileName']
return lfnList
def filterOutput(lfnList, pattern):
newList = []
# convert wildcard to regular expression
ptemp = pattern.replace('.', '\.')
ptemp = ptemp.replace('?', '.')
ptemp = ptemp.replace('*', '.*')
ptemp = '^' + ptemp + '$'
for lfn in lfnList:
m = re.search(ptemp, lfn)
if m:
newList.append(lfn)
return newList
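# For example: filterOutput(['run_1.dst', 'run_1.root'], '*.dst') -> ['run_1.dst']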
def listOutput(lfnList):
for lfn in lfnList:
print lfn
def downloadCallback(lfn, result):
status = result['status']
size = result['size'] if 'size' in result else 0
span = result['span'] if 'span' in result else 0
size /= (1024*1024.)
speed = 0
if span != 0 and status == 'ok':
speed = size / span
downloadSpeed['size'] += size
downloadSpeed['span'] += span
if status not in downloadCounter:
downloadCounter[status] = 0
downloadCounter[status] += 1
downloadCounter['total'] += 1
if status != 'skip':
gLogger.always('[Downloaded] %-8s %9.2f MB %7.2f MB/s %s' % (status, size, speed, os.path.basename(lfn)))
else:
gLogger.debug('[Downloaded] %-8s %9.2f MB %7.2f MB/s %s' % (status, size, speed, os.path.basename(lfn)))
def mergeCallback(fileList, mergePath, mergeSize, mergeSpan, ret):
status = 'ok' if ret else 'error'
size = mergeSize / (1024*1024.)
if status not in mergeCounter:
mergeCounter[status] = 0
mergeCounter[status] += 1
mergeCounter['total'] += 1
mergeSpeed['size'] += size
mergeSpeed['span'] += mergeSpan
gLogger.always('[Merged] %-8s %9.2f MB %6d files %s' % (status, size, len(fileList), mergePath))
def removeCallback(localPath):
removeCounter['ok'] += 1
def main():
method = ['dfc', 'http', 'daemon_rsync', 'cp', 'local_rsync']
listFile = False
localValidation = True
outputDir = '.'
pattern = None
mergeMaxSize = 0
removeDownload = False
useChecksum = False
for option in options:
(switch, val) = option
if switch == 'm' or switch == 'method':
method = val
if switch == 'l' or switch == 'list':
listFile = True
if switch == 'D' or switch == 'dir':
outputDir = val
if switch == 'f' or switch == 'filter':
pattern = val
if switch == 'r' or switch == 'reload':
localValidation = False
if switch == 'u' or switch == 'checksum':
useChecksum = True
if switch == 'g' or switch == 'merge':
mergeMaxSize = sizeConvert(val)
if mergeMaxSize < 0:
gLogger.error('Invalid merge size format: %s' % val)
return 1
if switch == 'k' or switch == 'keep':
removeDownload = False
if len(args) != 1:
gLogger.error('There must be one and only one task ID specified')
return 1
try:
taskID = int(args[0])
except:
gLogger.error('Invalid task ID: %s' % args[0])
return 1
lfnList = getTaskOutput(taskID)
if pattern is not None:
lfnList = filterOutput(lfnList, pattern)
lfnList = sorted(lfnList)
if listFile:
listOutput(lfnList)
return 0
# lfnList is ready now
taskFileNumber = len(lfnList)
gLogger.always('- Files in the request :', taskFileNumber)
if taskFileNumber == 0:
return 0
try:
handler = GetOutputHandler(lfnList, method, localValidation, useChecksum)
except Exception, e:
gLogger.error(' Could not initialize get output handler from:', method)
return 1
realMethod = handler.getMethod()
gLogger.info('- Using download method:', realMethod)
gLogger.always('- Checking available files...')
handler.checkRemote()
remoteFileNumber = handler.getAvailNumber()
gLogger.always('- Available files :', remoteFileNumber)
gLogger.always('')
if remoteFileNumber == 0:
return 0
# Download and merge
downloadDir = os.path.join(outputDir, 'output_task_%s/download' % taskID)
if not os.path.exists(downloadDir):
os.makedirs(downloadDir)
if mergeMaxSize == 0:
handler.download(downloadDir, downloadCallback)
else:
if remoteFileNumber < taskFileNumber:
            s = raw_input('- Task not finished, do you really want to merge the output files? (y/N) ')
if s != 'y' and s != 'Y':
return 0
gLogger.always('')
exts = set()
for lfn in lfnList:
exts.add(os.path.splitext(lfn)[1])
exts = list(exts)
if len(exts) > 1:
gLogger.error(' Can not merge! Different file types in output:', ', '.join(exts))
gLogger.error(' Use "-f" option to filter the output files')
return 0
ext = exts[0]
mergeDir = os.path.join(outputDir, 'output_task_%s/merge' % taskID)
if not os.path.exists(mergeDir):
os.makedirs(mergeDir)
else:
for f in os.listdir(mergeDir):
if f.endswith(ext):
os.remove(os.path.join(mergeDir, f))
handler.downloadAndMerge(downloadDir, mergeDir, 'task_%s_merge' % taskID, ext, mergeMaxSize, removeDownload,
downloadCallback, mergeCallback, removeCallback)
# Result
gLogger.always('')
gLogger.always('Total: %s' % taskFileNumber)
gLogger.always('Available: %s' % remoteFileNumber)
gLogger.always('')
gLogger.always('Download: %s' % downloadCounter['total'])
gLogger.always(' - OK: %s' % downloadCounter['ok'])
gLogger.always(' - Skipped: %s' % downloadCounter['skip'])
gLogger.always(' - Error: %s' % downloadCounter['error'])
totalDownloadSpeed = downloadSpeed['size']/downloadSpeed['span'] if downloadSpeed['span'] != 0 else 0
gLogger.always('Average Speed: %.2f MB/s' % totalDownloadSpeed)
gLogger.always('Files downloaded in:', downloadDir)
if mergeMaxSize > 0:
gLogger.always('')
gLogger.always('Merge: %s' % mergeCounter['total'])
gLogger.always(' - OK: %s' % mergeCounter['ok'])
gLogger.always(' - Error: %s' % mergeCounter['error'])
totalMergeSpeed = mergeSpeed['size']/mergeSpeed['span'] if mergeSpeed['span'] != 0 else 0
gLogger.always('Merge Speed: %.2f MB/s' % totalMergeSpeed)
gLogger.always('Files merged in:', mergeDir)
gLogger.debug('')
gLogger.debug('Download counter:', downloadCounter)
gLogger.debug('Merge counter:', mergeCounter)
if removeCounter['ok'] > 0:
gLogger.always('')
gLogger.always('%s downloaded files removed from:'%removeCounter['ok'], downloadDir)
return 0
if __name__ == '__main__':
if main():
Script.showHelp()
|
from pynetest.lib.expectation import Expectation
from pynetest.matchers import between
class ToBeBetweenExpectation(Expectation):
def __init__(self, lower, upper):
self.lower = lower
self.upper = upper
super().__init__("to_be_between", between(lower, upper), self.message_format)
def message_format(self, subject, params):
lower, upper = params
if subject < lower:
return "Expected <{subject}> to be between <{0}> and <{1}> " \
"but it was less than or equal to <{0}>"
else:
return "Expected <{subject}> to be between <{0}> and <{1}> " \
"but it was greater than or equal to <{1}>"
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
class CryptoKeyIAMMember(pulumi.CustomResource):
"""
Allows creation and management of a single member for a single binding within
the IAM policy for an existing Google Cloud KMS crypto key.
~> **Note:** This resource _must not_ be used in conjunction with
`google_kms_crypto_key_iam_policy` or they will fight over what your policy
should be. Similarly, roles controlled by `google_kms_crypto_key_iam_binding`
should not be assigned to using `google_kms_crypto_key_iam_member`.
"""
def __init__(__self__, __name__, __opts__=None, crypto_key_id=None, member=None, role=None):
"""Create a CryptoKeyIAMMember resource with the given unique name, props, and options."""
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, basestring):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if not crypto_key_id:
raise TypeError('Missing required property crypto_key_id')
elif not isinstance(crypto_key_id, basestring):
raise TypeError('Expected property crypto_key_id to be a basestring')
__self__.crypto_key_id = crypto_key_id
"""
The key ring ID, in the form
`{project_id}/{location_name}/{key_ring_name}/{crypto_key_name}` or
`{location_name}/{key_ring_name}/{crypto_key_name}`. In the second form,
the provider's project setting will be used as a fallback.
"""
__props__['cryptoKeyId'] = crypto_key_id
if not member:
raise TypeError('Missing required property member')
elif not isinstance(member, basestring):
raise TypeError('Expected property member to be a basestring')
__self__.member = member
"""
The user that the role should apply to.
"""
__props__['member'] = member
if not role:
raise TypeError('Missing required property role')
elif not isinstance(role, basestring):
raise TypeError('Expected property role to be a basestring')
__self__.role = role
"""
The role that should be applied. Note that custom roles must be of the format
`[projects|organizations]/{parent-name}/roles/{role-name}`.
"""
__props__['role'] = role
__self__.etag = pulumi.runtime.UNKNOWN
"""
(Computed) The etag of the project's IAM policy.
"""
super(CryptoKeyIAMMember, __self__).__init__(
'gcp:kms/cryptoKeyIAMMember:CryptoKeyIAMMember',
__name__,
__props__,
__opts__)
def set_outputs(self, outs):
if 'cryptoKeyId' in outs:
self.crypto_key_id = outs['cryptoKeyId']
if 'etag' in outs:
self.etag = outs['etag']
if 'member' in outs:
self.member = outs['member']
if 'role' in outs:
self.role = outs['role']
|
from app.data_layer.datastore import Datastore
class SongFacade:
def __init__(self):
self._datastore = Datastore()
def create_song(self, song_name, song_artist, song_genre):
"""
:param str song_name:
:param str song_artist:
:param str song_genre:
:rtype: dict
"""
return self._datastore.create_song(song_name, song_artist, song_genre)
def delete_song(self, song_id):
"""
:param int song_id:
"""
self._datastore.delete_song(song_id)
def get_song(self, song_id):
"""
:param int song_id:
:rtype: dict
"""
return self._datastore.get_song(song_id)
def list_songs(self):
"""
:rtype: dict
"""
return self._datastore.list_songs()
def replace_song(self, song_id, song_name, song_artist, song_genre):
"""
:param int song_id:
:param str song_name:
:param str song_artist:
:param str song_genre:
:rtype: dict
"""
return self._datastore.replace_song(song_id, song_name, song_artist, song_genre)
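# A minimal usage sketch (the Datastore backend and the exact shape of the returned
# dicts belong to the data layer and are assumed here, not defined in this module):
#
#   facade = SongFacade()
#   song = facade.create_song("Kashmir", "Led Zeppelin", "Rock")
#   facade.list_songs()
#   facade.delete_song(song["id"])   # assuming the returned dict carries an "id" key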
|
from datahub.dataset.core.pagination import DatasetCursorPagination
class TeamsDatasetViewCursorPagination(DatasetCursorPagination):
"""
Cursor Pagination for TeamsDatasetView
"""
ordering = ('id',)
|
# Fixed options
_algorithms = ('Adaptive', 'Non-adaptive')
_weighFuncs = ('Gaussian', 'Linear', 'JSD')
_directions = ('Forward', 'Backward')
_boostMethods = ('Sum similarity', 'Counts')
_yesNo = ('Yes', 'No')
def validatestr(value):
'''Validate that given value is a non-empty string. Used to validate
tracker parameters.'''
try:
s = str(value)
return None if (len(s) == 0) else s
except:
raise ValueError
def _isValidOption(value, options):
'''Validate that given value is a string from a predetermined set.
Used to validate tracker parameters.'''
if value in options:
return value
else:
raise ValueError
def validAlgorithm(value):
'''Validate algorithm -- in lower case'''
return _isValidOption(value, _algorithms).lower()
def validWeighting(value):
'''Validate weighting function'''
return _isValidOption(value, _weighFuncs)
def validDirection(value):
'''Validate direction is Forward (false means backward)'''
return _isValidOption(value, _directions) == 'Forward'
def sumSimilarity(value):
'''Validate boost methods is Sum distances (false means Counts)'''
return _isValidOption(value, _boostMethods) == 'Sum similarity'
def validCleaning(value):
return _isValidOption(value, _yesNo) == 'Yes'
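if __name__ == '__main__':
    # A quick self-check of the validators above (values taken from the option tuples).
    assert validatestr('') is None
    assert validatestr('my tracker') == 'my tracker'
    assert validAlgorithm('Adaptive') == 'adaptive'
    assert validWeighting('Gaussian') == 'Gaussian'
    assert validDirection('Backward') is False
    assert sumSimilarity('Counts') is False
    assert validCleaning('Yes') is True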
|
import time
maps = {
'costs': {
")": 3,
"]": 57,
"}": 1197,
">": 25137,
},
'autocomplete_costs': {
')': 1,
']': 2,
'}': 3,
'>': 4,
},
'opening': {
"(": ")",
"[": "]",
"{": "}",
"<": ">",
},
'closing': {
")": "(",
"]": "[",
"}": "{",
">": "<",
}
}
def findCorruptedInLine(line):
stack = []
for idx, i in enumerate(line):
if i in maps['closing']:
expecting = maps["closing"][i]
curr = stack.pop()
if expecting != curr:
print(f"Invalid token \033[38;2;209;62;29m'{curr}'\033[0m expected \033[38;2;209;62;29m'{expecting}'\033[0m ")
return maps["costs"][i], []
else:
stack.append(i)
return 0, [maps['opening'][i] for i in stack[::-1]]
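# For example: findCorruptedInLine("(]") prints the mismatch and returns (57, []),
# while findCorruptedInLine("[({") returns (0, ['}', ')', ']']), the closing
# characters needed to complete the line, innermost first.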
def part1(inputData):
points = 0
for idx, instruction in enumerate(inputData):
points += findCorruptedInLine(instruction)[0]
return points
def part2(inputData):
points = []
incompleteLines = [j[1] for i in inputData if (j := findCorruptedInLine(i))[0] == 0]
for idx, instruction in enumerate(incompleteLines):
point = 0
for char in instruction:
point = point * 5 + maps["autocomplete_costs"][char]
points.append(point)
points.sort()
return points[len(points)//2]
if __name__ == '__main__':
with open("input.txt") as file: data = list(file.read().splitlines())
start_time = time.time_ns()
print('\033[38;2;60;179;113mDay10 Part 1: {} \033[0m'.format(part1(data)))
print('\033[38;2;60;179;113mDay10 Part 2: {} \033[0m'.format(part2(data)))
end_time = time.time_ns()
print(f'\033[38;2;60;179;113mDay10: {(end_time - start_time)/1000000} ms \033[0m')
|
# The MIT License (MIT)
#
# Copyright (c) 2019 Dan Halbert for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
:py:mod:`~adafruit_ble.services.standard.hid`
=======================================================
BLE Human Interface Device (HID)
* Author(s): Dan Halbert for Adafruit Industries
"""
import struct
from micropython import const
import _bleio
from adafruit_ble.characteristics import Attribute
from adafruit_ble.characteristics import Characteristic
from adafruit_ble.characteristics.int import Uint8Characteristic
from adafruit_ble.uuid import StandardUUID
from .. import Service
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BLE.git"
_HID_SERVICE_UUID_NUM = const(0x1812)
_REPORT_UUID_NUM = const(0x2A4D)
_REPORT_MAP_UUID_NUM = const(0x2A4B)
_HID_INFORMATION_UUID_NUM = const(0x2A4A)
_HID_CONTROL_POINT_UUID_NUM = const(0x2A4C)
_REPORT_REF_DESCR_UUID_NUM = const(0x2908)
_REPORT_REF_DESCR_UUID = _bleio.UUID(_REPORT_REF_DESCR_UUID_NUM)
_PROTOCOL_MODE_UUID_NUM = const(0x2A4E)
_APPEARANCE_HID_KEYBOARD = const(961)
_APPEARANCE_HID_MOUSE = const(962)
_APPEARANCE_HID_JOYSTICK = const(963)
_APPEARANCE_HID_GAMEPAD = const(964)
# Boot keyboard and mouse not currently supported.
_BOOT_KEYBOARD_INPUT_REPORT_UUID_NUM = const(0x2A22)
_BOOT_KEYBOARD_OUTPUT_REPORT_UUID_NUM = const(0x2A32)
_BOOT_MOUSE_INPUT_REPORT_UUID_NUM = const(0x2A33)
# Output reports not currently implemented (e.g. LEDs on keyboard)
_REPORT_TYPE_INPUT = const(1)
_REPORT_TYPE_OUTPUT = const(2)
# Boot Protocol mode not currently implemented
_PROTOCOL_MODE_BOOT = b'\x00'
_PROTOCOL_MODE_REPORT = b'\x01'
class ReportIn:
"""A single HID report that transmits HID data into a client."""
uuid = StandardUUID(_REPORT_UUID_NUM)
def __init__(self, service, report_id, usage_page, usage, *, max_length):
self._characteristic = _bleio.Characteristic.add_to_service(
service.bleio_service,
self.uuid.bleio_uuid,
properties=Characteristic.READ | Characteristic.NOTIFY,
read_perm=Attribute.ENCRYPT_NO_MITM, write_perm=Attribute.NO_ACCESS,
max_length=max_length, fixed_length=True)
self._report_id = report_id
self.usage_page = usage_page
self.usage = usage
_bleio.Descriptor.add_to_characteristic(
self._characteristic, _REPORT_REF_DESCR_UUID,
read_perm=Attribute.ENCRYPT_NO_MITM, write_perm=Attribute.NO_ACCESS,
initial_value=struct.pack('<BB', self._report_id, _REPORT_TYPE_INPUT))
def send_report(self, report):
"""Send a report to the peers"""
self._characteristic.value = report
class ReportOut:
"""A single HID report that receives HID data from a client."""
# pylint: disable=too-few-public-methods
uuid = StandardUUID(_REPORT_UUID_NUM)
def __init__(self, service, report_id, usage_page, usage, *, max_length):
self._characteristic = _bleio.Characteristic.add_to_service(
service.bleio_service,
self.uuid.bleio_uuid,
max_length=max_length,
fixed_length=True,
properties=(Characteristic.READ | Characteristic.WRITE |
Characteristic.WRITE_NO_RESPONSE),
read_perm=Attribute.ENCRYPT_NO_MITM, write_perm=Attribute.ENCRYPT_NO_MITM
)
self._report_id = report_id
self.usage_page = usage_page
self.usage = usage
_bleio.Descriptor.add_to_characteristic(
self._characteristic, _REPORT_REF_DESCR_UUID,
read_perm=Attribute.ENCRYPT_NO_MITM, write_perm=Attribute.NO_ACCESS,
initial_value=struct.pack('<BB', self._report_id, _REPORT_TYPE_OUTPUT))
_ITEM_TYPE_MAIN = const(0)
_ITEM_TYPE_GLOBAL = const(1)
_ITEM_TYPE_LOCAL = const(2)
_MAIN_ITEM_TAG_START_COLLECTION = const(0b1010)
_MAIN_ITEM_TAG_END_COLLECTION = const(0b1100)
_MAIN_ITEM_TAG_INPUT = const(0b1000)
_MAIN_ITEM_TAG_OUTPUT = const(0b1001)
_MAIN_ITEM_TAG_FEATURE = const(0b1011)
class HIDService(Service):
"""
Provide devices for HID over BLE.
:param str hid_descriptor: USB HID descriptor that describes the structure of the reports. Known
as the report map in BLE HID.
Example::
from adafruit_ble.hid_server import HIDServer
hid = HIDServer()
"""
uuid = StandardUUID(0x1812)
boot_keyboard_in = Characteristic(uuid=StandardUUID(0x2A22),
properties=(Characteristic.READ |
Characteristic.NOTIFY),
read_perm=Attribute.ENCRYPT_NO_MITM,
write_perm=Attribute.NO_ACCESS,
max_length=8, fixed_length=True)
boot_keyboard_out = Characteristic(uuid=StandardUUID(0x2A32),
properties=(Characteristic.READ |
Characteristic.WRITE |
Characteristic.WRITE_NO_RESPONSE),
read_perm=Attribute.ENCRYPT_NO_MITM,
write_perm=Attribute.ENCRYPT_NO_MITM,
max_length=1, fixed_length=True)
protocol_mode = Uint8Characteristic(uuid=StandardUUID(0x2A4E),
properties=(Characteristic.READ |
Characteristic.WRITE_NO_RESPONSE),
read_perm=Attribute.OPEN,
write_perm=Attribute.OPEN,
initial_value=1, max_value=1)
"""Protocol mode: boot (0) or report (1)"""
# bcdHID (version), bCountryCode (0 not localized), Flags: RemoteWake, NormallyConnectable
# bcd1.1, country = 0, flag = normal connect
# TODO: Make this a struct.
hid_information = Characteristic(uuid=StandardUUID(0x2A4A),
properties=Characteristic.READ,
read_perm=Attribute.ENCRYPT_NO_MITM,
write_perm=Attribute.NO_ACCESS,
initial_value=b'\x01\x01\x00\x02')
"""Hid information including version, country code and flags."""
report_map = Characteristic(uuid=StandardUUID(0x2A4B),
properties=Characteristic.READ,
read_perm=Attribute.ENCRYPT_NO_MITM,
write_perm=Attribute.NO_ACCESS,
fixed_length=True)
"""This is the USB HID descriptor (not to be confused with a BLE Descriptor). It describes
which report characteristic are what."""
suspended = Uint8Characteristic(uuid=StandardUUID(0x2A4C),
properties=Characteristic.WRITE_NO_RESPONSE,
read_perm=Attribute.NO_ACCESS,
write_perm=Attribute.ENCRYPT_NO_MITM,
max_value=1)
"""Controls whether the device should be suspended (0) or not (1)."""
def __init__(self, hid_descriptor=None, service=None):
super().__init__(report_map=hid_descriptor)
if service:
# TODO: Add support for connecting to a remote hid server.
pass
self._init_devices()
def _init_devices(self):
# pylint: disable=too-many-branches,too-many-statements,too-many-locals
self.devices = []
hid_descriptor = self.report_map
global_table = [None] * 10
local_table = [None] * 3
collections = []
top_level_collections = []
i = 0
while i < len(hid_descriptor):
b = hid_descriptor[i]
tag = (b & 0xf0) >> 4
_type = (b & 0b1100) >> 2
size = b & 0b11
size = 4 if size == 3 else size
i += 1
data = hid_descriptor[i:i+size]
if _type == _ITEM_TYPE_GLOBAL:
global_table[tag] = data
elif _type == _ITEM_TYPE_MAIN:
if tag == _MAIN_ITEM_TAG_START_COLLECTION:
collections.append({"type": data,
"locals": list(local_table),
"globals": list(global_table),
"mains": []})
elif tag == _MAIN_ITEM_TAG_END_COLLECTION:
collection = collections.pop()
# This is a top level collection if the collections list is now empty.
if not collections:
top_level_collections.append(collection)
else:
collections[-1]["mains"].append(collection)
elif tag == _MAIN_ITEM_TAG_INPUT:
collections[-1]["mains"].append({"tag": "input",
"locals": list(local_table),
"globals": list(global_table)})
elif tag == _MAIN_ITEM_TAG_OUTPUT:
collections[-1]["mains"].append({"tag": "output",
"locals": list(local_table),
"globals": list(global_table)})
else:
raise RuntimeError("Unsupported main item in HID descriptor")
local_table = [None] * 3
else:
local_table[tag] = data
i += size
def get_report_info(collection, reports):
""" Gets info about hid reports """
for main in collection["mains"]:
if "type" in main:
get_report_info(main, reports)
else:
report_size, report_id, report_count = [x[0] for x in main["globals"][7:10]]
if report_id not in reports:
reports[report_id] = {"input_size": 0, "output_size": 0}
if main["tag"] == "input":
reports[report_id]["input_size"] += report_size * report_count
elif main["tag"] == "output":
reports[report_id]["output_size"] += report_size * report_count
for collection in top_level_collections:
if collection["type"][0] != 1:
raise NotImplementedError("Only Application top level collections supported.")
usage_page = collection["globals"][0][0]
usage = collection["locals"][0][0]
reports = {}
get_report_info(collection, reports)
if len(reports) > 1:
raise NotImplementedError("Only one report id per Application collection supported")
report_id, report = list(reports.items())[0]
output_size = report["output_size"]
if output_size > 0:
self.devices.append(ReportOut(self, report_id, usage_page, usage,
max_length=output_size // 8))
input_size = reports[report_id]["input_size"]
if input_size > 0:
self.devices.append(ReportIn(self, report_id, usage_page, usage,
max_length=input_size // 8))
|
import setuptools
import os
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name="cs-kit",
version=os.environ.get("VERSION", "0.0.0"),
author="Hank Doupe",
author_email="henrymdoupe@gmail.com",
description=("Developer tools for compute.studio."),
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/compute-studio-org/Compute-Studio-Toolkit",
packages=setuptools.find_packages(),
install_requires=["paramtools", "cs-storage", "fsspec", "pyyaml"],
include_package_data=True,
entry_points={
"console_scripts": [
"csk-init=cs_kit.cli:init",
"csk-token=cs_kit.cli:cs_token",
"csk=cs_kit.cli:cli",
]
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
#!/usr/bin/env python3
import json
import logging
import sys
import click
from habu.lib.host import gather_details
@click.command()
@click.option('-v', 'verbose', is_flag=True, default=False, help='Verbose output.')
def cmd_host(verbose):
"""
Collect information about the host where habu is running.
Example:
\b
$ habu.host
{
"kernel": [
"Linux",
"demo123",
"5.0.6-200.fc29.x86_64",
"#1 SMP Wed Apr 3 15:09:51 UTC 2019",
"x86_64",
"x86_64"
],
"distribution": [
"Fedora",
"29",
"Twenty Nine"
],
"libc": [
"glibc",
"2.2.5"
],
"arch": "x86_64",
"python_version": "3.7.3",
"os_name": "Linux",
"cpu": "x86_64",
"static_hostname": "demo123",
"fqdn": "demo123.lab.sierra"
}
"""
if verbose:
logging.basicConfig(level=logging.INFO, format='%(message)s')
print("Gather information about the host...", file=sys.stderr)
result = gather_details()
if result:
print(json.dumps(result, indent=4))
else:
print("[X] Unable to gather information")
return True
if __name__ == '__main__':
cmd_host()
|
import glob
def get_patient_paths(data_path):
patient_paths = glob.glob(data_path + "/*")
return patient_paths
def get_input_paths(patient_path):
input_paths = glob.glob(patient_path + "/*CT*", recursive=True)
return input_paths
def get_label_paths(patient_path):
label_paths = glob.glob(patient_path + "/*RS*", recursive=True)
return label_paths
def flatten_list(top_list):
return [item for sublist in top_list for item in sublist]
|
from Functions.General_Functions import isNaN, getNContext, pad_array, candidateClsf, filterNumbers, filterNumbers_MaintainDistances
from Method_Handler import MethodHandler
import nltk
import math
import sklearn as sk
# explicit submodule imports so that sk.utils, sk.feature_extraction.text and sk.svm
# used below resolve even when sklearn does not import them as a side effect
import sklearn.utils
import sklearn.feature_extraction.text
import sklearn.svm
import numpy as np
import pandas as pd
import statistics as stats
import os
from tempfile import mkdtemp
from joblib import load, dump
pd.options.mode.chained_assignment = None # default='warn'
def preprocessBy(method='CV-TFIDF', *args):
return preprocessHandler.execMethod(method, None, *args)
def classifyWith(method='CV-SVM', *args):
return classifHandler.execMethod(method, None, *args)
def CV_TFIDF(data, k):
test_list, train_list = kSplit(data, k)
pre_test = []
pre_train = []
IDF_list = []
for i,t in enumerate(train_list):
IDF = getIDF(t)
if not os.path.isdir('Models'):
os.mkdir('Models')
path = os.path.abspath('Models')
filename = os.path.join(path, 'IDFv6') + '_' + str(i) + '.joblib'
dump(IDF, filename, compress=1)
IDF_list.append(IDF)
pre_train.append(transformSet(t, IDF))
pre_test.append(transformSet(test_list[i], IDF))
return IDF_list, pre_train, pre_test
def kSplit(data, k):
data = sk.utils.shuffle(data) #randomly shuffled
data = data.reset_index(drop=True)
test = []
train = []
l = len(data)
n = math.floor(l/k)
r = l % k
index = 0
n += 1
for i in range(k):
if r == 0:
r = -1
n -= 1
test_split = data.loc[index:(index+n-1),:]
test_split = test_split.reset_index(drop=True)
train_split = data.loc[:index-1,:]
train_split = train_split.append(data.loc[(index+n):,:], ignore_index=True)
train_split = train_split.reset_index(drop=True)
index += n
if r > 0: r -= 1
test.append(test_split)
train.append(train_split)
return test, train
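# A quick sketch of how kSplit distributes rows (illustrative numbers): with
# len(data) == 10 and k == 3, n = floor(10/3) = 3 and r = 1, so the first fold gets
# n + 1 = 4 rows and the remaining two folds get 3 rows each; every row lands in
# exactly one test split and in the other k - 1 train splits.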
def filterDotEmpty(token):
return False if token in ['.', '','(',')','<','>',',',':',';','!','?','[',']','{','}','-','/','\\'] else True
def splitToken(tk, splitList):
stk = tk
isFraction = False
for symbol in splitList:
if symbol in stk:
stk = stk.split(symbol)[1]
if isNaN(stk): stk = tk
elif '/' in tk: isFraction = True
return stk, isFraction
def text2int(textnum, numwords={}):
if not numwords:
units = [
"zero", "one", "two", "three", "four", "five", "six", "seven", "eight",
"nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
"sixteen", "seventeen", "eighteen", "nineteen",
]
tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
scales = ["hundred", "thousand", "million", "billion", "trillion"]
numwords["and"] = (1, 0)
for idx, word in enumerate(units): numwords[word] = (1, idx)
for idx, word in enumerate(tens): numwords[word] = (1, idx * 10)
for idx, word in enumerate(scales): numwords[word] = (10 ** (idx * 3 or 2), 0)
# ordinal_words = {'first':1, 'second':2, 'third':3, 'fifth':5, 'eighth':8, 'ninth':9, 'twelfth':12}
# ordinal_endings = [('ieth', 'y'), ('th', '')]
textnum = textnum.replace('-', ' ')
current = result = 0
curstring = ""
onnumber = False
for i,word in enumerate(textnum.split()):
# if word in ordinal_words:
# scale, increment = (1, ordinal_words[word])
# current = current * scale + increment
# if scale > 100:
# result += current
# current = 0
# onnumber = True
# else:
# for ending, replacement in ordinal_endings:
# if word.endswith(ending):
# word = "%s%s" % (word[:-len(ending)], replacement)
if word not in numwords:
if onnumber:
if result == current == 0 and textnum.split()[i-1] == 'and':
curstring += "and "
else:
curstring += repr(result + current) + " "
curstring += word + " "
result = current = 0
onnumber = False
else:
scale, increment = numwords[word]
current = current * scale + increment
if scale > 100:
result += current
current = 0
onnumber = True
if onnumber:
curstring += repr(result + current)
return curstring
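# Hedged usage sketch for text2int (the example sentence is invented for illustration):
#   text2int("twenty one patients received two doses")
# returns approximately "21 patients received 2 doses" - runs of number words are
# collapsed into digits and all other words are passed through unchanged.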
def get_wordnet_pos(word):
"""Map POS tag to first character lemmatize() accepts"""
tag = nltk.pos_tag([word])[0][1][0].upper()
tag_dict = {"J": nltk.corpus.wordnet.ADJ,
"N": nltk.corpus.wordnet.NOUN,
"V": nltk.corpus.wordnet.VERB,
"R": nltk.corpus.wordnet.ADV}
return tag_dict.get(tag, nltk.corpus.wordnet.NOUN)
def generateNumbersDataFrame(data):
abstractList = data['Abstract'].to_list()
for i, abstract in enumerate(abstractList):
new_sentence = []
N_list = []
N_sentences_list = []
N_close_words_list = []
N_close_words_distances_list = []
N_isFraction_list = []
abstract = text2int(abstract)
sentences = nltk.sent_tokenize(abstract)
for sentence in sentences:
#tokenize
tokens = nltk.word_tokenize(sentence)
#lemmatize
lemmatizer = nltk.stem.WordNetLemmatizer()
lemma_tokens = [lemmatizer.lemmatize(w, get_wordnet_pos(w)) for w in tokens]
lemma_tokens, isFraction_list = filterTokens(lemma_tokens)
#pos tagging
pos_tokens = nltk.pos_tag(lemma_tokens)
#chuncking
grammar = r"""
NP: # NP stage
{(<DT>*)?(<CD>)*?(<RB>)?(<VBP>*)?(<JJ.*>*)?<NN.*>*(<VBP>*)?(<JJ.*>*)?(<NN.*>*)?}
VP:
{(<MD>)?(<TO>)?<VB.*>*(<RP>)?}
"""
chunk_parser = nltk.RegexpParser(grammar)
sentence_tree = chunk_parser.parse(pos_tokens)
#sentence_tree.draw()
for j,token in enumerate(pos_tokens):
if not isNaN(token[0]) and "." not in token[0] and float(token[0]) > 2: #token must be an integer number
N_list.append(token[0])
N_sentences_list.append(filterNumbers(lemma_tokens))
#words, distances = getNContext(tokens, j, 3) method 1
words, distances = getNPfromNumber(sentence_tree, token[0], j) #method 2
N_close_words_list.append(words)
N_close_words_distances_list.append(distances)
N_isFraction_list.append(isFraction_list[j])
partial_data = pd.DataFrame(data={'PMID': [data['PMID'][i]]*len(N_list), 'N': N_list, 'N sentence words': N_sentences_list, 'N close words': N_close_words_list, 'N close words distances': N_close_words_distances_list, 'Is fraction': N_isFraction_list})
full_data = full_data.append(partial_data, ignore_index=True, sort=True) if 'full_data' in locals() else partial_data
full_data = candidateClsf(full_data, data)
return full_data
def getNPfromNumber(sent_tree, number, index):
a = q = -1
b = []
for tree in sent_tree:
if isinstance(tree, tuple):
tree = [tree]
for word in tree:
a += 1
if a == index:
b = [w[0] for w in tree]
c = [abs(index-q+i) for i,w in enumerate(tree)]
return filterNumbers_MaintainDistances(b,c)
        q = a + 1  # index where the next sub-tree starts
    return [], []  # keep the (words, distances) shape expected by the caller
def filterTokens(tokens):
new_sentence = []
isFraction_list = []
tokens = list(filter(filterDotEmpty, tokens))
for token in tokens:
token = token.replace(",", "") #Take out commas from numbers (english format)
token, isFraction = splitToken(token, ['/', '-'])
new_sentence.append(token)
isFraction_list.append(isFraction)
return new_sentence, isFraction_list
def getIDF(train_set):
#vocab = getVocab(train_set['N close words'])
vect = sk.feature_extraction.text.TfidfVectorizer(max_features=None, use_idf=True, vocabulary=None, min_df=0.01)
words = []
for i, doc in enumerate(train_set['N sentence words']):
if train_set['Is Candidate'][i] == 1:
words.append(' '.join(train_set['N close words'][i]))
IDF = vect.fit(words)
return IDF
def getVocab(column):
vocab = []
for word_bag in column:
for word in word_bag:
if word not in vocab and isNaN(word):
vocab.append(word)
return vocab
def transformSet(dataset, IDF):
close_words = []
for word_list in dataset['N close words']:
close_words.append(' '.join(word_list))
X = IDF.transform(close_words)
weights = []
means = []
for row in X:
weights.append(row.toarray())
means.append(stats.mean(row.toarray()[0]))
IDF_words = IDF.vocabulary_.keys()
dataset['N close words weights'] = weights
dataset['Weight means'] = means
dataset = createWordColumns(dataset, IDF_words, True)
return dataset
def createWordColumns(df, words, use_distances=False):
for i in df.index:
l = len(df.index)
weights = df['N close words weights'].iloc[i][0]
distances = df['N close words distances'].iloc[i]
for j, w in enumerate(words):
if w not in df.columns:
df[w] = pd.to_numeric([0.0]*l, downcast='float')
                df[str(w + ' dist')] = pd.to_numeric([50.0]*l, downcast='integer')  # 50 is used as a "very large" default distance
if w in df.columns:
df[w][i] = weights[j]
if w in df['N close words'].iloc[i]:
windex = df['N close words'].iloc[i].index(w)
df[str(w + ' dist')][i] = distances[windex]
return df
def SVM(train, test, index, c, kernel, gamma, prob):
classifier = sk.svm.SVC(C=c,kernel=kernel, gamma=gamma, class_weight='balanced', probability=prob)
train_set = train.iloc[:,8:]
train_set['Is fraction'] = train['Is fraction']
test_set = test.iloc[:,8:]
test_set['Is fraction'] = test['Is fraction']
classifier.fit(np.matrix(train_set), train['Is Candidate'])
class_results = classifier.predict(np.matrix(test_set))
class_prob = classifier.predict_proba(np.matrix(test_set))
if not os.path.isdir('Models'):
os.mkdir('Models')
path = os.path.abspath('Models')
filename = os.path.join(path, 'SVMv8')
if index != None and isinstance(index, int):
dump(classifier, filename + '_' + str(index) + '.joblib', compress=1)
else:
dump(classifier, filename + '.joblib', compress=1)
return class_results, class_prob
def CV_SVM(train_list, test_list):
true_class = []
predicted_class = []
true_class_probs = []
for i, t in enumerate(train_list):
predictions, probs = classifyWith('SVM', t, test_list[i], i, 2, 'rbf', 'scale', True)
true_class.extend(test_list[i]['Is Candidate'])
predicted_class.extend(predictions)
true_class_probs.extend(probs[:,1])
return true_class, predicted_class, true_class_probs
def NN(train, test, index, *args):
return 0
def CV_NN(train_list, test_list):
true_class = []
predicted_class = []
true_class_probs = []
for i, t in enumerate(train_list):
a = 0
return true_class, predicted_class, true_class_probs
#PREPROCESS METHODS
preProcessMethods = {
"CV-TFIDF": CV_TFIDF
}
classifMethodDicc = {
"SVM": SVM,
"CV-SVM": CV_SVM,
"NN": NN,
"CV-NN": CV_NN
}
preprocessHandler = MethodHandler(preProcessMethods)
classifHandler = MethodHandler(classifMethodDicc)
|
"""
A Deep Q-learning agent implementation for the NetworkAttackSimulator
Uses Experience Replay and a separate Target Network along with the main DQN.
"""
from network_attack_simulator.agents.agent import Agent
import random
import numpy as np
import time
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import RMSprop
from keras import backend as K
import tensorflow as tf
def huber_loss(y_true, y_pred, loss_delta=1.0):
err = y_true - y_pred
cond = K.abs(err) < loss_delta
L2 = 0.5 * K.square(err)
L1 = loss_delta * (K.abs(err) - 0.5 * loss_delta)
loss = tf.where(cond, L2, L1)
output = K.mean(loss)
return output
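# The function above is the standard Huber loss: for error e and delta d it uses
# 0.5 * e**2 when |e| < d and d * (|e| - 0.5 * d) otherwise, then averages over the
# batch. For example, with loss_delta = 1.0 an error of 0.5 contributes 0.125 and an
# error of 3.0 contributes 1.0 * (3.0 - 0.5) = 2.5.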
class Brain:
""" Fully connected single-layer neural network"""
def __init__(self, state_size, num_actions, hidden_units, learning_rate):
self.state_size = state_size
self.num_actions = num_actions
self.hidden_units = hidden_units
self.learning_rate = learning_rate
self.model = self._create_model()
self.target_model = self._create_model()
def _create_model(self):
model = Sequential()
model.add(Dense(self.hidden_units, activation='relu', input_dim=self.state_size))
model.add(Dense(self.num_actions, activation='linear'))
opt = RMSprop(lr=self.learning_rate)
model.compile(loss=huber_loss, optimizer=opt)
return model
def train(self, x, y, batch_size=64, epoch=1, verbose=0):
self.model.fit(x, y, batch_size=batch_size, epochs=epoch, verbose=verbose)
def predict(self, s, target=False):
if target:
return self.target_model.predict(s)
return self.model.predict(s)
def predictOne(self, s, target=False):
return self.predict(s.reshape(1, self.state_size), target=target).flatten()
def update_target_model(self):
self.target_model.set_weights(self.model.get_weights())
def reset(self):
self.model = self._create_model()
self.target_model = self._create_model()
class Memory:
"""Replay buffer"""
def __init__(self, capacity):
self.capacity = capacity
self.samples = []
self.reset()
def add(self, sample):
self.samples.append(sample)
if len(self.samples) > self.capacity:
self.samples.pop(0)
def sample(self, n):
n = min(n, len(self.samples))
return random.sample(self.samples, n)
def reset(self):
self.samples = []
class DQNAgent(Agent):
def __init__(self, state_size, num_actions, hidden_units=256,
gamma=0.99,
min_epsilon=0.05,
max_epsilon=1.0,
epsilon_decay_lambda=0.0001,
learning_rate=0.00025,
memory_capacity=10000,
batch_size=32,
update_target_freq=1000):
"""
Initialize a new Deep Q-network agent
Parameters:
int state_size : size of a state (i.e. input into network)
int num_actions : size of action space (i.e. output of network)
int hidden_units : number of hidden units of hidden Q-network layer
float gamma : discount
float min_epsilon : minimum exploration probability
float max_epsilon : maximum exploration probability
float epsilon_decay_lambda : lambda for exponential epsilon decay
float learning_rate : RMSprop learning rate
int memory_capacity : capacity of replay buffer
int batch_size : Q-network training batch size
int update_target_freq : target model update frequency in terms of number of steps
"""
self.state_size = state_size
self.num_actions = num_actions
self.gamma = gamma
self.min_epsilon = min_epsilon
self.max_epsilon = max_epsilon
self.epsilon_decay_lambda = epsilon_decay_lambda
self.epsilon = max_epsilon
self.batch_size = batch_size
self.update_target_freq = update_target_freq
self.brain = Brain(state_size, num_actions, hidden_units, learning_rate)
self.memory = Memory(memory_capacity)
self.steps = 0
def train(self, env, num_episodes=100, max_steps=100, timeout=None, verbose=False, **kwargs):
if "visualize_policy" in kwargs:
visualize_policy = kwargs["visualize_policy"]
else:
visualize_policy = 0
self.print_message("Starting training for {} episodes".format(num_episodes), verbose)
# stores timesteps and rewards for each episode
episode_timesteps = []
episode_rewards = []
episode_times = []
training_start_time = time.time()
steps_since_update = 0
reporting_window = min(num_episodes / 10, 100)
for e in range(num_episodes):
start_time = time.time()
timesteps, reward = self._run_episode(env, max_steps)
ep_time = time.time() - start_time
episode_rewards.append(reward)
episode_timesteps.append(timesteps)
episode_times.append(ep_time)
self.epsilon = self.epsilon_decay()
steps_since_update += timesteps
if steps_since_update > self.update_target_freq:
self.brain.update_target_model()
steps_since_update = 0
self.report_progress(e, reporting_window, episode_timesteps, verbose)
if e > 0 and visualize_policy != 0 and e % visualize_policy == 0:
gen_episode = self.generate_episode(env, max_steps)
self.print_message("Visualizing current policy. Episode length = {0}"
.format(len(gen_episode)), verbose)
env.render_episode(gen_episode)
if timeout is not None and time.time() - training_start_time > timeout:
self.print_message("Timed out after {} sec on episode {:.2f}".format(timeout, e),
verbose)
break
total_training_time = time.time() - training_start_time
self.print_message("Training complete after {} episodes and {:.2f} sec"
.format(e, total_training_time), verbose)
return episode_timesteps, episode_rewards, episode_times
def reset(self):
self.brain.reset()
self.memory.reset()
self.epsilon = self.max_epsilon
self.steps = 0
def _run_episode(self, env, max_steps):
"""
Train the agent for a single episode using Q-learning algorithm
"""
a_space = env.action_space
s = self._process_state(env.reset())
ep_reward = 0
ep_timesteps = 0
for _ in range(max_steps):
# interact with environment
a = self.act(s)
ns, r, done = env.step(a_space[a])
ns = self._process_state(ns)
if done:
ns = None
# train agent
self.observe((s, a, r, ns))
self.replay()
s = ns
ep_reward += r
ep_timesteps += 1
self.steps += 1
if done:
break
return ep_timesteps, ep_reward
def _process_state(self, s):
""" Convert state into format that can be handled by NN"""
return s.flatten()
def act(self, s):
""" Choose action using epsilon greedy action selection """
if random.random() < self.epsilon:
return random.randint(0, self.num_actions-1)
else:
return np.argmax(self.brain.predictOne(s))
def _choose_greedy_action(self, state, action_space, epsilon=0.05):
if random.random() < epsilon:
return random.randint(0, self.num_actions-1)
return np.argmax(self.brain.predictOne(self._process_state(state)))
def observe(self, sample):
""" Add an observation to replay memory and also adjust epsilon """
self.memory.add(sample)
def epsilon_decay(self):
""" Decay the epsilon value based on episode number """
# exponential decay by steps
diff = self.max_epsilon - self.min_epsilon
temp = diff * math.exp(-self.epsilon_decay_lambda * self.steps)
return self.min_epsilon + temp
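    # Worked example of the decay above, using this class's default hyperparameters:
    # with min_epsilon=0.05, max_epsilon=1.0 and epsilon_decay_lambda=0.0001, epsilon
    # is 1.0 at step 0 and roughly 0.05 + 0.95 * e**-1 ~= 0.40 after 10,000 steps.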
def replay(self):
""" Perform parameter updates """
# each sample is observation = (s, a, r, ns) tuple
batch = self.memory.sample(self.batch_size)
batch_len = len(batch)
no_state = np.zeros(self.state_size)
states = np.array([o[0] for o in batch])
next_states = np.array([(no_state if o[3] is None else o[3]) for o in batch])
p = self.brain.predict(states)
next_p = self.brain.predict(next_states, target=True)
x = np.zeros((batch_len, self.state_size))
y = np.zeros((batch_len, self.num_actions))
for i in range(batch_len):
o = batch[i]
s, a, r, ns = o
# set target for given s, a, r, ns observation
t = p[i]
if ns is None:
# terminal state
t[a] = r
else:
t[a] = r + self.gamma * np.amax(next_p[i])
x[i] = s
y[i] = t
self.brain.train(x, y, self.batch_size)
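    # The targets built above follow the standard DQN update: for a terminal
    # transition the target for the taken action is just r, otherwise it is
    # r + gamma * max_a' Q_target(next_state, a'), where Q_target comes from the
    # periodically synced target network (predict(..., target=True)).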
def print_message(self, message, verbose):
if verbose:
print("DQN Agent: {}".format(message))
def __str__(self):
return "DQNAgent"
|
"""Back-end code to support the Prereg Challenge initiative
Keeping the code together in this file should make it easier to remove the
features added to the GakuNin RDM specifically to support this initiative in the future.
Other resources that are a part of the Prereg Challenge:
* website/static/js/pages/prereg-landing-page.js
* website/static/css/prereg.css
"""
from flask import request
from framework.auth import decorators
from framework.auth.core import Auth
from framework.utils import iso8601format
from website.prereg import utils
def prereg_landing_page(**kwargs):
"""Landing page for the prereg challenge"""
auth = kwargs['auth'] = Auth.from_kwargs(request.args.to_dict(), kwargs)
is_logged_in = kwargs['auth'].logged_in
campaign = request.path.strip('/') or 'prereg'
if is_logged_in:
registerable_nodes = [
node for node
in auth.user.contributor_to
if node.has_permission(user=auth.user, permission='admin')
]
has_projects = bool(registerable_nodes)
has_draft_registrations = bool(utils.drafts_for_user(auth.user, campaign).count())
else:
has_projects = False
has_draft_registrations = False
return {
'is_logged_in': is_logged_in,
'has_draft_registrations': has_draft_registrations,
'has_projects': has_projects,
'campaign_long': utils.PREREG_CAMPAIGNS[campaign],
'campaign_short': campaign
}
@decorators.must_be_logged_in
def prereg_draft_registrations(auth, **kwargs):
"""API endpoint; returns prereg draft registrations the user can resume"""
campaign = kwargs.get('campaign', 'prereg')
drafts = utils.drafts_for_user(auth.user, campaign)
return {
'draftRegistrations': [
{
'dateUpdated': iso8601format(draft.datetime_updated),
'dateInitiated': iso8601format(draft.datetime_initiated),
'node': {
'title': draft.branched_from.title,
},
'initiator': {
'name': draft.initiator.fullname,
},
'url': draft.branched_from.web_url_for(
'edit_draft_registration_page',
draft_id=draft._id,
),
}
for draft in drafts
],
}
|
# -*- coding: utf-8 -*-
# Copyright 2015 Pietro Brunetti <pietro.brunetti@itb.cnr.it>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Selecting peptides using IF treshold values"""
__authors__ = "Pietro Brunetti"
import os
import wx
import wx.lib.agw.floatspin as FS
import DialogCommons
class Dialog(wx.Dialog):
""" Dialog to set the threshold for selection"""
def __init__(
self, parent, ID, title, size=wx.DefaultSize,
pos=wx.DefaultPosition,
style=wx.DEFAULT_DIALOG_STYLE):
sizer = DialogCommons.createMainSizer(self, parent, ID,
title, pos, size, style)
self._createSelBox(sizer)
self._createPlotBox(sizer)
DialogCommons.createBtnSizer(self, sizer,
"The OK button to start the Selection")
self.SetSizer(sizer)
sizer.Fit(self)
def _createSelBox(self, sizer):
# setting default values
"""
Create a box that allows user to choose
the select parameters
"""
self.protThr = 0.3
self.peptThr = 0.5
self._createSpinner(sizer, "Protein IF threshold ", self.protThr,
"Protein identification frequency threshold",
self.OnProtSpin)
self._createSpinner(sizer, "Peptide IF threshold ", self.peptThr,
"Peptide identification frequency threshold",
self.OnPeptSpin)
def _createSpinner(self, sizer, title, defaultValue, helptxt, fun):
p_sizer = wx.BoxSizer(wx.HORIZONTAL)
p_Stat = wx.StaticText(self, -1, title)
p_Spin = FS.FloatSpin(self, -1, min_val=0, max_val=1,
increment=0.1, value=defaultValue)
# ,extrastyle=FS.FS_LEFT)
p_Spin.SetDigits(1)
p_Spin.SetHelpText(helptxt)
p_Spin.Bind( FS.EVT_FLOATSPIN, fun)
p_sizer.Add(p_Stat, 1)
p_sizer.Add(p_Spin, 1)
sizer.Add(p_sizer, 0,
wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.TOP, 5)
def _createPlotBox(self, sizer):
self.HaveHisto = None
self._createCheck(sizer, " Histogramm",
"Plot Peptide/Protein frequencies Histogram",
self.OnHistos)
self.HaveScatter = None
self._createCheck(sizer, " Scatter Plot",
"Plot Peptide/Protein frequencies Scatter Plot",
self.OnScatter)
def _createCheck(self, sizer, title, helptxt, funz):
check = wx.CheckBox( self, -1, title, style = wx.ALIGN_RIGHT )
check.SetHelpText(helptxt,)
check.Bind( wx.EVT_CHECKBOX, funz)
sizer.Add(check, 0,
wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.TOP, 5)
def OnProtSpin(self, e):
""" Setting the Protein threshold"""
floatspin = e.GetEventObject()
self.protThr = floatspin.GetValue()
txt = "Protein IF threshold: {0}\n".format(self.protThr)
self.GetParent().SetStatusText(txt)
def OnPeptSpin(self, e):
""" Setting the Peptide threshold"""
floatspin = e.GetEventObject()
self.peptThr = floatspin.GetValue()
txt = "Peptide IF threshold: {0}\n".format(self.peptThr)
self.GetParent().SetStatusText(txt)
def OnHistos(self, e):
""" Make a histogram? """
self.HaveHisto = e.IsChecked()
def OnScatter(self, e):
""" Make a Scatter Plot?"""
self.HaveScatter = e.IsChecked()
def GetValue(self):
return {"protThr": self.protThr,
"peptThr": self.peptThr,
"haveHisto": self.HaveHisto,
"haveScatter": self.HaveScatter}
|
DATABASES = {'default': {'NAME': ':memory:',
'ENGINE': 'django.db.backends.sqlite3'}}
SITE_ID = 1
STATIC_URL = '/static/'
SECRET_KEY = 'secret-key'
INSTALLED_APPS = [
'django.contrib.sites',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.auth',
'cv',
# 'researchprojects',
]
|
# https://leetcode.com/problems/design-add-and-search-words-data-structure/
# Design a data structure that supports adding new words and finding if a string
# matches any previously added string.
# Implement the WordDictionary class:
################################################################################
from collections import defaultdict
class TrieNode():
def __init__(self):
self.children = defaultdict(TrieNode)
self.is_word = False
class WordDictionary:
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = TrieNode()
def addWord(self, word: str) -> None:
node = self.root
for char in word:
node = node.children[char]
node.is_word = True
def search(self, word: str) -> bool:
return self.dfs(self.root, 0, word)
def dfs(self, node, idx, word):
if idx == len(word): return node.is_word
char = word[idx]
if char == ".":
for child in node.children:
if self.dfs(node.children[child], idx + 1, word):
                    return True
            return False
else:
if char not in node.children:
return False
else:
return self.dfs(node.children[char], idx + 1, word)
# Your WordDictionary object will be instantiated and called as such:
# obj = WordDictionary()
# obj.addWord(word)
# param_2 = obj.search(word)
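# Hedged usage sketch (example words chosen only for illustration):
#   wd = WordDictionary()
#   wd.addWord("bad"); wd.addWord("dad")
#   wd.search("pad")  # False - no trie path matches
#   wd.search(".ad")  # True  - '.' tries every child at that depth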
|
#from OpenAttack.OpenAttack2.attack_assist.filter_words.protein import PROTEIN_FILTER_WORDS
from .chinese import CHINESE_FILTER_WORDS
from .english import ENGLISH_FILTER_WORDS
from .protein import PROTEIN_FILTER_WORDS
def get_default_filter_words(lang):
from ...tags import TAG_Chinese, TAG_English,TAG_Protein
if lang == TAG_Chinese:
return CHINESE_FILTER_WORDS
if lang == TAG_English:
return ENGLISH_FILTER_WORDS
if lang == TAG_Protein:
return PROTEIN_FILTER_WORDS
return PROTEIN_FILTER_WORDS
|
import socket
import tokens
import connection
import io
import os
from PIL import Image
from message.literalMessage import LiteralMessage
from baseApplication import BaseApplication
class ClientApplication(BaseApplication):
def __init__(self, host, port):
super().__init__(host, port, tokens.CLIENT_TOKEN)
def show_image_file_from_storage(self):
filename = input("Filename:")
file = self.get_file(filename)
img = Image.open(io.BytesIO(file))
img.show()
def see_files_in_storage(self):
files = self.get_files_from_storage()
for filename in files:
print(filename)
def send_file_to_storage(self):
filename = input("Filename:")
self.send_file(filename)
def send_job(self, token):
filename = input("Filename:")
dstfilename = input("Destination filename:")
self.send_literal(token)
self.send_literal(filename)
self.send_literal(dstfilename)
messageToken = self.receive_message().value
message = self.receive_message().value
if messageToken == tokens.INFO_MESSAGE or messageToken == tokens.ERROR_MESSAGE:
print(message)
def remove_file(self):
filename = input("Filename:")
self.send_literal(tokens.REMOVE_FILE)
self.send_literal(filename)
result = self.receive_message(True, 1.0)
if result is not None:
if result.value == tokens.ERROR_MESSAGE or result.value == tokens.INFO_MESSAGE:
message = self.receive_message().value
print(message)
def see_a_logfile(self):
files = [logfile for logfile in self.get_files_from_storage() if os.path.splitext(logfile)[1].lower() == '.log']
count = 0
for logfile in files:
print('{} - {}'.format(count, logfile))
count += 1
index = int(input('Index:'))
filename = files[index]
file = self.get_file(filename)
file = io.BytesIO(file).read()
print('Log:')
print(file.decode('UTF-8'))
def print_commands(self):
print('Commands:')
print('0 - Exit')
print('1 - Flip Image Horizontal')
print('2 - Flip Image Vertical')
print('3 - Rotate Image 90.')
print('4 - Rotate Image 180.')
print('5 - Rotate Image 270.')
print('6 - See Files in Storage.')
print('7 - Send File to Storage.')
print('8 - Show Image File from Storage.')
print('9 - Remove File from Storage.')
print('10 - See a logfile.')
def menu(self):
while not self.is_closed():
self.print_commands()
cmd = int(input("Cmd>"))
if cmd == 0:
self.close()
elif cmd == 1:
self.send_job(tokens.JOB_FLIP_HORIZONTAL)
elif cmd == 2:
self.send_job(tokens.JOB_FLIP_VERTICAL)
elif cmd == 3:
self.send_job(tokens.JOB_ROTATE_90)
elif cmd == 4:
self.send_job(tokens.JOB_ROTATE_180)
elif cmd == 5:
self.send_job(tokens.JOB_ROTATE_270)
elif cmd == 6:
self.see_files_in_storage()
elif cmd == 7:
self.send_file_to_storage()
elif cmd == 8:
self.show_image_file_from_storage()
elif cmd == 9:
self.remove_file()
elif cmd == 10:
self.see_a_logfile()
host = input('Host: ')
ClientApplication(host, 50007)
|
'''
run_tests.py
run unit test cases
'''
import os
import sys
import subprocess as sp
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
test_dir = os.path.dirname(os.path.abspath("__file__"))
sandbox = os.path.join(test_dir, 'sandbox')
os.makedirs(sandbox, exist_ok=True)
files = os.listdir(test_dir)
files = [x for x in files if x.endswith('.py')]
files = [x for x in files if not x.startswith('run_examples.py')]
files = [x for x in files if not x.startswith('run_tests.py')]
files = [x for x in files if not x.startswith('test_module.py')]
files = [x for x in files if not x.startswith('test_memory.py')]
for t in files:
case = t[5:-3]
d1 = os.path.join(sandbox, 's', case)
d2 = os.path.join(sandbox, 'p', case)
os.makedirs(d1, exist_ok=True)
os.makedirs(d2, exist_ok=True)
os.chdir(d1)
print("#### running (serial): " + t)
comm = [sys.executable, "../../../"+t]
p = sp.Popen(comm, stdout=sp.PIPE, stderr=sp.STDOUT, stdin=sp.PIPE)
lines, errs = p.communicate()
print(lines.decode())
print(errs)
os.chdir(d2)
print("#### running (parallel): " + t)
comm = [sys.executable, "../../../"+t, "-p"]
p = sp.Popen(comm, stdout=sp.PIPE, stderr=sp.STDOUT, stdin=sp.PIPE)
lines, errs = p.communicate()
print(lines.decode())
print(errs)
|
#!/usr/bin/env python3
###########################################################################
# Bobbi June 2020
#
# Alternative server for ADTPro's VEDRIVE.SYSTEM
# Virtual Ethernet Drive for Apple II / ProDOS
#
# See https://www.adtpro.com/protocolv1.html
#
###########################################################################
pd25 = False # Default to old-style date/time --prodos25 to use new format
file1 = "/home/pi/virtual-1.po" # Disk image drive 1 --disk1 to override
file2 = "/home/pi/virtual-2.po" # Disk image drive 2 --disk2 to override
###########################################################################
import socket
import time
import os
import getopt
import sys
IP = "::"
PORT = 6502
BLKSZ = 512
# vt100 colour codes for pretty printing
BLK = '\033[90m'
RED = '\033[91m'
GRN = '\033[92m'
YEL = '\033[93m'
BLU = '\033[94m'
MAG = '\033[95m'
CYN = '\033[96m'
WHT = '\033[97m'
ENDC = '\033[0m'
# Globals
systemd = False # True if running under Systemd
packet = 1 # Sent packet counter
prevblk = -1 # Last block read/written
prevdrv = -1 # Last drive read/written
prevop = -1 # Last operation (read or write)
prevcs = -1 # Previous checksum
col = 0 # Used to control logging printout
skip1 = 0 # Bytes to skip over header (Drive 1)
skip2 = 0 # Bytes to skip over header (Drive 2)
#
# Get date/time bytes
#
def getDateTimeBytes():
global pd25
t = time.localtime()
dt = []
if pd25:
# ProDOS 2.5+
word1 = 2048 * t.tm_mday + 64 * t.tm_hour + t.tm_min
word2 = 4096 * (t.tm_mon + 1) + t.tm_year
else:
# Legacy ProDOS <2.5
word1 = t.tm_mday + 32 * t.tm_mon + 512 * (t.tm_year - 2000)
word2 = t.tm_min + 256 * t.tm_hour
dt.append(word1 & 0xff)
dt.append((word1 & 0xff00) >> 8)
dt.append(word2 & 0xff)
dt.append((word2 & 0xff00) >> 8)
return dt
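# Worked example of the legacy (<2.5) packing above, for an illustrative local time of
# 2020-06-15 10:30: word1 = 15 + 32*6 + 512*20 = 10447 (0x28CF) and
# word2 = 30 + 256*10 = 2590 (0x0A1E), so the returned bytes are
# [0xCF, 0x28, 0x1E, 0x0A] - the little-endian date word followed by the time word.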
#
# Append byte b to list l, return updated checksum
#
def appendbyte(l, b, csin):
l.append(b)
return csin ^ b
#
# Pretty print info about each request
#
def printinfo(drv, blknum, isWrite, isError, cs):
global systemd, prevblk, prevdrv, prevop, prevcs, col
if drv != prevdrv:
if systemd:
print('\nDrive {}'.format(drv))
else:
print('\n{}Drive {}{}'.format(BLU, drv, ENDC))
col = 0
e = '+' if ((blknum == prevblk) and (drv == prevdrv) and (isWrite == prevop) and (cs == prevcs)) else ' '
e = 'X' if isError else e
if systemd:
c = 'W' if isWrite else 'R'
print(' {0}{1}{2:05d}'.format(e, c, blknum), end='', flush=True)
else:
c = RED if isWrite else GRN
print('{0} {1}{2:05d}{3}'.format(c, e, blknum, ENDC), end='', flush=True)
col += 1
if col == 8:
print('')
col = 0
prevblk = blknum
prevdrv = drv
prevop = isWrite
prevcs = cs
#
# Read block with date/time update
#
def read3(sock, addr, d):
global packet, skip
if d[1] == 0x03:
file = file1
drv = 1
skip = skip1
else:
file = file2
drv = 2
skip = skip2
blknum = d[2] + 256 * d[3]
err = False
try:
with open(file, 'rb') as f:
b = blknum * BLKSZ + skip
f.seek(b)
block = f.read(BLKSZ)
except:
err = True
dt = getDateTimeBytes()
l = []
appendbyte(l, packet & 0xff, 0) # Packet number
packet += 1
cs = appendbyte(l, 0xc5, 0) # "E"
cs = appendbyte(l, d[1], cs) # 0x03 or 0x05
cs = appendbyte(l, d[2], cs) # Block num LSB
cs = appendbyte(l, d[3], cs) # Block num MSB
cs = appendbyte(l, dt[0], cs) # Time of day LSB
cs = appendbyte(l, dt[1], cs) # Time of day MSB
cs = appendbyte(l, dt[2], cs) # Date LSB
cs = appendbyte(l, dt[3], cs) # Date MSB
appendbyte(l, cs, cs) # Checksum for header
# Signal read errors by responding with incorrect checksum
if err:
cs += 1
else:
cs = 0
for i in range (0, BLKSZ):
cs = appendbyte(l, block[i], cs)
appendbyte(l, cs, cs) # Checksum for datablock
printinfo(drv, blknum, False, err, cs)
b = sock.sendto(bytearray(l), addr)
#print('Sent {} bytes to {}'.format(b, addr))
#
# Write block
#
def write(sock, addr, d):
global packet
if d[1] == 0x02:
file = file1
drv = 1
skip = skip1
else:
file = file2
drv = 2
skip = skip2
cs = 0
for i in range (0, BLKSZ):
cs ^= d[i+5]
blknum = d[2] + 256 * d[3]
err = False
if cs == d[517]:
try:
with open(file, 'r+b') as f:
b = blknum * BLKSZ + skip
f.seek(b)
for i in range (0, BLKSZ):
f.write(bytes([d[i+5]]))
except:
err = True # Write error
else:
        err = True  # Bad checksum
# Signal write errors by responding with bad data checksum.
# Use sender's checksum + 1, so there is never an inadvertent match.
if err:
cs = d[517] + 1
l = []
appendbyte(l, packet & 0xff, 0) # Packet number
packet += 1
appendbyte(l, 0xc5, 0) # "E"
appendbyte(l, d[1], 0) # 0x02 or 0x04
appendbyte(l, d[2], 0) # Block num LSB
appendbyte(l, d[3], 0) # Block num MSB
appendbyte(l, cs, 0) # Checksum of datablock
printinfo(drv, blknum, True, err, cs)
b = sock.sendto(bytearray(l), addr)
#print('Sent {} bytes to {}'.format(b, addr))
#
# See if file is a 2MG and, if so, that it contains .PO image
# Returns bytes to skip over header
#
def check2MG(filename):
try:
with open(filename, 'rb') as f:
hdr = f.read(16)
except:
return 0
if (hdr[0] == 0x32) and (hdr[1] == 0x49) and (hdr[2] == 0x4d) and (hdr[3] == 0x47):
print('** ' + filename + ' is a 2MG file **')
if hdr[0x0c] != 0x01:
print('** Warning NOT in ProDOS order **')
return 64
return 0
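# (0x32 0x49 0x4D 0x47 is the ASCII magic "2IMG"; offset 0x0C of the header holds the
#  image format, where 0x01 indicates ProDOS block order - hence the warning above.)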
def usage():
print('usage: veserver [OPTION]...')
print(' -h, --help Show this help');
print(' -p, --prodos25 Use ProDOS 2.5 date/time format');
print(' -1 FNAME, --disk1=FNAME Specify filename for disk 1 image');
print(' -2 FNAME, --disk2=FNAME Specify filename for disk 2 image');
#
# Entry point
#
# Check whether we are running under Systemd or not
if 'INVOCATION_ID' in os.environ:
systemd = True
short_opts = "hp1:2:"
long_opts = ["help", "prodos25", "disk1=", "disk2="]
try:
args, vals = getopt.getopt(sys.argv[1:], short_opts, long_opts)
except getopt.error as e:
print (str(e))
usage()
sys.exit(2)
for a, v in args:
if a in ('-h', '--help'):
usage()
sys.exit(0)
elif a in ('-p', '--prodos25'):
pd25 = True
elif a in ('-1', '--disk1'):
file1 = v
elif a in ('-2', '--disk2'):
file2 = v
print("VEServer v1.0")
if pd25:
print("ProDOS 2.5+ Clock Driver")
else:
print("Legacy ProDOS Clock Driver")
print("Disk 1: {}".format(file1))
skip1 = check2MG(file1)
print("Disk 2: {}".format(file2))
skip2 = check2MG(file2)
with socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) as s:
s.bind((IP, PORT))
print("veserver - listening on UDP port {}".format(PORT))
while True:
data, address = s.recvfrom(1024)
#print('Received {} bytes from {}'.format(len(data), address))
if (data[0] == 0xc5):
if (data[1] == 0x03) or (data[1] == 0x05):
read3(s, address, data)
elif (data[1] == 0x02) or (data[1] == 0x04):
write(s, address, data)
|
class Solution:
def canCross(self, stones: List[int]) -> bool:
fail = set()
stone_set = set(stones)
stack = []
stack.append((0,0))
while stack:
cur_pos, jump = stack.pop()
for j in (jump-1, jump, jump+1):
pos = cur_pos+j
if pos > 0 and pos in stone_set and (pos, j) not in fail:
if pos == stones[-1]:
return True
stack.append((pos, j))
fail.add((cur_pos, jump))
return False
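# Hedged usage sketch (the values are the well-known sample inputs for this problem,
# not taken from this repo):
#   Solution().canCross([0, 1, 3, 5, 6, 8, 12, 17])  # True
#   Solution().canCross([0, 1, 2, 3, 4, 8, 9, 11])   # False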
|
# Imbalanced Classification
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
import numpy as np
import matplotlib.pyplot as plt
import imageio
import os
try:
os.mkdir('./figures')
except:
pass
images = []
for i,s in enumerate(np.linspace(2000,50,80)):
features, label = make_blobs(n_samples=[2000,int(s)],\
n_features=2,\
centers=[(-5,5),(5,5)],\
random_state=47,cluster_std=3)
if i==0:
min1, max1 = features[:,0].min()-1,features[:,0].max()+1
min2, max2 = features[:,1].min()-1,features[:,1].max()+1
x1grid = np.arange(min1,max1,.1)
x2grid = np.arange(min2,max2,.1)
xx, yy = np.meshgrid(x1grid,x2grid)
r1,r2 = xx.flatten(), yy.flatten()
r1,r2 = r1.reshape((len(r1), 1)), r2.reshape((len(r2), 1))
grid = np.hstack((r1,r2))
model = LogisticRegression()
model.fit(features,label)
plt.figure(figsize=(6,3))
yp = model.predict(grid)
zz = yp.reshape(xx.shape)
plt.contourf(xx,yy,zz,cmap='Paired')
for cv in range(2):
row = np.where(label==cv)
plt.scatter(features[row,0],features[row,1],cmap = 'Paired')
plt.tight_layout()
figname = './figures/Imbalanced_'+str(10+i)+'.png'
plt.savefig(figname,dpi=300)
plt.close()
images.append(imageio.imread(figname))
# add images in reverse
for i in range(80):
images.append(images[79-i])
try:
imageio.mimsave('Imbalanced_Classification.mp4', images)
except:
imageio.mimsave('Imbalanced_Classification.gif', images)
|
import inspect
import tempfile
import subprocess
import shutil
import os
import sys
import threading
from itertools import chain
def header(func):
signature = inspect.signature(func)
args = [str(k) if v.default is inspect.Parameter.empty else str(k) + "=" + str(v.default)
for k, v in signature.parameters.items()]
currentLine = func.__name__ + "("
lines = []
indent = " " * (len(func.__name__) + 1)
separator = ""
for arg in args:
newLine = currentLine + separator + arg
if len(newLine) > 80:
lines.append(currentLine + separator)
currentLine = indent + arg
else:
currentLine = newLine
separator = ", "
lines.append(currentLine + ")")
return("\n".join(lines))
def printHeader(func):
print(f"\n#### `{func.__name__}`")
print(f"```\n{header(func)}\n```")
def printHelp(x):
print(inspect.getdoc(x))
def quotePosix(args):
"""
    Given a list of command line arguments, quote them so they can be
printed on POSIX
"""
def q(x):
if " " in x:
return "'" + x + "'"
else:
return x
return [q(x) for x in args]
def quoteWindows(args):
"""
    Given a list of command line arguments, quote them so they can be
printed in Windows CLI
"""
def q(x):
if " " in x:
return '"' + x + '"'
else:
return x
return [q(x) for x in args]
def panelizeAndDraw(name, command):
dirname = tempfile.mkdtemp()
output = os.path.join(dirname, "x.kicad_pcb")
try:
outimage = f"doc/resources/{name}.png"
subprocess.run(command + [output], check=True, capture_output=True)
r = subprocess.run(["pcbdraw", "plot", "--help"], capture_output=True)
if r.returncode == 0:
# We have a new PcbDraw
r = subprocess.run(["pcbdraw", "plot", "--vcuts", "Cmts.User", "--silent", output,
outimage], check=True, capture_output=True)
else:
# We have an old PcbDraw
r = subprocess.run(["pcbdraw", "--vcuts", "--silent", output,
outimage], check=True, capture_output=True)
subprocess.run(["convert", outimage, "-define",
"png:include-chunk=none", outimage], check=True, capture_output=True)
except subprocess.CalledProcessError as e:
print("Command: " + " ".join(e.cmd), file=sys.stderr)
print("Stdout: " + e.stdout.decode("utf8"), file=sys.stderr)
print("Stderr: " + e.stderr.decode("utf8"), file=sys.stderr)
sys.exit(1)
shutil.rmtree(dirname)
runExampleThreads = []
def runBoardExample(name, args):
"""
Run kikit CLI command with args - omitting the output file name. Prints a
markdown with the command and includes a generated board image.
Generating images runs in parallel, so do not forget to invoke
    runExampleJoin() at the end of your script.
"""
realArgs = ["python3", "-m", "kikit.ui"] + list(chain(*args))
# We print first, so in a case of failure we have the command in a nice
# copy-paste-ready form
args[0] = ["kikit"] + args[0]
args[-1] = args[-1] + ["panel.kicad_pcb"]
print("```")
print("# Linux")
for i, c in enumerate(args):
if i != 0:
print(" ", end="")
end = "\n" if i + 1 == len(args) else " \\\n"
print(" ".join(quotePosix(c)), end=end)
print("\n# Windows")
for i, c in enumerate(args):
if i != 0:
print(" ", end="")
end = "\n" if i + 1 == len(args) else " ^\n"
print(" ".join(quoteWindows(c)), end=end)
print("```\n")
print("".format(name))
t = threading.Thread(target=lambda: panelizeAndDraw(name, realArgs))
t.start()
global runExampleThreads
runExampleThreads.append(t)
def runScriptingExample(name, args):
"""
Run a Python panelization script that takes the name of the output as a last
argument and create a drawing of it.
"""
realArgs = ["python3"] + list(chain(*args))
print("```")
for i, c in enumerate(args):
if i != 0:
print(" ", end="")
end = "\n" if i + 1 == len(args) else " \\\n"
print(" ".join(quote(c)), end=end)
print("```\n")
print("".format(name))
t = threading.Thread(target=lambda: panelizeAndDraw(name, realArgs))
t.start()
global runExampleThreads
runExampleThreads.append(t)
def runExampleJoin():
for t in runExampleThreads:
t.join()
|
class Solution:
def findMin(self, nums: List[int]) -> int:
i,j = 0,len(nums)-1
while i < j:
m = (i+j)//2
if nums[m] > nums[j]:
i = m+1
else:
j = m
return nums[i]
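# Hedged usage sketch (illustrative rotated sorted arrays):
#   Solution().findMin([3, 4, 5, 1, 2])       # 1
#   Solution().findMin([4, 5, 6, 7, 0, 1, 2]) # 0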
|
from i2 import Sig
from meshed import DAG, FuncNode
from meshed.base import _func_nodes_to_graph_dict
from meshed.dag import _separate_func_nodes_and_var_nodes
from meshed.itools import topological_sort
# separate with comments lines
def pairs(xs):
if len(xs) <= 1:
return xs
else:
pairs = list(zip(xs, xs[1:]))
return pairs
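# Hedged example: pairs([1, 2, 3, 4]) -> [(1, 2), (2, 3), (3, 4)], while a list with a
# single element (or an empty list) is returned unchanged.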
def mk_mock_funcnode(arg, out):
@Sig(arg)
def func():
pass
# name = "_mock_" + str(arg) + "_" + str(out) # f-string
name = f"_mock_{str(arg)}_{str(out)}" # f-string
return FuncNode(func=func, out=out, name=name)
def funcnodes_from_pairs(pairs):
return list(map(mk_mock_funcnode_from_tuple, pairs))
def curry(func):
def res(*args):
return func(tuple(args))
return res
def uncurry(func):
def res(tup):
return func(*tup)
return res
mk_mock_funcnode_from_tuple = uncurry(mk_mock_funcnode)
def reorder_on_constraints(funcnodes, outs):
extra_nodes = funcnodes_from_pairs(pairs(outs))
funcnodes += extra_nodes
graph = _func_nodes_to_graph_dict(funcnodes)
nodes = topological_sort(graph)
print("after ordering:", nodes)
ordered_nodes = [node for node in nodes if node not in extra_nodes]
func_nodes, var_nodes = _separate_func_nodes_and_var_nodes(ordered_nodes)
return func_nodes, var_nodes
|
import os
import sys
import yaml
from pathlib import Path
def GetFilesList(yaml_path):
cfg_f = open(yaml_path,'r')
cfg = yaml.load(cfg_f.read(),Loader=yaml.FullLoader)
cwd = os.getcwd()
# print(cfg)
# key is output file name , value is the array of the amalgamated files.
ret = dict()
for key,value in cfg.items():
# Only the suffix of files name in suffix will be considered.
ret[key] = []
suffix = []
if 'suffix' in value:
suffix = value['suffix']
for root,_,files in os.walk(os.path.join(cwd,value['directory'])):
for f in files:
for suf in suffix:
if f.endswith(suf):
ret[key].append(os.path.join(root,f))
# print(ret)
return ret
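# A minimal sketch of the yaml layout this parser expects (the file and directory
# names are made up for illustration):
#
#   amalgamated.c:
#     directory: src
#     suffix: ['.c', '.h']
#
# i.e. each top-level key is an output file whose 'directory' is walked recursively,
# keeping only files whose names end with one of the listed suffixes.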
class CFile:
def __init__(self,file_path,output_filename,input_output):
self.file_path = file_path
self.file_name = Path(file_path).name
self.file_handle = open(file_path,'r')
self.output_filename = output_filename
self.get_dependencies(input_output)
def get_include_file_name(self,l):
ret = None
if '<' in l:
ret = l[l.find('<')+1:l.find('>')]
elif '\"' in l:
ret = l[l.find('\"')+1:l.rfind('\"')]
else:
print('Unknown #include ?',l)
return None
if '/' in ret:
ret = ret[ret.rfind('/')+1:]
return ret
def get_dependencies(self,input_output):
        # dependencies_in: files this file #includes that will be amalgamated into the same output file.
        # dependencies_out: files this file #includes that belong to a different output file.
self.dependencies_in = set()
self.dependencies_out = set()
for l in self.file_handle:
if l.startswith('#include'):
dep_file_name = self.get_include_file_name(l)
if dep_file_name in input_output:
if input_output[dep_file_name] == self.output_filename:
self.dependencies_in.add(dep_file_name)
else:
self.dependencies_out.add(dep_file_name)
    # f is the handle of the output file;
    # input_output maps each input file name to the output file name it is amalgamated into.
def write(self,f,input_output):
self.file_handle.seek(0)
f.write('\n/*** start of file {0} **/\n'.format(self.file_name))
for l in self.file_handle:
if l.startswith('#include'):
dep = self.get_include_file_name(l)
if dep in self.dependencies_in :
f.write('// ' + l)
continue
elif dep in self.dependencies_out :
f.write('// ' + l)
f.write('#include\"{0}\"\n'.format(input_output[dep]))
else:
f.write(l)
else:
f.write(l)
f.write('\n/*** end of file {0} **/\n'.format(self.file_name))
#files from file name to CFile, all of them should have the same output file name.
def get_handle_sequence(files:dict):
in_degree = dict()
re_dependencies = dict()
queue = []
ret = []
unfinished = set()
for key, value in files.items():
if len(value.dependencies_in) == 0:
ret.append(key)
queue.append(key)
else:
unfinished.add(key)
in_degree[key] = len(value.dependencies_in)
for tmp in value.dependencies_in:
if tmp not in re_dependencies:
re_dependencies[tmp] = []
re_dependencies[tmp].append(key)
while len(queue) > 0:
tmp = queue.pop()
if tmp not in re_dependencies:
continue
for file_name in re_dependencies[tmp]:
in_degree[file_name] -= 1
if in_degree[file_name] == 0:
queue.append(file_name)
unfinished.remove(file_name)
ret.append(file_name)
return ret
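# The function above is a Kahn-style topological sort over the in-file dependencies:
# files with no in-file #include dependencies are emitted first, and a file is emitted
# once every file it depends on (within the same output) has already been emitted.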
if __name__ == '__main__':
yaml_path = sys.argv[1]
    # output_input maps each output file (str) to the list of input file paths to amalgamate.
    # input_output maps each input file name (str) to its output file name (str); names only, no paths.
output_input = GetFilesList(yaml_path)
input_output = dict()
for key,value in output_input.items():
output_filename = Path(key).name
for tmp in value:
input_filename = Path(tmp).name
input_output[input_filename] = output_filename
# from output file name to an object { input file name : CFile }
output_input_CFile = dict()
for key,value in output_input.items():
output_filename = Path(key).name
output_input_CFile[key] = dict()
        tmp_dic = output_input_CFile[key]  # use the same key the dict entry was created with above
for tmp in value:
input_filename = Path(tmp).name
tmp_dic[input_filename] = CFile(tmp,output_filename,input_output)
for key,value in output_input_CFile.items():
seq = get_handle_sequence(value)
if len(seq) != len(value):
print('Not allow files dependencies to each other')
exit(0)
f = open(key,'w+')
for tmp in seq:
value[tmp].write(f,input_output)
f.close()
|
import scrapy
class BlogSpider(scrapy.Spider):
name = 'blogspider'
start_urls = ['https://blog.scrapinghub.com']
def parse(self, response):
for title in response.css('h2.entry-title'):
yield {'title': title.css('a ::text').extract_first()}
next_page = response.css('div.prev-post > a ::attr(href)').extract_first()
if next_page:
yield scrapy.Request(response.urljoin(next_page), callback=self.parse)
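# Hedged usage note: a self-contained spider like this can typically be run without a
# Scrapy project via the CLI, e.g. `scrapy runspider <this_file>.py -o titles.json`
# (the output filename is illustrative).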
|
from boa3.builtin import public
@public
def Main() -> bytes:
return (123).to_script_hash()
|
from pytest import mark
from day_8 import (
parse,
count_1_4_7_8,
count_output,
determine_1_4_7_8,
decode_signals,
main,
)
def test_parse_data() -> None:
parsed_data = parse("day_8_test.txt")
assert parsed_data[0] == [
[
"be",
"cfbegad",
"cbdgef",
"fgaecd",
"cgeb",
"fdcge",
"agebfd",
"fecdb",
"fabcd",
"edb",
],
["fdgacbe", "cefdb", "cefbgd", "gcbe"],
]
def test_count_1_4_7_8() -> None:
assert count_1_4_7_8(parse("day_8_test.txt")) == 26
@mark.parametrize(
["signals", "corresponding"],
[
(
[
"acedgfb",
"cdfbe",
"gcdfa",
"fbcad",
"dab",
"cefabd",
"cdfgeb",
"eafb",
"cagedb",
"ab",
],
{1: "ab", 4: "abef", 7: "abd", 8: "abcdefg"},
),
(
[
"abc",
"dabc",
"ba",
"gabcdef",
],
{8: "abcdefg", 7: "abc", 4: "abcd", 1: "ab"},
),
(
[
"bae",
"ecfd",
"ga",
"abcdefg",
],
{8: "abcdefg", 7: "abe", 4: "cdef", 1: "ag"},
),
],
)
def test_determine_1_4_7_8(signals, corresponding) -> None:
assert determine_1_4_7_8(signals) == corresponding
@mark.parametrize(
["signals", "output_tuple"],
[
(
[
[
"acedgfb",
"cdfbe",
"gcdfa",
"fbcad",
"dab",
"cefabd",
"cdfgeb",
"eafb",
"cagedb",
"ab",
],
["cdfeb", "fcadb", "cdfeb", "cdbaf"],
],
(
["cdfeb", "fcadb", "cdfeb", "cdbaf"],
{
8: "abcdefg",
7: "abd",
4: "abef",
1: "ab",
5: "bcdef",
2: "acdfg",
3: "abcdf",
9: "abcdef",
6: "bcdefg",
0: "abcdeg",
},
),
),
],
)
def test_decode_signals(signals, output_tuple) -> None:
assert decode_signals(signals) == output_tuple
@mark.parametrize(
["output_signals", "decoded", "result"],
[
(
["cdfeb", "fcadb", "cdfeb", "cdbaf"],
{
8: "acedgfb",
7: "dab",
4: "eafb",
1: "ab",
5: "bcdef",
2: "acdfg",
3: "abcdf",
9: "abcdef",
6: "bcdefg",
0: "abcdeg",
},
5353,
)
],
)
def test_count_output(output_signals, decoded, result) -> None:
assert count_output(output_signals, decoded) == result
def test_main() -> None:
list_of_signals = parse("day_8_test.txt")
assert main(list_of_signals) == 61229
|
#!/usr/bin/env python
# encoding: utf-8
import json
import os
import os.path
import re
import sys
from collections import OrderedDict
import copy
from lxml import etree
from lxml.etree import CDATA, SubElement, Element, ElementTree, ParseError, parse, strip_elements
from common import utils, logger
from common.exceptions import ApplicationException, InvalidModelException
from CTDopts.CTDopts import _InFile, _OutFile, _OutPrefix, ParameterGroup, _Choices, _NumericRange, _FileFormat, ModelError, _Null
# mapping to CTD types to Galaxy types
TYPE_TO_GALAXY_TYPE = {int: 'integer', float: 'float', str: 'text', bool: 'boolean', _InFile: 'txt',
_OutFile: 'txt', _Choices: 'select', _OutPrefix: 'output-prefix'}
GALAXY_TYPE_TO_TYPE = {galaxy_type: ctd_type for ctd_type, galaxy_type in TYPE_TO_GALAXY_TYPE.items()}
STDIO_MACRO_NAME = "stdio"
REQUIREMENTS_MACRO_NAME = "requirements"
ADVANCED_OPTIONS_NAME = "adv_opts_"
REQUIRED_MACROS = [REQUIREMENTS_MACRO_NAME, STDIO_MACRO_NAME, ADVANCED_OPTIONS_NAME + "macro"]
class ExitCode:
def __init__(self, code_range="", level="", description=None):
self.range = code_range
self.level = level
self.description = description
class DataType:
def __init__(self, extension, galaxy_extension, composite=None):
self.extension = extension
self.galaxy_extension = galaxy_extension
self.composite = composite
def add_specific_args(parser):
"""
add command line arguments specific for galaxy tool generation
@param parser an instance of ArgumentParser
"""
parser.add_argument("-f", "--formats-file", dest="formats_file",
help="File containing the supported file formats. Run with '-h' or '--help' to see a "
"brief example on the layout of this file.", default=None, required=False)
parser.add_argument("-a", "--add-to-command-line", dest="add_to_command_line",
help="Adds content to the command line", default="", required=False)
parser.add_argument("-d", "--datatypes-destination", dest="data_types_destination",
help="Specify the location of a datatypes_conf.xml to modify and add the registered "
"data types. If the provided destination does not exist, a new file will be created.",
default=None, required=False)
parser.add_argument("-c", "--default-category", dest="default_category", default="DEFAULT", required=False,
help="Default category to use for tools lacking a category when generating tool_conf.xml")
parser.add_argument("-t", "--tool-conf-destination", dest="tool_conf_destination", default=None, required=False,
help="Specify the location of an existing tool_conf.xml that will be modified to include "
"the converted tools. If the provided destination does not exist, a new file will"
"be created.")
parser.add_argument("-g", "--galaxy-tool-path", dest="galaxy_tool_path", default=None, required=False,
help="The path that will be prepended to the file names when generating tool_conf.xml")
parser.add_argument("-r", "--required-tools", dest="required_tools_file", default=None, required=False,
help="Each line of the file will be interpreted as a tool name that needs translation. "
"Run with '-h' or '--help' to see a brief example on the format of this file.")
parser.add_argument("-s", "--skip-tools", dest="skip_tools_file", default=None, required=False,
help="File containing a list of tools for which a Galaxy stub will not be generated. "
"Run with '-h' or '--help' to see a brief example on the format of this file.")
parser.add_argument("-m", "--macros", dest="macros_files", default=[], nargs="*",
action="append", required=None, help="Import the additional given file(s) as macros. "
"The macros stdio, requirements and advanced_options are "
"required. Please see galaxy/macros.xml for an example of a "
"valid macros file. All defined macros will be imported.")
parser.add_argument("--test-macros", dest="test_macros_files", default=[], nargs="*",
action="append", required=None,
help="Import tests from the files given file(s) as macros. "
"The macro names must end with the id of the tools")
parser.add_argument("--test-macros-prefix", dest="test_macros_prefix", default=[], nargs="*",
action="append", required=None, help="The prefix of the macro name in the corresponding trest macros file")
parser.add_argument("--test-test", dest="test_test", action='store_true', default=False, required=False,
help="Generate a simple test for the internal unit tests.")
parser.add_argument("--test-only", dest="test_only", action='store_true', default=False, required=False,
help="Generate only the test section.")
parser.add_argument("--test-unsniffable", dest="test_unsniffable", nargs="+", default=[], required=False,
help="File extensions that can't be sniffed in Galaxy."
"Needs to be the OpenMS extensions (1st column in --formats-file)."
"For testdata with such extensions ftype will be set in the tes according to the file extension")
parser.add_argument("--tool-version", dest="tool_version", required=False, default=None,
help="Tool version to use (if not given its extracted from the CTD)")
parser.add_argument("--tool-profile", dest="tool_profile", required=False, default=None,
help="Tool profile version to use (if not given its not set)")
parser.add_argument("--bump-file", dest="bump_file", required=False,
default=None, help="json file defining tool versions."
"tools not listed in the file default to 0."
"if not given @GALAXY_VERSION@ is used")
def modify_param_for_galaxy(param):
"""
some parameters need galaxy specific modifications
"""
if param.type is _InFile:
# if file default is given (happens for external applications and
# files for which the default is taken from share/OpenMS) set the
# parm to not required and remove the default (external applications
# need to be taken care by hardcoded values and the other cases
# are chosen automatically if not specified on the command line)
if param.required and not (param.default is None or type(param.default) is _Null):
logger.warning("Data parameter %s with default (%s)" % (param.name, param.default), 1)
param.required = False
param.default = _Null()
return param
def convert_models(args, parsed_ctds):
"""
main conversion function
@param args command line arguments
@param parsed_ctds the ctds
"""
# validate and prepare the passed arguments
validate_and_prepare_args(args, parsed_ctds[0].ctd_model)
# parse the given supported file-formats file
supported_file_formats = parse_file_formats(args.formats_file)
# extract the names of the macros and check that we have found the ones we need
macros_to_expand = parse_macros_files(args.macros_files,
tool_version=args.tool_version,
supported_file_types=supported_file_formats,
required_macros=REQUIRED_MACROS,
dont_expand=[ADVANCED_OPTIONS_NAME + "macro", "references",
"list_string_val", "list_string_san",
"list_float_valsan", "list_integer_valsan"])
bump = parse_bump_file(args.bump_file)
check_test_macros(args.test_macros_files, args.test_macros_prefix, parsed_ctds)
# parse the skip/required tools files
skip_tools = parse_tools_list_file(args.skip_tools_file)
required_tools = parse_tools_list_file(args.required_tools_file)
_convert_internal(parsed_ctds,
supported_file_formats=supported_file_formats,
default_executable_path=args.default_executable_path,
add_to_command_line=args.add_to_command_line,
required_tools=required_tools,
skip_tools=skip_tools,
macros_file_names=args.macros_files,
macros_to_expand=macros_to_expand,
parameter_hardcoder=args.parameter_hardcoder,
test_test=args.test_test,
test_only=args.test_only,
test_unsniffable=args.test_unsniffable,
test_macros_file_names=args.test_macros_files,
test_macros_prefix=args.test_macros_prefix,
tool_version=args.tool_version,
tool_profile=args.tool_profile,
bump=bump)
def parse_bump_file(bump_file):
if bump_file is None:
return None
with open(bump_file) as fp:
return json.load(fp)
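# Illustrative sketch (added note, not part of the original converter): the bump file loaded
# above is expected to be a plain JSON object mapping tool names (or tool ids) to the Galaxy
# version suffix used in create_tool. The tool names below are hypothetical examples.
#
# {
#     "FeatureFinderCentroided": 2,
#     "DecoyDatabase": 1
# }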
def parse_tools_list_file(tools_list_file):
"""
"""
tools_list = None
if tools_list_file is not None:
tools_list = []
with open(tools_list_file) as f:
for line in f:
if line is None or not line.strip() or line.strip().startswith("#"):
continue
else:
tools_list.append(line.strip())
return tools_list
def parse_macros_files(macros_file_names, tool_version, supported_file_types, required_macros=[], dont_expand=[]):
"""
"""
macros_to_expand = []
for macros_file_name in macros_file_names:
try:
macros_file = open(macros_file_name)
logger.info("Loading macros from %s" % macros_file_name, 0)
root = parse(macros_file).getroot()
for xml_element in root.findall("xml"):
name = xml_element.attrib["name"]
if name in macros_to_expand:
logger.warning("Macro %s has already been found. Duplicate found in file %s." %
(name, macros_file_name), 0)
continue
logger.info("Macro %s found" % name, 1)
macros_to_expand.append(name)
except ParseError as e:
raise ApplicationException("The macros file " + macros_file_name + " could not be parsed. Cause: " + str(e))
except IOError as e:
raise ApplicationException("The macros file " + macros_file_name + " could not be opened. Cause: " + str(e))
else:
macros_file.close()
tool_ver_tk = root.find("token[@name='@TOOL_VERSION@']")
galaxy_ver_tk = root.find("token[@name='@GALAXY_VERSION@']")
if tool_ver_tk is None:
tool_ver_tk = add_child_node(root, "token", OrderedDict([("name", "@TOOL_VERSION@")]))
tool_ver_tk.text = tool_version
if galaxy_ver_tk is not None:
if tool_version == tool_ver_tk.text:
galaxy_ver_tk.text = str(int(galaxy_ver_tk.text))
else:
tool_ver_tk.text = tool_version
galaxy_ver_tk.text = "0"
ext_foo = root.find("token[@name='@EXT_FOO@']")
if ext_foo is None:
ext_foo = add_child_node(root, "token", OrderedDict([("name", "@EXT_FOO@")]))
g2o, o2g = get_fileformat_maps(supported_file_types)
# make sure that the backup data type is in the map
if 'txt' not in g2o:
g2o['txt'] = 'txt'
ext_foo.text = CDATA("""#def oms2gxyext(o)
#set m=%s
#return m[o]
#end def
#def gxy2omsext(g)
#set m=%s
#return m[g]
#end def
""" % (str(o2g), str(g2o)))
tree = ElementTree(root)
tree.write(macros_file_name, encoding="UTF-8", xml_declaration=True, pretty_print=True)
# with open(macros_file_name, "w") as macros_file:
# tree = ElementTree(root)
# tree.write(macros_file, encoding="UTF-8", xml_declaration=True, pretty_print=True)
# we depend on "stdio", "requirements" and "advanced_options" to exist on all the given macros files
missing_needed_macros = []
for required_macro in required_macros:
if required_macro not in macros_to_expand:
missing_needed_macros.append(required_macro)
if missing_needed_macros:
raise ApplicationException(
"The following required macro(s) were not found in any of the given macros files: %s, "
"see galaxy/macros.xml for an example of a valid macros file."
% ", ".join(missing_needed_macros))
# remove macros that should not be expanded
for m in dont_expand:
try:
idx = macros_to_expand.index(m)
del macros_to_expand[idx]
except ValueError:
pass
return macros_to_expand
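# Illustrative sketch (assumption, not verbatim output): given a hypothetical formats file that
# maps mzML->mzml and csv->csv, the @EXT_FOO@ token written above would render roughly as the
# following pair of Cheetah helper functions (using the o2g / g2o maps from get_fileformat_maps):
#
# #def oms2gxyext(o)
# #set m={'mzML': 'mzml', 'csv': 'csv'}
# #return m[o]
# #end def
# #def gxy2omsext(g)
# #set m={'mzml': 'mzML', 'csv': 'csv', 'txt': 'txt'}
# #return m[g]
# #end def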
def check_test_macros(test_macros_files, test_macros_prefix, parsed_ctds):
tool_ids = set()
for parsed_ctd in parsed_ctds:
model = parsed_ctd.ctd_model
tool_ids.add(model.name.replace(" ", "_"))
for mf, mp in zip(test_macros_files, test_macros_prefix):
macro_ids = set()
try:
with open(mf) as macros_file:
root = parse(macros_file).getroot()
for xml_element in root.findall("xml"):
name = xml_element.attrib["name"]
if not name.startswith(mp):
logger.warning("Testmacro with invalid prefix %s." % (mp), 0)
continue
name = name[len(mp):]
macro_ids.add(name)
except ParseError as e:
raise ApplicationException("The macros file " + mf + " could not be parsed. Cause: " + str(e))
except IOError as e:
raise ApplicationException("The macros file " + mf + " could not be opened. Cause: " + str(e))
for t in tool_ids - macro_ids:
logger.error("missing %s" % t)
add_child_node(root, "xml", OrderedDict([("name", mp + t)]))
if len(macro_ids - tool_ids):
logger.warning("Unnecessary macros in %s: %s" % (mf, macro_ids - tool_ids))
tree = ElementTree(root)
tree.write(mf, encoding="UTF-8", xml_declaration=True, pretty_print=True)
def parse_file_formats(formats_file):
"""
"""
supported_formats = []
if formats_file is not None:
line_number = 0
with open(formats_file) as f:
for line in f:
line_number += 1
if line is None or not line.strip() or line.strip().startswith("#"):
# ignore (it'd be weird to have something like:
# if line is not None and not (not line.strip()) ...
continue
parsed_formats = line.strip().split()
# valid lines contain either one or two columns
if len(parsed_formats) == 1:
supported_formats.append(DataType(parsed_formats[0], parsed_formats[0]))
elif len(parsed_formats) == 2:
supported_formats.append(DataType(parsed_formats[0], parsed_formats[1]))
elif len(parsed_formats) == 3:
composite = [tuple(x.split(":")) for x in parsed_formats[2].split(",")]
supported_formats.append(DataType(parsed_formats[0],
parsed_formats[1],
composite))
else:
logger.warning("Invalid line at line number %d of the given formats file. Line will be ignored:\n%s" % (line_number, line), 0)
return supported_formats
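# Illustrative sketch of the accepted --formats-file syntax (hypothetical lines, not shipped
# data): one OpenMS extension per line, optionally followed by the Galaxy extension and, for
# composite data types, a comma separated list of key:format pairs in a third column.
#
#   # comments and blank lines are ignored
#   csv
#   mzML mzml
#   someType some_galaxy_type table:tabular,meta:txt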
def get_fileformat_maps(supported_formats):
"""
convenience function to compute dictionaries mapping
Galaxy data types <-> CTD formats
"""
o2g = {}
g2o = {}
for s in supported_formats:
if s.extension not in o2g:
o2g[s.extension] = s.galaxy_extension
if s.galaxy_extension not in g2o:
g2o[s.galaxy_extension] = s.extension
return g2o, o2g
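# Minimal usage sketch (assumption; file name and extensions are placeholders):
#
#   g2o, o2g = get_fileformat_maps(parse_file_formats("filetypes.txt"))
#   o2g["mzML"]   # Galaxy extension registered for the OpenMS type "mzML"
#   g2o["mzml"]   # OpenMS extension registered for the Galaxy type "mzml"
#
# note that for duplicated extensions the first mapping encountered wins.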
def validate_and_prepare_args(args, model):
"""
check command line arguments
@param args command line arguments
@param model the ctd model (used for the default tool version)
@return None
"""
# check that only one of skip_tools_file and required_tools_file has been provided
if args.skip_tools_file is not None and args.required_tools_file is not None:
raise ApplicationException(
"You have provided both a file with tools to ignore and a file with required tools.\n"
"Only one of -s/--skip-tools, -r/--required-tools can be provided.")
# flatten macros_files to make sure that we have a list containing file names and not a list of lists
utils.flatten_list_of_lists(args, "macros_files")
utils.flatten_list_of_lists(args, "test_macros_files")
utils.flatten_list_of_lists(args, "test_macros_prefix")
# check that the arguments point to a valid, existing path
input_variables_to_check = ["skip_tools_file", "required_tools_file", "macros_files", "formats_file"]
for variable_name in input_variables_to_check:
utils.validate_argument_is_valid_path(args, variable_name)
# check that the provided output files, if provided, contain a valid file path (i.e., not a folder)
output_variables_to_check = ["data_types_destination", "tool_conf_destination"]
for variable_name in output_variables_to_check:
file_name = getattr(args, variable_name)
if file_name is not None and os.path.isdir(file_name):
raise ApplicationException("The provided output file name (%s) points to a directory." % file_name)
if not args.macros_files:
# list is empty, provide the default value
logger.warning("Using default macros from galaxy/macros.xml", 0)
args.macros_files = [os.path.dirname(os.path.abspath(__file__)) + "/macros.xml"]
if args.tool_version is None:
args.tool_version = model.version
def get_preferred_file_extension():
"""
get the file extension for the output files
@return "xml"
"""
return "xml"
def _convert_internal(parsed_ctds, **kwargs):
"""
parse all input files into models using CTDopts (via utils)
@param parsed_ctds the ctds
@param kwargs skip_tools, required_tools, and additional parameters for
expand_macros, create_command, create_inputs, create_outputs
@return None (the generated tool XML is written to each CTD's suggested output file)
"""
parameter_hardcoder = kwargs["parameter_hardcoder"]
for parsed_ctd in parsed_ctds:
model = parsed_ctd.ctd_model
if kwargs["skip_tools"] is not None and model.name in kwargs["skip_tools"]:
logger.info("Skipping tool %s" % model.name, 0)
continue
elif kwargs["required_tools"] is not None and model.name not in kwargs["required_tools"]:
logger.info("Tool %s is not required, skipping it" % model.name, 0)
continue
origin_file = parsed_ctd.input_file
output_file = parsed_ctd.suggested_output_file
# overwrite attributes of the parsed ctd parameters as specified in the hardcoded parameters json
for param in utils.extract_and_flatten_parameters(model):
hardcoded_attributes = parameter_hardcoder.get_hardcoded_attributes(utils.extract_param_name(param), model.name, 'CTD')
if hardcoded_attributes is not None:
for a in hardcoded_attributes:
if not hasattr(param, a):
continue
if a == "type":
try:
t = GALAXY_TYPE_TO_TYPE[hardcoded_attributes[a]]
except KeyError:
logger.error("Could not set hardcoded attribute %s=%s for %s" % (a, hardcoded_attributes[a], param.name))
sys.exit(1)
setattr(param, a, t)
elif type(getattr(param, a)) is _FileFormat or (param.type in [_InFile, _OutFile, _OutPrefix] and a == "restrictions"):
setattr(param, a, _FileFormat(str(hardcoded_attributes[a])))
elif type(getattr(param, a)) is _Choices:
setattr(param, a, _Choices(str(hardcoded_attributes[a])))
elif type(getattr(param, a)) is _NumericRange:
raise Exception("Overwriting of Numeric Range not implemented")
else:
setattr(param, a, hardcoded_attributes[a])
if "test_only" in kwargs and kwargs["test_only"]:
test = create_test_only(parsed_ctd.ctd_model, **kwargs)
tree = ElementTree(test)
output_file = parsed_ctd.suggested_output_file
logger.info("Writing to %s" % utils.get_filename(output_file), 1)
tree.write(output_file, encoding="UTF-8", xml_declaration=False, pretty_print=True)
continue
logger.info("Converting %s (source %s)" % (model.name, utils.get_filename(origin_file)), 0)
tool = create_tool(model,
kwargs.get("tool_profile", None),
kwargs.get("bump", None))
write_header(tool, model)
create_description(tool, model)
import_macros(tool, model, **kwargs)
expand_macros(tool, kwargs["macros_to_expand"])
# command, inputs, outputs = create_cio(tool, model, **kwargs)
create_command(tool, model, **kwargs)
create_configfiles(tool, model, **kwargs)
inputs = create_inputs(tool, model, **kwargs)
outputs = create_outputs(tool, model, **kwargs)
if kwargs["test_test"]:
create_tests(tool, inputs=copy.deepcopy(inputs), outputs=copy.deepcopy(outputs))
if kwargs["test_macros_prefix"]:
create_tests(tool, test_macros_prefix=kwargs['test_macros_prefix'], name=model.name)
create_help(tool, model)
# citations are required to be at the end
expand_macro(tool, "references")
# wrap our tool element into a tree to be able to serialize it
tree = ElementTree(tool)
logger.info("Writing to %s" % utils.get_filename(output_file), 1)
tree.write(output_file, encoding="UTF-8", xml_declaration=True, pretty_print=True)
def write_header(tool, model):
"""
add comments to the tool header
@param tool the tool xml
@param model the ctd model
"""
tool.addprevious(etree.Comment(
"This is a configuration file for the integration of a tools into Galaxy (https://galaxyproject.org/). "
"This file was automatically generated using CTDConverter."))
tool.addprevious(etree.Comment('Proposed Tool Section: [%s]' % model.opt_attribs.get("category", "")))
def create_tool(model, profile, bump):
"""
initialize the tool
@param model the ctd model
"""
tool_id = model.name.replace(" ", "_")
if bump is None:
gxy_version = "@GALAXY_VERSION@"
elif model.name in bump:
gxy_version = str(bump[model.name])
elif tool_id in bump:
gxy_version = str(bump[tool_id])
else:
gxy_version = "@GALAXY_VERSION@"
attrib = OrderedDict([("id", tool_id),
("name", model.name),
("version", "@TOOL_VERSION@+galaxy" + gxy_version)])
if profile is not None:
attrib["profile"] = profile
return Element("tool", attrib)
def create_description(tool, model):
"""
add description to the tool
@param tool the Galaxy tool
@param model the ctd model
"""
if "description" in model.opt_attribs.keys() and model.opt_attribs["description"] is not None:
description = SubElement(tool, "description")
description.text = model.opt_attribs["description"]
def create_configfiles(tool, model, **kwargs):
"""
create
- <configfiles><inputs>
- <configfiles><configfile>
The former will create a json file containing the tool parameter values
that can be accessed in cheetah with $args_json. Note that
data_style="paths" (i.e. input data sets are included in the json) is set
even if input files are given on the CLI. The reason is that this way
default values in the CTD can be restored for optional input files.
The latter will contain hardcoded parameters.
"""
configfiles_node = add_child_node(tool, "configfiles")
add_child_node(configfiles_node, "inputs",
OrderedDict([("name", "args_json"), ("data_style", "paths")]))
parameter_hardcoder = kwargs.get("parameter_hardcoder")
hc_dict = dict()
for param in utils.extract_and_flatten_parameters(model):
hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
if hardcoded_value is None:
continue
path = utils.extract_param_path(param)
for i, v in enumerate(path[:-1]):
try:
utils.getFromDict(hc_dict, path[:i + 1])
except KeyError:
utils.setInDict(hc_dict, path[:i + 1], {})
utils.setInDict(hc_dict, path, hardcoded_value)
hc_node = add_child_node(configfiles_node, "configfile",
OrderedDict([("name", "hardcoded_json")]))
hc_node.text = CDATA(json.dumps(hc_dict).replace('$', r'\$'))
# print(json.dumps(hc_dict))
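# Illustrative sketch (assumption) of the <configfiles> section generated above, for a tool with
# a single hypothetical hardcoded parameter "threads" set to 1:
#
#   <configfiles>
#     <inputs name="args_json" data_style="paths"/>
#     <configfile name="hardcoded_json"><![CDATA[{"threads": 1}]]></configfile>
#   </configfiles>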
def create_command(tool, model, **kwargs):
"""
@param tool the Galaxy tool
@param model the ctd model
@param kwargs
"""
# main command
final_cmd = OrderedDict([('preprocessing', []), ('command', []), ('postprocessing', [])])
advanced_cmd = {'preprocessing': [], 'command': [], 'postprocessing': []}
final_cmd['preprocessing'].extend(["@QUOTE_FOO@", "@EXT_FOO@", "#import re", "", "## Preprocessing"])
# - call the executable with -write_ctd to write the ctd file (with defaults)
# - use fill_ctd.py to overwrite the defaults in the ctd file with the
# Galaxy parameters in the JSON file (from inputs config file)
# - feed the ctd file to the executable (with -ini)
# note: input and output file parameters are still given on the command line
# - output file parameters are not included in the JSON file
# - input and output files are accessed through links / files that have the correct extension
final_cmd['command'].extend(["", "## Main program call"])
final_cmd['command'].append("""
set -o pipefail &&
@EXECUTABLE@ -write_ctd ./ &&
python3 '$__tool_directory__/fill_ctd.py' '@EXECUTABLE@.ctd' '$args_json' '$hardcoded_json' &&
@EXECUTABLE@ -ini @EXECUTABLE@.ctd""")
final_cmd['command'].extend(kwargs["add_to_command_line"])
final_cmd['postprocessing'].extend(["", "## Postprocessing"])
advanced_command_start = "#if ${aon}cond.{aon}selector=='advanced':".format(aon=ADVANCED_OPTIONS_NAME)
advanced_command_end = "#end if"
parameter_hardcoder = kwargs["parameter_hardcoder"]
supported_file_formats = kwargs["supported_file_formats"]
g2o, o2g = get_fileformat_maps(supported_file_formats)
for param in utils.extract_and_flatten_parameters(model):
param = modify_param_for_galaxy(param)
param_cmd = {'preprocessing': [], 'command': [], 'postprocessing': []}
command_line_prefix = utils.extract_command_line_prefix(param, model)
# TODO use utils.extract_param_name(param).replace(":", "_")? Then hardcoding ctd variables (with :) and tool variables (with _) can be distinguished
if parameter_hardcoder.get_blacklist(utils.extract_param_name(param), model.name):
continue
hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
if hardcoded_value is not None:
pass # TODO hardcoded values should go to <inputs>
# param_cmd['command'].append("%s %s" % (command_line_prefix, hardcoded_value))
else:
# in the else branch the parameter is neither blacklisted nor hardcoded...
_actual_parameter = get_galaxy_parameter_path(param)
actual_parameter = get_galaxy_parameter_path(param, fix_underscore=True)
# all but bool params need the command line argument (bools have it already in the true/false value)
if param.type is _OutFile or param.type is _OutPrefix or param.type is _InFile:
param_cmd['command'].append(command_line_prefix)
# preprocessing for file inputs:
# - create a dir with name param.name
# - create a link to id.ext in this directory
# rationale: in the autogenerated tests the same file was used as input to multiple parameters
# this leads to conflicts while linking... might also be better in general
if param.type is _InFile:
param_cmd['preprocessing'].append("mkdir %s &&" % actual_parameter)
if param.is_list:
param_cmd['preprocessing'].append("mkdir ${' '.join([\"'" + actual_parameter + "/%s'\" % (i) for i, f in enumerate($" + _actual_parameter + ") if f])} && ")
param_cmd['preprocessing'].append("${' '.join([\"ln -s '%s' '" + actual_parameter + "/%s/%s.%s' && \" % (f, i, re.sub('[^\w\-_]', '_', f.element_identifier), $gxy2omsext(f.ext)) for i, f in enumerate($" + _actual_parameter + ") if f])}")
param_cmd['command'].append("${' '.join([\"'" + actual_parameter + "/%s/%s.%s'\"%(i, re.sub('[^\w\-_]', '_', f.element_identifier), $gxy2omsext(f.ext)) for i, f in enumerate($" + _actual_parameter + ") if f])}")
else:
param_cmd['preprocessing'].append("ln -s '$" + _actual_parameter + "' '" + actual_parameter + "/${re.sub(\"[^\w\-_]\", \"_\", $" + _actual_parameter + ".element_identifier)}.$gxy2omsext($" + _actual_parameter + ".ext)' &&")
param_cmd['command'].append("'" + actual_parameter + "/${re.sub(\"[^\w\-_]\", \"_\", $" + _actual_parameter + ".element_identifier)}.$gxy2omsext($" + _actual_parameter + ".ext)'")
elif param.type is _OutPrefix:
param_cmd['preprocessing'].append("mkdir %s &&" % actual_parameter)
param_cmd['command'].append(actual_parameter + "/")
elif param.type is _OutFile:
_actual_parameter = get_galaxy_parameter_path(param, separator="_")
actual_parameter = get_galaxy_parameter_path(param, separator="_", fix_underscore=True)
# check if there is a parameter that sets the format
# if so we add an extension to the generated files which will be used to
# determine the format in the output tag
# in all other cases (corresponding input / there is only one allowed format)
# the format will be set in the output tag
formats = get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[param.type])
type_param = get_out_type_param(param, model, parameter_hardcoder)
corresponding_input, fmt_from_corresponding = get_corresponding_input(param, model)
# print("ci %s ffc %s" % (corresponding_input.name, fmt_from_corresponding))
# print("formats %s" % (formats))
if corresponding_input is not None:
actual_input_parameter = get_galaxy_parameter_path(corresponding_input)
else:
actual_input_parameter = None
# print(len(formats) > 1, (corresponding_input is None or not
# fmt_from_corresponding))
if type_param is not None:
type_param_name = get_galaxy_parameter_path(type_param)
elif len(formats) > 1 and (corresponding_input is None or not
fmt_from_corresponding): # and not param.is_list:
type_param_name = get_galaxy_parameter_path(param, suffix="type")
else:
type_param_name = None
# print("tp %s" % type_param_name)
param_cmd['preprocessing'].append("mkdir " + actual_parameter + " &&")
# if there is only one format (the output node sets format using the format attribute of the data/discover node)
# - single file: write to temp file with oms extension and move this to the actual result file
# - lists: write to files with the oms extension and remove the extension afterwards (discovery with __name__)
if len(formats) == 1:
fmt = formats.pop()
if param.is_list:
logger.info("1 fmt + list %s -> %s" % (param.name, actual_input_parameter), 1)
param_cmd['preprocessing'].append("mkdir ${' '.join([\"'" + actual_parameter + "/%s'\" % (i) for i, f in enumerate($" + actual_input_parameter + ") if f])} && ")
param_cmd['command'].append("${' '.join([\"'" + actual_parameter + "/%s/%s.%s'\"%(i, re.sub('[^\w\-_]', '_', f.element_identifier), $gxy2omsext(\"" + fmt + "\")) for i, f in enumerate($" + actual_input_parameter + ") if f])}")
param_cmd['postprocessing'].append("${' '.join([\"&& mv -n '" + actual_parameter + "/%(bn)s/%(id)s.%(gext)s' '" + _actual_parameter + "/%(bn)s/%(id)s'\"%{\"bn\": i, \"id\": re.sub('[^\w\-_]', '_', f.element_identifier), \"gext\": $gxy2omsext(\"" + fmt + "\")} for i, f in enumerate($" + actual_input_parameter + ") if f])}")
else:
logger.info("1 fmt + dataset %s" % param.name, 1)
param_cmd['command'].append("'" + actual_parameter + "/output.${gxy2omsext(\"" + fmt + "\")}'")
param_cmd['postprocessing'].append("&& mv '" + actual_parameter + "/output.${gxy2omsext(\"" + fmt + "\")}' '$" + _actual_parameter + "'")
# if there is a type parameter then we use the type selected by the user
# - single: write to temp file with the oms extension and mv it to the actual file output which is treated via change_format
# - list: let the command create output files with the oms extensions, postprocessing renames them to the galaxy extensions, output is then discover + __name_and_ext__
elif type_param_name is not None:
if param.is_list:
logger.info("type + list %s" % param.name, 1)
param_cmd['preprocessing'].append("mkdir ${' '.join([\"'" + actual_parameter + "/%s'\" % (i) for i, f in enumerate($" + actual_input_parameter + ") if f])} && ")
param_cmd['command'].append("${' '.join([\"'" + actual_parameter + "/%s/%s.%s'\"%(i, re.sub('[^\w\-_]', '_', f.element_identifier), $" + type_param_name + ") for i, f in enumerate($" + actual_input_parameter + ") if f])}")
param_cmd['postprocessing'].append("${' '.join([\"&& mv -n '" + actual_parameter + "/%(bn)s/%(id)s.%(omsext)s' '" + actual_parameter + "/%(bn)s/%(id)s.%(gext)s'\"%{\"bn\": i, \"id\": re.sub('[^\w\-_]', '_', f.element_identifier), \"omsext\":$" + type_param_name + ", \"gext\": $oms2gxyext(str($" + type_param_name + "))} for i, f in enumerate($" + actual_input_parameter + ") if f])}")
else:
logger.info("type + dataset %s" % param.name, 1)
# 1st create file with openms extension (often required by openms)
# then move it to the actual place specified by the parameter
# the format is then set by the <data> tag using <change_format>
param_cmd['command'].append("'" + actual_parameter + "/output.${" + type_param_name + "}'")
param_cmd['postprocessing'].append("&& mv '" + actual_parameter + "/output.${" + type_param_name + "}' '$" + actual_parameter + "'")
elif actual_input_parameter is not None:
if param.is_list:
logger.info("actual + list %s" % param.name, 1)
param_cmd['preprocessing'].append("mkdir ${' '.join([\"'" + actual_parameter + "/%s'\" % (i) for i, f in enumerate($" + actual_input_parameter + ") if f])} && ")
param_cmd['command'].append("${' '.join([\"'" + actual_parameter + "/%s/%s.%s'\"%(i, re.sub('[^\w\-_]', '_', f.element_identifier), f.ext) for i, f in enumerate($" + actual_input_parameter + ") if f])}")
else:
logger.info("actual + dataset %s %s %s" % (param.name, actual_input_parameter, corresponding_input.is_list), 1)
if corresponding_input.is_list:
param_cmd['command'].append("'" + actual_parameter + "/output.${" + actual_input_parameter + "[0].ext}'")
param_cmd['postprocessing'].append("&& mv '" + actual_parameter + "/output.${" + actual_input_parameter + "[0].ext}' '$" + _actual_parameter + "'")
else:
param_cmd['command'].append("'" + actual_parameter + "/output.${" + actual_input_parameter + ".ext}'")
param_cmd['postprocessing'].append("&& mv '" + actual_parameter + "/output.${" + actual_input_parameter + ".ext}' '$" + _actual_parameter + "'")
else:
if param.is_list:
raise Exception("output parameter itemlist %s without corresponding input")
else:
logger.info("else + dataset %s" % param.name, 1)
param_cmd['command'].append("'$" + _actual_parameter + "'")
# # select with multiple = true
# elif is_selection_parameter(param) and param.is_list:
# param_cmd['command'].append("${' '.join(['\"%s\"'%str(_) for _ in str($" + actual_parameter + ").split(',')])}")
# elif param.is_list:
# param_cmd['command'].append("$quote($%s" % actual_parameter + ")")
# #command += "${' '.join([\"'%s'\"%str(_) for _ in $" + actual_parameter + "])}\n"
# elif is_boolean_parameter(param):
# param_cmd['command'].append("$%s" % actual_parameter + "")
# else:
# param_cmd['command'].append('"$' + actual_parameter + '"')
# add if statement for optional parameters and preprocessing
# - for optional outputs (param_out_x) the presence of the parameter
# depends on the additional input (param_x) -> need no if
# - real string parameters (i.e. ctd type string wo restrictions) also
# need no if (otherwise the empty string could not be provided)
if not (param.required or is_boolean_parameter(param) or (param.type is str and param.restrictions is None)):
# and not(param.type is _InFile and param.is_list):
actual_parameter = get_galaxy_parameter_path(param, suffix="FLAG", fix_underscore=True)
_actual_parameter = get_galaxy_parameter_path(param, suffix="FLAG")
for stage in param_cmd:
if len(param_cmd[stage]) == 0:
continue
# special case for optional itemlists: for those if no option is selected only the parameter must be specified
if is_selection_parameter(param) and param.is_list and param.required is False:
param_cmd[stage] = [param_cmd[stage][0]] + ["#if $" + _actual_parameter + ":"] + utils.indent(param_cmd[stage][1:]) + ["#end if"]
elif is_selection_parameter(param) or param.type is _InFile:
param_cmd[stage] = ["#if $" + _actual_parameter + ":"] + utils.indent(param_cmd[stage]) + ["#end if"]
elif param.type is _OutFile or param.type is _OutPrefix:
param_cmd[stage] = ["#if \"" + param.name + "_FLAG\" in str($OPTIONAL_OUTPUTS).split(',')"] + utils.indent(param_cmd[stage]) + ["#end if"]
else:
param_cmd[stage] = ["#if str($" + _actual_parameter + "):"] + utils.indent(param_cmd[stage]) + ["#end if"]
for stage in param_cmd:
if len(param_cmd[stage]) == 0:
continue
if param.advanced and hardcoded_value is None and not (param.type is _OutFile or param.type is _OutPrefix):
advanced_cmd[stage].extend(param_cmd[stage])
else:
final_cmd[stage].extend(param_cmd[stage])
for stage in advanced_cmd:
if len(advanced_cmd[stage]) == 0:
continue
advanced_cmd[stage] = [advanced_command_start] + utils.indent(advanced_cmd[stage]) + [advanced_command_end]
final_cmd[stage].extend(advanced_cmd[stage])
out, optout = all_outputs(model, parameter_hardcoder)
if len(optout) > 0 or len(out) + len(optout) == 0:
stdout = ["| tee '$stdout'"]
if len(optout) > 0:
stdout = ["#if len(str($OPTIONAL_OUTPUTS).split(',')) == 0"] + utils.indent(stdout) + ["#end if"]
final_cmd['command'].extend(stdout)
ctd_out = ["#if \"ctd_out_FLAG\" in $OPTIONAL_OUTPUTS"] + utils.indent(["&& mv '@EXECUTABLE@.ctd' '$ctd_out'"]) + ["#end if"]
final_cmd['postprocessing'].extend(ctd_out)
command_node = add_child_node(tool, "command")
command_node.attrib["detect_errors"] = "exit_code"
command_node.text = CDATA("\n".join(sum(final_cmd.values(), [])))
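# Illustrative sketch (simplified, parameter specific lines omitted) of the command CDATA
# assembled above from the preprocessing / command / postprocessing stages:
#
#   @QUOTE_FOO@
#   @EXT_FOO@
#   #import re
#
#   ## Preprocessing
#   ...
#   ## Main program call
#   set -o pipefail &&
#   @EXECUTABLE@ -write_ctd ./ &&
#   python3 '$__tool_directory__/fill_ctd.py' '@EXECUTABLE@.ctd' '$args_json' '$hardcoded_json' &&
#   @EXECUTABLE@ -ini @EXECUTABLE@.ctd
#   ...
#   ## Postprocessing
#   #if "ctd_out_FLAG" in $OPTIONAL_OUTPUTS
#     && mv '@EXECUTABLE@.ctd' '$ctd_out'
#   #end if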
def import_macros(tool, model, **kwargs):
"""
creates the xml elements needed to import the required macros files
@param tool the Galaxy tool
@param model the ctd model
@param kwargs
"""
macros_node = add_child_node(tool, "macros")
token_node = add_child_node(macros_node, "token")
token_node.attrib["name"] = "@EXECUTABLE@"
token_node.text = utils.extract_tool_executable_path(model, kwargs["default_executable_path"])
# add <import> nodes
for macro_file_name in kwargs["macros_file_names"] + kwargs["test_macros_file_names"]:
macro_file = open(macro_file_name)
import_node = add_child_node(macros_node, "import")
# do not add the path of the file, rather, just its basename
import_node.text = os.path.basename(macro_file.name)
def expand_macro(node, macro):
expand_node = add_child_node(node, "expand")
expand_node.attrib["macro"] = macro
return expand_node
# and to "expand" the macros in a node
def expand_macros(node, macros_to_expand):
# add <expand> nodes
for expand_macro in macros_to_expand:
expand_node = add_child_node(node, "expand")
expand_node.attrib["macro"] = expand_macro
def get_galaxy_parameter_path(param, separator=".", suffix=None, fix_underscore=False):
"""
Get the complete path for a parameter as a string where the path
components are joined by the given separator. A given suffix can
be appended.
"""
p = get_galaxy_parameter_name(param, suffix, fix_underscore)
path = utils.extract_param_path(param, fix_underscore)
if len(path) > 1:
return (separator.join(path[:-1]) + separator + p).replace("-", "_")
elif param.advanced and (param.type is not _OutFile or suffix):
return ADVANCED_OPTIONS_NAME + "cond." + p
else:
return p
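# Illustrative sketch (hypothetical parameter names): for a nested CTD parameter
# "algorithm:some-param" the function above yields
#
#   get_galaxy_parameter_path(p)                 -> "algorithm.some_param"
#   get_galaxy_parameter_path(p, separator="_")  -> "algorithm_some_param"
#
# while a hypothetical top-level advanced parameter "debug" is placed under the advanced
# conditional, i.e. ADVANCED_OPTIONS_NAME + "cond.debug".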
def get_galaxy_parameter_name(param, suffix=None, fix_underscore=False):
"""
get the name of the parameter used in the galaxy tool
- replace : and - by _
- add suffix for output parameters if not None
the idea of suffix is to be used for optional outputs (out_x) for
which an additional boolean input (out_x_FLAG) exists
@param param the parameter
@param suffix suffix to append
@return the name used for the parameter in the tool form
"""
p = param.name.replace("-", "_")
if fix_underscore and p.startswith("_"):
p = p[1:]
if param.type is _OutFile and suffix is not None:
return "%s_%s" % (p, suffix)
else:
return "%s" % p
def get_out_type_param(out_param, model, parameter_hardcoder):
"""
check if there is a parameter that has the same name with appended _type
and return it if present, otherwise return None
"""
if parameter_hardcoder.get_blacklist(out_param.name + "_type", model.name):
return None
for param in utils.extract_and_flatten_parameters(model):
if param.name == out_param.name + "_type":
return param
return None
def is_in_type_param(param, model):
return is_type_param(param, model, [_InFile])
def is_out_type_param(param, model):
"""
check if the parameter is an output _type parameter
- the name ends with _type and there is an output parameter without this suffix
and return True iff this is the case
"""
return is_type_param(param, model, [_OutFile, _OutPrefix])
def is_type_param(param, model, tpe):
"""
check if the parameter is a _type parameter of an in-/output
- the name ends with _type and there is an in-/output parameter without this suffix
and return True iff this is the case
"""
if not param.name.endswith("_type"):
return False
for out_param in utils.extract_and_flatten_parameters(model):
if out_param.type not in tpe:
continue
if param.name == out_param.name + "_type":
return True
return False
def get_corresponding_input(out_param, model):
"""
get the input parameter corresponding to the given output
1st try to get the input with the same type (single file / list) and the same format restrictions
if this fails get the input that has just the same type
in both cases there must be only one such input
return the found input parameter and True iff the 1st case applied
"""
c = get_input_with_same_restrictions(out_param, model, True)
if c is None:
return (get_input_with_same_restrictions(out_param, model, False), False)
else:
return (c, True)
def get_input_with_same_restrictions(out_param, model, check_formats):
"""
get the input parameter that has the same restrictions (ctd file_formats)
- input and output must both be lists or both be simple parameters
"""
matching = []
for allow_different_type in [False, True]:
for param in utils.extract_and_flatten_parameters(model):
if param.type is not _InFile:
continue
# logger.error("%s %s %s %s %s %s" %(out_param.name, param.name, param.is_list, out_param.is_list, param.restrictions, out_param.restrictions))
if allow_different_type or param.is_list == out_param.is_list:
if check_formats:
if param.restrictions is None and out_param.restrictions is None:
matching.append(param)
elif param.restrictions is not None and out_param.restrictions is not None and param.restrictions.formats == out_param.restrictions.formats:
matching.append(param)
else:
matching.append(param)
# logger.error("match %s "%([_.name for _ in matching]))
if len(matching) > 0:
break
if len(matching) == 1:
return matching[0]
else:
return None
def create_inputs(tool, model, **kwargs):
"""
create input section of the Galaxy tool
@param tool the Galaxy tool
@param model the ctd model
@param kwargs
@return inputs node
"""
inputs_node = SubElement(tool, "inputs")
section_nodes = dict()
section_params = dict()
# some suites (such as OpenMS) need some advanced options when handling inputs
advanced_node = Element("expand", OrderedDict([("macro", ADVANCED_OPTIONS_NAME + "macro")]))
parameter_hardcoder = kwargs["parameter_hardcoder"]
supported_file_formats = kwargs["supported_file_formats"]
g2o, o2g = get_fileformat_maps(supported_file_formats)
# treat all non output-file/advanced/blacklisted/hardcoded parameters as inputs
for param in utils.extract_and_flatten_parameters(model, True):
if type(param) is ParameterGroup:
title, help_text = generate_label_and_help(param.description)
section_params[utils.extract_param_name(param)] = param
section_nodes[utils.extract_param_name(param)] = Element("section", OrderedDict([("name", param.name), ("title", title), ("help", help_text), ("expanded", "false")]))
continue
param = modify_param_for_galaxy(param)
# no need to show hardcoded parameters
hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
if hardcoded_value is not None:
continue
if parameter_hardcoder.get_blacklist(utils.extract_param_name(param), model.name):
continue
# do not output file type parameters for inputs since file types are
# known by Galaxy and set automatically by extension (which comes from
# the Galaxy data type which is translated to OpenMS datatype as defined
# in filetypes.txt )
if is_in_type_param(param, model):
continue
if utils.extract_param_name(param.parent) in section_nodes:
parent_node = section_nodes[utils.extract_param_name(param.parent)]
elif param.advanced:
parent_node = advanced_node
else:
parent_node = inputs_node
# sometimes special inputs are needed for outfiles:
if param.type is _OutFile or param.type is _OutPrefix:
# if there are multiple possible output formats, but no parameter to choose the type or a
# corresponding input then add a selection parameter
formats = get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[_OutFile])
type_param = get_out_type_param(param, model, parameter_hardcoder)
corresponding_input, fmt_from_corresponding = get_corresponding_input(param, model)
if len(formats) > 1 and type_param is None and (corresponding_input is None or not
fmt_from_corresponding): # and not param.is_list:
fmt_select = add_child_node(parent_node, "param", OrderedDict([("name", param.name + "_type"), ("type", "select"), ("optional", "false"), ("label", "File type of output %s (%s)" % (param.name, param.description))]))
g2o, o2g = get_fileformat_maps(kwargs["supported_file_formats"])
# for f in formats:
# option_node = add_child_node(fmt_select, "option", OrderedDict([("value", g2o[f])]), f)
for choice in param.restrictions.formats:
option_node = add_child_node(fmt_select, "option", OrderedDict([("value", str(choice))]))
option_node.text = o2g[str(choice)]
if choice.lower() != o2g[str(choice)]:
option_node.text += " (%s)" % choice
continue
# create the actual param node and fill the attributes
param_node = add_child_node(parent_node, "param")
create_param_attribute_list(param_node, param, model, kwargs["supported_file_formats"])
hardcoded_attributes = parameter_hardcoder.get_hardcoded_attributes(param.name, model.name, 'XML')
if hardcoded_attributes is not None:
for a in hardcoded_attributes:
param_node.attrib[a] = str(hardcoded_attributes[a])
section_parents = [utils.extract_param_name(section_params[sn].parent) for sn in section_nodes]
for sn in section_nodes:
if len(section_nodes[sn]) == 0 and sn not in section_parents:
continue
if utils.extract_param_name(section_params[sn].parent) in section_nodes:
section_nodes[utils.extract_param_name(section_params[sn].parent)].append(section_nodes[sn])
else:
inputs_node.append(section_nodes[sn])
# if there is an advanced section then append it at the end of the inputs
inputs_node.append(advanced_node)
# Add select for optional outputs
out, optout = all_outputs(model, parameter_hardcoder)
attrib = OrderedDict([("name", "OPTIONAL_OUTPUTS"),
("type", "select"),
("optional", "true"),
("multiple", "true"),
("label", "Optional outputs")])
# if len(out) == 0 and len(out) + len(optout) > 0:
# attrib["optional"] = "false"
# else:
# attrib["optional"] = "true"
param_node = add_child_node(inputs_node, "param", attrib)
for o in optout:
title, help_text = generate_label_and_help(o.description)
option_node = add_child_node(param_node, "option",
OrderedDict([("value", o.name + "_FLAG")]),
text="%s (%s)" % (o.name, title))
option_node = add_child_node(param_node, "option",
OrderedDict([("value", "ctd_out_FLAG")]),
text="Output used ctd (ini) configuration file")
return inputs_node
def is_default(value, param):
"""
check if the value is the default of the param or if the value is in the defaults of param
"""
return param.default == value or (type(param.default) is list and value in param.default)
def get_formats(param, model, o2g):
"""
determine format attribute from the CTD restrictions (i.e. the OpenMS extensions)
- also check if all listed possible formats are supported in Galaxy and warn if necessary
"""
if param.restrictions is None:
return []
elif type(param.restrictions) is _FileFormat:
choices = param.restrictions.formats
elif is_out_type_param(param, model):
choices = param.restrictions.choices
else:
raise InvalidModelException("Unrecognized restriction type [%(type)s] "
"for [%(name)s]" % {"type": type(param.restrictions),
"name": param.name})
# check if there are formats that have not been registered yet...
formats = set()
for format_name in choices:
if format_name not in o2g:
logger.warning("Ignoring unknown format %s for parameter %s" % (format_name, param.name), 1)
else:
formats.add(format_name)
return sorted(formats)
def get_galaxy_formats(param, model, o2g, default=None):
"""
determine galaxy formats for a param (i.e. list of allowed Galaxy extensions)
from the CTD restrictions (i.e. the OpenMS extensions)
- if there is a single one, then take this
- if there is none than use given default
"""
formats = get_formats(param, model, o2g)
gxy_formats = set([o2g[_] for _ in formats if _ in o2g])
if len(gxy_formats) == 0:
if default is not None:
gxy_formats.add(default)
else:
raise InvalidModelException("No supported formats [%(type)s] "
"for [%(name)s]" % {"type": type(param.restrictions),
"name": param.name})
return sorted(gxy_formats)
def create_param_attribute_list(param_node, param, model, supported_file_formats):
"""
get the attributes of input parameters
@param param_node the galaxy tool param node
@param param the ctd parameter
@param supported_file_formats
"""
g2o, o2g = get_fileformat_maps(supported_file_formats)
# set the name, argument and a first guess for the type (which will be overwritten
# in some cases, see below)
# even though the conversion relies on the fact that the param names are identical
# to the ctd ITEM names, we replace dashes by underscores because input and output
# parameters need to be treated in cheetah. variable names are currently fixed back
# to dashes in fill_ctd.py. currently there seems to be only a single tool
# requiring this https://github.com/OpenMS/OpenMS/pull/4529
param_node.attrib["name"] = get_galaxy_parameter_name(param)
param_node.attrib["argument"] = "-%s" % utils.extract_param_name(param)
param_type = TYPE_TO_GALAXY_TYPE[param.type]
if param_type is None:
raise ModelError("Unrecognized parameter type %(type)s for parameter %(name)s"
% {"type": param.type, "name": param.name})
# ITEMLIST is rendered as a text field (even if it contains integers or floats), an
# exception are file lists, which are treated a bit further below
if param.is_list:
param_type = "text"
if is_selection_parameter(param):
param_type = "select"
if len(param.restrictions.choices) < 5 and not param.is_list:
param_node.attrib["display"] = "radio"
if param.is_list:
param_node.attrib["multiple"] = "true"
if is_boolean_parameter(param):
param_type = "boolean"
if param.type is _InFile:
# input files become Galaxy data parameters; the format is derived from the CTD restrictions
param_node.attrib["type"] = "data"
param_node.attrib["format"] = ",".join(get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[_InFile]))
# in the case of multiple input set multiple flag
if param.is_list:
param_node.attrib["multiple"] = "true"
else:
param_node.attrib["type"] = param_type
if param_type == "select" and param.default in param.restrictions.choices:
param_node.attrib["optional"] = "false"
else:
param_node.attrib["optional"] = str(not param.required).lower()
# check for parameters with restricted values (which will correspond to a "select" in galaxy)
if param.restrictions is not None or param_type == "boolean":
# it could be either _Choices or _NumericRange, with special case for boolean types
if param_type == "boolean":
create_boolean_parameter(param_node, param)
elif type(param.restrictions) is _Choices:
# TODO if the parameter is used to select the output file type the
# options need to be replaced with the Galaxy data types
# if is_out_type_param(param, model):
# param.restrictions.choices = get_supported_file_types(param.restrictions.choices, supported_file_formats)
# add a "nothing chosen" option to optional selects w/o default
if param.default is None or type(param.default) is _Null:
if param_node.attrib["optional"] == "true":
option_node = add_child_node(param_node, "option", OrderedDict([("value", "")]), text="default (nothing chosen)")
# else:
# option_node = add_child_node(param_node, "option", OrderedDict([("value", "")]), text="select a value")
# create as many <option> elements as restriction values
if is_out_type_param(param, model):
logger.warning("%s %s" % (param.name, param.type))
formats = get_formats(param, model, o2g)
for fmt in formats:
option_node = add_child_node(param_node, "option",
OrderedDict([("value", str(fmt))]))
option_node.text = o2g[str(fmt)]
if fmt.lower() != o2g[str(fmt)]:
option_node.text += " (%s)" % fmt
if is_default(fmt, param):
option_node.attrib["selected"] = "true"
else:
for choice in param.restrictions.choices:
option_node = add_child_node(param_node, "option",
OrderedDict([("value", str(choice))]),
text=str(choice))
if is_default(choice, param):
option_node.attrib["selected"] = "true"
# add validator to check that "nothing selected" is not seletcedto mandatory options w/o default
if param_node.attrib["optional"] == "False" and (param.default is None or type(param.default) is _Null):
validator_node = add_child_node(param_node, "validator", OrderedDict([("type", "expression"), ("message", "A value needs to be selected")]))
validator_node.text = 'value != "select a value"'
# numeric ranges (which appear for int and float ITEMS and ITEMLISTS)
# these are reflected by min and max attributes
# since item lists become text parameters with a validator they don't need these attributes
elif type(param.restrictions) is _NumericRange and param_type == "text":
pass
elif type(param.restrictions) is _NumericRange and param_type != "text":
if param.type is not int and param.type is not float:
raise InvalidModelException("Expected either 'int' or 'float' in the numeric range restriction for "
"parameter [%(name)s], but instead got [%(type)s]" %
{"name": param.name, "type": type(param.restrictions)})
# extract the min and max values and add them as attributes
# validate the provided min and max values
if param.restrictions.n_min is not None:
param_node.attrib["min"] = str(param.restrictions.n_min)
if param.restrictions.n_max is not None:
param_node.attrib["max"] = str(param.restrictions.n_max)
elif type(param.restrictions) is _FileFormat:
# has already been handled
pass
else:
raise InvalidModelException("Unrecognized restriction type [%(type)s] for parameter [%(name)s]"
% {"type": type(param.restrictions), "name": param.name})
if param_type == "text":
# for repeats (which are rendered as text field in the tool form) that are actually
# integer/floats special validation is necessary (try to convert them and check if
# in the min max range if a range is given)
if TYPE_TO_GALAXY_TYPE[param.type] in ["integer", "float"]:
valsan = expand_macro(param_node, "list_%s_valsan" % TYPE_TO_GALAXY_TYPE[param.type])
if type(param.restrictions) is _NumericRange and not (param.restrictions.n_min is None and param.restrictions.n_max is None):
expression = "len(value.split(' ')) == len([_ for _ in value.split(' ') if "
message = "a space separated list of %s values " % TYPE_TO_GALAXY_TYPE[param.type]
if param.restrictions.n_min is not None and param.restrictions.n_max is not None:
expression += " %s <= %s(_) <= %s" % (param.restrictions.n_min, param.type.__name__, param.restrictions.n_max)
message += "in the range %s:%s " % (param.restrictions.n_min, param.restrictions.n_max)
elif param.restrictions.n_min is not None:
expression += " %s <= %s(_)" % (param.restrictions.n_min, param.type.__name__)
message += "in the range %s: " % (param.restrictions.n_min)
elif param.restrictions.n_max is not None:
expression += " %s(_) <= %s" % (param.type.__name__, param.restrictions.n_max)
message += "in the range :%s " % (param.restrictions.n_min)
expression += "])\n"
message += "is required"
validator_node = SubElement(valsan, "validator", OrderedDict([("type", "expression"), ("message", message)]))
validator_node.text = CDATA(expression)
else:
# add quotes to the default values (only if they include spaces .. then the UI looks nicer)
if not (param.default is None or type(param.default) is _Null) and param.type is not _InFile:
if type(param.default) is list:
for i, d in enumerate(param.default):
if " " in d:
param.default[i] = '"%s"' % d
# elif " " in param.default:
# param.default = '"%s"' %param.default
# add sanitizer nodes to
# - text (only those that are not actually integer selects which are treated above) and
# - select params,
# this is needed for special characters like "[" which are used for example by FeatureFinderMultiplex
if ((param_type == "text" and not TYPE_TO_GALAXY_TYPE[param.type] in ["integer", "float"]) or is_selection_parameter(param)) and param.type is not _InFile:
if param.is_list and not is_selection_parameter(param):
valsan = expand_macro(param_node, "list_string_val")
valsan = expand_macro(param_node, "list_string_san")
# check for default value
if not (param.default is None or type(param.default) is _Null):
# defaults of selects are set via the selected attribute of the options (happens above)
if param_type == "select":
pass
elif type(param.default) is list:
# we ASSUME that a list of parameters looks like:
# $ tool -ignore He Ar Xe
# meaning, that, for example, Helium, Argon and Xenon will be ignored
param_node.attrib["value"] = ' '.join(map(str, param.default))
elif param_type != "boolean":
param_node.attrib["value"] = str(param.default)
else:
# simple boolean with a default
if param.default is True:
param_node.attrib["checked"] = "true"
elif param.type is int or param.type is float or param.type is str:
if param_type == "select":
pass
else:
param_node.attrib["value"] = ""
# add label, help, and argument
label = "%s parameter" % param.name
help_text = ""
if param.description is not None:
label, help_text = generate_label_and_help(param.description)
if param.is_list and not is_selection_parameter(param) and param.type is not _InFile:
help_text += " (space separated list, in order to allow for spaces in list items surround them by single quotes)"
if param.type is _InFile:
help_text += " select %s data sets(s)" % (",".join(get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[_InFile])))
param_node.attrib["label"] = label
param_node.attrib["help"] = help_text
def generate_label_and_help(desc):
help_text = ""
# This tag is found in some descriptions
if not isinstance(desc, str):
desc = str(desc)
# desc = desc.encode("utf8")
desc = desc.replace("#br#", ". ")
# Get rid of dots in the end
if desc.endswith("."):
desc = desc.rstrip(".")
# Check if first word is a normal word and make it uppercase
if str(desc).find(" ") > -1:
first_word, rest = str(desc).split(" ", 1)
if str(first_word).islower():
# check if label has a quotient of the form a/b
if first_word.find("/") != 1:
first_word = first_word.capitalize()
desc = first_word + " " + rest
# label = desc.decode("utf8")
label = desc
# split delimiters ".,?!;("
if len(desc) > 50:
m = re.search(r"([.?!] |e\.g\.|\(e\.g\.|i\.e\.|\(i\.e\.)", desc)
if m is not None:
label = desc[:m.start()].rstrip(".?!, ")
help_text = desc[m.start():].lstrip(".?!, ")
# # Try to split the label if it is too long
# if len(desc) > 50:
# # find an example and put everything before in the label and the e.g. in the help
# if desc.find("e.g.") > 1 :
# label, help_text = desc.split("e.g.",1)
# help_text = "e.g." + help_text
# else:
# # find the end of the first sentence
# # look for ". " because some labels contain .file or something similar
# delimiter = ""
# if desc.find(". ") > 1 and desc.find("? ") > 1:
# if desc.find(". ") < desc.find("? "):
# delimiter = ". "
# else:
# delimiter = "? "
# elif desc.find(". ") > 1:
# delimiter = ". "
# elif desc.find("? ") > 1:
# delimiter = "? "
# if delimiter != "":
# label, help_text = desc.split(delimiter, 1)
#
# # add the question mark back
# if delimiter == "? ":
# label += "? "
# remove all linebreaks
label = label.rstrip().rstrip('<br>').rstrip()
return label, help_text
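# Illustrative sketch (hypothetical description): for a CTD description like
# "mass tolerance in ppm. Used for precursor matching, e.g. 10 ppm" the function above returns
# roughly label="Mass tolerance in ppm" and the rest ("Used for precursor matching, ...") as
# help text, since the description exceeds 50 characters and contains a sentence delimiter.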
def is_boolean_parameter(param):
"""
determines if the given choices are boolean (basically, if the possible values are true/false)
@param param the ctd parameter
@return True iff a boolean parameter
"""
# detect boolean selects of OpenMS
if type(param.restrictions) is _Choices:
return set(param.restrictions.choices) == set(["true", "false"])
else:
return param.type is bool
def is_selection_parameter(param):
"""
determines if there are choices for the parameter and it's not bool
@param param the ctd parameter
@return True iff a selection parameter
"""
if type(param.restrictions) is _Choices:
return set(param.restrictions.choices) != set(["true", "false"])
else:
return False
def get_lowercase_list(some_list):
return [str(_).lower().strip() for _ in some_list]
def create_boolean_parameter(param_node, param):
"""
creates a galaxy boolean parameter type
this method assumes that param has restrictions, and that only two restrictions are present
(either yes/no or true/false)
TODO: true and false values can be way more than 'true' and 'false'
but for that we need CTD support
"""
# in ctd (1.6.2) bools are strings with restriction true,false
# - if the default is false then they are flags
# - otherwise the true or false value needs to be added (where the true case is unnecessary)
# A special case are restrictions false,true which are not treated as flags
if param.type == str:
choices = get_lowercase_list(param.restrictions.choices)
if set(choices) == set(["true", "false"]):
param_node.attrib["truevalue"] = "true"
param_node.attrib["falsevalue"] = "false"
else:
param_node.attrib["truevalue"] = choices[0]
param_node.attrib["falsevalue"] = choices[1]
# set the checked attribute
if param.default is not None:
checked_value = "false"
default = param.default.lower().strip()
if default == "yes" or default == "true":
checked_value = "true"
param_node.attrib["checked"] = checked_value
else:
param_node.attrib["truevalue"] = "true"
param_node.attrib["falsevalue"] = "false"
param_node.attrib["checked"] = str(param.default).lower()
if "optional" in param_node.attrib:
del param_node.attrib["optional"]
def all_outputs(model, parameter_hardcoder):
"""
return lists of required and optional output parameters
"""
out = []
optout = []
for param in utils.extract_and_flatten_parameters(model):
hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
if parameter_hardcoder.get_blacklist(utils.extract_param_name(param), model.name) or hardcoded_value:
# let's not use an extra level of indentation and use NOP
continue
if not (param.type is _OutFile or param.type is _OutPrefix):
continue
if not param.required:
optout.append(param)
else:
out.append(param)
return out, optout
def output_filter_text(param):
"""
get the text of the filter for optional outputs
"""
return '"%s_FLAG" in OPTIONAL_OUTPUTS' % param.name
def create_outputs(parent, model, **kwargs):
"""
create outputs section of the Galaxy tool
@param parent the Galaxy tool element
@param model the ctd model
@param kwargs
- parameter_hardcoder and
- supported_file_formats
"""
outputs_node = add_child_node(parent, "outputs")
parameter_hardcoder = kwargs["parameter_hardcoder"]
for param in utils.extract_and_flatten_parameters(model):
param = modify_param_for_galaxy(param)
# no need to show hardcoded parameters
hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
if parameter_hardcoder.get_blacklist(utils.extract_param_name(param), model.name) or hardcoded_value:
# let's not use an extra level of indentation and use NOP
continue
if param.type is not _OutFile and param.type is not _OutPrefix:
continue
create_output_node(outputs_node, param, model, kwargs["supported_file_formats"], parameter_hardcoder)
# If there are no outputs defined in the ctd the node will have no children
# and the stdout will be used as output
out, optout = all_outputs(model, parameter_hardcoder)
if len(out) == 0:
stdout = add_child_node(outputs_node, "data",
OrderedDict([("name", "stdout"), ("format", "txt"),
("label", "${tool.name} on ${on_string}: stdout"),
("format", "txt")]))
add_child_node(stdout, "filter", text="OPTIONAL_OUTPUTS is None")
# manually add output for the ctd file
ctd_out = add_child_node(outputs_node, "data", OrderedDict([("name", "ctd_out"), ("format", "xml"), ("label", "${tool.name} on ${on_string}: ctd")]))
add_child_node(ctd_out, "filter", text='OPTIONAL_OUTPUTS is not None and "ctd_out_FLAG" in OPTIONAL_OUTPUTS')
return outputs_node
def create_output_node(parent, param, model, supported_file_formats, parameter_hardcoder):
g2o, o2g = get_fileformat_maps(supported_file_formats)
# add a data node / collection + discover_datasets
# in the former case we just set the discover_node equal to the data node
# then we can just use this to set the common format attribute
if not param.is_list and param.type is not _OutPrefix:
data_node = add_child_node(parent, "data")
discover_node = data_node
else:
data_node = add_child_node(parent, "collection")
data_node.attrib["type"] = "list"
discover_node = add_child_node(data_node, "discover_datasets",
OrderedDict([("directory", get_galaxy_parameter_path(param, separator="_")),
("recurse", "true")]))
data_node.attrib["name"] = get_galaxy_parameter_path(param, separator="_")
data_node.attrib["label"] = "${tool.name} on ${on_string}: %s" % utils.extract_param_name(param)
formats = get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[_OutFile])
type_param = get_out_type_param(param, model, parameter_hardcoder)
corresponding_input, fmt_from_corresponding = get_corresponding_input(param, model)
if type_param is not None:
type_param_name = get_galaxy_parameter_path(type_param)
type_param_choices = get_formats(param, model, o2g) # [_ for _ in type_param.restrictions.choices]
elif len(formats) > 1 and (corresponding_input is None or not
fmt_from_corresponding): # and not param.is_list:
type_param_name = get_galaxy_parameter_path(param, suffix="type")
type_param_choices = get_formats(param, model, o2g)
else:
type_param_name = None
# if there is only a single possible output format we set this
# logger.error("%s %s %s %s %s" %(param.name, formats, type_param, fmt_from_corresponding, corresponding_input))
if len(formats) == 1:
logger.info("OUTPUT %s 1 fmt %s" % (param.name, formats), 1)
discover_node.attrib["format"] = formats.pop()
if param.is_list:
discover_node.attrib["pattern"] = "__name__"
elif param.type is _OutPrefix:
discover_node.attrib["pattern"] = r"_?(?P<designation>.*)\.[^.]*"
# if there is another parameter where the user selects the format
# then this format was added as file extension on the CLI, now we can discover this
elif type_param_name is not None:
logger.info("OUTPUT %s type" % param.name, 1)
if not param.is_list:
if len(type_param_choices) > 1:
change_node = add_child_node(data_node, "change_format")
for i, r in enumerate(type_param_choices):
f = o2g.get(r, None)
# TODO this should not happen for fully specified fileformats file
if f is None:
f = r
if i == 0:
data_node.attrib["format"] = f
else:
add_child_node(change_node, "when", OrderedDict([("input", type_param_name), ("value", r), ("format", f)]))
else:
discover_node.attrib["pattern"] = "__name_and_ext__"
elif corresponding_input is not None:
logger.info("OUTPUT %s input %s" % (param.name, corresponding_input.name), 1)
if param.is_list:
discover_node.attrib["pattern"] = "__name_and_ext__"
# data_node.attrib["structured_like"] = get_galaxy_parameter_name(corresponding_input)
# data_node.attrib["inherit_format"] = "true"
else:
data_node.attrib["format_source"] = get_galaxy_parameter_path(corresponding_input)
data_node.attrib["metadata_source"] = get_galaxy_parameter_path(corresponding_input)
else:
logger.info("OUTPUT %s else" % (param.name), 1)
if not param.is_list:
data_node.attrib["auto_format"] = "true"
else:
            raise InvalidModelException("No way to know the format "
                                        "for output [%(name)s]" % {"name": param.name})
    # # data output has format (except if format_source has been added already)
# # note .. collection output has no format
# if not param.is_list and not "format_source" in data_node.attrib:
# data_node.attrib["format"] = data_format
# add filter for optional parameters
if not param.required:
filter_node = add_child_node(data_node, "filter")
filter_node.text = "OPTIONAL_OUTPUTS is not None and " + output_filter_text(param)
return data_node
def get_supported_file_types(formats, supported_file_formats):
r = set()
for f in formats:
if f in supported_file_formats:
r.add(supported_file_formats[f].galaxy_extension)
return r
# print f, f in supported_file_formats, supported_file_formats[f].galaxy_extension
# return set([supported_file_formats[_].galaxy_extension
# for _ in formats if _ in supported_file_formats])
def create_change_format_node(parent, data_formats, input_ref):
# <change_format>
# <when input="secondary_structure" value="true" format="txt"/>
# </change_format>
change_format_node = add_child_node(parent, "change_format")
for data_format in data_formats:
add_child_node(change_format_node, "when",
OrderedDict([("input", input_ref), ("value", data_format), ("format", data_format)]))
def create_tests(parent, inputs=None, outputs=None, test_macros_prefix=None, name=None):
"""
create tests section of the Galaxy tool
    @param parent the parent node to which the tests section is added
    @param inputs a copy of the tool's inputs node
    @param outputs a copy of the tool's outputs node
    @param test_macros_prefix prefixes used to expand test macros when no inputs/outputs are given
    @param name tool name appended to the test macro prefixes
"""
tests_node = add_child_node(parent, "tests")
if not (inputs is None or outputs is None):
fidx = 0
test_node = add_child_node(tests_node, "test")
strip_elements(inputs, "validator", "sanitizer")
for node in inputs.iter():
if node.tag == "expand" and node.attrib["macro"] == ADVANCED_OPTIONS_NAME + "macro":
node.tag = "conditional"
node.attrib["name"] = ADVANCED_OPTIONS_NAME + "cond"
add_child_node(node, "param", OrderedDict([("name", ADVANCED_OPTIONS_NAME + "selector"), ("value", "advanced")]))
if "type" not in node.attrib:
continue
if (node.attrib["type"] == "select" and "true" in set([_.attrib.get("selected", "false") for _ in node])) or\
(node.attrib["type"] == "select" and node.attrib.get("value", "") != ""):
node.tag = "delete_node"
continue
            # TODO make this optional (i.e. add a parameter)
if node.attrib.get("optional", None) == "true" and node.attrib["type"] != "boolean":
node.tag = "delete_node"
continue
if node.attrib["type"] == "boolean":
if node.attrib["checked"] == "true":
node.attrib["value"] = "true" # node.attrib["truevalue"]
else:
node.attrib["value"] = "false" # node.attrib["falsevalue"]
elif node.attrib["type"] == "text" and node.attrib["value"] == "":
node.attrib["value"] = "1 2" # use a space separated list here to cover the repeat (int/float) case
elif node.attrib["type"] == "integer" and node.attrib["value"] == "":
node.attrib["value"] = "1"
elif node.attrib["type"] == "float" and node.attrib["value"] == "":
node.attrib["value"] = "1.0"
elif node.attrib["type"] == "select":
if node.attrib.get("display", None) == "radio" or node.attrib.get("multiple", "false") == "false":
node.attrib["value"] = node[0].attrib["value"]
elif node.attrib.get("multiple", None) == "true":
node.attrib["value"] = ",".join([_.attrib["value"] for _ in node if "value" in _.attrib])
elif node.attrib["type"] == "data":
node.attrib["ftype"] = node.attrib["format"].split(',')[0]
if node.attrib.get("multiple", "false") == "true":
node.attrib["value"] = "{fidx}test.ext,{fidx}test2.ext".format(fidx=fidx)
else:
node.attrib["value"] = "{fidx}test.ext".format(fidx=fidx)
fidx += 1
for node in inputs.iter():
for a in set(node.attrib) - set(["name", "value", "ftype"]):
del node.attrib[a]
strip_elements(inputs, "delete_node", "option", "expand")
for node in inputs:
test_node.append(node)
outputs_cnt = 0
for node in outputs.iter():
if node.tag == "data" or node.tag == "collection":
                # assuming that all filters evaluate to false
has_filter = False
for c in node:
if c.tag == "filter":
has_filter = True
break
if not has_filter:
outputs_cnt += 1
else:
node.tag = "delete_node"
if node.tag == "data":
node.tag = "output"
try:
node.attrib["ftype"] = node.attrib["format"]
except KeyError:
pass
node.attrib["value"] = "outfile.txt"
if node.tag == "collection":
node.tag = "output_collection"
if node.attrib.get("name", None) == "stdout":
node.attrib["lines_diff"] = "2"
for a in set(node.attrib) - set(["name", "value", "ftype", "lines_diff"]):
del node.attrib[a]
strip_elements(outputs, "delete_node", "discover_datasets", "filter", "change_format")
for node in outputs:
test_node.append(node)
# if no optional output is selected the stdout is added as output
if outputs_cnt == 0:
outputs_cnt = 1
test_node.attrib["expect_num_outputs"] = str(outputs_cnt)
elif not (test_macros_prefix is None or name is None):
expand_macros(tests_node, [p + name for p in test_macros_prefix])
def create_test_only(model, **kwargs):
parameter_hardcoder = kwargs["parameter_hardcoder"]
unsniffable = kwargs["test_unsniffable"]
supported_file_formats = kwargs["supported_file_formats"]
g2o, o2g = get_fileformat_maps(supported_file_formats)
section_nodes = dict()
section_params = dict()
test = Element("test")
advanced = add_child_node(test, "conditional", OrderedDict([("name", "adv_opts_cond")]))
add_child_node(advanced, "param", OrderedDict([("name", "adv_opts_selector"), ("value", "advanced")]))
optout = ["ctd_out_FLAG"]
outcnt = 1
for param in utils.extract_and_flatten_parameters(model, True):
ext = None
# no need to show hardcoded parameters
# except for the test parameter
hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
if parameter_hardcoder.get_blacklist(utils.extract_param_name(param), model.name) or hardcoded_value is not None:
if param.name != "test":
continue
if utils.extract_param_name(param.parent) in section_nodes:
parent = section_nodes[utils.extract_param_name(param.parent)]
elif type(param) is not ParameterGroup and param.advanced:
parent = advanced
else:
parent = test
if type(param) is ParameterGroup:
section_params[utils.extract_param_name(param)] = param
section_nodes[utils.extract_param_name(param)] = add_child_node(parent, "section", OrderedDict([("name", param.name)]))
continue
if param.type is _OutFile:
given = type(param.default) is _OutFile or (type(param.default) is list) and len(param.default) > 0
if not param.required and given:
optout.append("%s_FLAG" % param.name)
if given:
formats = get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[_OutFile])
type_param = get_out_type_param(param, model, parameter_hardcoder)
corresponding_input, fmt_from_corresponding = get_corresponding_input(param, model)
if type(param.default) is _OutFile:
f = param.default
elif type(param.default) is list:
f = param.default[0]
else:
raise Exception("Outfile with non str or list default %s[%s]" % (param, type(param.default)))
# get the file type from the longest possible extension that
# matches the known extensions
                # longest: because e.g. pep.xml should be preferred over xml
if f.endswith(".tmp"):
f = f[:-4]
splitted = f.split(".")
ext = None
for i in range(len(splitted)):
check_ext = ".".join(splitted[i:])
if check_ext in o2g:
ext = o2g[check_ext]
break
if ext not in formats:
if ext == "txt" and "csv" in formats:
ext = "csv"
elif ext == "txt" and "tsv" in formats:
ext = "tsv"
elif len(formats) == 1:
ext = formats[0]
if len(formats) > 1 and (corresponding_input is None or not
fmt_from_corresponding): # and not param.is_list:
if type_param is None:
try:
print("%s -> %s" % (ext, g2o[ext]))
attrib = OrderedDict([("name", param.name + "_type"), ("value", g2o[ext])])
add_child_node(parent, "param", attrib)
except KeyError:
raise Exception("parent %s name %s ext %s" % (parent, param.name, ext))
if type_param is not None and type(type_param.default) is _Null:
if ext is not None:
type_param.default = ext
if param.required or given:
outcnt += 1
# don't output empty values for bool, and data parameters
if type(param.default) is _Null and not param.required:
if is_boolean_parameter(param):
continue
elif param.type is _OutFile:
continue
elif param.type is _InFile:
continue
# lists need to be joined appropriately
# - special care for outfile lists (ie collections): since we do not know (easily) the names of the collection elements we just use the count
        # exception of list parameters that are hardcoded to non-lists (the default is still a list)
if not param.is_list and type(param.default) is list:
logger.info("Found non-list parameter %s with list default (hardcoded?). Using only first value/" % param.name, 0)
try:
param.default = param.default[0]
            except IndexError:
param.default = _Null()
if param.is_list and type(param.default) is not _Null:
if param.type is _InFile:
value = ','.join(map(str, param.default))
elif param.type is _OutFile:
value = str(len(param.default))
elif param.type is str:
if type(param.restrictions) is _Choices:
value = ','.join(map(str, param.default))
else:
value = '"' + '" "'.join(map(str, param.default)) + '"'
else:
value = ' '.join(map(str, param.default))
else:
if type(param.default) is bool:
value = str(param.default).lower()
else:
value = str(param.default)
# use name where dashes are replaced by underscores
# see also create inputs
if param.type is _OutFile:
name = get_galaxy_parameter_path(param, separator="_")
if param.is_list:
nd = add_child_node(test, "output_collection", OrderedDict([("name", name), ("count", value)]))
else:
# TODO use delta_frac https://github.com/galaxyproject/galaxy/pull/9425
nd = add_child_node(test, "output", OrderedDict([("name", name), ("file", value), ("compare", "sim_size"), ("delta", "5700")]))
if ext:
nd.attrib["ftype"] = ext
elif param.type is _OutPrefix:
            # for outprefix the element count needs to be added manually
name = get_galaxy_parameter_path(param, separator="_")
nd = add_child_node(test, "output_collection", OrderedDict([("name", name), ("count", "")]))
else:
name = get_galaxy_parameter_name(param)
nd = add_child_node(parent, "param", OrderedDict([("name", name), ("value", value)]))
# add format attribute for unsniffable extensions
if param.type is _InFile:
ext = os.path.splitext(value)[1][1:]
if ext in unsniffable and ext in o2g:
nd.attrib["ftype"] = o2g[ext]
add_child_node(test, "param", OrderedDict([("name", "OPTIONAL_OUTPUTS"),
("value", ",".join(optout))]))
ctd_out = add_child_node(test, "output", OrderedDict([("name", "ctd_out"), ("ftype", "xml")]))
ctd_assert = add_child_node(ctd_out, "assert_contents")
add_child_node(ctd_assert, "is_valid_xml")
if outcnt == 0:
outcnt += 1
nd = add_child_node(test, "output", OrderedDict([("name", "stdout"),
("value", "stdout.txt"),
("compare", "sim_size")]))
test.attrib["expect_num_outputs"] = str(outcnt)
# if all_optional_outputs(model, parameter_hardcoder):
return test
def create_help(tool, model):
"""
create help section of the Galaxy tool
@param tool the Galaxy tool
@param model the ctd model
@param kwargs
"""
help_node = add_child_node(tool, "help")
help_node.text = CDATA(utils.extract_tool_help_text(model))
def add_child_node(parent_node, child_node_name, attributes=OrderedDict([]), text=None):
"""
helper function to add a child node using the given name to the given parent node
@param parent_node the parent
@param child_node_name the desired name of the child
@param attributes desired attributes of the child
@return the created child node
"""
child_node = SubElement(parent_node, child_node_name, attributes)
if text is not None:
child_node.text = text
return child_node
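# Hedged usage sketch, not part of the converter and never called by it: it
# relies on the module's existing Element/OrderedDict imports and shows how
# add_child_node assembles a small XML fragment; the element names and
# attribute values below are placeholders, not taken from a real tool.
def _example_add_child_node():
    outputs = Element("outputs")
    data = add_child_node(outputs, "data",
                          OrderedDict([("name", "stdout"), ("format", "txt")]))
    add_child_node(data, "filter", text="OPTIONAL_OUTPUTS is None")
    return outputs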
|
# -*- encoding: utf-8 -*-
# Date: 16/Jan/2022
# Author: Steven Huang, Auckland, NZ
# License: MIT License
"""
Description: Web Configuration class
"""
import os
class Config:
SECRET_KEY = 'e7c794326ea87a59b2cf616809e1efcd' # secrets.token_hex(16)
SQLALCHEMY_DATABASE_URI = 'sqlite:///db/sqlite/website.db'
# SQLALCHEMY_DATABASE_URI = 'postgresql://postgres:123@localhost/my_website'
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('EMAIL_USER')
MAIL_PASSWORD = os.environ.get('EMAIL_PASSWORD')
# print('MAIL_USERNAME = ', MAIL_USERNAME)
# print('MAIL_PASSWORD = ', MAIL_PASSWORD)
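# Hedged usage sketch (not part of this module): a Flask application would
# typically load the class with app.config.from_object, e.g.
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object(Config)
#   app.config["MAIL_PORT"]   # -> 587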
|
#
# Example file for working with functions
#
# define a basic function
def func1():
print("I am a function")
# function that takes arguments
def func2(arg1, arg2):
print(arg1, " ", arg2)
# function that returns a value
def cube(x):
    return x ** 3
# function with default value for an argument
def power(num, x=1):
result = 1
for i in range(x):
result = result * num
return result
#function with variable number of arguments
def multi_add(*args):
result = 0
for x in args:
result = result + x
return result
# func1()
# print(func1())
# print(func1)
# func2(10, 20)
# print(func2(10, 20))
# print(cube(3))
# print(power(2))
# print(power(2, 3))
# print(power(x=3, num=2))
print(multi_add(2, 3, 4, 5, 6, 7, 8))
|
from firedrake import *
from firedrake.petsc import PETSc
from firedrake import COMM_WORLD
try:
import matplotlib.pyplot as plt
plt.rcParams["contour.corner_mask"] = False
plt.close("all")
except ImportError:
warning("Matplotlib not imported")
nx, ny = 20, 20
Lx, Ly = 1.0, 1.0
quadrilateral = True
mesh = RectangleMesh(nx, ny, Lx, Ly, quadrilateral=quadrilateral)
plot(mesh)
plt.axis("off")
degree = 1
k_plus = 0
primal_family = "DG"
tracer_family = "DGT"
U = FunctionSpace(mesh, primal_family, degree + k_plus)
V = VectorFunctionSpace(mesh, "CG", degree + k_plus)
T = FunctionSpace(mesh, tracer_family, degree)
W = U * T
# Trial and test functions
solution = Function(W)
u, lambda_h = split(solution)
v, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
x, y = SpatialCoordinate(mesh)
# Model parameters
k = Constant(1.0)
mu = Constant(1.0)
rho = Constant(0.0)
g = Constant((0.0, 0.0))
# Exact solution and source term projection
p_exact = sin(2 * pi * x / Lx) * sin(2 * pi * y / Ly)
sol_exact = Function(U).interpolate(p_exact)
sol_exact.rename("Exact pressure", "label")
sigma_e = Function(V, name="Exact velocity")
sigma_e.project(-(k / mu) * grad(p_exact))
plot(sigma_e)
source_expr = div(-(k / mu) * grad(p_exact))
f = Function(U).interpolate(source_expr)
plot(sol_exact)
plt.axis("off")
# BCs
p_boundaries = Constant(0.0)
v_projected = sigma_e
bc_multiplier = DirichletBC(W.sub(1), p_boundaries, "on_boundary")
# DG parameter
s = Constant(1.0)
beta = Constant(32.0)
h = CellDiameter(mesh)
h_avg = avg(h)
# Classical term
a = dot(grad(u), grad(v)) * dx
L = f * v * dx
# DG terms
a += s * (dot(jump(u, n), avg(grad(v))) - dot(jump(v, n), avg(grad(u)))) * dS
a += (beta / h_avg) * dot(jump(u, n), jump(v, n)) * dS
a += (beta / h) * u * v * ds
# DG boundary condition terms
L += (
s * dot(grad(v), n) * p_boundaries * ds
+ (beta / h) * p_boundaries * v * ds
+ v * dot(sigma_e, n) * ds
)
# Hybridization terms
# a += (-s * jump(grad(v), n) * (lambda_h('+') - avg(u)) + jump(grad(u), n) * (mu_h('+') - avg(v))) * dS
a += (
-s * jump(grad(v), n) * (lambda_h("+") - u("+")) + jump(grad(u), n) * (mu_h("+") - v("+"))
) * dS
# a += (4.0 * beta / h_avg) * (lambda_h('+') - avg(u)) * (mu_h('+') - avg(v)) * dS
a += (4.0 * beta / h_avg) * (lambda_h("+") - u("+")) * (mu_h("+") - v("+")) * dS
F = a - L
# Solving SC below
PETSc.Sys.Print("*******************************************\nSolving...\n")
params = {
"snes_type": "ksponly",
"mat_type": "matfree",
"pmat_type": "matfree",
"ksp_type": "preonly",
"pc_type": "python",
# Use the static condensation PC for hybridized problems
# and use a direct solve on the reduced system for lambda_h
"pc_python_type": "firedrake.SCPC",
"pc_sc_eliminate_fields": "0",
"condensed_field": {
"ksp_type": "preonly",
"pc_type": "lu",
"pc_factor_mat_solver_type": "mumps",
},
}
problem = NonlinearVariationalProblem(F, solution, bcs=bc_multiplier)
solver = NonlinearVariationalSolver(problem, solver_parameters=params)
solver.solve()
# solve(F == 0, solution)
PETSc.Sys.Print("Solver finished.\n")
# Gathering solution
u_h, lambda_h = solution.split()
u_h.rename("Solution", "label")
# Post-processing solution
sigma_h = Function(V, name="Projected velocity")
sigma_h.project(-(k / mu) * grad(u_h))
output = File("ldgd.pvd", project_output=True)
output.write(u_h, sigma_h)
plot(sigma_h)
plot(u_h)
plt.axis("off")
plt.show()
print("\n*** DoF = %i" % W.dim())
|
from flask import Response, request
from db.models import Organism, SecondaryOrganism, Assembly
from flask_restful import Resource
from errors import NotFound, SchemaValidationError, RecordAlreadyExistError, TaxonNotFoundError
from utils.utils import parse_sample_metadata
from utils import ena_client
from datetime import datetime
from services import sample_service
import services.submission_service as service
from flask_jwt_extended import jwt_required
from mongoengine.queryset.visitor import Q
from flask import current_app as app
from utils.constants import SamplePipeline
import json
#CRUD operations on sample
class SamplesApi(Resource):
def get(self,accession=None):
sample = SecondaryOrganism.objects((Q(accession=accession) | Q(tube_or_well_id=accession)))
if len(sample) > 0:
result = sample.aggregate(*SamplePipeline).next()
return Response(json.dumps(result),mimetype="application/json", status=200)
else:
raise NotFound
@jwt_required()
def delete(self):
if 'ids' in request.args.keys() and len(request.args['ids'].split(',')) > 0:
ids = request.args['ids'].split(',')
resp = sample_service.delete_samples(ids)
return Response(json.dumps(resp),mimetype="application/json", status=200)
else:
raise SchemaValidationError
@jwt_required()
def put(self,accession):
data = request.json if request.is_json else request.form
sample = SecondaryOrganism.objects((Q(accession=accession) | Q(tube_or_well_id=accession))).first()
if not sample:
raise NotFound
elif not data:
raise SchemaValidationError
else:
sample.update(**data)
id = sample.tube_or_well_id
return Response(json.dumps(f'sample with id {id} has been saved'),mimetype="application/json", status=204)
    # create sample
@jwt_required()
def post(self):
data = request.json if request.is_json else request.form
if 'taxid' in data.keys() and ('tube_or_well_id' in data.keys() or 'accession' in data.keys()):
taxid = str(data['taxid'])
id = data['tube_or_well_id'] if 'tube_or_well_id' in data.keys() else data['accession']
if len(SecondaryOrganism.objects(Q(tube_or_well_id=id) | Q(accession=id))) > 0:
raise RecordAlreadyExistError
else:
#import data from accession number
if 'accession' in data.keys():
metadata = parse_sample_metadata(data['characteristics'])
metadata['accession'] = data['accession']
metadata['taxid'] = taxid
sample = service.create_sample(metadata)
sample_service.get_reads([sample])
assemblies = ena_client.parse_assemblies(sample.accession)
if len(assemblies) > 0:
existing_assemblies=Assembly.objects(accession__in=[ass['accession'] for ass in assemblies])
if len(existing_assemblies) > 0:
assemblies= [ass for ass in assemblies if ass['accession'] not in [ex_as['accession'] for ex_as in existing_assemblies]]
if len(assemblies) > 0:
for ass in assemblies:
if not 'sample_accession' in ass.keys():
ass['sample_accession'] = sample.accession
app.logger.info(assemblies)
assemblies = Assembly.objects.insert([Assembly(**ass) for ass in assemblies])
organism = Organism.objects(taxid=sample.taxid).first()
if not organism:
raise TaxonNotFoundError
organism.assemblies.extend(assemblies)
organism.save()
sample.assemblies.extend(assemblies)
sample.last_checked=datetime.utcnow()
sample.save()
else:
#create local sample
sample = service.create_sample(data)
return Response(json.dumps(f'sample with id {id} has been saved'),mimetype="application/json", status=201)
else:
raise SchemaValidationError
|
from queue import Queue
from functools import wraps
class Async:
    '''Async wrapper bundling a function and its arguments'''
def __init__(self, func, args):
self.func = func
self.args = args
def inlined_async(func):
@wraps(func)
def wrapper(*args):
f = func(*args)
result_queue = Queue()
result_queue.put(None)
while True:
result = result_queue.get()
try:
a = f.send(result)
apply_async(a.func, a.args, callback=result_queue.put)
except StopIteration:
break
return wrapper
def apply_async(func, args, *, callback):
result = func(*args)
callback(result)
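# Hedged usage sketch (not in the original snippet): the decorated generator
# yields Async objects and inlined_async feeds each synchronous result back
# into it; the add() helper below is illustrative only.
def add(x, y):
    return x + y
@inlined_async
def example():
    r = yield Async(add, (2, 3))
    print(r)              # 5
    r = yield Async(add, ('hello', ' world'))
    print(r)              # hello world
# example()  # uncomment to run the demo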
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore import context, save_checkpoint, load_checkpoint, load_param_into_net
from mindspore.nn import Softmax, SoftmaxCrossEntropyWithLogits
from mindspore_xai.explanation import RISEPlus, OoDNet
from common.dataset import load_dataset, load_image_tensor
from common.resnet import resnet50
if __name__ == "__main__":
# Preparing
# only PYNATIVE_MODE is supported
context.set_context(mode=context.PYNATIVE_MODE)
num_classes = 20
# classifier training dataset
train_ds = load_dataset('xai_examples_data/train').batch(4)
# load the trained classifier
net = resnet50(num_classes)
param_dict = load_checkpoint('xai_examples_data/ckpt/resnet50.ckpt')
load_param_into_net(net, param_dict)
# Training OoDNet
ood_net = OoDNet(underlying=net, num_classes=num_classes)
# use SoftmaxCrossEntropyWithLogits as loss function if the activation function of
    # the classifier is Softmax, use BCEWithLogitsLoss if the activation function is Sigmoid
ood_net.train(train_ds, loss_fn=SoftmaxCrossEntropyWithLogits())
save_checkpoint(ood_net, 'ood_net.ckpt')
# Using RISEPlus
ood_net = OoDNet(underlying=resnet50(num_classes), num_classes=num_classes)
param_dict = load_checkpoint('ood_net.ckpt')
load_param_into_net(ood_net, param_dict)
rise_plus = RISEPlus(ood_net=ood_net, network=net, activation_fn=Softmax())
boat_image = load_image_tensor("xai_examples_data/test/boat.jpg")
saliency = rise_plus(boat_image, targets=5)
print(f"saliency.shape:{saliency.shape}")
|
#!/usr/bin/env python3
from setuptools import setup
setup(
name="internalblue",
version="0.4",
description="A Bluetooth Experimentation Framework based on the Broadcom Bluetooth Controller Family.",
url="http://github.com/seemoo-lab/internalblue",
author="The InternalBlue Team",
author_email="jiska@bluetooth.lol",
license="MIT",
packages=[
"internalblue",
"internalblue/fw",
"internalblue/objects",
"internalblue/utils",
],
python_requires='>=3.6',
install_requires=["future", "cmd2", "pure-python-adb"],
extras_require={"macoscore": ["pyobjc"], "binutils": ["pwntools>=4.0.1", "pyelftools"]},
tests_require=["nose", "pytest", "pwntools>=4.2.0.dev0"],
entry_points={
"console_scripts": ["internalblue=internalblue.cli:internalblue_entry_point"]
},
zip_safe=False,
)
|
__author__ = 'Daniyar'
import itertools
from localsys.storage import db
import math
class score_model:
def check_closest_competitor(self, usrid, your_score):
value_risk = 0.0
value_cost = 0.0
value_risk_cost_contender = 2.0
value_cost_risk_contender = 1.0
prev_value_risk = 2.0
prev_value_cost = 1.0
next_value_risk = 2.0
next_value_cost = 1.0
prev_risk_rank = 0
next_risk_rank = 0
prev_cost_rank = 0
next_cost_rank = 0
next_value_risk_date = "2014-01-06"
prev_value_risk_date = "2014-01-06"
prev_value_cost_date = "2014-01-06"
next_value_cost_date = "2014-01-06"
date_risk = "2014-01-06"
date_cost = "2014-01-06"
checked = False
u_rank_risk = 1
u_rank_cost = 1
users_risk = []
users_cost = []
risk_values = []
cost_values = []
contender_id_prev_risk = 1
contender_id_next_risk = 1
contender_id_prev_cost = 1
contender_id_next_cost = 1
scores_1, scores_2 = itertools.tee(your_score)
for row in scores_1:
if row.score_type == 1:
if row.userid == usrid:
if not checked:
value_risk = row.score_value
checked = True
date_risk = row.date
else:
if not checked:
if not row.userid in users_risk:
if not float(row.score_value) in risk_values:
risk_values.append(float(row.score_value))
u_rank_risk += 1
prev_value_risk = row.score_value
prev_value_risk_date = row.date
contender_id_prev_risk = row.userid
users_risk.append(row.userid)
else:
if not row.userid in users_risk:
next_value_risk = row.score_value
next_value_risk_date = row.date
contender_id_next_risk = row.userid
break
checked = False
for row in scores_2:
if row.score_type == 2:
if row.userid == usrid:
if not checked:
value_cost = row.score_value
checked = True
date_cost = row.date
else:
if not checked:
if not row.userid in users_cost:
if not float(row.score_value) in cost_values:
users_cost.append(row.userid)
cost_values.append(float(row.score_value))
u_rank_cost += 1
prev_value_cost = row.score_value
prev_value_cost_date = row.date
contender_id_prev_cost = row.userid
else:
if not row.userid in users_cost:
next_value_cost = row.score_value
next_value_cost_date = row.date
contender_id_next_cost = row.userid
break
u_rank_risk -= risk_values.count(float(value_risk))
u_rank_cost -= cost_values.count(float(value_cost))
prev_risk_rank = u_rank_risk - 1
if prev_risk_rank == 0:
prev_value_risk = 9
prev_cost_rank = u_rank_cost - 1
if prev_cost_rank == 0:
prev_value_cost = 9
if next_value_risk == value_risk:
next_risk_rank = u_rank_risk
else:
next_risk_rank = u_rank_risk + 1
if next_value_cost == value_cost:
next_cost_rank = u_rank_cost
else:
next_cost_rank = u_rank_cost + 1
if prev_value_risk == value_risk:
prev_risk_rank = u_rank_risk
if prev_value_cost == value_cost:
prev_cost_rank = u_rank_cost
if math.fabs(float(value_risk) - float(prev_value_risk)) <= math.fabs(
float(next_value_risk) - float(value_risk)):
closest_score_risk = prev_value_risk
closest_ranking_risk = prev_risk_rank
closest_date_risk = prev_value_risk_date
contender_id_risk = contender_id_prev_risk
else:
closest_score_risk = next_value_risk
closest_ranking_risk = next_risk_rank
closest_date_risk = next_value_risk_date
contender_id_risk = contender_id_next_risk
if math.fabs(float(value_cost) - float(prev_value_cost)) <= math.fabs(
float(next_value_cost) - float(value_cost)):
closest_score_cost = prev_value_cost
closest_ranking_cost = prev_cost_rank
closest_date_cost = prev_value_cost_date
contender_id_cost = contender_id_prev_cost
else:
closest_score_cost = next_value_cost
closest_ranking_cost = next_cost_rank
closest_date_cost = next_value_cost_date
contender_id_cost = contender_id_next_cost
value_risk_cost = db.select('scores', where="date=$date_risk&&score_type=2&&userid=$usrid", vars=locals())[0].score_value
value_cost_risk = db.select('scores', where="date=$date_cost&&score_type=1&&userid=$usrid", vars=locals())[0].score_value
res1 = db.select('scores', where="date=$closest_date_risk&&score_type=2&&userid=$contender_id_risk", vars=locals())
if len(res1) > 0:
value_risk_cost_contender = res1[0].score_value
res2 = db.select('scores', where="date=$closest_date_cost&&score_type=1&&userid=$contender_id_cost", vars=locals())
if len(res2) > 0:
value_cost_risk_contender = res2[0].score_value
return value_risk, value_risk_cost, date_risk, value_cost, value_cost_risk, date_cost, u_rank_risk, u_rank_cost, closest_score_risk, value_risk_cost_contender, \
closest_ranking_risk, closest_date_risk, closest_score_cost, value_cost_risk_contender, closest_ranking_cost, closest_date_cost
def find_best(self, scores):
date_risk = "N/A"
value_risk = 0.0
date_cost = "N/A"
value_cost = 0.0
id_risk = 1
id_cost = 1
scores_1, scores_2 = itertools.tee(scores)
for row in scores_1:
if row.score_type == 1:
date_risk = row.date
value_risk = row.score_value
id_risk = row.userid
break
for row in scores_2:
if row.score_type == 2:
date_cost = row.date
value_cost = row.score_value
id_cost = row.userid
break
value_risk_cost = db.select('scores', where="date=$date_risk&&userid=$id_risk&&score_type=2", vars=locals())[0].score_value
value_cost_risk = db.select('scores', where="date=$date_cost&&userid=$id_cost&&score_type=1", vars=locals())[0].score_value
return value_risk, value_risk_cost, date_risk, value_cost, value_cost_risk, date_cost
def find_avg(self):
average_risk = db.query("SELECT AVG(score_value)as avg FROM scores WHERE score_type =1;")[0]
average_cost = db.query("SELECT AVG(score_value)as avg FROM scores WHERE score_type =2;")[0]
return average_risk.avg, average_cost.avg
@classmethod
def get_scores(cls, id_user):
all_scores = db.select('scores', order="score_value ASC")
length = len(all_scores)
scores_1, scores_2, scores_3, scores_4 = itertools.tee(all_scores, 4)
if len(all_scores) > 0:
b_u_risk, b_u_risk_cost, b_u_risk_date, b_u_cost, b_u_cost_risk, b_u_cost_date, b_u_risk_rank, b_u_cost_rank,\
c_risk, c_risk_cost, c_risk_rank, c_risk_when, c_pc, c_pc_risk, c_pc_rank, c_pc_when = \
score_model().check_closest_competitor(id_user, scores_2)
b_risk, b_risk_cost, b_risk_when, b_pc, b_pc_risk, b_pc_when = score_model().find_best(scores_3)
avg_risk, avg_pc = score_model().find_avg()
msg = {
"b_u_risk": str(b_u_risk),
"b_u_risk_cost": str(b_u_risk_cost),
"b_u_risk_date": str(b_u_risk_date),
"b_u_risk_rank": b_u_risk_rank,
"b_u_cost": str(b_u_cost),
"b_u_cost_risk": str(b_u_cost_risk),
"b_u_cost_date": str(b_u_cost_date),
"b_u_cost_rank": b_u_cost_rank,
"c_risk": str(c_risk),
"c_risk_cost": str(c_risk_cost),
"c_risk_when": str(c_risk_when),
"c_risk_rank": c_risk_rank,
"c_pc": str(c_pc),
"c_pc_risk": str(c_pc_risk),
"c_pc_when": str(c_pc_when),
"c_pc_rank": c_pc_rank,
"b_risk": str(b_risk),
"b_risk_cost": str(b_risk_cost),
"b_risk_when": str(b_risk_when),
"b_pc": str(b_pc),
"b_pc_risk": str(b_pc_risk),
"b_pc_when": str(b_pc_when),
"avg_risk": str(avg_risk),
"avg_pc": str(avg_pc)
}
return msg
def multiple_score(self, policies):
policy_costs_risks = []
sim = simulation()
for policy_entry in policies:
result_entry = {}
for key in policy_entry:
if key == "data":
tmp_value = policy_entry[key]
#sim.set_multi_policy(tmp_value)
result_entry["risk"] = sim.calc_risk_prob(tmp_value)
result_entry["cost"] = sim.calc_prod_cost(tmp_value)
else:
result_entry["id"] = policy_entry[key]
policy_costs_risks.append(result_entry)
# print('return cost '+ policy_costs_risks)
return policy_costs_risks
@classmethod
    def insert_score(cls, user_id, score_type, score_value, date):
db.insert('scores', userid=user_id, score_type=score_type, score_value=score_value, date=date)
|
import arcpy
structures = "N:\ArcMap_Projects\NG911 Analysis\Gecoding4_7\Geocoding4_7.gdb\Address_Points_4_7"
structureFields = [ "ADDRESS", "DIRPRE","ROADNAME","ROADTYPE","DIRSUF"]
msag = "N:\ArcMap_Projects\NG911 Analysis\Gecoding4_7\Geocoding4_7.gdb\MSAG_4_3"
msagFields = ["Low", "High", "O_E", "Dir", "Street","Community"]
print "Building MSAG List"
msagDict = {}
##msagDict Structures {"W MAIN ST":[[LOW,HIGH,oE],[LOW,HIGH,oE]]}
with arcpy.da.SearchCursor(msag, msagFields) as msagSearchCursor:
for row in msagSearchCursor:
roadName = str(str(row[msagFields.index("Dir")].strip()) + " " + str(row[msagFields.index("Street")].strip())).strip()
oE = row[msagFields.index("O_E")].strip()
try:
LOW = (int(row[msagFields.index("Low")]))
except:
LOW = None
try:
HIGH = (int(row[msagFields.index("High")]))
except:
HIGH = None
if roadName in msagDict:
msagDict[roadName].append([LOW, HIGH, oE])
else:
msagDict[roadName] = []
msagDict[roadName].append([LOW, HIGH, oE])
print "Scanning Structures"
totalStructures = arcpy.GetCount_management(structures).getOutput(0)
with arcpy.da.SearchCursor(structures, structureFields) as structuresSearchCursor:
unmatchedAddresses = []
for currentStructure in structuresSearchCursor:
currentStructureRoadName = (currentStructure[structureFields.index("DIRPRE")].strip() + " " + currentStructure[structureFields.index("ROADNAME")].strip() + " " + currentStructure[structureFields.index("ROADTYPE")].strip() + " " + currentStructure[structureFields.index("DIRSUF")].strip()).strip()
currentStructureNumber = currentStructure[structureFields.index("ADDRESS")].strip()
try:
currentStructureNumber = int(currentStructureNumber)
if currentStructureNumber % 2 == 0:
currentStructureNumberEO = "EVEN"
else:
currentStructureNumberEO = "ODD"
except:
currentStructureNumber = None
currentStructureNumberEO = None
if currentStructureNumber == None:
continue
addressFound = False
if currentStructureRoadName in msagDict:
for msagRow in msagDict[currentStructureRoadName]:
if (currentStructureNumber >= msagRow[0] and currentStructureNumber <= msagRow[1]):
if (msagRow[2] == "BOTH") or (msagRow[2] == currentStructureNumberEO):
addressFound = True
break
if not addressFound:
unmatchedAddresses.append((currentStructureNumber,currentStructureRoadName))
sortedAddresses = sorted(unmatchedAddresses,key=lambda x:(x[1],x[0]))
for address in sortedAddresses:
print str(address[0]) + " " + address[1] + " not found in MSAG"
print "Done"
|
from django.utils.deprecation import MiddlewareMixin
class ForceDefaultLanguageMiddleware(MiddlewareMixin):
"""
Ignore Accept-Language HTTP headers
This will force the I18N machinery to always choose settings.LANGUAGE_CODE
as the default initial language, unless another one is set via sessions or cookies
Should be installed *before* any middleware that checks request.META['HTTP_ACCEPT_LANGUAGE'],
namely django.middleware.locale.LocaleMiddleware
"""
def process_request(self, request):
# request.META["HTTP_ACCEPT_LANGUAGE"] ="en-US,en;q=0.5"
request.META["HTTP_ACCEPT_LANGUAGE"] ="ru-RU,ru;q=0.5"
|
"""
Basic "generic IDL4" target. Not designed to inherit from anything.
"""
from magpie.targets import base
from magpie.generator.v4generator import V4Generator
class Generator(V4Generator):
pass
class Types(object):
# Methods relating to type declaration, conversion, and marshalling
types_cpp_define_int_constants = 'types/cpp_define_int_constants.template.c'
# Given a type instance AST, output a declaration.
types_declare = 'types/declare.template.c'
types_declare_const = 'types/declare_const.template.c'
# Given a type AST, output the name of the type.
types_name = 'types/name.template.c'
types_name_basic = 'types/name_basic.template.c'
types_name_alias = 'types/name_alias.template.c'
# Given a Python value, output a C constant representing that value.
types_constant = 'types/constant.template.c'
class SharedTemplates(object):
basic_includes = 'v4_generic/basic_includes.template.c'
clientservice_function_params_maker = 'v4_generic/function_params_maker.template.c'
# Headers
header_comments = 'v4_generic/headercomments.template.c'
idl4_defines = 'v4_generic/idl4_defines.template.c'
preamble = 'v4_generic/preamble.template.c'
interface_wrapper = 'v4_generic/interface_wrapper.template.c'
imports = 'v4_generic/import.template.c'
language_specific_remapping = 'v4_generic/null_remapping.template.c'
class ClientTemplates(object):
client_function_wrapper = 'v4_generic/client_function_wrapper.template.c'
client_function_body = 'v4_generic/client_function_body.template.c'
client_function_body_pre_ipc_defs = 'helpers/null.template.c'
client_function_body_pre_ipc = 'helpers/null.template.c'
client_function_body_post_ipc = 'helpers/null.template.c'
client_function_create_id = 'v4_generic/client_function_create_id.template.c'
client_function_marshal = 'v4_generic/client_function_marshal.template.c'
client = 'v4_generic/main_client.template.h'
class ServiceTemplates(object):
service_interface_wrapper = 'v4_generic/service_interface_wrapper.template.c'
service_function = 'v4_generic/service_function.template.c'
service_function_reply = 'v4_generic/service_function_reply.template.c'
service = 'v4_generic/main_service.template.h'
class ServiceTemplateTemplates(object):
servicetemplate_create_getiid_func = 'v4_generic/servicetemplate_getiid.template.c'
servicetemplate = 'v4_generic/main_servicetemplate.template.h'
class ConstantTemplates(object):
constants = 'v4_generic/main_constants.template.c'
class Templates(base.Templates, SharedTemplates, ClientTemplates, ServiceTemplates, ServiceTemplateTemplates, ConstantTemplates, Types):
MAPPING = 'CORBA C'
types = 'v4_generic/main_types.template.h'
public = ['client', 'service', 'servicetemplate', 'types']
class DataStorage(object):
def __init__(self):
self.data_dict = {}
|
# -*- encoding: utf-8 -*-
from . import FixtureTest
class ResidentialTest(FixtureTest):
def test_z16(self):
self._check(zoom=16, expect_surface='fine_gravel')
def test_z15(self):
self._check(zoom=15, expect_surface='fine_gravel')
def test_z14(self):
self._check(zoom=14, expect_surface='unpaved')
def test_z13(self):
self._check(zoom=13, expect_surface='unpaved')
def test_z12(self):
self._check(zoom=12, expect_surface='unpaved')
def setUp(self):
FixtureTest.setUp(self)
import dsl
z, x, y = (16, 0, 0)
full_tags = {
'source': 'openstreetmap.org',
'highway': 'residential',
'surface': 'fine_gravel',
}
self.generate_fixtures(
dsl.way(1, dsl.tile_diagonal(z, x, y), full_tags),
)
def _check(self, zoom=16, expect_surface=None):
self.assert_has_feature(
zoom, 0, 0, 'roads', {
'id': 1,
'surface': expect_surface,
})
class HighwayTest(FixtureTest):
def test_z16(self):
self._check(zoom=16, expect_surface='asphalt')
def test_z15(self):
self._check(zoom=15, expect_surface='asphalt')
def test_z14(self):
self._check(zoom=14, expect_surface='asphalt')
def test_z13(self):
self._check(zoom=13, expect_surface='asphalt')
def test_z12(self):
self._check(zoom=12, expect_surface='asphalt')
def test_z11(self):
self._check(zoom=11, expect_surface='asphalt')
def test_z10(self):
self._check(zoom=10, expect_surface='paved')
def test_z09(self):
self._check(zoom=9, expect_surface='paved')
def test_z08(self):
self._check(zoom=8, expect_surface='paved')
def setUp(self):
FixtureTest.setUp(self)
import dsl
z, x, y = (16, 0, 0)
full_tags = {
'source': 'openstreetmap.org',
'highway': 'motorway',
'surface': 'asphalt',
}
self.generate_fixtures(
dsl.way(1, dsl.tile_diagonal(z, x, y), full_tags),
)
def _check(self, zoom=16, expect_surface=None):
self.assert_has_feature(
zoom, 0, 0, 'roads', {
'id': 1,
'surface': expect_surface,
})
|
import os
import numpy as np
from keras.preprocessing.image import img_to_array, load_img
def load_and_scale_images(img_dir_path, extension, img_width, img_height):
images = []
for f in os.listdir(img_dir_path):
filepath = os.path.join(img_dir_path, f)
if os.path.isfile(filepath) and f.endswith(extension):
image = img_to_array(load_img(filepath, target_size=(img_width, img_height)))
image = (image.astype(np.float32) / 255) * 2 - 1
images.append(image)
return np.array(images)
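if __name__ == "__main__":
    # Hedged usage sketch: the directory, extension and target size below are
    # placeholders, not values from the original project.
    demo_dir = "data/images"
    if os.path.isdir(demo_dir):
        batch = load_and_scale_images(demo_dir, ".png", 64, 64)
        print(batch.shape)  # expected: (num_images, 64, 64, 3), values in [-1, 1]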
|
import logging
from django.contrib import messages
from django.core.exceptions import BadRequest
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from dojo.models import Product, Objects_Product
from dojo.forms import ObjectSettingsForm, DeleteObjectsSettingsForm
from dojo.utils import Product_Tab
from dojo.authorization.roles_permissions import Permissions
from dojo.authorization.authorization_decorators import user_is_authorized
logger = logging.getLogger(__name__)
@user_is_authorized(Product, Permissions.Product_Tracking_Files_Add, 'pid')
def new_object(request, pid):
prod = get_object_or_404(Product, id=pid)
if request.method == 'POST':
tform = ObjectSettingsForm(request.POST)
if tform.is_valid():
new_prod = tform.save(commit=False)
new_prod.product = prod
new_prod.save()
messages.add_message(request,
messages.SUCCESS,
'Added Tracked File to a Product',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('view_objects', args=(pid,)))
else:
tform = ObjectSettingsForm()
product_tab = Product_Tab(prod, title="Add Tracked Files to a Product", tab="settings")
return render(request, 'dojo/new_object.html',
{'tform': tform,
'product_tab': product_tab,
'pid': prod.id})
@user_is_authorized(Product, Permissions.Product_Tracking_Files_View, 'pid')
def view_objects(request, pid):
product = get_object_or_404(Product, id=pid)
object_queryset = Objects_Product.objects.filter(product=pid).order_by('path', 'folder', 'artifact')
product_tab = Product_Tab(product, title="Tracked Product Files, Paths and Artifacts", tab="settings")
return render(request,
'dojo/view_objects.html',
{
'object_queryset': object_queryset,
'product_tab': product_tab,
'product': product
})
@user_is_authorized(Product, Permissions.Product_Tracking_Files_Edit, 'pid')
def edit_object(request, pid, ttid):
object = Objects_Product.objects.get(pk=ttid)
product = get_object_or_404(Product, id=pid)
if object.product != product:
raise BadRequest(f'Product {pid} does not fit to product of Object {object.product.id}')
if request.method == 'POST':
tform = ObjectSettingsForm(request.POST, instance=object)
if tform.is_valid():
tform.save()
messages.add_message(request,
messages.SUCCESS,
'Tool Product Configuration Successfully Updated.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('view_objects', args=(pid,)))
else:
tform = ObjectSettingsForm(instance=object)
product_tab = Product_Tab(product, title="Edit Tracked Files", tab="settings")
return render(request,
'dojo/edit_object.html',
{
'tform': tform,
'product_tab': product_tab
})
@user_is_authorized(Product, Permissions.Product_Tracking_Files_Delete, 'pid')
def delete_object(request, pid, ttid):
object = Objects_Product.objects.get(pk=ttid)
product = get_object_or_404(Product, id=pid)
if object.product != product:
raise BadRequest(f'Product {pid} does not fit to product of Object {object.product.id}')
if request.method == 'POST':
tform = ObjectSettingsForm(request.POST, instance=object)
object.delete()
messages.add_message(request,
messages.SUCCESS,
'Tracked Product Files Deleted.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('view_objects', args=(pid,)))
else:
tform = DeleteObjectsSettingsForm(instance=object)
product_tab = Product_Tab(product, title="Delete Product Tool Configuration", tab="settings")
return render(request,
'dojo/delete_object.html',
{
'tform': tform,
'product_tab': product_tab
})
|
#!/usr/bin/env python3
"""Given a byte string, split it into rows."""
import os
import sys
def showUsage(msg):
if msg is not None: print(msg)
print("""Usage: split-bytes.py [options...] [infile] [outfile]
options:
-cN: split to N columns
-dNN: byte NN (hex) ends a row (can be used multiple times)
-h: show this message
-n: do not add spaces
If `infile` is missing or '-', read from stdin.
If `outfile` is missing or '-', write to stdout.
Non-hex digits are ignored.
""")
sys.exit(1 if msg is not None else 0)
def main():
inFile, outFile = sys.stdin, sys.stdout
rowLen, rowDelim, sep = None, set(), ' '
try:
args = list(sys.argv[1:])
while len(args) > 0 and args[0].startswith('-') and len(args[0]) > 1:
arg = args.pop(0)
c = arg[1]
if c == 'c': rowLen = int(arg[2:])
elif c == 'd': rowDelim.add(int(arg[2:], 16))
elif c == 'h': return showUsage(None)
elif c == 'n': sep = ''
else: return showUsage("Invalid option.")
except Exception as ex:
return showUsage("Invalid parameter.")
if len(args) > 0:
f = args.pop(0)
if f != '-': inFile = open(f, 'rt')
if len(args) > 0:
f = args.pop(0)
if f != '-': outFile = open(f, 'wt')
byte = ''
curLen = 0
while True:
c = inFile.read(1)
if len(c) == 0: break
if c in '0123456789abcdefABCDEF':
byte += c
if len(byte) == 2:
curLen += 1
val = int(byte, 16)
el = sep
                if (val in rowDelim) or (rowLen is not None and curLen >= rowLen):
el = '\n'
curLen = 0
outFile.write(byte+el)
byte = ''
outFile.write('\n')
if __name__ == '__main__':
main()
|
import yfinance as yf
import pandas as pd
import pandas_datareader.data as web
import datetime as dt
import streamlit as st
from scipy import stats
import numpy as np
import matplotlib.pyplot as plt
Symbol = ["AAPL", "MSFT","AMZN", "FB","GOOG", "GOOGL", "TSLA", "NVDA", "PYPL", "ASML","INTC","CMCSA","NFLX","ADBE","CSCO","PEP","AVGO","TXN","PDD","TMUS","BABA","CSIQ", "XWD.TO", "EEM","HBLK.TO","BND","HTWO.MI"]
Names = ["APPLE","MICROSOFT","AMAZON","FACEBOOK","GOOGLE A","GOOGLE C","TESLA","NVIDIA","PAYPAL","ASML HOLDINGS","INTEL","COMCAST","NETFLIX","ADOBE","CISCO","PEPSI","BROADCOM","TEXAS INSTRUMENTS","PINDUODO","T-MOBILE US","ALIBABA","CANADIAN SOLAR", "ETF WORLD", "ETF EMERGENTI","ETF BLOCKCHAIN", "ETF BOND","ETF IDROGENO"]
#TITLE OF THE WEB APP
st.write("""
# Portfolio App
This app returns the average P/E of your Portfolio!
""")
st.sidebar.header('Build your Portfolio')
#SIDEBAR OF THE WEB APP. THIS TAKES THE INPUT OF THE USER(ETF AND WEIGHTS)
First_etf = st.sidebar.selectbox(
'Select a stock',
(Names)
)
First_etf_money = st.sidebar.slider(First_etf, 0, 5000, 1)
Second_etf = st.sidebar.selectbox(
'Select a stock',
(Names[1:])
)
Second_etf_money = st.sidebar.slider(Second_etf, 0, 5000, 0)
Third_etf = st.sidebar.selectbox(
'Select a stock',
Names[2:]
)
Third_etf_money = st.sidebar.slider(Third_etf, 0, 5000, 0)
Fourth_etf = st.sidebar.selectbox(
'Select a stock',
Names[3:]
)
Fourth_etf_money = st.sidebar.slider(Fourth_etf, 0, 5000, 0)
Fifth_etf = st.sidebar.selectbox(
'Select a stock',
Names[4:]
)
Fifth_etf_money = st.sidebar.slider(Fifth_etf, 0, 5000, 0)
Sixth_etf = st.sidebar.selectbox(
'Select a stock',
Names[5:]
)
Sixth_etf_money = st.sidebar.slider(Sixth_etf, 0, 5000, 0)
Seventh_etf = st.sidebar.selectbox(
'Select a stock',
Names[6:]
)
Seventh_etf_money = st.sidebar.slider(Seventh_etf, 0, 5000, 0)
Eight_etf = st.sidebar.selectbox(
'Select a stock',
Names[7:]
)
Eight_etf_money = st.sidebar.slider(Eight_etf, 0, 5000, 0)
Ninth_etf = st.sidebar.selectbox(
'Select a stock',
Names[8:]
)
Ninth_etf_money = st.sidebar.slider(Ninth_etf, 0, 5000, 0)
Tenth_etf = st.sidebar.selectbox(
'Select a stock',
Names[9:]
)
Tenth_etf_money = st.sidebar.slider(Tenth_etf, 0, 5000, 0)
Total_portfolio = First_etf_money+Second_etf_money+Third_etf_money+Fourth_etf_money+Fifth_etf_money+Sixth_etf_money+Seventh_etf_money+Eight_etf_money+Ninth_etf_money+Tenth_etf_money
First_weight = First_etf_money/Total_portfolio*100
Second_weight = Second_etf_money/Total_portfolio*100
Third_weight = Third_etf_money/Total_portfolio*100
Fourth_weight = Fourth_etf_money/Total_portfolio*100
Fifth_weight = Fifth_etf_money/Total_portfolio*100
Sixth_weight = Sixth_etf_money/Total_portfolio*100
Seventh_weight = Seventh_etf_money/Total_portfolio*100
Eight_weight = Eight_etf_money/Total_portfolio*100
Ninth_weight = Ninth_etf_money/Total_portfolio*100
Tenth_weight = Tenth_etf_money/Total_portfolio*100
# map each display name to its ticker symbol
res = dict(zip(Names, Symbol))
Portfolio = [First_etf,Second_etf,Third_etf,Fourth_etf,Fifth_etf,Sixth_etf,Seventh_etf,Eight_etf,Ninth_etf,Tenth_etf]
sizes = [First_weight,Second_weight,Third_weight,Fourth_weight,Fifth_weight,Sixth_weight,Seventh_weight,Eight_weight,Ninth_weight,Tenth_weight]
fig = plt.figure(figsize =(10, 7))
plt.pie(sizes, labels = Portfolio)
st.pyplot(fig)
A1 = yf.Ticker(res[First_etf])
PE1 = (A1.info['trailingPE'])
A2 = yf.Ticker(res[Second_etf])
PE2 = (A2.info['trailingPE'])
A3 = yf.Ticker(res[Third_etf])
PE3 = (A3.info['trailingPE'])
A4 = yf.Ticker(res[Fourth_etf])
PE4 = (A4.info['trailingPE'])
A5 = yf.Ticker(res[Fifth_etf])
PE5 = (A5.info['trailingPE'])
A6 = yf.Ticker(res[Sixth_etf])
PE6 = (A6.info['trailingPE'])
A7 = yf.Ticker(res[Seventh_etf])
PE7 = (A7.info['trailingPE'])
A8 = yf.Ticker(res[Eight_etf])
PE8 = (A8.info['trailingPE'])
A9 = yf.Ticker(res[Ninth_etf])
PE9 = (A9.info['trailingPE'])
A10 = yf.Ticker(res[Tenth_etf])
PE10 = (A10.info['trailingPE'])
Average_PE = (PE1 * First_weight/100) +(PE2*Second_weight/100) +(PE3 * Third_weight/100) + (PE4*Fourth_weight/100) +(PE5*Fifth_weight/100) + (PE6*Sixth_weight/100) + (PE7*Seventh_weight/100) + (PE8*Eight_weight/100) + (PE9*Ninth_weight/100) + (PE10*Tenth_weight/100)
st.write("Your average P/E is " + str(Average_PE))
st.write("The P/E of the SP500 is 15.98 ")
st.write(PE1,PE2,PE3,PE4,PE5,PE6,PE7,PE8,PE9,PE10)
|
# See LICENSE for licensing information.
#
# Copyright (c) 2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
"""
Enum classes to represent cache policies are defined here.
"""
from enum import IntEnum
class associativity(IntEnum):
""" Enum class to represent associativity. """
DIRECT = 0
N_WAY = 1
FULLY = 2
def __str__(self):
if self == associativity.DIRECT:
return "Direct-mapped"
if self == associativity.N_WAY:
return "N-way Set Associative"
if self == associativity.FULLY:
return "Fully Associative"
class replacement_policy(IntEnum):
""" Enum class to represent replacement policies. """
NONE = 0
FIFO = 1
LRU = 2
RANDOM = 3
def __str__(self):
return self.name.lower()
def upper(self):
return self.name
def long_name(self):
""" Get the long name of the replacement policy. """
if self == replacement_policy.NONE:
return "None"
if self == replacement_policy.FIFO:
return "First In First Out"
if self == replacement_policy.LRU:
return "Least Recently Used"
if self == replacement_policy.RANDOM:
return "Random"
def has_sram_array(self):
""" Return True if the replacement policy needs a separate SRAM array. """
return self not in [
replacement_policy.NONE,
replacement_policy.RANDOM
]
@staticmethod
def get_value(name):
""" Get the replacement policy enum value. """
if name is None:
return replacement_policy.NONE
for k, v in replacement_policy.__members__.items():
if name.upper() == k:
return v
class write_policy(IntEnum):
""" Enum class to represent write policies. """
WRITE_BACK = 0
WRITE_THROUGH = 1
def __str__(self):
return self.name.lower()
def upper(self):
return self.name
def long_name(self):
""" Get the long name of the write policy. """
if self == write_policy.WRITE_BACK:
return "Write-back"
if self == write_policy.WRITE_THROUGH:
return "Write-through"
@staticmethod
def get_value(name):
""" Get the write policy enum value. """
if name is None or name == "write-back":
return write_policy.WRITE_BACK
if name == "write-through":
return write_policy.WRITE_THROUGH
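if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): string lookups are
    # case-insensitive and None falls back to the default policy.
    rp = replacement_policy.get_value("lru")
    print(rp, "->", rp.long_name(), "| needs SRAM array:", rp.has_sram_array())
    wp = write_policy.get_value(None)
    print(wp, "->", wp.long_name())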
|
from PIL import Image
import math
def get_world_coord(img_depth, subj_coordinates, obj_coordinates):
width, height = img_depth.size
uo = width/2
vo = height/2
#FOV (90, 59) given by the depth camera settings
a = 90*math.pi/180
b = 59*math.pi/180
fx = uo / math.tan(a/2)
fy = vo / math.tan(b/2)
#Get the pixel coordinates of the objects that we want to get their real world coords
xs,ys = subj_coordinates
xo,yo = obj_coordinates
    #Shift the origin (0,0) from the top left corner to the center of the picture (where the camera is)
    xss = xs - uo
    yss = ys - vo
    xoo = xo - uo
    yoo = yo - vo
#Get depth of pixels
Zs = img_depth.getpixel(subj_coordinates)
Zo = img_depth.getpixel(obj_coordinates)
#calculate real world coordinates
Xs = (Zs*xss) / fx
Ys = -(Zs*yss) / fy
Xo = (Zo*xoo) / fx
Yo = -(Zo*yoo) / fy
#The final coords
A = (Xs,Ys,Zs)
B = (Xo,Yo,Zo)
#Calculate the distance between the two points in the real world
D = math.sqrt((Xs-Xo)**2 + (Ys-Yo)**2 + (Zs-Zo)**2)
#This function returns Distance, Subject_coords, Object_coords
return D , A, B
#A function getting the depth value from the depthmap
def get_Z(img_depth, coordinates):
z = img_depth.getpixel(coordinates)
return z
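if __name__ == "__main__":
    # Hedged usage sketch with a synthetic, constant depth map (depth units are
    # whatever the camera reports); the pixel coordinates are placeholders.
    demo_depth = Image.new("I", (640, 360), 1000)
    dist, subj_xyz, obj_xyz = get_world_coord(demo_depth, (100, 200), (500, 120))
    print("distance:", dist, "subject:", subj_xyz, "object:", obj_xyz)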
|
import logging
import numpy
from data_models.memory_data_models import Image, GainTable, Visibility, SkyModel
from processing_library.image.operations import copy_image
from workflows.serial.imaging.imaging_serial import predict_list_serial_workflow, invert_list_serial_workflow
from wrappers.arlexecute.visibility.base import copy_visibility
from wrappers.arlexecute.calibration.operations import apply_gaintable
from wrappers.arlexecute.execution_support.arlexecute import arlexecute
from wrappers.arlexecute.imaging.base import predict_skycomponent_visibility
from wrappers.arlexecute.skycomponent.base import copy_skycomponent
from wrappers.arlexecute.skycomponent.operations import apply_beam_to_skycomponent
from wrappers.arlexecute.visibility.coalesce import convert_blockvisibility_to_visibility, \
convert_visibility_to_blockvisibility
log = logging.getLogger(__name__)
def predict_skymodel_list_arlexecute_workflow(obsvis, skymodel_list, context, vis_slices=1, facets=1,
gcfcf=None, docal=False, **kwargs):
"""Predict from a list of skymodels, producing one visibility per skymodel
:param obsvis: "Observed Visibility"
:param skymodel_list: skymodel list
:param vis_slices: Number of vis slices (w stack or timeslice)
:param facets: Number of facets (per axis)
:param context: Type of processing e.g. 2d, wstack, timeslice or facets
    :param gcfcf: tuple containing grid correction and convolution function
:param docal: Apply calibration table in skymodel
:param kwargs: Parameters for functions in components
:return: List of vis_lists
"""
def ft_cal_sm(ov, sm):
assert isinstance(ov, Visibility), ov
assert isinstance(sm, SkyModel), sm
v = copy_visibility(ov)
v.data['vis'][...] = 0.0 + 0.0j
if len(sm.components) > 0:
if isinstance(sm.mask, Image):
comps = copy_skycomponent(sm.components)
comps = apply_beam_to_skycomponent(comps, sm.mask)
v = predict_skycomponent_visibility(v, comps)
else:
v = predict_skycomponent_visibility(v, sm.components)
if isinstance(sm.image, Image):
if numpy.max(numpy.abs(sm.image.data)) > 0.0:
if isinstance(sm.mask, Image):
model = copy_image(sm.image)
model.data *= sm.mask.data
else:
model = sm.image
v = predict_list_serial_workflow([v], [model], context=context,
vis_slices=vis_slices, facets=facets, gcfcf=gcfcf,
**kwargs)[0]
if docal and isinstance(sm.gaintable, GainTable):
bv = convert_visibility_to_blockvisibility(v)
bv = apply_gaintable(bv, sm.gaintable, inverse=True)
v = convert_blockvisibility_to_visibility(bv)
return v
return [arlexecute.execute(ft_cal_sm, nout=1)(obsvis, sm) for sm in skymodel_list]
def invert_skymodel_list_arlexecute_workflow(vis_list, skymodel_list, context, vis_slices=1, facets=1,
gcfcf=None, docal=False, **kwargs):
"""Calibrate and invert from a skymodel, iterating over the skymodel
The visibility and image are scattered, the visibility is predicted and calibrated on each part, and then the
parts are assembled. The mask if present, is multiplied in at the end.
:param vis_list: List of Visibility data models
:param skymodel_list: skymodel list
:param vis_slices: Number of vis slices (w stack or timeslice)
:param facets: Number of facets (per axis)
:param context: Type of processing e.g. 2d, wstack, timeslice or facets
    :param gcfcf: tuple containing grid correction and convolution function
:param docal: Apply calibration table in skymodel
:param kwargs: Parameters for functions in components
:return: List of (image, weight) tuples)
"""
def ift_ical_sm(v, sm):
assert isinstance(v, Visibility), v
assert isinstance(sm.image, Image), sm.image
if docal and isinstance(sm.gaintable, GainTable):
bv = convert_visibility_to_blockvisibility(v)
bv = apply_gaintable(bv, sm.gaintable)
v = convert_blockvisibility_to_visibility(bv)
result = invert_list_serial_workflow([v], [sm.image], context=context,
vis_slices=vis_slices, facets=facets, gcfcf=gcfcf,
**kwargs)[0]
if isinstance(sm.mask, Image):
result[0].data *= sm.mask.data
return result
return [arlexecute.execute(ift_ical_sm, nout=1)(vis_list[i], sm) for i, sm in enumerate(skymodel_list)]
def crosssubtract_datamodels_skymodel_list_arlexecute_workflow(obsvis, modelvis_list):
"""Form data models by subtracting sum from the observed and adding back each model in turn
vmodel[p] = vobs - sum(i!=p) modelvis[i]
This is the E step in the Expectation-Maximisation algorithm.
:param obsvis: "Observed" visibility
:param modelvis_list: List of Visibility data model predictions
:return: List of (image, weight) tuples)
"""
# Now do the meaty part. We probably want to refactor this for performance once it works.
def vsum(ov, mv):
# Observed vis minus the sum of all predictions
verr = copy_visibility(ov)
for m in mv:
verr.data['vis'] -= m.data['vis']
# Now add back each model in turn
result = list()
for m in mv:
vr = copy_visibility(verr)
vr.data['vis'] += m.data['vis']
result.append(vr)
assert len(result) == len(mv)
return result
return arlexecute.execute(vsum, nout=len(modelvis_list))(obsvis, modelvis_list)
def convolve_skymodel_list_arlexecute_workflow(obsvis, skymodel_list, context, vis_slices=1, facets=1,
gcfcf=None, **kwargs):
"""Form residual image from observed visibility and a set of skymodel without calibration
This is similar to convolving the skymodel images with the PSF
    :param obsvis: Observed Visibility
    :param skymodel_list: skymodel list
    :param vis_slices: Number of vis slices (w stack or timeslice)
    :param facets: Number of facets (per axis)
    :param context: Type of processing e.g. 2d, wstack, timeslice or facets
    :param gcfcf: tuple containing grid correction and convolution function
    :param kwargs: Parameters for functions in components
    :return: List of (image, weight) tuples
"""
def ft_ift_sm(ov, sm):
assert isinstance(ov, Visibility), ov
v = copy_visibility(ov)
v.data['vis'][...] = 0.0 + 0.0j
if len(sm.components) > 0:
if isinstance(sm.mask, Image):
comps = copy_skycomponent(sm.components)
comps = apply_beam_to_skycomponent(comps, sm.mask)
v = predict_skycomponent_visibility(v, comps)
else:
v = predict_skycomponent_visibility(v, sm.components)
if isinstance(sm.image, Image):
if numpy.max(numpy.abs(sm.image.data)) > 0.0:
if isinstance(sm.mask, Image):
model = copy_image(sm.image)
model.data *= sm.mask.data
else:
model = sm.image
v = predict_list_serial_workflow([v], [model], context=context,
vis_slices=vis_slices, facets=facets, gcfcf=gcfcf,
**kwargs)[0]
assert isinstance(sm.image, Image), sm.image
result = invert_list_serial_workflow([v], [sm.image], context=context,
vis_slices=vis_slices, facets=facets, gcfcf=gcfcf,
**kwargs)[0]
if isinstance(sm.mask, Image):
result[0].data *= sm.mask.data
return result
return [arlexecute.execute(ft_ift_sm, nout=len(skymodel_list))(obsvis, sm) for sm in skymodel_list]
|
# This file is part of the Reproducible and Reusable Data Analysis Workflow
# Server (flowServ).
#
# Copyright (C) 2019-2021 NYU.
#
# flowServ is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Helper method for creating database objects."""
from passlib.hash import pbkdf2_sha256
from typing import Tuple
import os
from flowserv.model.base import GroupObject, RunObject, User, WorkflowObject
from flowserv.model.database import DB
from flowserv.model.files.base import FileStore
from flowserv.model.group import WorkflowGroupManager
from flowserv.model.run import RunManager
from flowserv.model.template.parameter import ParameterIndex
import flowserv.model.workflow.state as st
import flowserv.util as util
def create_group(session, workflow_id, users):
"""Create a new workflow group in the database. Expects a workflow
    identifier and a list of user identifiers. Returns the identifier for the
created group.
Parameters
----------
session: sqlalchemy.orm.session.Session
Database session.
workflow_id: string
Unique workflow identifier.
users: list
        List of unique user identifiers.
Returns
-------
string
"""
group_id = util.get_unique_identifier()
group = GroupObject(
group_id=group_id,
workflow_id=workflow_id,
name=group_id,
owner_id=users[0],
parameters=ParameterIndex(),
workflow_spec=dict()
)
# Add users as group members.
for user_id in users:
user = session.query(User).filter(User.user_id == user_id).one()
group.members.append(user)
session.add(group)
return group_id
def create_run(session, workflow_id, group_id):
"""Create a new group run. Returns the run identifier.
Parameters
----------
session: sqlalchemy.orm.session.Session
Database session.
workflow_id: string
Unique workflow identifier.
group_id: string
Unique group identifier.
Returns
-------
string
"""
run_id = util.get_unique_identifier()
run = RunObject(
run_id=run_id,
workflow_id=workflow_id,
group_id=group_id,
state_type=st.STATE_PENDING
)
session.add(run)
return run_id
def create_user(session, active=True):
"""Create a new user in the database. User identifier, name and password
are all the same UUID. Returns the user identifier.
Parameters
----------
session: sqlalchemy.orm.session.Session
Database session.
active: bool, default=True
User activation flag.
Returns
-------
string
"""
user_id = util.get_unique_identifier()
user = User(
user_id=user_id,
name=user_id,
secret=pbkdf2_sha256.hash(user_id),
active=active
)
session.add(user)
return user_id
def create_workflow(session, workflow_spec=dict(), result_schema=None):
"""Create a new workflow handle for a given workflow specification. Returns
the workflow identifier.
Parameters
----------
session: sqlalchemy.orm.session.Session
Database session.
workflow_spec: dict, default=dict()
Optional workflow specification.
result_schema: dict, default=None
Optional result schema.
Returns
-------
string
"""
workflow_id = util.get_unique_identifier()
workflow = WorkflowObject(
workflow_id=workflow_id,
name=workflow_id,
workflow_spec=workflow_spec,
result_schema=result_schema
)
session.add(workflow)
return workflow_id
def success_run(database: DB, fs: FileStore, basedir: str) -> Tuple[str, str, str, str]:
"""Create a successful run with two result files:
- A.json
- run/results/B.json
Returns the identifier of the created workflow, group, run, and user.
"""
# Setup temporary run folder.
tmprundir = os.path.join(basedir, 'tmprun')
tmpresultsdir = os.path.join(tmprundir, 'run', 'results')
os.makedirs(tmprundir)
os.makedirs(tmpresultsdir)
f1 = os.path.join(tmprundir, 'A.json')
util.write_object(f1, {'A': 1})
f2 = os.path.join(tmpresultsdir, 'B.json')
util.write_object(f2, {'B': 1})
with database.session() as session:
user_id = create_user(session, active=True)
workflow_id = create_workflow(session)
group_id = create_group(session, workflow_id, users=[user_id])
groups = WorkflowGroupManager(session=session, fs=fs)
runs = RunManager(session=session, fs=fs)
run = runs.create_run(group=groups.get_group(group_id))
run_id = run.run_id
state = run.state()
runs.update_run(
run_id,
state.start().success(files=['A.json', 'run/results/B.json']),
rundir=tmprundir
)
assert not os.path.exists(tmprundir)
return workflow_id, group_id, run_id, user_id
|
__author__ = 'Serhii Sheiko'
# MIT licence
import subprocess
import sys
# vnstat -tr 3 -i eth0
base_command = ['vnstat']
def help_message():
help_msg = '''Interface usage plugin.
Requirements: vnstat
params:
-i - interface
-t - time for checking in seconds
-rx - limitations for input traffic
-tx - limitations for output traffic
for limitations, warning and critical edges are separated by a comma. ex: 10,20
example:
iface_usage.py -i eth0 -t 3 -rx 10,20 -tx 10,20'''
    print(help_msg)
def get_parameters():
all_params = sys.argv
run_params = []
check_params = {}
pass_iteration = False
for i in range(0, len(all_params)):
if pass_iteration == True:
pass_iteration = False
continue
if all_params[i] == '-i':
#run_params.append('-i ' + all_params[i+1])
run_params.append('-i')
run_params.append(all_params[i+1])
pass_iteration = True
elif all_params[i] == '-t':
#run_params.append('-tr ' + all_params[i+1])
run_params.append('-tr')
run_params.append(all_params[i+1])
pass_iteration = True
elif all_params[i] == '-rx':
check_params['rx'] = all_params[i+1]
pass_iteration = True
elif all_params[i] == '-tx':
check_params['tx'] = all_params[i+1]
pass_iteration = True
return run_params, check_params
def get_output(run_params):
run_command = base_command + run_params
p = subprocess.Popen(run_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out_data, out_err = p.communicate()
if out_err is not None:
print(out_err)
sys.exit(128)
    splited_data = out_data.decode().splitlines()  # decode subprocess bytes before parsing
result_data = {}
for line in splited_data:
if len(line.split())>1:
if line.split()[0] == 'rx':
if line.split()[2] == 'Mbit/s':
result_data['rx'] = float(line.split()[1].replace(',', '.')) * 1024
else:
result_data['rx'] = float(line.split()[1].replace(',', '.'))
elif line.split()[0] == 'tx':
if line.split()[2] == 'Mbit/s':
result_data['tx'] = float(line.split()[1].replace(',', '.')) * 1024
else:
result_data['tx'] = float(line.split()[1].replace(',', '.'))
return result_data
def check_data(params_in):
run_params, check_params = params_in
output_data = get_output(run_params)
    result_string = 'Bandwidth usage: '
error_code = 0
for c_param in check_params:
        if c_param not in output_data:
result_string = result_string + c_param + ' is unknown parameter; '
continue
control_data = check_params[c_param].split(',')
warn_level = float(control_data[0])
critical_level = float(control_data[1])
result_data = float(output_data[c_param])
if result_data > warn_level and result_data < critical_level:
result_string = result_string + c_param + ': ' + str(result_data) + ' kbit/s warning; '
if 1 > error_code:
error_code = 1
elif result_data > critical_level:
result_string = result_string + c_param + ': ' + str(result_data) + ' kbit/s critical; '
if 2 > error_code:
error_code = 2
else:
result_string = result_string + c_param + ': ' + str(result_data) + ' kbit/s OK; '
print(result_string)
sys.exit(error_code)
check_data(get_parameters())
|
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common Google Storage interface library."""
from __future__ import print_function
import base64
import errno
import os
import re
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import gs
from chromite.lib import osutils
from chromite.lib.paygen import filelib
from chromite.lib.paygen import utils
PROTOCOL = 'gs'
RETRY_ATTEMPTS = 2
GS_LS_STATUS_RE = re.compile(r'status=(\d+)')
# Gsutil is filled in by "FindGsUtil" on first invocation.
GSUTIL = None
def FindGsUtil():
"""Find which gsutil executuable to use.
This may download and cache the command if needed, and will return the
version pinned by chromite for general use. Will cache the result after
the first call.
This function is multi-process safe, but NOT THREAD SAFE. If you need
to use gsutil functionality in threads, call this function at least
once before creating the threads. That way the value will be safely
pre-cached.
Returns:
Full path to the gsutil command to use.
"""
# TODO(dgarrett): This is a hack. Merge chromite and crostools to fix.
global GSUTIL # pylint: disable=global-statement
if GSUTIL is None:
GSUTIL = gs.GSContext.GetDefaultGSUtilBin()
return GSUTIL
class GsutilError(Exception):
"""Base exception for errors where gsutil cannot be used for any reason."""
class GsutilMissingError(GsutilError):
"""Returned when the gsutil utility is missing from PATH."""
def __init__(self, msg='The gsutil utility must be installed.'):
GsutilError.__init__(self, msg)
class GSLibError(Exception):
"""Raised when gsutil command runs but gives an error."""
class CopyFail(GSLibError):
"""Raised if Copy fails in any way."""
class MoveFail(GSLibError):
"""Raised if Move fails in any way."""
class RemoveFail(GSLibError):
"""Raised if Remove fails in any way."""
class CatFail(GSLibError):
"""Raised if Cat fails in any way."""
class StatFail(GSLibError):
"""Raised if Stat fails in any way."""
class URIError(GSLibError):
"""Raised when URI does not behave as expected."""
class ValidateGsutilFailure(GSLibError):
"""We are unable to validate that gsutil is working correctly."""
def RetryGSLib(func):
"""Decorator to retry function calls that throw an exception.
If the decorated method throws a GSLibError exception, the exception
will be thrown away and the function will be run again until all retries
are exhausted. On the final attempt, the exception will be thrown normally.
Three attempts in total will be made to run the function (one more
than RETRY_ATTEMPTS).
@RetryGSLib
def MyFunctionHere(): pass
"""
def RetryHandler(*args, **kwargs):
"""Retry func with given args/kwargs RETRY_ATTEMPTS times."""
warning_msgs = []
for i in xrange(0, RETRY_ATTEMPTS + 1):
try:
return func(*args, **kwargs)
except GSLibError as ex:
# On the last try just pass the exception on up.
if i >= RETRY_ATTEMPTS:
raise
error_msg = str(ex)
RESUMABLE_ERROR_MESSAGE = (
gs.GSContext.RESUMABLE_DOWNLOAD_ERROR,
gs.GSContext.RESUMABLE_UPLOAD_ERROR,
'ResumableUploadException',
'ResumableDownloadException',
'ssl.SSLError: The read operation timed out',
)
if (func.__name__ == 'Copy' and
any(x in error_msg for x in RESUMABLE_ERROR_MESSAGE)):
logging.info(
              'Resumable download/upload exception occurred for %s', args[1])
# Pass the dest_path to get the tracker filename.
tracker_filenames = gs.GSContext.GetTrackerFilenames(args[1])
# This part of the code is copied from chromite.lib.gs with
# slight modifications. This is a temporary solution until
# we can deprecate crostools.lib.gslib (crbug.com/322740).
logging.info('Potential list of tracker files: %s',
tracker_filenames)
for tracker_filename in tracker_filenames:
tracker_file_path = os.path.join(
gs.GSContext.DEFAULT_GSUTIL_TRACKER_DIR,
tracker_filename)
if os.path.exists(tracker_file_path):
logging.info('Deleting gsutil tracker file %s before retrying.',
tracker_file_path)
logging.info('The content of the tracker file: %s',
osutils.ReadFile(tracker_file_path))
osutils.SafeUnlink(tracker_file_path)
else:
if 'AccessDeniedException' in str(ex) or 'NoSuchKey' in str(ex):
raise
# Record a warning message to be issued if a retry actually helps.
warning_msgs.append('Try %d failed with error message:\n%s' %
(i + 1, ex))
else:
# If the func succeeded, then log any accumulated warning messages.
if warning_msgs:
logging.warning('Failed %s %d times before success:\n%s',
func.__name__, len(warning_msgs),
'\n'.join(warning_msgs))
RetryHandler.__module__ = func.__module__
RetryHandler.__name__ = func.__name__
RetryHandler.__doc__ = func.__doc__
return RetryHandler
def RunGsutilCommand(args,
redirect_stdout=True,
redirect_stderr=True,
failed_exception=GSLibError,
generation=None,
headers=None,
get_headers_from_stdout=False,
**kwargs):
"""Run gsutil with given args through RunCommand with given options.
Generally this method is intended for use within this module, see the various
command-specific wrappers provided for convenience. However, it can be called
directly if 'gsutil' needs to be called in specific way.
A few of the options for RunCommand have their default values switched for
this function. Those options are called out explicitly as options here, while
  additional RunCommand options can be passed through kwargs.
Args:
args: List of arguments to use with 'gsutil'.
redirect_stdout: Boolean option passed directly to RunCommand.
redirect_stderr: Boolean option passed directly to RunCommand.
failed_exception: Exception class to raise if CommandFailedException is
caught. It should be GSLibError or a subclass.
generation: Only run the specified command if the generation matches.
(See "Conditional Updates Using Object Versioning" in the gsutil docs.)
headers: Fill in this dictionary with header values captured from stderr.
get_headers_from_stdout: Whether header information is to be parsed from
stdout (default: stderr).
kwargs: Additional options to pass directly to RunCommand, beyond the
explicit ones above. See RunCommand itself.
Returns:
Anything that RunCommand returns, which should be a CommandResult object.
Raises:
    GsutilMissingError if the gsutil utility cannot be found.
GSLibError (or whatever is in failed_exception) if RunCommand failed (and
error_code_ok was not True).
"""
# The -d flag causes gsutil to dump various metadata, including user
# credentials. We therefore don't allow users to pass it in directly.
assert '-d' not in args, 'Cannot pass in the -d flag directly'
gsutil = FindGsUtil()
if generation is not None:
args = ['-h', 'x-goog-if-generation-match:%s' % generation] + args
if headers is not None:
args.insert(0, '-d')
assert redirect_stderr
cmd = [gsutil] + args
run_opts = {
'redirect_stdout': redirect_stdout,
'redirect_stderr': redirect_stderr,
}
run_opts.update(kwargs)
try:
result = cros_build_lib.RunCommand(cmd, **run_opts)
except OSError as e:
if e.errno == errno.ENOENT:
raise GsutilMissingError()
raise
except cros_build_lib.RunCommandError as e:
# If headers is set, we have to hide the output here because it may contain
# credentials that we don't want to show in buildbot logs.
raise failed_exception('%r failed' % cmd if headers else e.result.error)
if headers is not None and result is not None:
assert redirect_stdout if get_headers_from_stdout else redirect_stderr
# Parse headers that look like this:
# header: x-goog-generation: 1359148994758000
# header: x-goog-metageneration: 1
headers_source = result.output if get_headers_from_stdout else result.error
for line in headers_source.splitlines():
if line.startswith('header: '):
header, _, value = line.partition(': ')[-1].partition(': ')
headers[header.replace('x-goog-', '')] = value
# Strip out stderr entirely to avoid showing credentials in logs; for
# commands that dump credentials to stdout, clobber that as well.
result.error = '<stripped>'
if get_headers_from_stdout:
result.output = '<stripped>'
return result
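# Hedged sketch (not part of this module's API): shows how the header parsing in
# RunGsutilCommand() above splits a 'header: ...' line into a name/value pair.
# The sample line is illustrative only.
def _example_parse_header_line(line='header: x-goog-generation: 1359148994758000'):
  """Return (name, value) parsed the same way as RunGsutilCommand does."""
  header, _, value = line.partition(': ')[-1].partition(': ')
  return header.replace('x-goog-', ''), value  # -> ('generation', '1359148994758000')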
def ValidateGsutilWorking(bucket):
"""Validate that gsutil is working correctly.
There is a failure mode for gsutil in which all operations fail, and this
is indistinguishable from all gsutil ls operations matching nothing. We
check that there is at least one file in the root of the bucket.
Args:
bucket: bucket we are about to test.
Raises:
ValidateGsutilFailure: If we are unable to find any files in the bucket.
"""
url = 'gs://%s/' % bucket
if not List(url):
raise ValidateGsutilFailure('Unable to find anything in: %s' % url)
@RetryGSLib
def MD5Sum(gs_uri):
"""Read the gsutil md5 sum from etag and gsutil ls -L.
Note that because this relies on 'gsutil ls -L' it suffers from the
eventual consistency issue, meaning this function could fail to find
the MD5 value for a recently created file in Google Storage.
Args:
gs_uri: An absolute Google Storage URI that refers directly to an object.
No globs are supported.
Returns:
A string that is an md5sum, or None if no object found.
Raises:
GSLibError if the gsutil command fails. If there is no object at that path
that is not considered a failure.
"""
gs_md5_regex = re.compile(r'.*?Hash \(md5\):\s+(.*)', re.IGNORECASE)
args = ['ls', '-L', gs_uri]
result = RunGsutilCommand(args, error_code_ok=True)
# If object was not found then output is completely empty.
if not result.output:
return None
for line in result.output.splitlines():
match = gs_md5_regex.match(line)
if match:
# gsutil now prints the MD5 sum in base64, but we want it in hex.
return base64.b16encode(base64.b64decode(match.group(1))).lower()
# This means there was some actual failure in the command.
raise GSLibError('Unable to determine MD5Sum for %r' % gs_uri)
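# Hedged sketch (illustration only, not used by this module): demonstrates the
# base64 -> hex conversion done at the end of MD5Sum() above. hashlib is used so
# the expected value is computed rather than assumed.
def _example_md5_base64_to_hex(payload=b'example'):
  import hashlib
  b64_digest = base64.b64encode(hashlib.md5(payload).digest())  # gsutil-style value
  hex_digest = base64.b16encode(base64.b64decode(b64_digest)).lower()
  assert hex_digest == hashlib.md5(payload).hexdigest().encode()
  return hex_digest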
@RetryGSLib
def Cmp(path1, path2):
"""Return True if paths hold identical files, according to MD5 sum.
Note that this function relies on MD5Sum, which means it also can only
promise eventual consistency. A recently uploaded file in Google Storage
may behave badly in this comparison function.
If either file is missing then always return False.
Args:
path1: URI to a file. Local paths also supported.
path2: URI to a file. Local paths also supported.
Returns:
True if files are the same, False otherwise.
"""
md5_1 = MD5Sum(path1) if IsGsURI(path1) else filelib.MD5Sum(path1)
if not md5_1:
return False
md5_2 = MD5Sum(path2) if IsGsURI(path2) else filelib.MD5Sum(path2)
return md5_1 == md5_2
@RetryGSLib
def Copy(src_path, dest_path, acl=None, **kwargs):
"""Run gsutil cp src_path dest_path supporting GS globs.
e.g.
gsutil cp /etc/* gs://etc/ where /etc/* is src_path with a glob and
gs://etc is dest_path.
This assumes that the src or dest path already exist.
Args:
src_path: The src of the path to copy, either a /unix/path or gs:// uri.
dest_path: The dest of the path to copy, either a /unix/path or gs:// uri.
acl: an ACL argument (predefined name or XML file) to pass to gsutil
kwargs: Additional options to pass directly to RunGsutilCommand, beyond the
explicit ones above. See RunGsutilCommand itself.
Raises:
CopyFail: If the copy fails for any reason.
"""
args = ['cp']
if acl:
args += ['-a', acl]
args += [src_path, dest_path]
RunGsutilCommand(args, failed_exception=CopyFail, **kwargs)
@RetryGSLib
def Move(src_path, dest_path, **kwargs):
"""Run gsutil mv src_path dest_path supporting GS globs.
Note that the created time is changed to now for the moved object(s).
Args:
src_path: The src of the path to move, either a /unix/path or gs:// uri.
dest_path: The dest of the path to move, either a /unix/path or gs:// uri.
kwargs: Additional options to pass directly to RunGsutilCommand, beyond the
explicit ones above. See RunGsutilCommand itself.
Raises:
MoveFail: If the move fails for any reason.
"""
args = ['mv', src_path, dest_path]
RunGsutilCommand(args, failed_exception=MoveFail, **kwargs)
@RetryGSLib
def Remove(*paths, **kwargs): # pylint: disable=docstring-misnamed-args
"""Run gsutil rm on path supporting GS globs.
Args:
paths: Local path or gs URI, or list of same.
ignore_no_match: If True, then do not complain if anything was not
removed because no URI match was found. Like rm -f. Defaults to False.
recurse: Remove recursively starting at path. Same as rm -R. Defaults
to False.
kwargs: Additional options to pass directly to RunGsutilCommand, beyond the
explicit ones above. See RunGsutilCommand itself.
Raises:
RemoveFail: If the remove fails for any reason.
"""
ignore_no_match = kwargs.pop('ignore_no_match', False)
recurse = kwargs.pop('recurse', False)
args = ['rm']
if recurse:
args.append('-R')
args.extend(paths)
try:
RunGsutilCommand(args, failed_exception=RemoveFail, **kwargs)
except RemoveFail as e:
should_raise = True
msg = str(e.args[0])
# Sometimes Google Storage glitches and complains about failing to remove a
# specific revision of the file. It ends up getting removed anyway, but it
# throws a NotFoundException.
if (ignore_no_match and (('No URLs matched' in msg) or
('NotFoundException:' in msg))):
should_raise = False
if should_raise:
raise
def RemoveDirContents(gs_dir_uri):
"""Remove all contents of a directory.
Args:
gs_dir_uri: directory to delete contents of.
"""
Remove(os.path.join(gs_dir_uri, '**'), ignore_no_match=True)
def CreateWithContents(gs_uri, contents, **kwargs):
"""Creates the specified file with specified contents.
Args:
gs_uri: The URI of a file on Google Storage.
contents: Contents to write to the file.
kwargs: Additional options to pass directly to RunGsutilCommand, beyond the
explicit ones above. See RunGsutilCommand itself.
Raises:
CopyFail: If it fails for any reason.
"""
with utils.CreateTempFileWithContents(contents) as content_file:
Copy(content_file.name, gs_uri, **kwargs)
@RetryGSLib
def Cat(gs_uri, **kwargs):
"""Return the contents of a file at the given GS URI
Args:
gs_uri: The URI of a file on Google Storage.
kwargs: Additional options to pass directly to RunGsutilCommand, beyond the
explicit ones above. See RunGsutilCommand itself.
Raises:
CatFail: If the cat fails for any reason.
"""
args = ['cat', gs_uri]
result = RunGsutilCommand(args, failed_exception=CatFail, **kwargs)
return result.output
def Stat(gs_uri, **kwargs):
"""Stats a file at the given GS URI (returns nothing).
Args:
gs_uri: The URI of a file on Google Storage.
kwargs: Additional options to pass directly to RunGsutilCommand, beyond the
explicit ones above. See RunGsutilCommand itself.
Raises:
StatFail: If the stat fails for any reason.
"""
args = ['stat', gs_uri]
# IMPORTANT! With stat, header information is dumped to standard output,
# rather than standard error, as with other gsutil commands. Hence,
# get_headers_from_stdout must be True to ensure both correct parsing of
# output and stripping of sensitive information.
RunGsutilCommand(args, failed_exception=StatFail,
get_headers_from_stdout=True, **kwargs)
def IsGsURI(path):
"""Returns true if the path begins with gs://
Args:
path: An absolute Google Storage URI.
Returns:
True if path is really a google storage uri that begins with gs://
False otherwise.
"""
return path and path.startswith(PROTOCOL + '://')
# TODO(mtennant): Rename this "Size" for consistency.
@RetryGSLib
def FileSize(gs_uri, **kwargs):
"""Return the size of the given gsutil file in bytes.
Args:
gs_uri: Google Storage URI (beginning with 'gs://') pointing
directly to a single file.
kwargs: Additional options to pass directly to RunGsutilCommand, beyond the
explicit ones above. See RunGsutilCommand itself.
Returns:
Size of file in bytes.
Raises:
URIError: Raised when URI is unknown to Google Storage or when
URI matches more than one file.
"""
headers = {}
try:
Stat(gs_uri, headers=headers, **kwargs)
except StatFail as e:
raise URIError('Unable to stat file at URI %r: %s' % (gs_uri, e))
size_str = headers.get('stored-content-length')
if size_str is None:
raise URIError('Failed to get size of %r' % gs_uri)
return int(size_str)
def Exists(gs_uri, **kwargs):
"""Return True if object exists at given GS URI.
Args:
gs_uri: Google Storage URI. Must be a fully-specified URI with
no glob expression. Even if a glob expression matches this
method will return False.
kwargs: Additional options to pass directly to RunGsutilCommand, beyond the
explicit ones above. See RunGsutilCommand itself.
Returns:
True if gs_uri points to an existing object, and False otherwise.
"""
try:
Stat(gs_uri, **kwargs)
except StatFail:
return False
return True
@RetryGSLib
def List(root_uri, recurse=False, filepattern=None, sort=False):
"""Return list of file and directory paths under given root URI.
Args:
root_uri: e.g. gs://foo/bar
recurse: Look in subdirectories, as well
filepattern: glob pattern to match against basename of path
sort: If True then do a default sort on paths
Returns:
List of GS URIs to paths that matched
"""
gs_uri = root_uri
if recurse:
# In gs file patterns '**' absorbs any number of directory names,
# including none.
gs_uri = gs_uri.rstrip('/') + '/**'
# Now match the filename itself at the end of the URI.
if filepattern:
gs_uri = gs_uri.rstrip('/') + '/' + filepattern
args = ['ls', gs_uri]
try:
result = RunGsutilCommand(args)
paths = [path for path in result.output.splitlines() if path]
if sort:
paths = sorted(paths)
return paths
except GSLibError as e:
# The ls command will fail under normal operation if there was just
# nothing to be found. That shows up like this to stderr:
# CommandException: One or more URLs matched no objects.
if 'CommandException: One or more URLs matched no objects.' not in str(e):
raise
# Otherwise, assume a normal error.
# TODO(mtennant): It would be more functionally correct to return this
# if and only if the error is identified as a "file not found" error.
# We simply have to determine how to do that reliably.
return []
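# Hedged sketch (not used by this module): reproduces the URI construction done in
# List() above so the '**' glob behaviour can be seen without touching Google Storage.
# The root_uri and filepattern defaults are illustrative only.
def _example_list_uri(root_uri='gs://foo/bar', recurse=True, filepattern='*.json'):
  gs_uri = root_uri
  if recurse:
    gs_uri = gs_uri.rstrip('/') + '/**'   # '**' absorbs any number of directory levels
  if filepattern:
    gs_uri = gs_uri.rstrip('/') + '/' + filepattern
  return gs_uri  # -> 'gs://foo/bar/**/*.json'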
def ListFiles(root_uri, recurse=False, filepattern=None, sort=False):
"""Return list of file paths under given root URI.
Directories are intentionally excluded.
Args:
root_uri: e.g. gs://foo/bar
recurse: Look for files in subdirectories, as well
filepattern: glob pattern to match against basename of file
sort: If True then do a default sort on paths
Returns:
List of GS URIs to files that matched
"""
paths = List(root_uri, recurse=recurse, filepattern=filepattern, sort=sort)
# Directory paths should be excluded from output, per ListFiles guarantee.
return [path for path in paths if not path.endswith('/')]
|
import os
from flask import send_from_directory
from metric.app import ROOTPATH
from metric.app.resource import Resource
class Public(Resource):
def get(self, d, f):
"""
        ____This resource serves files from the public directory, one directory level deep only____
        @param d: directory under public you want to access
        @param f: the file you want to access
        @return: the file from that directory
"""
return send_from_directory(os.path.join(ROOTPATH, 'public', d), f)
|
class Scope:
def __init__ (self, parent=None):
self.parent = parent
self.elements = dict()
    def get_element (self, name, type, current=False):
        r = self.elements.get(type, None)
        if r is not None:
            r = r.get(name, None)
        if r is None and self.parent is not None and not current:
            return self.parent.get_element(name, type)
        else:
            return r
def add_element (self, obj):
_type = type(obj)
if _type not in self.elements:
self.elements[_type] = dict()
self.elements[_type][obj.name] = obj
def subscope (self):
return Scope(self)
def topscope (self):
return self.parent
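# Hedged usage sketch (not part of the original module): 'Var' is a hypothetical
# element type used only to show how lookups fall back to the parent scope unless
# current=True is passed.
if __name__ == '__main__':
    class Var:
        def __init__(self, name):
            self.name = name
    top = Scope()
    top.add_element(Var('x'))
    inner = top.subscope()
    assert inner.get_element('x', Var) is not None            # resolved via parent scope
    assert inner.get_element('x', Var, current=True) is None  # not defined in inner scope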
|
"""https://www.codewars.com/kata/576b93db1129fcf2200001e6"""
def sum_array(arr):
"""Returns sum of list without highest and lowest vlues"""
return 0 if arr is None or arr == [] else sum(sorted(arr)[1:-1])
def test_cases():
"""Sample test cases"""
assert sum_array(None) == 0
assert sum_array([]) == 0
assert sum_array([3]) == 0
assert sum_array([-3]) == 0
assert sum_array([ 3, 5]) == 0
assert sum_array([-3, -5]) == 0
assert sum_array([6, 2, 1, 8, 10]) == 16
assert sum_array([6, 0, 1, 10, 10]) == 17
assert sum_array([-6, -20, -1, -10, -12]) == -28
assert sum_array([-6, 20, -1, 10, -12]) == 3
print("Test Success!")
test_cases()
|
import json
from os import path
from pathlib import Path
class CommonPath:
_instance = None
def __init__(self):
self.PROJECT_ABS_PATH = path.abspath(Path(__file__).parent.parent.absolute())
self.CONFIG_ABS_PATH = self.PROJECT_ABS_PATH + "/config.json"
with open(self.CONFIG_ABS_PATH, "r") as json_file:
content = json.load(json_file)
self.DB_CONFIG_ABS_PATH = self.PROJECT_ABS_PATH + "/" + content["DB_CONFIG_PATH"]
self.APP_CONFIG_ABS_PATH = self.PROJECT_ABS_PATH + "/" + content["APP_CONFIG_PATH"]
self.UPLOAD_CONFIG_ABS_PATH = self.PROJECT_ABS_PATH + "/" + content["UPLOAD_CONFIG_PATH"]
@classmethod
def get_instance(cls):
if not cls._instance:
cls._instance = CommonPath()
return cls._instance
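# Hedged usage sketch: assumes a config.json containing DB_CONFIG_PATH, APP_CONFIG_PATH
# and UPLOAD_CONFIG_PATH keys exists at the project root, as required by __init__ above.
if __name__ == '__main__':
    paths = CommonPath.get_instance()
    print(paths.PROJECT_ABS_PATH)
    print(paths.DB_CONFIG_ABS_PATH)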
|
"""Core feature extensions for the project related to Python packaging."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Fredy Wijaya
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = 'Fredy Wijaya'
import argparse, shutil, sys, subprocess, os, zipfile
# put your source packages here
source_packages = [
]
# put your third party packages here
third_party_packages = [
]
all_packages = third_party_packages + source_packages
# put your test packages here
test_packages = [
]
# cross-compilation targets as (GOOS, GOARCH) tuples; leave it empty for no cross-compilation
cross_compilations = [
]
# the name of the executable
executables = []
# the release directory
release_dir = ''
# the release archive file
release_file = ''
files_to_remove = [
'bin',
'pkg',
release_dir,
release_file,
]
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=80))
parser.add_argument('--clean', action='store_true', dest='clean',
help='clean the project')
parser.add_argument('--package', action='store_true', dest='package',
help='create a package')
parser.add_argument('--test', action='store_true', dest='test',
help='run the tests')
parser.add_argument('--test-package', type=str, dest='test_package',
help='run a specific test package')
parser.add_argument('--test-case', type=str, dest='test_case',
help='run a specific test case')
parser.add_argument('--cross-compile', action='store_true', dest='cross_compile',
help='cross-compile the build')
    parser.add_argument('--no-govet', action='store_true', dest='no_govet',
                        help='skip go vet')
    parser.add_argument('--no-golint', action='store_true', dest='no_golint',
                        help='skip golint')
args = parser.parse_args()
if args.test_package is not None:
if not args.test:
error_and_exit('--test option is required for --test-package option')
if args.test_case is not None:
if not args.test:
error_and_exit('--test option is required for --test-case option')
if args.test_package is None:
error_and_exit('--test-package option is required for --test-case option')
return args
def error(msg):
    print('Error: ' + msg)
def error_and_exit(msg):
error(msg)
sys.exit(1)
def build_packages(args):
for package in all_packages:
# only do gofmt, golint, and govet on source packages
env_vars = os.environ.copy()
if 'GOPATH' in env_vars:
env_vars['GOPATH'] = os.getcwd() + os.pathsep + env_vars['GOPATH']
else:
env_vars['GOPATH'] = os.getcwd()
if package in source_packages:
gofmt(package, env_vars)
if not args.no_golint:
golint(package, env_vars)
if not args.no_govet:
govet(package, env_vars)
cmd = ['go', 'install', package]
cmd_str = ' '.join(cmd)
if args.cross_compile:
for cross_compilation in cross_compilations:
env_vars['GOOS'] = cross_compilation[0]
env_vars['GOARCH'] = cross_compilation[1]
if subprocess.call(cmd, env=env_vars) != 0:
error_and_exit('Got a non-zero exit code while executing ' + cmd_str)
else:
if subprocess.call(cmd, env=env_vars) != 0:
error_and_exit('Got a non-zero exit code while executing ' + cmd_str)
def run_tests(args):
if args.test_package is not None:
if args.test_case is not None:
            # run a specific test case in a particular test package
cmd = ['go', 'test', args.test_package, '-run', args.test_case, '-v']
cmd_str = ' '.join(cmd)
env_vars = os.environ.copy()
env_vars['GOPATH'] = os.getcwd()
if subprocess.call(cmd, env=env_vars) != 0:
error_and_exit('Got a non-zero exit code while executing ' + cmd_str)
else:
            # run all tests in a particular test package
cmd = ['go', 'test', args.test_package, '-v']
cmd_str = ' '.join(cmd)
env_vars = os.environ.copy()
env_vars['GOPATH'] = os.getcwd()
if subprocess.call(cmd, env=env_vars) != 0:
error_and_exit('Got a non-zero exit code while executing ' + cmd_str)
else:
# run all tests in all test packages
for test_package in test_packages:
cmd = ['go', 'test', test_package, '-v']
cmd_str = ' '.join(cmd)
env_vars = os.environ.copy()
env_vars['GOPATH'] = os.getcwd()
if subprocess.call(cmd, env=env_vars) != 0:
error_and_exit('Got a non-zero exit code while executing ' + cmd_str)
def clean():
for f in files_to_remove:
if os.path.exists(f):
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f)
def create_package():
if not os.path.isdir(release_dir):
os.makedirs(release_dir)
execs = []
for root, dirs, files in os.walk('bin'):
for f in files:
for executable in executables:
if f.startswith(executable):
execs.append(os.path.join(root, f))
for exc in execs:
shutil.copy2(exc, release_dir)
with zipfile.ZipFile(release_file, 'w') as zf:
for root, dirs, files in os.walk(release_dir):
for f in files:
zf.write(os.path.join(root, f))
def gofmt(pkg, env_vars):
cmd = ['go', 'fmt', pkg]
cmd_str = ' '.join(cmd)
if subprocess.call(cmd, env=env_vars) != 0:
error_and_exit('Got a non-zero exit code while executing ' + cmd_str)
def govet(pkg, env_vars):
cmd = ['go', 'vet', pkg]
cmd_str = ' '.join(cmd)
if subprocess.call(cmd, env=env_vars) != 0:
error_and_exit('Got a non-zero exit code while executing ' + cmd_str)
def golint(pkg, env_vars):
cmd = ['golint', pkg]
cmd_str = ' '.join(cmd)
if subprocess.call(cmd, env=env_vars) != 0:
error_and_exit('Got a non-zero exit code while executing ' + cmd_str)
def main(args):
if args.clean:
clean()
else:
build_packages(args)
if args.test:
run_tests(args)
if args.package:
create_package()
if __name__ == '__main__':
args = parse_args()
main(args)
|
from .chem_objects import ChemObject
"""
Lazy Classes to create useful organic compounds.
"""
class PrimaryAlcohol(ChemObject):
"""
Returns a straight chain alcohol.
chain_length >= 1
TODO: Isn't working currently for some reason.
Will fix later.
"""
def __init__(self, chain_length=1):
compound = methyl + (chain_length-1)*methylene + "-OH"
print(compound)
ChemObject.__init__(self,
compound)
class SecondaryAlcohol(ChemObject):
"""
Use pos >= 2 and chain_length >= 3 for desired results.
"""
def __init__(self, pos=2, chain_length=3):
ChemObject.__init__(self,
methyl + (pos-1)*methylene +
methine + "(-OH)" +
(chain_length-pos-1)*methylene + methyl)
class PrimaryAcid(ChemObject):
"""
Returns a straight chain carboxylic acid.
chain_length >= 1
"""
def __init__(self, chain_length=1):
if chain_length == 1:
ChemObject.__init__(self,
"H[3]-C(-OH[5])=O")
else:
compound = methyl + (chain_length-2)*methylene + "-COOH"
print(compound)
ChemObject.__init__(self,
compound)
"""
These are a few compounds that are very frequently used and
so can be called directly, without having to write the long chemfig code
again and again. Saves time, really.
"""
methyl = "[4]H-C(-[2]H)(-[6]H)"
methylene = "-C(-[2]H)(-[6]H)"
methine = "-C(-[2]H)"
Phenol = "*6(-=-=(-OH)-=)"
Water = "[5]H-\lewis{1:2:,O}-H[-1]"
Ethanol = PrimaryAlcohol(2)
Acetic_Acid = Ethanoic_Acid = PrimaryAcid(2)
Benzene = "*6(-=-=-=)"
Benzene_Diazonium_Chloride = BDC = "*6(-=-=(-\\chemabove{N_2}{\quad\scriptstyle+}\\chemabove{Cl}{\quad\scriptstyle-})-=-)"
Aniline ="*6(-=-=(-NH2)-=-)"
Carbon_Dioxide = CO2 = "[4]O=C=O"
|
from OpenGLCffi.GL import params
@params(api='gl', prms=['n', 'states'])
def glCreateStatesNV(n, states):
pass
@params(api='gl', prms=['n', 'states'])
def glDeleteStatesNV(n, states):
pass
@params(api='gl', prms=['state'])
def glIsStateNV(state):
pass
@params(api='gl', prms=['state', 'mode'])
def glStateCaptureNV(state, mode):
pass
@params(api='gl', prms=['tokenID', 'size'])
def glGetCommandHeaderNV(tokenID, size):
pass
@params(api='gl', prms=['shadertype'])
def glGetStageIndexNV(shadertype):
pass
@params(api='gl', prms=['primitiveMode', 'buffer', 'indirects', 'sizes', 'count'])
def glDrawCommandsNV(primitiveMode, buffer, indirects, sizes, count):
pass
@params(api='gl', prms=['primitiveMode', 'indirects', 'sizes', 'count'])
def glDrawCommandsAddressNV(primitiveMode, indirects, sizes, count):
pass
@params(api='gl', prms=['buffer', 'indirects', 'sizes', 'states', 'fbos', 'count'])
def glDrawCommandsStatesNV(buffer, indirects, sizes, states, fbos, count):
pass
@params(api='gl', prms=['indirects', 'sizes', 'states', 'fbos', 'count'])
def glDrawCommandsStatesAddressNV(indirects, sizes, states, fbos, count):
pass
@params(api='gl', prms=['n', 'lists'])
def glCreateCommandListsNV(n, lists):
pass
@params(api='gl', prms=['n', 'lists'])
def glDeleteCommandListsNV(n, lists):
pass
@params(api='gl', prms=['list'])
def glIsCommandListNV(list):
pass
@params(api='gl', prms=['list', 'segment', 'indirects', 'sizes', 'states', 'fbos', 'count'])
def glListDrawCommandsStatesClientNV(list, segment, indirects, sizes, states, fbos, count):
pass
@params(api='gl', prms=['list', 'segments'])
def glCommandListSegmentsNV(list, segments):
pass
@params(api='gl', prms=['list'])
def glCompileCommandListNV(list):
pass
@params(api='gl', prms=['list'])
def glCallCommandListNV(list):
pass
|
"""Module for the Risk-Exploiting Graph Scenario with variable input parameters
- A network of agents is to act as a connection between origin and destination terminals at
known (observed) locations.
- This is a simplified, surrogate scenario for the more complex communication relay scenario,
since it would be expected to produce similar physical behavior of the network without adding
the complexity of message passing to the action space
- Agents can form links to other agents over small distances
- Reward is received by all agents if the two fixed terminals are connected (identical_systemic_reward case)
- An unobserved landmark in the environment is a hazard and can cause agents in its vicinity to be terminated
with some probability. Surviving agents may observe terminated agents in order to deduce the location of the hazard
and avoid it
- Agents' actions are their movements
- Scenario is derived from simple_graph_small, simple_graph_large, and ergo_graph_large, making the following
parameters settable instead of hard-coded:
- number of agents
- number of hazards (0,1)
- whether rewards are shared or "local"
- whether observations are direct per entity or histogram based
- Hazard failure risk
- Collision failure risk
"""
import numpy as np
from bisect import bisect, insort
from shapely.geometry import LineString, Point
from multiagent.scenario import BaseScenario
from multiagent.core import Landmark
from particle_environments.mager.observation import format_observation, agent_histogram_observation
from particle_environments.mager.world import SensingLimitedMortalAgent, MortalAgent, HazardousWorld, RiskRewardLandmark
from particle_environments.common import is_collision, distance, delta_pos, delta_vel, nearest_point_on_line_segment_2d, check_2way_communicability
from particle_environments.common import RadialPolynomialRewardFunction2D as RadialReward
from particle_environments.common import RadialBernoulliRiskFunction2D as RadialRisk
from particle_environments.common import linear_index_to_lower_triangular, SimpleNetwork, truncate_or_pad
from rl_algorithms.scenariolearning import ScenarioHeuristicAgentTrainer
# Scenario Parameters
#_MAX_CONNECTION_DISTANCE = 0.60 # 4 agents
_TERMINAL_DISTANCE = 2.0
_CONNECTION_MARGIN = 1.2
_MAX_OBSERVATION_DISTANCE = 1.0
_CONNECTION_REWARD = 1.0
_TERMINATION_REWARD = -0.0
_AGENT_SIZE = 0.01
_LANDMARK_SIZE = 0.025
_HAZARD_SIZE = 0.1
_N_RADIAL_BINS = 4
_N_ANGULAR_BINS = 8
_N_OBSERVED_TERMINATIONS = 5
_N_TERMINALS = 2
_ZERO_THRESHOLD = 1e-6
class Scenario(BaseScenario):
def __init__(self, *, num_agents, num_hazards, identical_rewards, observation_type, hazard_risk=0.5, collision_risk=0.0):
'''
Args:
- num_agents [int] number of agents in scenario
- num_hazards [int] number of hazards landmarks in the scenario
        - identical_rewards [bool] true if all agents receive exact same reward, false if rewards are "local" to agents
        - observation_type [str] "direct" if observation directly of each entity, "histogram" if entities are binned in a spatial grid
- hazard_risk [float] max probability of failure caused by hazard landmark
- collision_risk [float] probability of failure caused by collision
'''
# check inputs
assert isinstance(num_agents, int); assert num_agents >= 1
assert isinstance(num_hazards, int); assert (num_hazards == 0 or num_hazards == 1)
assert isinstance(identical_rewards, bool)
assert (observation_type == "direct" or observation_type == "histogram")
assert (hazard_risk >= 0.0 and hazard_risk <= 1.0)
assert (collision_risk >= 0.0 and collision_risk <= 1.0)
        # set max connection distance such that there is a fixed, small probability of connection
        # between terminals given random placement of n agents (0.1% with the coefficients used below)
        # equation found empirically, see connection_probability_measurement.py
if not np.isclose(_TERMINAL_DISTANCE, 2.0):
raise Warning('Connection distance formula assumes distance between terminals of 2.0, received {}'.format(_TERMINAL_DISTANCE))
# c_1 = 1.6838; c_2 = 0.18367; c_3 = -0.5316 # 5% probability of connection with random placement
# c_1 = 1.29428202; c_2 = 0.24156174; c_3 = -0.23681555 # 1% probability of connection with random placement
c_1 = 0.9834973; c_2 = 0.34086771; c_3 = -0.01181418 # 0.1% probability of connection with random placement
self.max_connection_distance = c_1 * num_agents**(-c_2) + c_3
assert self.max_connection_distance > 0.0
# set member vars
self.num_agents = num_agents
self.num_hazards = num_hazards
self.identical_rewards = identical_rewards
self.observation_type = observation_type
self.hazard_risk = hazard_risk
self.collision_risk = collision_risk
def make_world(self):
world = HazardousWorld()
# set scenario-specific world parameters
world.collaborative = True
world.systemic_rewards = True
world.identical_rewards = self.identical_rewards
world.dim_c = 0 # observation-based communication
world.connection_reward = _CONNECTION_REWARD
world.termination_reward = _TERMINATION_REWARD
world.render_connections = True
# add landmarks. terminals first then hazards (if any)
# world.origin_terminal_landmark = RiskRewardLandmark( risk_fn=None, reward_fn=RadialReward(1.0, 0.0))
# world.destination_terminal_landmark = RiskRewardLandmark( risk_fn=None, reward_fn=RadialReward(1.0, 0.0))
world.origin_terminal_landmark = Landmark()
world.destination_terminal_landmark = Landmark()
world.landmarks = [world.origin_terminal_landmark, world.destination_terminal_landmark]
world.hazard_landmarks = []
for i in range(self.num_hazards):
lm = RiskRewardLandmark( risk_fn=RadialRisk(_HAZARD_SIZE, self.hazard_risk), reward_fn=RadialReward(_HAZARD_SIZE, 0.0))
lm.silent = True
lm.deaf = True
lm.ignore_connection_rendering = True
world.hazard_landmarks.append(lm)
world.landmarks.append(lm)
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark_%d' % i
landmark.collide = False
landmark.movable = False
landmark.size = _LANDMARK_SIZE
# properties for landmarks
if isinstance(landmark, RiskRewardLandmark) and landmark.is_hazard:
#TODO: make colors heatmap of risk probability over all bounds
landmark.color = np.array([landmark.risk_fn.get_failure_probability(0,0) + .1, 0, 0])
else:
landmark.color = np.array([0.25, 0.25, 0.25])
# make initial conditions
self.reset_world(world)
return world
def reset_world(self, world):
# random properties for agents
# add agents
world.agents = [SensingLimitedMortalAgent(_MAX_OBSERVATION_DISTANCE, self.max_connection_distance) for i in range(self.num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent_%d' % i
agent.collide = True
agent.blind = False
agent.silent = False
agent.deaf = False
agent.terminated = False
agent.size = _AGENT_SIZE
agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
agent.color = np.array([0.35, 0.35, 0.85])
agent.previous_observation = None
# place landmarks
for landmark in world.landmarks:
# landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
landmark.state.p_pos = np.zeros(world.dim_p)
landmark.state.p_vel = np.zeros(world.dim_p)
        # randomize terminal locations, but regularize to ensure consistent distances
origin_state, destination_state, hazard_states = self.spawn_landmarks(world)
world.origin_terminal_landmark.state.p_pos = origin_state
world.destination_terminal_landmark.state.p_pos = destination_state
for i in range(self.num_hazards):
world.hazard_landmarks[i].state.p_pos = hazard_states[i]
def spawn_landmarks(self, world):
''' create communication terminals at random positions but regularized distance
Notes:
- regularizing the distance between terminals is important to ensure consistency in
max rewards possible between different episodes
'''
# angle of line connecting terminals
th = np.random.uniform(0, 2.0*np.pi)
# distance between terminals
# d = np.random.normal(2.0, 0.1)
d = _TERMINAL_DISTANCE
dx = d/2.0*np.cos(th)
dy = d/2.0*np.sin(th)
# center of line connecting terminals
xc = yc = 0.0
# hazard state position along connecting line
hazard_states = []
for i in range(self.num_hazards):
dh = np.random.uniform(-0.9*d/2.0, 0.9*d/2.0)
dhx = dh*dx
dhy = dh*dy
hazard_states.append(np.array([xc+dhx, yc+dhy]))
return (np.array([xc-dx, yc-dy]), np.array([xc+dx, yc+dy]), hazard_states)
def benchmark_data(self, agent, world):
collisions = 0
occupied_landmarks = 0
min_dists = 0
for l in world.landmarks:
dists = [np.linalg.norm(a.state.p_pos - l.state.p_pos) for a in world.agents]
min_dists += min(dists)
if min(dists) < 0.1:
occupied_landmarks += 1
if agent.collide:
for a in world.agents:
if is_collision(a, agent):
collisions += 1
return (self.reward(agent, world), collisions, min_dists, occupied_landmarks)
def done_callback(self, agent, world):
''' indicate a terminated agent as done (still being decided)
Notes:
- Even though terminated agent cannot take actions, it may be more appropriate
to NOT set the agent is done in order to keep collecting data for training
purposes
'''
# if agent.terminated:
# return True
# else:
# return False
return False
def reward(self, agent, world, systemic_call=False):
if systemic_call:
if self.identical_rewards and world.identical_rewards:
return self._identical_systemic_reward(world)
elif not self.identical_rewards and not world.identical_rewards:
return self._local_systemic_reward(world)
else:
raise Exception(
"Inconsistent reward options: self.identical_rewards={} world.identical_rewards={}".format(
self.identical_rewards, world.identical_rewards))
else:
return 0.0
def _local_systemic_reward(self, world):
''' reward agent if they are part of complete connection between terminals
Notes:
'''
assert self.identical_rewards == False
assert world.identical_rewards == False
comm_net = self._create_network(world)
reward_n = [0.0]*self.num_agents
node_count = 2
for i, a in enumerate(world.agents):
if a.terminated:
reward_n[i] = world.termination_reward
else:
# check ordering has stayed consistent in node list
assert(a==comm_net.nodes[node_count])
reward_n[i] = world.connection_reward*(
comm_net.breadth_first_connectivity_search(node_count, 0) and
comm_net.breadth_first_connectivity_search(node_count, 1)
)
node_count += 1
return reward_n
def _identical_systemic_reward(self, world):
''' reward all agents the same if there is a complete connection between terminals
Notes:
'''
assert self.identical_rewards == True
assert world.identical_rewards == True
comm_net = self._create_network(world)
reward_n = [comm_net.breadth_first_connectivity_search(0,1)]*self.num_agents
return reward_n
def _create_network(self, world):
''' Establish connectivity network at every time step
'''
# define nodes in simple connectivity network
# by construction, node 0 is origin landmark, node 1 is destination landmark
# terminated agents are not part of network
nodes = [world.origin_terminal_landmark, world.destination_terminal_landmark]
nodes.extend([a for a in world.agents if not a.terminated])
n_nodes = len(nodes)
comm_net = SimpleNetwork(nodes)
        # count unique node pairs (lower triangular indexing over the nodes);
        # note that no direct connection is allowed between origin and destination
        n_pairs = int(n_nodes*(n_nodes+1)/2)
        # add an edge for every pair of nodes that can communicate both ways
for k in range(n_pairs):
i,j = linear_index_to_lower_triangular(k)
if i==1 and j==0: continue # enforce that origin & destination don't directly connect
if check_2way_communicability(nodes[i], nodes[j]):
comm_net.add_edge(i, j)
        # connectivity of the resulting network is later evaluated with breadth-first search
return comm_net
def observation(self, agent, world):
''' call observation function based on type of observation function '''
if self.observation_type == "direct":
return self._direct_observation(agent, world)
elif self.observation_type == "histogram":
return self._histogram_observation(agent, world)
else:
raise Exception("Unrecognized observation type: {}".format(self.observation_type))
def _direct_observation(self, agent, world):
# get positions of all entities in this agent's reference frame
def communications_observed(other_comm_node):
''' Communication between agents is just the conductance
Notes:
- inverse of comm resistance (i.e. conductance) used so that we can use
zero for out of range comms
- noisy measurement of heading
- TODO: observation of failures
'''
# check if node is terminated
is_terminated = 0
if isinstance(other_comm_node, MortalAgent) and other_comm_node.terminated:
is_terminated = 1
dx = dy = dvx = dvy = 0.
if not is_terminated:
dx, dy = delta_pos(other_comm_node, agent)
dvx, dvy = delta_vel(other_comm_node, agent)
comms = [is_terminated, dx, dy, dvx, dvy]
# set comms to zero if out for range
# if distance(agent, other_comm_node) > agent.max_observation_distance:
if not check_2way_communicability(agent, other_comm_node):
comms = [0] * len(comms)
return comms
# Observe communication terminals
terminals = (world.origin_terminal_landmark.state.p_pos.tolist() +
world.destination_terminal_landmark.state.p_pos.tolist())
# comm_nodes are origin and destination terminals and unterminated agents
comm_nodes = []
comm_nodes.extend([a for a in world.agents if a is not agent])
communications = format_observation(observe = communications_observed,
objects = comm_nodes,
num_observations = self.num_agents-1,
observation_size = 2*world.dim_p + 1)
# package observation
obs = np.asarray([agent.terminated] + agent.state.p_vel.tolist() + agent.state.p_pos.tolist() + terminals + communications)
return obs
def _histogram_observation(self, agent, world):
# get positions of all entities in this agent's reference frame
# Observe communication terminals
terminals = (world.origin_terminal_landmark.state.p_pos.tolist() +
world.destination_terminal_landmark.state.p_pos.tolist())
# get histogram of agent observations
agent_histogram_2d, observed_terminations_2d = agent_histogram_observation(
agent, world.agents, _MAX_OBSERVATION_DISTANCE, _N_RADIAL_BINS, _N_ANGULAR_BINS)
# flatten histogram to 1d list
agent_histogram = [val for sublist in agent_histogram_2d for val in sublist]
# flatten, truncate/pad observed terminations to fixed length
observed_terminations = [val for sublist in observed_terminations_2d for val in sublist]
observed_terminations = truncate_or_pad(observed_terminations, 2*_N_OBSERVED_TERMINATIONS)
# package new observation
new_obs = np.asarray([agent.terminated] + agent.state.p_vel.tolist() + agent.state.p_pos.tolist() + terminals + agent_histogram + observed_terminations)
# append previous observation for velocity estimation
if agent.previous_observation is None:
agent.previous_observation = 0.0*new_obs
obs = np.append(new_obs, agent.previous_observation)
agent.previous_observation = new_obs
return obs
class ScenarioHeuristicComputer(ScenarioHeuristicAgentTrainer):
    ''' representation of an individual agent's embedded processor and memory, tailored to the scenario
Notes:
- This is meant to be used as a scenario-specific alternative to
the more general purpose, scenario-agnostic "trainers". It can hold an agents model
of the world (transition and reward functions), policy, and learning process, if any.
'''
def __init__(self, name, model, obs_shape_n, act_space_n, agent_index, args, **kwargs):
ScenarioHeuristicAgentTrainer.__init__(self, name, model, obs_shape_n, act_space_n, agent_index, args)
raise NotImplementedError()
def get_initial_policy_distribution(self):
''' method for "jumpstarting" monte carlo group distribution
'''
raise NotImplementedError()
def action(self, obs):
''' maps observation array to action forces in x,y directions
Notes:
- Assumes observation array formated as:
[0:2] = agent.state.p_vel.tolist()
[2:4] = agent.state.p_pos.tolist()
[4:8] = terminals
[8:40] = agent_histogram +
failures)
'''
raise NotImplementedError()
def experience(self, obs, act, rew, new_obs, done, terminal):
''' Monte Carlo learning only record cumulative reward
'''
# record cumulative reward
raise NotImplementedError()
def preupdate(self):
'''unused function handle compatibility with train.py
'''
raise NotImplementedError()
def update(self, agents, t):
'''unused function handle compatibility with train.py
'''
raise NotImplementedError()
def group_policy_update(self, group_policy):
'''update behavior parameters based on group policy
'''
raise NotImplementedError()
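# Hedged sanity-check sketch (not part of the scenario API): verifies that the
# empirical connection-distance formula used in Scenario.__init__ reproduces the
# old hard-coded value of roughly 0.60 for 4 agents (see the commented-out
# _MAX_CONNECTION_DISTANCE constant near the top of this module).
if __name__ == '__main__':
    c_1, c_2, c_3 = 0.9834973, 0.34086771, -0.01181418
    print('max_connection_distance for 4 agents: %.3f' % (c_1 * 4 ** (-c_2) + c_3))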
|
import mysql.connector as conn
import pandas as pd
import datetime
def show_books(roll,password):
try:
cnx=conn.connect(user='root',password='swapn',host='127.0.0.1',database='library')
cur_book=cnx.cursor()
cur_book.execute("select * from book order by course_id")
temp=cur_book.fetchall()
frame=pd.DataFrame(temp,columns=['BOOK-ID','BOOK-NAME','PRICE','AUTHOR','COURSE-ID'])
print("Total list of books are given below")
print(frame)
cnx.close()
except:
pass
def show_status(roll,password):
try:
cnx=conn.connect(user='root',password='swapn',host='127.0.0.1',database='library')
cur_book_bank=cnx.cursor()
cur_book=cnx.cursor()
cur_book_bank.execute("select book_id,book_amt from book_bank")
BookBank=cur_book_bank.fetchall()
temp=[]
for i in BookBank:
cur_book.execute("select book_name from book where book_id=%s",(i[0],))
bookname=cur_book.fetchone()[0]
a=[i[0],bookname,i[1]]
temp.append(a)
frame=pd.DataFrame(temp,columns=['BOOK-ID','BOOK-NAME','BOOK-AMT'])
print(frame)
cnx.close()
except:
pass
def show_borrow(roll,password):
try:
cnx=conn.connect(user='root',password='swapn',host='127.0.0.1',database='library')
cur_borrow=cnx.cursor()
cur_book=cnx.cursor()
cur_borrow.execute("select * from borrow")
borrow=cur_borrow.fetchall()
temp=[]
for i in borrow:
cur_book.execute("select book_name from book where book_id=%s",(i[1],))
bookname=cur_book.fetchone()[0]
date=datetime.datetime.now()
date1=datetime.date(date.year,date.month,date.day)
date2=i[4]
t=date1-date2
if t.days>0:
late=t.days
fine=late*5
else:
late=0
fine=0
a=[i[0],bookname,i[2],i[4],late,fine,i[3]]
temp.append(a)
frame=pd.DataFrame(temp,columns=['BORROW-ID','BOOK-NAME','BORROW-DATE','RETURN-DATE','LATE','FINE','ROLL'])
print(frame)
cnx.close()
except:
pass
def show_return(roll,password):
try:
cnx=conn.connect(user='root',password='swapn',host='127.0.0.1',database='library')
cur_return=cnx.cursor()
cur_book=cnx.cursor()
cur_return.execute("select * from return_book")
returnbook=cur_return.fetchall()
temp=[]
for i in returnbook:
cur_book.execute("select book_name from book where book_id=%s",(i[1],))
bookname=cur_book.fetchone()[0]
a=[i[0],bookname,i[2],i[3],i[4],i[5],i[6]]
temp.append(a)
frame=pd.DataFrame(temp,columns=['RETURN-ID','BOOK-NAME','DATE-BORROW','DATE-RETURN','LATE','FINE','ROLL'])
print(frame)
cnx.close()
except:
pass
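# --- hedged sketch, not part of the original library code ---
# The late-fee rule used inside show_borrow above (5 units per day past the
# stored return date), pulled out as a standalone helper for clarity.
# The function name and rate parameter are illustrative only.
def compute_fine(due_date, today, rate_per_day=5):
    """Return (late_days, fine) given a due date and the current date."""
    late_days = max((today - due_date).days, 0)
    return late_days, late_days * rate_per_day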
|
import computer
output = ""
total = 0
x = 0
y = 0
def RecieveOutput(out):
global output
global total
if(out == 0):
output += "."
return
if(out == 1):
output += "#"
total += 1
return
inputIndex = 0
def SendInput():
global inputIndex
inputIndex += 1
if(inputIndex % 2 == 1):
return x
else:
return y
for X in range(200):
for Y in range(200):
x = X
y = Y
computer.Run(RecieveOutput, SendInput)
output += "\n"
print(output)
print(total)
|
# -*- coding: utf-8 -*-
import logging
import os
from unittest.mock import Mock, patch
from django.test import TransactionTestCase
from eventkit_cloud.utils.gpkg.sqlite_utils import get_database_connection, Table
logger = logging.getLogger(__name__)
class TestSqliteUtils(TransactionTestCase):
def setUp(self):
self.path = os.path.dirname(os.path.realpath(__file__))
@patch("eventkit_cloud.utils.gpkg.sqlite_utils.connect")
def test_get_database_connection(self, connect):
from sqlite3 import Row
cursor_mock = Mock()
cursor_mock.fetchone.return_value = "test"
connect().__enter__().cursor.return_value = cursor_mock
# Test that a connection object is returned
with get_database_connection(self.path) as conn:
self.assertEqual(conn.cursor().fetchone(), "test")
# Test that the row_factory property is correctly set to sqlite3.Row
self.assertEqual(get_database_connection(self.path).row_factory, Row)
class TestTableQuery(TransactionTestCase):
def setUp(self):
self.path = os.path.dirname(os.path.realpath(__file__))
def test_get_table_query_validate(self):
cursor_mock = Mock()
def fill_cursor(*args):
if args[1][0] in ["gpkg_contents"]:
cursor_mock.fetchone.return_value = ("gpkg_contents",)
else:
cursor_mock.fetchone.return_value = tuple()
cursor_mock.execute.side_effect = fill_cursor
passed = True
try:
Table(cursor_mock, "gpkg_contents").validate()
except ValueError:
passed = False
self.assertTrue(passed)
self.assertRaises(ValueError, Table(cursor_mock, "gpkg_metadata").validate)
try:
Table(cursor_mock, "sqlite_master").validate()
except ValueError:
passed = False
self.assertTrue(passed)
class TestTable(TransactionTestCase):
def setUp(self):
self.path = os.path.dirname(os.path.realpath(__file__))
def test_get_table_exists(self):
cursor_mock = Mock()
def fill_cursor(*args):
if args[1][0] in ["gpkg_contents", "other_table"]:
cursor_mock.fetchone.return_value = ("gpkg_contents",)
else:
cursor_mock.fetchone.return_value = tuple()
cursor_mock.execute.side_effect = fill_cursor
self.assertTrue(Table.exists(cursor_mock, "gpkg_contents"))
        self.assertTrue(cursor_mock.execute.called)  # .called_once would just be an auto-created, always-truthy Mock attribute
self.assertTrue(not Table.exists(cursor_mock, "gpkg_metadata"))
self.assertTrue(Table.exists(cursor_mock, "other_table"))
|
import numpy as np
from copy import copy
from .base import Simplifier
from ... import operations
from ...analyzers import SplitAnalysis
class ConvertBatchNorm(Simplifier):
ANALYSES = {"is_split": SplitAnalysis}
def visit_BatchNormalization(self, operation: operations.BatchNormalization):
input_op = operation.x
if (
isinstance(input_op, operations.Conv)
and not self.analysis["is_split"][input_op]
):
std = np.sqrt(operation.variance + operation.epsilon)
a = operation.scale / std
b = operation.bias - operation.scale * operation.mean / std
weights = input_op.w
a_w = a[:, None, None, None]
weights = a_w * weights
bias = input_op.b
if bias is None:
bias = np.zeros(weights.shape[0], dtype=weights.dtype)
bias = a * bias + b
new_operation = copy(input_op)
new_operation.w = weights
new_operation.b = bias
return new_operation
elif (
isinstance(input_op, operations.Gemm)
and not self.analysis["is_split"][input_op]
):
std = np.sqrt(operation.variance + operation.epsilon)
a = operation.scale / std
b = operation.bias - operation.mean * a
return operations.Gemm(input_op, np.diag(a), b)
elif isinstance(input_op, operations.Input):
input_shape = input_op.shape
input_dtype = input_op.dtype
if len(input_shape) == 2:
std = np.sqrt(operation.variance + operation.epsilon)
a = operation.scale / std
b = operation.bias - operation.mean * a
return operations.Gemm(input_op, np.diag(a), b)
elif len(input_shape) == 4:
c = operation.mean.shape[0]
std = np.sqrt(operation.variance + operation.epsilon)
k = np.zeros(
(c, c, 1, 1), dtype=input_dtype
                )  # identity 1x1 kernel, laid out (outC, inC, kH, kW)
for i in range(c):
k[i, i, 0, 0] = 1
                W = k * (operation.scale / std)[:, None, None, None]  # scale each output channel
b = operation.bias - operation.scale * operation.mean / std
op = operations.Conv(input_op, W, b)
return op
# TODO : in what other scenarios can BatchNorm be converted?
return operation
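# --- hedged sketch, not part of the simplifier itself ---
# Numerical sanity check of the folding algebra used in the Gemm branch above:
#   batchnorm(x) = scale * (x - mean) / sqrt(var + eps) + bias
#                = x @ diag(scale / std) + (bias - mean * scale / std)
# All names below are local to this illustration; the function is never called
# by the simplifier.
def _check_batchnorm_fold_example():
    rng = np.random.default_rng(0)
    x = rng.normal(size=(5, 3))
    scale, bias = rng.normal(size=3), rng.normal(size=3)
    mean, var, eps = rng.normal(size=3), rng.uniform(0.1, 1.0, size=3), 1e-5
    std = np.sqrt(var + eps)
    reference = scale * (x - mean) / std + bias     # plain batch norm
    a = scale / std
    folded = x @ np.diag(a) + (bias - mean * a)     # folded affine form
    assert np.allclose(reference, folded)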
|
from optuna.logging import get_logger
from optuna.study import Study
from optuna.trial import TrialState
from optuna.visualization._plotly_imports import _imports
if _imports.is_successful():
from optuna.visualization._plotly_imports import go
_logger = get_logger(__name__)
def plot_intermediate_values(study: Study) -> "go.Figure":
"""Plot intermediate values of all trials in a study.
Example:
The following code snippet shows how to plot intermediate values.
.. plotly::
import optuna
def f(x):
return (x - 2) ** 2
def df(x):
return 2 * x - 4
def objective(trial):
lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)
x = 3
for step in range(128):
y = f(x)
trial.report(y, step=step)
if trial.should_prune():
raise optuna.TrialPruned()
gy = df(x)
x -= gy * lr
return y
sampler = optuna.samplers.TPESampler(seed=10)
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=16)
optuna.visualization.plot_intermediate_values(study)
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their intermediate
values.
Returns:
A :class:`plotly.graph_objs.Figure` object.
"""
_imports.check()
return _get_intermediate_plot(study)
def _get_intermediate_plot(study: Study) -> "go.Figure":
layout = go.Layout(
title="Intermediate Values Plot",
xaxis={"title": "Step"},
yaxis={"title": "Intermediate Value"},
showlegend=False,
)
target_state = [TrialState.PRUNED, TrialState.COMPLETE, TrialState.RUNNING]
trials = [trial for trial in study.trials if trial.state in target_state]
if len(trials) == 0:
_logger.warning("Study instance does not contain trials.")
return go.Figure(data=[], layout=layout)
traces = []
for trial in trials:
if trial.intermediate_values:
sorted_intermediate_values = sorted(trial.intermediate_values.items())
trace = go.Scatter(
x=tuple((x for x, _ in sorted_intermediate_values)),
y=tuple((y for _, y in sorted_intermediate_values)),
mode="lines+markers",
marker={"maxdisplayed": 10},
name="Trial{}".format(trial.number),
)
traces.append(trace)
if not traces:
_logger.warning(
"You need to set up the pruning feature to utilize `plot_intermediate_values()`"
)
return go.Figure(data=[], layout=layout)
figure = go.Figure(data=traces, layout=layout)
return figure
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import sys
import savReaderWriter as rw
from py3k import isPy3k, isCPython
class Test_spss2strDate(unittest.TestCase):
def setUp(self):
self.savFileName = 'test_data/test_dates.sav'
self.reader = rw.SavReader(self.savFileName)
self.convert = self.reader.spss2strDate
def test_time(self):
got = self.convert(1, "%H:%M:%S", None)
self.assertEqual(b'00:00:01', got)
got = self.convert(86399, "%H:%M:%S", None)
self.assertEqual(b'23:59:59', got)
def test_datetime(self):
got = self.convert(11654150400.0, "%Y-%m-%d %H:%M:%S", None)
self.assertEqual(b'1952-02-03 00:00:00', got)
got = self.convert(11654150400.0, "%Y-%m-%d", None)
self.assertEqual(b'1952-02-03', got)
got = self.convert(11654150400.0, "%d-%m-%Y", None)
self.assertEqual(b'03-02-1952', got)
msg = "requires mx package (no Python 3 or Pypy)"
@unittest.skipIf(isPy3k or not isCPython, msg)
def test_datetime_pre1900(self):
got = self.convert(0.0, "%Y-%m-%d %H:%M:%S", None)
self.assertEqual(b'1582-10-14 00:00:00', got)
def test_dtime(self):
got = self.convert(256215.0, "%d %H:%M:%S", None)
self.assertEqual(b'02 23:10:15', got)
got = self.convert(13508553600, "%d %H:%M:%S", None)
self.assertEqual(b'156349 00:00:00', got)
def test_invalid_datetime(self):
got = self.convert(b"invalid", "%Y-%m-%d %H:%M:%S", None)
self.assertEqual(None, got)
got = self.convert(b"invalid", "%Y-%m-%d %H:%M:%S", b"<invalid>")
self.assertEqual(b"<invalid>", got)
def tearDown(self):
self.reader.close()
if __name__ == "__main__":
unittest.main()
|
# -*-coding: utf-8 -
'''
@author: MD. Nazmuddoha Ansary
'''
#--------------------
# imports
#--------------------
import argparse
import os
import json
import random
import cv2
import pandas as pd
import numpy as np
import PIL
import PIL.Image , PIL.ImageDraw , PIL.ImageFont
from tqdm import tqdm
from glob import glob
from ast import literal_eval
from coreLib.dataset import DataSet
from coreLib.utils import create_dir,correctPadding,stripPads,LOG_INFO,lambda_paded_label
from coreLib.words import single
from coreLib.store import genTFRecords
tqdm.pandas()
#--------------------
# main
#--------------------
def main(args):
filename=[]
labels =[]
_path =[]
data_path = args.data_path
main_path = args.save_path
img_height = int(args.img_height)
img_width = int(args.img_width)
nb_train = int(args.nb_train)
max_word_length = int(args.max_word_length)+1
# dataset object
ds=DataSet(data_path)
main_path=create_dir(main_path,"moduled")
# resources
rec_path=create_dir( main_path,"tfrecords")
# pairs
save_path=create_dir(main_path,"bs")
img_dir=create_dir(save_path,"images")
tgt_dir=create_dir(save_path,"targets")
map_dir=create_dir(save_path,"maps")
# data
df=ds.boise_state.df
df["img_path"]=df.filename.progress_apply(lambda x: os.path.join(ds.boise_state.dir,x))
font_path =os.path.join(ds.bangla.fonts,"Bangla.ttf")
# create font
font=PIL.ImageFont.truetype(font_path, size=img_height)
bs_skip=[]
for idx in tqdm(range(len(df))):
try:
# extract
img_path=df.iloc[idx,2]
comps=df.iloc[idx,1]
#-----------------
# image
#-----------------
# image and label
img=cv2.imread(img_path,0)
            # resize (height based)
h,w=img.shape
width= int(img_height* w/h)
img=cv2.resize(img,(width,img_height),fx=0,fy=0, interpolation = cv2.INTER_NEAREST)
#-----------------
# target and map
#-----------------
# unique values
vals=list(np.unique(img))
# construct target
tgts=[]
maps=[]
# grapheme wise separation
for v,comp in zip(vals,comps):
if v!=0:
idxs = np.where(img==v)
y_min,y_max,x_min,x_max = np.min(idxs[0]), np.max(idxs[0]), np.min(idxs[1]), np.max(idxs[1])
# font
h=y_max-y_min
w=x_max-x_min
min_offset=100
max_dim=h+w+min_offset
# draw
image = PIL.Image.new(mode='L', size=(max_dim,max_dim))
draw = PIL.ImageDraw.Draw(image)
draw.text(xy=(0, 0), text=comp, fill=255, font=font)
# create target
tgt=np.array(image)
tgt=stripPads(tgt,0)
# resize
width= int(img_height* w/h)
tgt=cv2.resize(tgt,(width,img_height),fx=0,fy=0, interpolation = cv2.INTER_NEAREST)
tgts.append(tgt)
#--------------------------------
# maps
#--------------------------------
h,w=tgt.shape
map=np.zeros((h,w))
map[int(h/4):int(3*h/4),int(w/4):int(3*w/4)]=ds.vocab.index(comp)
maps.append(map)
tgt=np.concatenate(tgts,axis=1)
map=np.concatenate(maps,axis=1)
# resize
h,w=img.shape
tgt=cv2.resize(tgt,(w,h),fx=0,fy=0, interpolation = cv2.INTER_NEAREST)
# revalue
img[img<255]=0
tgt=255-tgt
# pad correction
img=correctPadding(img,dim=(img_height,img_width))
tgt=correctPadding(tgt,dim=(img_height,img_width))
map=correctPadding(map,dim=(img_height,img_width),pad_val=0)
# save
cv2.imwrite(os.path.join(img_dir,f"bs{idx}.png"),img)
cv2.imwrite(os.path.join(tgt_dir,f"bs{idx}.png"),tgt)
np.save(os.path.join(map_dir,f"bs{idx}.npy"),map)
filename.append(f"bs{idx}")
labels.append(comps)
_path.append(os.path.join(img_dir,f"bs{idx}.png"))
except Exception as e:
#LOG_INFO(e)
bs_skip.append(idx)
LOG_INFO(f"skipped:{len(bs_skip)}")
# pairs
save_path=create_dir(main_path,"synth")
img_dir=create_dir(save_path,"images")
tgt_dir=create_dir(save_path,"targets")
map_dir=create_dir(save_path,"maps")
# create the images
for i in tqdm(range(nb_train)):
try:
# selection
comp_type =random.choice(["grapheme"])
use_dict =random.choice([True,False])
img,tgt,map,label=single(ds,comp_type,use_dict,(img_height,img_width))
# save
cv2.imwrite(os.path.join(img_dir,f"synth{i}.png"),img)
cv2.imwrite(os.path.join(tgt_dir,f"synth{i}.png"),tgt)
np.save(os.path.join(map_dir,f"synth{i}.npy"),map)
filename.append(f"synth{i}")
labels.append(label)
_path.append(os.path.join(img_dir,f"synth{i}.png"))
except Exception as e:
print(e)
# create dataframe
df_s=pd.DataFrame({"filename":filename,"labels":labels,"img_path":_path})
# length
df_s["label_len"]=df_s.labels.progress_apply(lambda x:len(x))
    # label_length correction
df_s=df_s.loc[df_s.label_len<max_word_length]
# encode
df_s["encoded"]= df_s.labels.progress_apply(lambda x:[ds.vocab.index(i) for i in x])
df_s["glabel"] = df_s.encoded.progress_apply(lambda x:lambda_paded_label(x,max_word_length))
# save
df_s.to_csv(os.path.join(main_path,"data.csv") ,index=False)
df_s=df_s[["img_path","glabel"]]
genTFRecords(df_s,rec_path)
# config
config={'img_height':img_height,
'img_width':img_width,
'nb_channels':3,
'vocab':ds.vocab,
'synthetic_data':nb_train,
'boise_state_data':len(df),
'max_word_len':max_word_length,
'map_size':int(img_height*img_width),
'seg_size':int(img_height//2*img_width//2)
}
config_json=os.path.join(main_path,"config.json")
with open(config_json, 'w') as fp:
json.dump(config, fp,sort_keys=True, indent=4,ensure_ascii=False)
#-----------------------------------------------------------------------------------
if __name__=="__main__":
'''
parsing and execution
'''
parser = argparse.ArgumentParser("BHOCR Pre-Training Dataset Creating Script")
parser.add_argument("data_path", help="Path of the input data folder from ReadMe.md)")
parser.add_argument("save_path", help="Path of the directory to save the dataset")
parser.add_argument("--img_height",required=False,default=32,help ="height for each grapheme: default=32")
parser.add_argument("--img_width",required=False,default=128,help ="width dimension of word images: default=128")
parser.add_argument("--nb_train",required=False,default=50000,help ="number of images for training:default:50000")
parser.add_argument("--max_word_length",required=False,default=10,help ="maximum word lenght data to keep:default:10")
args = parser.parse_args()
main(args)
|
import datetime
import json
from django.contrib.admin.templatetags.admin_list import ResultList, result_headers
from django.contrib.admin.utils import (
display_for_field,
display_for_value,
lookup_field,
)
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.forms.utils import flatatt
from django.template import Library
from django.template.loader import get_template
from django.utils.encoding import force_str
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
register = Library()
def items_for_result(view, result, request):
"""
Generates the actual list of data.
"""
modeladmin = view.model_admin
for field_name in view.list_display:
empty_value_display = modeladmin.get_empty_value_display(field_name)
row_classes = ["field-%s" % field_name, "title"]
try:
f, attr, value = lookup_field(field_name, result, modeladmin)
except ObjectDoesNotExist:
result_repr = empty_value_display
else:
empty_value_display = getattr(
attr, "empty_value_display", empty_value_display
)
if f is None or f.auto_created:
allow_tags = getattr(attr, "allow_tags", False)
boolean = getattr(attr, "boolean", False)
if boolean or not value:
allow_tags = True
result_repr = display_for_value(value, empty_value_display, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append("nowrap")
else:
if isinstance(f, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = empty_value_display
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f, empty_value_display)
if isinstance(
f, (models.DateField, models.TimeField, models.ForeignKey)
):
row_classes.append("nowrap")
if force_str(result_repr) == "":
result_repr = mark_safe(" ")
row_classes.extend(
modeladmin.get_extra_class_names_for_field_col(result, field_name)
)
row_attrs = modeladmin.get_extra_attrs_for_field_col(result, field_name)
row_attrs["class"] = " ".join(row_classes)
row_attrs_flat = flatatt(row_attrs)
primary_button = None
if field_name == modeladmin.get_list_display_add_buttons(request):
primary_button = view.button_helper.get_primary_button(result)
if primary_button is not None and primary_button.get("url"):
yield format_html(
'<td{}><div class="title-wrapper"><a href="{}" title="{}">{}</a></div></td>',
row_attrs_flat,
primary_button["url"],
primary_button.get("title", ""),
result_repr,
)
else:
yield format_html("<td{}>{}</td>", row_attrs_flat, result_repr)
def results(view, object_list, request):
for item in object_list:
yield ResultList(None, items_for_result(view, item, request))
@register.inclusion_tag("modeladmin/includes/result_list.html", takes_context=True)
def result_list(context):
"""
Displays the headers and data list together
"""
view = context["view"]
object_list = context["object_list"]
headers = list(result_headers(view))
num_sorted_fields = 0
for h in headers:
if h["sortable"] and h["sorted"]:
num_sorted_fields += 1
context.update(
{
"result_headers": headers,
"num_sorted_fields": num_sorted_fields,
"results": list(results(view, object_list, context["request"])),
}
)
return context
@register.simple_tag
def pagination_link_previous(current_page, view):
if current_page.has_previous():
previous_page_number0 = current_page.previous_page_number() - 1
tpl = get_template("wagtailadmin/shared/icon.html")
icon_svg = tpl.render({"name": "arrow-left", "class_name": "default"})
return format_html(
'<li class="prev"><a href="{}">{} {}</a></li>',
view.get_query_string({view.PAGE_VAR: previous_page_number0}),
icon_svg,
_("Previous"),
)
return ""
@register.simple_tag
def pagination_link_next(current_page, view):
if current_page.has_next():
next_page_number0 = current_page.next_page_number() - 1
tpl = get_template("wagtailadmin/shared/icon.html")
icon_svg = tpl.render({"name": "arrow-right", "class_name": "default"})
return format_html(
'<li class="next"><a href="{}">{} {}</a></li>',
view.get_query_string({view.PAGE_VAR: next_page_number0}),
_("Next"),
icon_svg,
)
return ""
@register.inclusion_tag("modeladmin/includes/search_form.html", takes_context=True)
def search_form(context):
context.update({"search_var": context["view"].SEARCH_VAR})
return context
@register.simple_tag
def admin_list_filter(view, spec):
template_name = spec.template
if template_name == "admin/filter.html":
template_name = "modeladmin/includes/filter.html"
tpl = get_template(template_name)
return tpl.render(
{
"title": spec.title,
"choices": list(spec.choices(view)),
"spec": spec,
}
)
@register.inclusion_tag("modeladmin/includes/result_row.html", takes_context=True)
def result_row_display(context, index):
obj = context["object_list"][index]
view = context["view"]
row_attrs_dict = view.model_admin.get_extra_attrs_for_row(obj, context)
row_attrs_dict["data-object-pk"] = obj.pk
odd_or_even = "odd" if (index % 2 == 0) else "even"
if "class" in row_attrs_dict:
row_attrs_dict["class"] += " %s" % odd_or_even
else:
row_attrs_dict["class"] = odd_or_even
context.update(
{
"obj": obj,
"row_attrs": mark_safe(flatatt(row_attrs_dict)),
"action_buttons": view.get_buttons_for_obj(obj),
}
)
return context
@register.inclusion_tag("modeladmin/includes/result_row_value.html", takes_context=True)
def result_row_value_display(context, index):
add_action_buttons = False
item = context["item"]
closing_tag = mark_safe(item[-5:])
request = context["request"]
model_admin = context["view"].model_admin
field_name = model_admin.get_list_display(request)[index]
if field_name == model_admin.get_list_display_add_buttons(request):
add_action_buttons = True
item = mark_safe(item[0:-5])
context.update(
{
"item": item,
"add_action_buttons": add_action_buttons,
"closing_tag": closing_tag,
}
)
return context
@register.filter
def get_content_type_for_obj(obj):
return obj.__class__._meta.verbose_name
@register.inclusion_tag("modeladmin/prepopulated_slugs.html", takes_context=True)
def prepopulated_slugs(context):
"""
Create a list of prepopulated_fields that should render Javascript for
the prepopulated fields for modeladmin forms.
"""
prepopulated_fields = []
if "prepopulated_fields" in context:
prepopulated_fields.extend(context["prepopulated_fields"])
prepopulated_fields_json = []
for field in prepopulated_fields:
prepopulated_fields_json.append(
{
"id": "#%s" % field["field"].auto_id,
"name": field["field"].name,
"dependency_ids": [
"#%s" % dependency.auto_id for dependency in field["dependencies"]
],
"dependency_list": [
dependency.name for dependency in field["dependencies"]
],
"maxLength": field["field"].field.max_length or 50,
"allowUnicode": getattr(field["field"].field, "allow_unicode", False),
}
)
context.update(
{
"prepopulated_fields": prepopulated_fields,
"prepopulated_fields_json": json.dumps(prepopulated_fields_json),
}
)
return context
|
import json
import hug
from operator import itemgetter
def return_json_file_contents(filename):
"""
Simple function for returning the contents of the input JSON file
"""
try:
with open(filename) as json_data:
return json.load(json_data)
except IOError:
print("File not found: "+filename)
return None
@hug.get(examples='api_key=API_KEY&project=project_name_string&top_k=10&sort_target=RAC')
def get_top_users(api_key: hug.types.text, project: hug.types.string, top_k: hug.types.number, sort_target: hug.types.string, hug_timer=3):
"""Retrieve the kth largest active contributors to an individual BOINC project."""
if (api_key == api_auth_key)
# Valid API key!
supported_project_names = []
list_of_supported_projects = return_json_file_contents('./Config/init_projects.json')
for supported_project in list_of_supported_projects:
supported_project_names.append(supported_project['project_name'])
        if (top_k < 1 or top_k > 1000):
# Invalid top_k value chosen!
return {'success': False, 'api_key': True, 'error_message': 'Invalid top_k value chosen, please input a number between 1 and 1000.', 'took': float(hug_timer)}
if sort_target not in ['expavg_credit', 'total_credit']:
# sort target is invalid!
return {'success': False, 'api_key': True, 'error_message': 'Invalid sort target chosen. Pick either "expavg_credit" or "total_credit"', 'took': float(hug_timer)}
if project in supported_project_names:
# Match - return the contents!
# TODO: dict key sort based on RAC | TotalCredit! Allow the user to specify?
unordered_results = return_json_file_contents("./STATS_DUMP/"+str(project)+".json")
ordered_results = sorted(unordered_results, key=itemgetter(str(sort_target)), reverse=True) # Sort descending
return {'success': True, 'result': ordered_results[:top_k], 'api_key': True, 'error_message': '', 'took': float(hug_timer)}
else:
# No match - return a rejection.
return {'success': False, 'api_key': True, 'error_message': 'Invalid project name! Pick from: "{}"'.format(supported_project_names), 'took': float(hug_timer)}
else:
# Invalid API key!
return {'success': False, 'api_key': False, 'error_message': 'Invalid API key!', 'took': float(hug_timer)}
|
import numpy as np
import unittest
import pytest
from desc.grid import LinearGrid, Grid
from desc.coils import CoilSet, FourierRZCoil, FourierXYZCoil, FourierPlanarCoil
from desc.geometry import FourierRZCurve
class TestCoil(unittest.TestCase):
def test_biot_savart(self):
R = 2
y = 1
I = 1
By_true = 1e-7 * 2 * np.pi * R ** 2 * I / (y ** 2 + R ** 2) ** (3 / 2)
B_true = np.array([0, By_true, 0])
coil = FourierXYZCoil(I)
coil.grid = LinearGrid(N=100, endpoint=True)
assert coil.grid.num_nodes == 100
B_approx = coil.compute_magnetic_field(Grid([[10, y, 0]]), basis="xyz")[0]
np.testing.assert_allclose(B_true, B_approx, rtol=1e-3, atol=1e-10)
def test_properties(self):
current = 4.34
coil = FourierPlanarCoil(current)
assert coil.current == current
new_current = 3.5
coil.current = new_current
assert coil.current == new_current
class TestCoilSet(unittest.TestCase):
def test_linspaced_linear(self):
"""field from straight solenoid"""
R = 10
z = np.linspace(0, 10, 10)
I = 1
Bz_true = np.sum(1e-7 * 2 * np.pi * R ** 2 * I / (z ** 2 + R ** 2) ** (3 / 2))
B_true = np.array([0, 0, Bz_true])
coil = FourierRZCoil(0.1)
coils = CoilSet.linspaced_linear(
coil, displacement=[0, 0, 10], n=10, endpoint=True
)
coils.current = I
np.testing.assert_allclose(coils.current, I)
coils.grid = 100
assert coils.grid.num_nodes == 100
B_approx = coils.compute_magnetic_field([0, 0, z[-1]], basis="xyz")[0]
np.testing.assert_allclose(B_true, B_approx, rtol=1e-3, atol=1e-10)
def test_linspaced_angular(self):
"""field from uniform toroidal solenoid"""
R = 10
N = 50
I = 1
Bp_true = np.sum(1e-7 * 4 * np.pi * N * I / 2 / np.pi / R)
B_true = np.array([0, Bp_true, 0])
coil = FourierPlanarCoil()
coil.current = I
coils = CoilSet.linspaced_angular(coil, n=N)
coils.grid = 100
assert all([coil.grid.num_nodes == 100 for coil in coils])
B_approx = coils.compute_magnetic_field([10, 0, 0], basis="rpz")[0]
np.testing.assert_allclose(B_true, B_approx, rtol=1e-3, atol=1e-10)
def test_from_symmetry(self):
"""same as above, but different construction"""
R = 10
N = 48
I = 1
Bp_true = np.sum(1e-7 * 4 * np.pi * N * I / 2 / np.pi / R)
B_true = np.array([0, Bp_true, 0])
coil = FourierPlanarCoil()
coils = CoilSet.linspaced_angular(coil, angle=np.pi / 2, n=N // 4)
coils = CoilSet.from_symmetry(coils, NFP=4)
coils.grid = 100
assert all([coil.grid.num_nodes == 100 for coil in coils])
B_approx = coils.compute_magnetic_field([10, 0, 0], basis="rpz")[0]
np.testing.assert_allclose(B_true, B_approx, rtol=1e-3, atol=1e-10)
# with stellarator symmetry
NFP = 4
coil = FourierXYZCoil()
coil.rotate(angle=np.pi / N)
coils = CoilSet.linspaced_angular(
coil, I, [0, 0, 1], np.pi / NFP, N // NFP // 2
)
coils.grid = 100
assert coils.grid.num_nodes == 100
coils2 = CoilSet.from_symmetry(coils, NFP, True)
B_approx = coils2.compute_magnetic_field([10, 0, 0], basis="rpz")[0]
np.testing.assert_allclose(B_true, B_approx, rtol=1e-3, atol=1e-10)
def test_properties(self):
coil = FourierPlanarCoil()
coils = CoilSet.linspaced_linear(coil, n=4)
coils.grid = np.array([[0.0, 0.0, 0.0]])
np.testing.assert_allclose(
coils.compute_coordinates(),
np.array(
[
[12, 0, 0],
[12.5, 0, 0],
[13, 0, 0],
[13.5, 0, 0],
]
).reshape((4, 1, 3)),
)
np.testing.assert_allclose(coils.compute_curvature(), 1 / 2)
np.testing.assert_allclose(coils.compute_torsion(), 0)
TNB = coils.compute_frenet_frame(grid=np.array([[0.0, 0.0, 0.0]]), basis="xyz")
T = [foo[0] for foo in TNB]
N = [foo[1] for foo in TNB]
B = [foo[2] for foo in TNB]
np.testing.assert_allclose(
T,
np.array(
[
[0, 0, -1],
[0, 0, -1],
[0, 0, -1],
[0, 0, -1],
]
).reshape((4, 1, 3)),
atol=1e-12,
)
np.testing.assert_allclose(
N,
np.array(
[
[-1, 0, 0],
[-1, 0, 0],
[-1, 0, 0],
[-1, 0, 0],
]
).reshape((4, 1, 3)),
atol=1e-12,
)
np.testing.assert_allclose(
B,
np.array(
[
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
]
).reshape((4, 1, 3)),
atol=1e-12,
)
coils.grid = 100
np.testing.assert_allclose(coils.compute_length(), 2 * 2 * np.pi)
coils.translate([1, 1, 1])
np.testing.assert_allclose(coils.compute_length(), 2 * 2 * np.pi)
coils.flip([1, 0, 0])
coils.grid = np.array([[0.0, 0.0, 0.0]])
TNB = coils.compute_frenet_frame(grid=np.array([[0.0, 0.0, 0.0]]), basis="xyz")
T = [foo[0] for foo in TNB]
N = [foo[1] for foo in TNB]
B = [foo[2] for foo in TNB]
np.testing.assert_allclose(
T,
np.array(
[
[0, 0, -1],
[0, 0, -1],
[0, 0, -1],
[0, 0, -1],
]
).reshape((4, 1, 3)),
atol=1e-12,
)
np.testing.assert_allclose(
N,
np.array(
[
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
]
).reshape((4, 1, 3)),
atol=1e-12,
)
np.testing.assert_allclose(
B,
np.array(
[
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
]
).reshape((4, 1, 3)),
atol=1e-12,
)
def test_dunder_methods(self):
coil1 = FourierXYZCoil()
coils1 = CoilSet.from_symmetry(coil1, NFP=4)
coil2 = FourierPlanarCoil()
coils2 = coils1 + [coil2]
assert coils2[-1] is coil2
coils2 = coils1 + CoilSet(coil2)
assert coils2[-1] is coil2
with pytest.raises(TypeError):
coils3 = coils1 + FourierRZCurve()
with pytest.raises(TypeError):
coils1[-1] = FourierRZCurve()
coils1[-1] = coil2
assert coils1[-1] is coil2
coils1.insert(-1, coil2)
with pytest.raises(TypeError):
coils1.insert(-1, FourierRZCurve())
assert len(coils1) == 5
assert coils1[-1] is coil2
assert coils1[-2] is coil2
s = coils1[-2:]
assert s[-1] is coil2
del coils1[-2]
assert len(coils1) == 4
assert coils1[-1] is coil2
assert coils1[-2][0].__class__ is coil1.__class__
def test_repr():
coil = FourierRZCoil()
assert "FourierRZCoil" in str(coil)
assert "current=1" in str(coil)
coils = CoilSet.linspaced_angular(coil, n=4)
assert "CoilSet" in str(coils)
assert "4 submembers" in str(coils)
coils.name = "MyCoils"
assert "MyCoils" in str(coils)
|
# -*- coding: utf-8 -*-
__author__ = """Biagio Chirico"""
__email__ = 'biagio.chirico.86@gmail.com'
__version__ = '0.1.0'
from pyscholar import Query, Crawler, Article, Parser
|
def color_add():
pass
def color_delete():
pass
def new():
pass
|
import os
from time import time, sleep
from queue import Queue
from pathlib import Path
from multiprocessing import Process
from threading import Lock, Thread
from tempfile import mkstemp
from subprocess import run
from datetime import datetime
from json import dumps
from bisect import insort
from monitor_gui import CompressionMonitor
def run_compression(in_paths, video_path, metadata_path, fps, delete_images,
pix_fmt):
"""Run MP4 compression by calling FFmpeg.
Parameters
----------
in_paths : list of pathlib.Path
List of source images in the correct order.
video_path : pathlib.Path
Path to which the output video should be stored.
metadata_path : pathlib.Path
Path to which the newline-separated list of source image files
should be stored.
fps : float
Frames per second to be written to the video file's metadata.
delete_images : bool
Whether or not source images should be deleted afterward.
pix_fmt : str
-pix_fmt flag used for the output file in the FFmpeg call.
"""
# Build temporary file containing list of images
# see https://trac.ffmpeg.org/wiki/Concatenate for specs
f, filelist_path = mkstemp(suffix='.txt', text=True)
os.close(f)
print(f' >>> running compression: {video_path.name}')
lines = [f"file '{str(x.absolute())}'" for x in in_paths]
with open(filelist_path, 'w') as f:
f.write('\n'.join(lines))
# Call FFmpeg
run(['ffmpeg', '-r', str(fps), '-loglevel', 'error',
'-f', 'concat', '-safe', '0', '-i', str(filelist_path),
'-pix_fmt', pix_fmt, str(video_path)], check=True)
    # Write metadata
with open(metadata_path, 'w') as f:
lines = [str(x.absolute()) for x in in_paths]
f.write('\n'.join(lines))
# Clean up
if delete_images:
for img_file in in_paths:
img_file.unlink()
Path(filelist_path).unlink()
def _guarded_print(lock, *args, **kwargs):
lock.acquire()
print(*args, **kwargs)
lock.release()
def _thread_worker(thread_id, cmpr):
"""Entry point for the worker threads"""
while True:
task_spec = cmpr.job_queue.get()
if task_spec is None:
break
cam, args = task_spec
_guarded_print(
cmpr.log_lock,
f'>>> Thread {thread_id} working on {str(args[1].name)}'
)
cmpr.status_lock.acquire()
cmpr.busy_workers += 1
cmpr.status_lock.release()
# Actual invocation
start_time = time()
run_compression(*args)
end_time = time()
# Increment part count
cmpr.status_lock.acquire()
cmpr.parts_done[cam] += 1
cmpr.busy_workers -= 1
cmpr.last_job_walltime = end_time - start_time
cmpr.status_lock.release()
cmpr.job_queue.task_done()
def _scan_files(cmpr):
"""Entry point for the scanner thread"""
while True:
cmpr.status_lock.acquire()
all_done = cmpr.all_done
cmpr.status_lock.release()
if all_done:
break
cmpr.scan_lock.acquire()
# Detect newly available files
cmpr.add_new_files_to_pending_sets()
# Move pending frames
for cam in range(cmpr.num_cams):
while len(cmpr.pending_frames[cam]) >= cmpr.frames_per_video:
chunk = cmpr.pending_frames[cam].pop_chunk(
cmpr.frames_per_video)
cmpr.add_chunk_to_queue(cam, chunk)
cmpr.scan_lock.release()
sleep(cmpr.refresh_interval_secs)
def _log_status(cmpr):
"""Entry point for the logger thread"""
while True:
cmpr.status_lock.acquire()
all_done = cmpr.all_done
cmpr.status_lock.release()
if all_done:
with open(cmpr.log_path, 'a+') as f:
f.write('ALL DONE\n')
break
curr_time = time()
# get all stats
cmpr.status_lock.acquire()
status = {
'timestamp': str(datetime.now()),
'refresh_interval': cmpr.refresh_interval_secs,
'num_cams': cmpr.num_cams,
'fps': cmpr.fps,
'nprocs': cmpr.num_procs,
'nprocs_running': cmpr.busy_workers,
'qsize': cmpr.job_queue.qsize(),
'last_job_walltime': cmpr.last_job_walltime,
'nvids_made': sum(cmpr.parts_done),
'frames_pending': [len(cmpr.pending_frames[cam])
for cam in range(cmpr.num_cams)]
}
cmpr.status_lock.release()
# write to file
with open(cmpr.log_path, 'a+') as f:
f.write(dumps(status) + '\n')
        # guard against a negative sleep if writing the log took longer than the interval
        sleep(max(0.0, curr_time + cmpr.log_interval_secs - time()))
class DanglingFrames:
def __init__(self, camera, data_dir):
self.camera = camera
self.data_dir = data_dir
self.frame_ids = []
self.frame_ids_set = set() # same as frame_ids, for easier search
self.discard_frames_before = 0
def __len__(self):
return len(self.frame_ids_set)
def scan(self):
files = (self.data_dir / 'images').glob(f'camera_{self.camera}*.jpg')
for img_path in files:
frame = int(img_path.name.replace('.jpg', '').split('_')[-1])
if frame < self.discard_frames_before:
# arrived way too late, discard
continue
if frame not in self.frame_ids_set:
insort(self.frame_ids, frame)
self.frame_ids_set.add(frame)
def pop_chunk(self, chunk_size, block_time_secs=1):
if chunk_size == -1:
chunk_size = len(self)
if not chunk_size:
return []
if ((len(self) < chunk_size) or
(self.frame_ids[chunk_size - 1] - self.frame_ids[0]
>= chunk_size)):
# there are frames that haven't arrived yet, wait a bit
sleep(block_time_secs)
self.scan()
chunk_ids = self.frame_ids[:chunk_size]
self.frame_ids = self.frame_ids[chunk_size:]
self.frame_ids_set -= set(chunk_ids)
self.discard_frames_before = chunk_ids[-1] + 1
return [self.data_dir / f'images/camera_{self.camera}_img_{x}.jpg'
for x in chunk_ids]
class Mp4Compressor:
"""Manager of the background MP4 compression of camera data. One
``Mp4Compressor`` object should be associated with each recording.
The user specifies, among other misc information, the intended
length of each video part. This manager, in turn, binds the JPEGs
into an MP4 every time a sufficient number of frames is available.
The videos can be generated in parallel, allowing the user to
generate videos using multiple CPU cores when recording with many
cameras at high frame rates. Each compressor also spawns a simple
    GUI monitor.
Example
-------
The external API of the compressor consists of the ``.start()``
and ``.stop()`` methods.
>>> compressor = Mp4Compressor(...)
>>> compressor.start() # this returns immediately
>>> sleep(100) # compression runs in background
>>> compressor.stop() # this blocks briefly
Implementation
--------------
There are two key data structures in this class: ``pending_frames``
and ``job_queue``. The "lifecycle" of data is as follows:
- Calculate the intended number of frames per video part `N`
(from the intended video part length and the FPS).
    - Once the scanner thread notices that a camera has written a frame to the file
system, the frame is initially registered at ``pending_frames``.
- The frame stays there until there are `N` frames from the camera
in question in ``pending_frames``.
- The earliest `N` frames are then removed from ``pending_frames``.
A compression task (containing the list of input frames, output
video path, etc) is specified for this batch of frames. The job
spec is added to ``job_queue``.
- Worker threads monitor the ``job_queue``, and execute the
compression jobs that are in the queue.
Each ``Mp4Compressor`` object manages M + 2 threads and up to M + 1
processes (`M` being ``num_procs``):
- Scanner thread (x1): Scans the file system periodically and add
newly arrived frames to ``pending_frames``. If ``pending_frames``
is long enough, make job specs and push the specs to
``job_queue``. Note that since this thread doesn't do any actual
heavy-lifting, it should never block.
- Worker threads (xM): Run the compression jobs posted on
``job_queue`` through systems calls to ``ffmpeg``, which are
managed by ``subprocess.run``.
- Logger thread (x1): Writes the current stats of the compressor
to a log file, which can then be read from the GUI.
- GUI process (x1): Displays the current status. It has to be a
      separate process because Tk doesn't support multithreading.
- FFmpeg processes (x up to M): Spawned by worker threads.
The log is a text file where each line is either:
- a JSON string containing the status of the compressor (including
a timestamp), OR
- the literal string ``"ALL DONE"``, signaling termination.
"""
def __init__(self, fps, data_dir, num_cams, log_path,
delete_images=True, pix_fmt='yuv420p', video_length_secs=300,
refresh_interval_secs=1, num_procs=4, log_interval_secs=1):
# Initialize static properties
self.fps = fps
self.data_dir = data_dir
self.num_cams = num_cams
self.delete_images = delete_images
self.pix_fmt = pix_fmt
self.video_length_secs = video_length_secs
self.refresh_interval_secs = refresh_interval_secs
self.frames_per_video = int(fps * video_length_secs)
self.num_procs = num_procs
self.log_path = log_path
self.log_interval_secs = log_interval_secs
# Initialize dynamic data structures and variables
self.pending_frames = [DanglingFrames(cam, data_dir)
for cam in range(num_cams)]
self.job_queue = Queue()
self.parts_done = [0] * num_cams
self.parts_enqueued = [0] * num_cams
self.latest_frame_per_cam = [-1] * num_cams
self.busy_workers = 0
self.last_job_walltime = None
self.all_done = False
# Initialize parallelization setup
self.status_lock = Lock()
self.log_lock = Lock()
self.scan_lock = Lock()
self.worker_threads = [
Thread(target=_thread_worker, args=(i, self), daemon=True)
for i in range(num_procs)
]
self.scanner_thread = Thread(target=_scan_files, args=(self,),
daemon=True)
self.logger_thread = Thread(target=_log_status, args=(self,),
daemon=True)
def start(self):
for thread in self.worker_threads:
thread.start()
self.scanner_thread.start()
self.logger_thread.start()
def stop(self):
# ASSUME any delay in IO (eg camera grab/save) would be within 1s
sleep(1)
# Add remaining frames to queue
self.scan_lock.acquire()
self.add_new_files_to_pending_sets()
for cam in range(self.num_cams):
chunk = self.pending_frames[cam].pop_chunk(-1)
if chunk:
self.add_chunk_to_queue(cam, chunk)
self.scan_lock.release()
# Now we just wait for everything to finish. This is blocking.
self.job_queue.join()
self.status_lock.acquire()
self.all_done = True
self.status_lock.release()
self.scanner_thread.join()
for _ in range(self.num_procs):
self.job_queue.put(None)
for thread in self.worker_threads:
thread.join()
self.logger_thread.join()
print('>>> All done!')
def add_chunk_to_queue(self, cam, chunk):
video_path = (self.data_dir / 'videos'
/ f'camera_{cam}_part_{self.parts_enqueued[cam]}.mp4')
self.parts_enqueued[cam] += 1
metadata_path = Path(str(video_path).replace('.mp4', '.list'))
args = (chunk, video_path, metadata_path, self.fps,
self.delete_images, self.pix_fmt)
self.job_queue.put((cam, args))
def add_new_files_to_pending_sets(self):
for frames in self.pending_frames:
frames.scan()
def init_monitor_gui(log_path):
monitor = CompressionMonitor(log_path)
monitor.mainloop()
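# --- hedged sketch, not part of the original module ---
# Illustrates how a client (normally the C++ recording GUI) could drive the
# FIFO protocol handled in the __main__ block below. The request string and
# directory are made up for illustration; only the FIFO path is taken from
# the module itself.
def _example_send_request(request, fifo_path='/tmp/mp4CompressorComm.fifo'):
    """Write a single protocol line, e.g. 'START,/data/run1,30,4,300' or 'STOP,/data/run1'."""
    with open(fifo_path, 'w') as fifo_out:
        fifo_out.write(request + '\n')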
if __name__ == '__main__':
"""The intent to create/destroy ``Mp4Compressor`` objects is
communicated through a named pipe (via a FIFO file) from the C++
recording GUI. Each request takes one line (ends with \\n). The
protocol is as follows:
- ``"START,<data_dir>,<fps>,<num_cams>"``
This literal string (where the base data directory, FPS, number
of cameras are substituted accordingly) signals the initiation
of a compressor.
- ``"STOP,<data_dir>"``
This signals the termination of a compressor.
- ``"EXIT"``
This signals the termination of the program.
"""
FIFO_PATH = '/tmp/mp4CompressorComm.fifo'
if os.path.exists(FIFO_PATH):
os.unlink(FIFO_PATH)
if not os.path.exists(FIFO_PATH):
os.mkfifo(FIFO_PATH)
compressors = {}
monitor_processes = {}
fifo = open(FIFO_PATH, 'r')
while True:
request = fifo.readline().strip()
if not request:
continue
print(f'>>> Got request: "{request}"')
cmd = request.split(',')[0]
if cmd == 'START':
# Parse command
cmd, data_dir, fps, num_cams, video_length = request.split(',')
fps = float(fps)
data_dir = Path(data_dir)
num_cams = int(num_cams)
video_length = int(video_length)
(data_dir / 'videos').mkdir(exist_ok=True)
log_path = data_dir / 'compression_log.txt'
compressors[str(data_dir)] = Mp4Compressor(
fps, data_dir, num_cams, log_path,
num_procs=7, delete_images=False,
video_length_secs=video_length
)
compressors[str(data_dir)].start()
monitor_processes[str(data_dir)] = Process(
target=init_monitor_gui, args=(log_path,), daemon=True
)
monitor_processes[str(data_dir)].start()
elif cmd == 'STOP':
# Parse command
cmd, data_dir = request.split(',')
compressors[data_dir].stop()
monitor_processes[data_dir].join()
elif cmd == 'EXIT':
break
else:
raise ValueError(f'Unrecognized command "{cmd}" from FIFO file')
|
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnabla_rl.builders.explorer_builder import ExplorerBuilder # noqa
from nnabla_rl.builders.model_builder import ModelBuilder # noqa
from nnabla_rl.builders.preprocessor_builder import PreprocessorBuilder # noqa
from nnabla_rl.builders.replay_buffer_builder import ReplayBufferBuilder # noqa
from nnabla_rl.builders.solver_builder import SolverBuilder # noqa
|
''' This is the main module that runs the api resources & models '''
''' Its an improvement of REST_api_with_DB.py '''
from flask import Flask
from flask_restful import Api
from flask_jwt import JWT
from security import authenticate, identity
from resources.users import UserRegistration, UserCheck
from resources.items import Item, Items
from resources.stores import Store, Stores
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False  # Turns off the Flask-SQLAlchemy object modification tracker (SQLAlchemy's own tracking stays on)
app.secret_key = 'myveryfirstjwtsecuredapiwihadb'
api = Api(app)
''' Using a flask decorator to ensure the db creation function runs before any
request to the app '''
@app.before_first_request
def setup_sqlite_dB():
db_obj.create_all()
jwt = JWT(app, authenticate, identity)
api.add_resource(Item, '/item/<string:name>')
api.add_resource(Items, '/items')
api.add_resource(Store, '/store/<string:name>')
api.add_resource(Stores, '/stores')
api.add_resource(UserRegistration, '/user_reg')
api.add_resource(UserCheck, '/user/<string:name>')
if __name__ == '__main__':
from sqlalchemy_init import db_obj #Import here to avoid circular imports
db_obj.init_app(app)
app.run(debug=True)
|
# *****************************************************************************
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See NOTICE file for details.
#
# *****************************************************************************
import sys
import jpype
import common
class ComparableTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
def testComparable(self):
a = jpype.java.time.Instant.ofEpochSecond(10000000)
b = jpype.java.time.Instant.ofEpochSecond(10000001)
self.assertFalse(a < a)
self.assertFalse(a > a)
self.assertTrue(a >= a)
self.assertTrue(a <= a)
self.assertTrue(a == a)
self.assertFalse(a != a)
self.assertTrue(a < b)
self.assertFalse(a > b)
self.assertFalse(a >= b)
self.assertTrue(a <= b)
self.assertFalse(a == b)
self.assertTrue(a != b)
def testComparableHash(self):
i = jpype.java.math.BigInteger("1000000000000")
self.assertIsInstance(hash(i), int)
def testComparableNull(self):
Instant = jpype.JClass("java.time.Instant")
i1 = Instant.parse("1970-01-01T00:00:00Z")
i3 = jpype.JObject(None, Instant)
self.assertTrue(i1 == i1)
self.assertFalse(i1 == i3)
self.assertFalse(i3 == i1)
self.assertTrue(i1 != i3)
self.assertTrue(i3 != i1)
with self.assertRaises(ValueError):
print(i1 < i3)
with self.assertRaises(ValueError):
print(i1 <= i3)
with self.assertRaises(ValueError):
print(i1 > i3)
with self.assertRaises(ValueError):
print(i1 >= i3)
with self.assertRaises(ValueError):
print(i3 < i1)
with self.assertRaises(ValueError):
print(i3 <= i1)
with self.assertRaises(ValueError):
print(i3 > i1)
with self.assertRaises(ValueError):
print(i3 >= i1)
with self.assertRaises(ValueError):
print(i3 < i3)
with self.assertRaises(ValueError):
print(i3 <= i3)
with self.assertRaises(ValueError):
print(i3 > i3)
with self.assertRaises(ValueError):
print(i3 >= i3)
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from tournament.models import Tournament
class User(AbstractUser):
is_tournament_manager = models.BooleanField(default=False)
managed_tournaments = models.ManyToManyField(Tournament, blank=True)
|
# core packages for API v1
from rest_framework import generics
from rest_framework.generics import get_object_or_404
from .models import Curso, Avaliacao
from .serializers import CursoSerializer, AvaliacaoSerializer
# core packages for API v2
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework import mixins
from rest_framework import permissions
from .permissions import IsSuperUser
# Version 1
# ----------------------------------------------------------------------------------------------------
class CursosAPIView(generics.ListCreateAPIView):
queryset = Curso.objects.all()
serializer_class = CursoSerializer
class CursoAPIView(generics.RetrieveUpdateDestroyAPIView):
queryset = Curso.objects.all()
serializer_class = CursoSerializer
class AvaliacoesAPIView(generics.ListCreateAPIView):
queryset = Avaliacao.objects.all()
serializer_class = AvaliacaoSerializer
def get_queryset(self):
if self.kwargs.get("curso_pk"):
return self.queryset.filter(curso_id=self.kwargs.get("curso_pk"))
return self.queryset.all()
class AvaliacaoAPIView(generics.RetrieveUpdateDestroyAPIView):
queryset = Avaliacao.objects.all()
serializer_class = AvaliacaoSerializer
def get_object(self):
if self.kwargs.get("curso_pk"):
return get_object_or_404(self.get_queryset(),
curso_id=self.kwargs.get("curso_id"),
pk=self.kwargs.get("avaliacao_pk"))
return get_object_or_404(self.get_queryset(), pk=self.kwargs.get("avaliacao_pk"))
# Version 2
# ----------------------------------------------------------------------------------------------------
class CursoViewSet(viewsets.ModelViewSet):
permission_classes = (IsSuperUser, permissions.DjangoModelPermissions, )
queryset = Curso.objects.all()
serializer_class = CursoSerializer
@action(detail=True, methods=['get'])
def avaliacoes(self, request, pk=None):
self.pagination_class.page_size = 1
avaliacoes = Avaliacao.objects.filter(curso_id=pk)
page = self.paginate_queryset(avaliacoes)
if page is not None:
serializer = AvaliacaoSerializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = AvaliacaoSerializer(avaliacoes, many=True)
return Response(serializer.data)
'''
old code, commented out:
class AvaliacaoViewSet(viewsets.ModelViewSet):
queryset = Avaliacao.objects.all()
serializer_class = AvaliacaoSerializer
'''
class AvaliacaoViewSet(mixins.ListModelMixin, mixins.CreateModelMixin, mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, viewsets.GenericViewSet):
queryset = Avaliacao.objects.all()
serializer_class = AvaliacaoSerializer
# ----------------------------------------------------------------------------------------------------
|
import os
import django
from celery import Celery, shared_task
from celery.schedules import crontab
from celery.signals import task_success
from django.conf import settings
from django.utils import timezone
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_celery_demo.settings')
django.setup()
app = Celery('django_celery_demo')
app.config_from_object('django.conf:settings')  # load the Celery configuration from the Django settings module
app.now = timezone.now  # use the same time/timezone handling as Django
# autodiscover task functions defined in each Django app's tasks.py
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
# hard-coded periodic task schedule for celery beat
app.conf.beat_schedule = {
'aa': {
'task': 'add',
'schedule': crontab(minute="*/1"),
'args': (2, 4)
},
}
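# --- hedged sketch, not part of the original demo ---
# The schedule above refers to a task registered under the name 'add'. In a
# real project that task would typically live in an app's tasks.py; defining
# it here with an explicit name= keeps this illustration self-contained.
@shared_task(name='add')
def add(x, y):
    # trivial example task matching the 'add' entry in beat_schedule
    return x + y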
# a simple example task
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
# run this task asynchronously
debug_task.delay()
|
# MIT License
# Copyright (c) 2017 Luca Angioloni and Francesco Pegoraro
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PyQt5.QtCore import (Qt, QObject, pyqtSignal, QThread)
import time
import speech_recognition as sr
from subprocess import call
from Bot import Bot
class Speech_DialogManager(QThread):
"""
Class that provides an interface for the dialog and actions components.
Attributes:
rec_on Qt signal emitted when the system is ready to listen and starts listening
rec_off Qt signal emitted when the system stops listening
        updated         Qt signal emitted when a new dialog line needs to be rendered
finished Qt signal emitted when the transaction with the user is over
active Boolean status of the dialog process
        recognizer      Recognizer object (from the speech_recognition package)
username current user username
products vending machine products data
bot Bot class object that manages the conversation
"""
    rec_on = pyqtSignal()  # in order to work it has to be defined outside the constructor
    rec_off = pyqtSignal()  # in order to work it has to be defined outside the constructor
    updated = pyqtSignal(object, object)  # in order to work it has to be defined outside the constructor
    finished = pyqtSignal()  # in order to work it has to be defined outside the constructor
def __init__(self):
super().__init__()
self.active = False
self.recognizer = sr.Recognizer()
self.recognizer.energy_threshold = 2000
self.username = ''
def setProdData(self, products_data):
"""
Method to set the products data
Args:
products_data the data
"""
self.products = products_data
def set_username(self, name):
"""
Method to set the current user name
Args:
name the user name
"""
self.username = name
self.bot = Bot(self.products)
self.bot.set_user_name(self.username)
def record_and_understand(self):
"""Method called to listen, record and understand what users say"""
with sr.Microphone() as source:
print("Say something!")
call(["python3", "bip.py"])
self.rec_on.emit()
audio = self.recognizer.listen(source, timeout=6.0)
print("stopped recording")
self.rec_off.emit()
# Speech recognition using Google Speech Recognition
try:
# for testing purposes, we're just using the default API key
# to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
# instead of `r.recognize_google(audio)`
said = self.recognizer.recognize_google(audio, language='it')
print("You said: " + said)
return said
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
return 'impossibile capire'
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
return 'richieste speech-to-text terminate'
def write(self):
"""Method called to input dialog sentences via terminal input (used for debugging)"""
usersays = input("Say something!")
return usersays
def loop(self):
"""Method called to initialize and start the dialog Thread"""
self.start()
def deactivate(self):
"""Method called to stop and deactivate this Thread"""
self.active = False
self.quit()
if self.isRunning():
self.quit()
def sayhi(self, greetings):
"""
Method that starts the speak script to perform text to speech.
Args:
greetings the sentence to pass to the speak script
"""
call(["python3", "speak.py", greetings])
def run(self):
"""Main loop of this Thread"""
self.active = True
greetings = "Ciao "+str(self.username)+" cosa ti serve? Parla dopo il bip."
self.updated.emit(greetings, 0)
self.sayhi(greetings)
while self.active:
            user_says = self.record_and_understand()
            #user_says = self.write()  # debug alternative: keyboard input instead of speech
self.updated.emit(user_says.lower(), 0)
val, reply, bill = self.bot.reply(user_says.lower())
self.updated.emit(reply, bill)
call(["python3", "speak.py", reply])
if val is None:
self.finished.emit()
self.deactivate()
else:
if val:
print("Qui usare API e fare addebito")
self.finished.emit()
self.deactivate()
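# --- hedged sketch, not part of the original class ---
# Shows how a GUI component might connect the Qt signals documented in the
# class docstring. The widget and its slot names are hypothetical.
def _example_wire_signals(manager, widget):
    """Connect a Speech_DialogManager's signals to a hypothetical widget's slots."""
    manager.updated.connect(widget.render_dialog_line)   # receives (sentence, bill)
    manager.rec_on.connect(widget.show_listening_icon)
    manager.rec_off.connect(widget.hide_listening_icon)
    manager.finished.connect(widget.close_session)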
|
# coding: utf-8
"""
Sample code for the functools module.
Covers functools.lru_cache().
"""
import functools
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
class Sample(SampleBase):
def exec(self):
# --------------------------------------------------
# lru_cache()
# ---------------------------
        # Wraps the decorated function and memoizes results,
        # keeping up to maxsize entries.
        #
        # On a cache hit the wrapped function body is skipped and the
        # value comes straight from the cache, so it is fast.
        #
        # The original function object is reachable via __wrapped__.
        # The current cache state can be inspected with cache_info().
        # Call cache_clear() to clear the cache.
# --------------------------------------------------
pr('functools.lru_cache()', type(self.my_sum))
pr('functools.lru_cache()', self.my_sum.__wrapped__)
pr('functools.lru_cache()', self.my_sum.cache_info())
self.my_sum(1, 1) # no hit
self.my_sum(1, 2) # no hit
self.my_sum(1, 3) # no hit
self.my_sum(1, 1) # hit
pr('functools.lru_cache()', self.my_sum.cache_info())
self.my_sum(1, 2) # hit
self.my_sum(1, 3) # hit
self.my_sum(1, 4) # no hit
pr('functools.lru_cache()', self.my_sum.cache_info())
self.my_sum(1, 2) # hit
self.my_sum(1, 3) # hit
self.my_sum(1, 4) # hit
pr('functools.lru_cache()', self.my_sum.cache_info())
self.my_sum.cache_clear()
pr('functools.lru_cache()', self.my_sum.cache_info())
@functools.lru_cache(maxsize=32)
def my_sum(self, x, y):
return x + y
def go():
obj = Sample()
obj.exec()
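# --- Illustrative sketch (not part of the original sample) ------------------
# The same idea on a plain module-level function, which is the more common
# use of lru_cache. (Decorating an instance method, as above, also makes
# `self` part of the cache key and keeps instances alive while cached.)
@functools.lru_cache(maxsize=2)
def _cached_square(x):
    return x * x

def _lru_cache_sketch():
    _cached_square(2)   # miss
    _cached_square(2)   # hit
    _cached_square(3)   # miss
    _cached_square(4)   # miss, evicts the least recently used entry (x=2)
    # CacheInfo(hits=1, misses=3, maxsize=2, currsize=2)
    print(_cached_square.cache_info())
    _cached_square.cache_clear()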
|
# vim:set tabstop=4 shiftwidth=4 expandtab:
# Copyright (c) 2008, Humanized, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# enso.contrib.websearch
#
# ----------------------------------------------------------------------------
"""
An Enso plugin that makes the web search commands available.
Commands support search suggestions if defined.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
import locale
import logging
import threading
import urllib
import urllib2
import webbrowser
from abc import ABCMeta
from contextlib import closing
from functools import partial
try:
import regex as re
except ImportError:
import re
try:
import ujson as jsonlib
except ImportError as e:
logging.warning(
"Consider installing 'ujson' library for JSON parsing performance boost.")
import json as jsonlib
import enso.config
from enso import selection
from enso.commands import CommandManager
from enso.commands.decorators import warn_overriding
from enso.commands.factories import ArbitraryPostfixFactory
from enso.commands.mixins import CommandParameterWebSuggestionsMixin
from enso.contrib.scriptotron.tracebacks import safetyNetted
from enso.messages import displayMessage
from enso.utils import suppress
from enso.utils.html_tools import strip_html_tags
# Default suggestions polling interval in milliseconds (minimum allowed is 100)
SUGGESTIONS_POLLING_INTERVAL = 100
# Default maximum length of query the site accepts
#
# Google limits their search requests to 2048 bytes, so let's be
# nice and not send them anything longer than that.
#
# See this link for more information:
#
# http://code.google.com/apis/soapsearch/reference.html
MAX_QUERY_LENGTH = 2048
HTTP_HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
#'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
}
# ----------------------------------------------------------------------------
# The Google command
# ---------------------------------------------------------------------------
# Here we detect national TLD imposed by Google based on user's location.
# This is used in suggestion-search.
# It offers local suggestions and also speeds up the search.
LOCAL_TLD = "com"
def _get_google_local_domain():
global LOCAL_TLD
try:
with closing(urllib2.urlopen("http://www.google.com/", timeout=4)) as resp:
# Get redirect URL
redirected_url = resp.geturl()
domain = urllib2.urlparse.urlsplit(redirected_url).netloc
LOCAL_TLD = domain[domain.index("google.") + 7:]
except Exception as e:
logging.warning("Error parsing google.com redirect TLS: %s", e)
else:
logging.info("Google local domain has been set to .%s", LOCAL_TLD)
t = threading.Thread(target=_get_google_local_domain)
t.setDaemon(True)
t.start()
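# For example, a user in Germany is typically redirected to
# "http://www.google.de/", in which case LOCAL_TLD becomes "de" and the
# search/suggestion URLs below target google.de instead of google.com.
# (Illustrative value; the actual redirect depends on the user's location.)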
@warn_overriding
class AbstractSearchCommandFactory(CommandParameterWebSuggestionsMixin, ArbitraryPostfixFactory):
"""
Implementation of the web-search command.
"""
__metaclass__ = ABCMeta
def __init__(self, command_name, suggest, polling_interval):
"""
Initializes the web-search command.
"""
assert isinstance(command_name, basestring)
assert len(command_name) > 0
super(AbstractSearchCommandFactory, self).__init__()
# super() should call the mixin init properly
# CommandParameterWebSuggestionsMixin.__init__(self)
self.command_name = command_name
self.parameter = None
self.do_suggestions = suggest
self.setSuggestionsPollingInterval(polling_interval)
@safetyNetted
def run(self):
"""
Runs the web-search command.
"""
if self.parameter is not None:
text = self.parameter.decode()
# '...' gets replaced with current selection
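            # e.g. the typed argument "define ..." with "polymorphism" selected
            # becomes "define  polymorphism " before whitespace is collapsed
            # below. (Hypothetical example added for clarity.)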
if "..." in text:
seldict = selection.get()
to_replace = " %s " % seldict.get(
"text", u"").strip().strip("\0")
text = text.replace("...", to_replace)
text = re.sub(r"\s+", " ", text)
text = re.sub(r"\r\n", " ", text)
text = re.sub(r"\n", " ", text)
else:
seldict = selection.get()
text = seldict.get("text", u"")
text = re.sub(r"\s+", " ", text)
text = text.strip().strip("\0")
if not text:
displayMessage("<p>No text was selected.</p>")
return
if len(text) > MAX_QUERY_LENGTH:
displayMessage("<p>Your query is too long.</p>")
return
# For compatibility with older core, use default locale if setting
# is not used in the config...
languageCode, _ = locale.getdefaultlocale()
if languageCode:
language = languageCode.split("_")[0]
else:
language = "en"
# The following is standard convention for transmitting
# unicode through a URL.
text = urllib.quote_plus(text.encode("utf-8"))
url = self.BASE_URL % {
"local_tld": LOCAL_TLD, # Used just for Google services
"langcode": language,
"query": text,
}
# Catch exception, because webbrowser.open sometimes raises exception
# without any reason
try:
webbrowser.open_new_tab(url)
except Exception as e:
logging.warning(e)
def onSuggestQueryError(self, url_or_request, exception):
pass
def _generateCommandObj(self, parameter=None):
self.parameter = parameter
if self.parameter is not None:
self.setDescription(
u"Performs %s search for \u201c%s\u201d."
% (self.command_name, self.parameter)
)
else:
self.setDescription(
u"Performs %s search on the selected or typed text." % (
self.command_name)
)
return self
class ConfigurableSearchCommandFactory(AbstractSearchCommandFactory):
remove_google_jsonp_wrapper = partial(
re.compile(r"^(?:window.google.ac.h\()?(.*?)\)?$").sub,
r"\1"
)
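    # Example (hypothetical payload): some Google suggestion endpoints wrap
    # their JSON in a JSONP callback, e.g.
    #   window.google.ac.h(["py", [["python", 0]]])
    # The substitution above strips the wrapper, leaving
    #   ["py", [["python", 0]]]
    # so decodeSuggestions() below can hand it to the JSON parser unchanged.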
def __init__(
self,
command_name,
command_prefix,
help_text,
base_url,
suggest,
suggestions_url,
parser,
is_json,
minimum_chars,
polling_interval):
super(ConfigurableSearchCommandFactory, self).__init__(
command_name, suggest, polling_interval)
self.HELP_TEXT = help_text
self.PREFIX = command_prefix
self.NAME = "%s{%s}" % (command_prefix, help_text)
self.BASE_URL = base_url
self.suggestions_url = suggestions_url
self.parser = parser
self.is_json = is_json
self.minimum_chars = max(1, minimum_chars)
self.setCacheId(
"ConfigurableSearch%s" % re.sub(
r"[^A-Za-z0-9]", "", command_prefix.strip()
).title()
)
def getSuggestionsUrl(self, text):
if not self.do_suggestions:
return None
if text is None or len(text.strip()) < self.minimum_chars:
return None
charset = "utf-8"
query = urllib.quote_plus(text.encode(charset))
# For compatibility with older core, use default locale if setting
# is not used in the config...
languageCode, _ = locale.getdefaultlocale()
if languageCode:
language = languageCode.split("_")[0]
else:
language = "en"
url = self.suggestions_url % {
"query": query,
"charset": charset,
"local_tld": LOCAL_TLD,
"langcode": language,
}
request = urllib2.Request(url, headers=HTTP_HEADERS)
return request
def decodeSuggestions(self, data, headers=None):
suggestions = []
charset = "utf-8"
if headers:
with suppress(Exception):
content_type = headers.get(
"Content-Type", headers.get("content-type", "")).lower()
if content_type and "charset=" in content_type:
charset = content_type.split("charset=")[-1]
try:
decoded = data.decode(charset)
except Exception as e:
logging.error(
"%s-suggest query unicode decoding failed: %s", self.name, e)
else:
try:
# Optionally remove JSONP function wrapper (google searches)
decoded = self.remove_google_jsonp_wrapper(decoded)
json = jsonlib.loads(decoded)
except Exception as e:
logging.error(
u"Error parsing JSON data: %s; data: '%s'", e, decoded)
else:
if json:
suggestions = [strip_html_tags(s) for s in self.parser(json)][:10] # Limit number of suggestions
return suggestions
# ----------------------------------------------------------------------------
# Plugin initialization
# ---------------------------------------------------------------------------
def load():
PLUGIN_CONFIG_PREFIX = "PLUGIN_WEBSEARCH"
RE_PLUGIN_CONFIG = re.compile(r"^%s_([a-zA-Z0-9]+)" % PLUGIN_CONFIG_PREFIX)
for plugin_name in (RE_PLUGIN_CONFIG.sub(r"\1", e) for e in dir(enso.config) if RE_PLUGIN_CONFIG.match(e)):
config_key = "%s_%s" % (PLUGIN_CONFIG_PREFIX, plugin_name)
try:
conf = getattr(enso.config, config_key)
command = ConfigurableSearchCommandFactory(
command_name=conf["name"],
command_prefix=conf["prefix"],
help_text=conf["argument"],
base_url=conf["base_url"],
suggest=conf["suggest"],
suggestions_url=conf["suggestions_url"],
parser=conf["result_parser"],
is_json=conf["is_json"],
minimum_chars=conf.get(
"minimum_chars", 1),
polling_interval=conf.get(
"polling_interval", SUGGESTIONS_POLLING_INTERVAL),
)
CommandManager.get().registerCommand(
command.NAME,
command
)
if not conf["suggest"]:
logging.info(
"%s command search-suggestions are turned off. "
"To turn it on, modify 'suggest' parameter of %s entry "
"in your .ensorc configuration file." %
(conf["name"], config_key)
)
except Exception as e:
logging.error(
"Error parsing/registering websearch command from enso.config: %s; %s",
config_key, e
)
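# --- Illustrative configuration sketch (not part of the plugin) --------------
# load() above looks for attributes named PLUGIN_WEBSEARCH_<something> in
# enso.config (typically populated from the user's .ensorc). A hypothetical
# entry wiring up a Wikipedia search command could look like the following;
# the URLs and the result_parser lambda are examples only, mirroring the keys
# read in load(), not values shipped with Enso:
#
#   PLUGIN_WEBSEARCH_WIKIPEDIA = {
#       "name": "Wikipedia",
#       "prefix": "wikipedia ",
#       "argument": "search terms",
#       "base_url": "https://%(langcode)s.wikipedia.org/w/index.php?search=%(query)s",
#       "suggest": True,
#       "suggestions_url": "https://%(langcode)s.wikipedia.org/w/api.php"
#                          "?action=opensearch&format=json&search=%(query)s",
#       "result_parser": lambda json: json[1],
#       "is_json": True,
#       "minimum_chars": 2,
#       "polling_interval": SUGGESTIONS_POLLING_INTERVAL,
#   }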
|