import os
from signal import pause
from pirotohomie.logging import setup_logging
import click
import yaml
def get_config_from_file(filename='config.yaml'):
if not os.path.isfile(filename):
raise ValueError('Config file %r does not exist!' % filename)
with open(filename, 'r') as f:
return yaml.safe_load(f.read())
@click.command()
@click.option('--log', '-l', type=str, help=u'log config')
@click.argument('config', type=click.Path(exists=True))
def main(config, log):
setup_logging(log)
config = get_config_from_file(config)
pause()
# except (KeyboardInterrupt, SystemExit):
# print("Quitting.")
|
import json
import requests
shashlik = input("Paste the hash\n>> ")
i = 0
while True:
r1 = requests.get("http://www.multiliker.com/service/view/?hash=" + shashlik)
middle = (json.loads(r1.text))["picture"]
print(middle)
headers = {"Content-Type": "application/x-www-form-urlencoded"}
payload = (
"hash="
+ shashlik
+ "&media="
+ middle
+ "&answer=%7B%22status%22%3A%20%22ok%22%7D"
)
r = requests.post(
"http://www.multiliker.com/service/like2/", data=payload, headers=headers
)
print(
"Status:" + json.loads(r.text)["result"] + "\n" + json.loads(r.text)["message"]
)
print("Кол-во балласов: " + str(json.loads(r.text)["customer"]["coins"]))
|
from django import forms
from .models import Profile
class LoginForm(forms.Form):
username = forms.CharField(max_length=100)
password = forms.CharField(max_length=100,widget=forms.PasswordInput)
class Resetform(forms.Form):
password = forms.CharField(max_length=100, widget=forms.PasswordInput)
confirm_password = forms.CharField(max_length=100, widget=forms.PasswordInput)
# def is_valid(self):
# self.cleaned_data
# return True
from django.core.exceptions import ValidationError
class ProfileForm(forms.Form):
first_name=forms.CharField(max_length=200,initial = "first name")
last_name=forms.CharField(max_length=200 ,initial = "last name")
# profile_pic = forms.FileField(max_length=200,allow_empty_file=True)
# birth_date=forms.DateField()
mobile_number=forms.IntegerField(initial = 242342342)
address=forms.CharField(max_length=500 ,initial = "your address")
country=forms.CharField(max_length=200 ,initial = "india")
#
# class Meta:
# model=Profile
# # fields=('first_name','last_name','mobile_number','address','country',)
# fields=('mobile_number','address','country',)
# # fields=('first_name','last_name','profile_pic','birth_date','mobile_number','address','country',)
#
# def clean(self):
# super(ProfileForm,self).clean()
# country=self.cleaned_data.get('country')
# if "india" not in country:
# raise ValidationError("Only Indian citizens are allowed")
# return country
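# A working sketch of the validation hinted at in the commented-out clean() above, implemented
# as a field-level clean method on a hypothetical subclass (illustrative only, not part of the app):
class ProfileFormWithValidation(ProfileForm):
    def clean_country(self):
        country = self.cleaned_data.get('country', '')
        if 'india' not in country.lower():
            raise ValidationError('Only Indian citizens are allowed')
        return country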
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for training GNN models."""
import numpy as np
import sonnet.v2 as snt
import tensorflow.compat.v2 as tf
from absl.testing import absltest, parameterized
from graph_attribution import experiments, featurization
from graph_attribution import graphnet_models as gnn_models
from graph_attribution import graphs as graph_utils
from graph_attribution import templates, training
class TrainingTests(parameterized.TestCase):
"""Basic tests for training a model."""
def _setup_graphs_labels(self, n_graphs):
"""Setup graphs and labels for a binary classification learning task."""
tensorizer = featurization.MolTensorizer()
smiles_pool = ['CO', 'CCC', 'CN1C=NC2=C1C(=O)N(C(=O)N2C)C', 'CCCO']
smiles = np.random.choice(smiles_pool, n_graphs)
graphs = graph_utils.smiles_to_graphs_tuple(smiles, tensorizer)
n_labels = len(graphs.nodes) if n_graphs == 1 else n_graphs
labels = np.random.choice([0, 1], n_labels).reshape(-1, 1)
return graphs, labels
def _setup_model(self, n_graphs):
target_type = templates.TargetType.globals if n_graphs > 1 else templates.TargetType.nodes
model = experiments.GNN(10, 10, 10, 1, gnn_models.BlockType('gcn'), 'relu',
target_type, 3)
return model
@parameterized.named_parameters(('constant', 1024, 256, 4),
('droplast', 1000, 256, 3))
def test_get_batch_indices(self, n, batch_size, expected_n_batches):
batch_indices = training.get_batch_indices(n, batch_size)
self.assertEqual(batch_indices.shape, (expected_n_batches, batch_size))
@parameterized.parameters([0.2, 1.0])
def test_augment_binary_task(self, fraction):
"""Check that data augmention sizes are correct."""
initial_n = 10
x, y = self._setup_graphs_labels(initial_n)
node_vec = np.zeros_like(x.nodes[0])
edge_vec = np.zeros_like(x.edges[0])
initial_positive = int(np.sum(y == 1))
aug_n = int(np.floor(fraction * initial_positive))
expected_n = initial_n + aug_n * 2
x_aug, y_aug = training.augment_binary_task(x, y, node_vec, edge_vec,
fraction)
self.assertEqual(graph_utils.get_num_graphs(x_aug), expected_n)
self.assertLen(y_aug, expected_n)
# Make sure half of the augmented examples are positive labels.
aug_positive = np.sum(y_aug == 1) - initial_positive
self.assertEqual(aug_positive, aug_n)
@parameterized.named_parameters(('onegraph', 1),
('minibatch', 25))
def test_make_tf_opt_epoch_fn(self, batch_size):
"""Make sure tf-optimized epoch gives a valid loss."""
x, y = self._setup_graphs_labels(batch_size)
model = self._setup_model(batch_size)
opt = snt.optimizers.Adam()
loss_fn = tf.keras.losses.BinaryCrossentropy()
opt_fn = training.make_tf_opt_epoch_fn(x, y, batch_size, model, opt,
loss_fn)
loss = opt_fn(x, y).numpy()
self.assertTrue(np.isfinite(loss))
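# Hypothetical sketch (not the library code): an implementation of batch-index generation
# consistent with test_get_batch_indices above, where the final partial batch is dropped
# and the remaining indices are reshaped to (n_batches, batch_size).
def _get_batch_indices_sketch(n, batch_size):
    n_batches = n // batch_size  # drop the last partial batch
    return np.arange(n_batches * batch_size).reshape(n_batches, batch_size)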
if __name__ == '__main__':
tf.config.experimental_run_functions_eagerly(True)
absltest.main()
|
import pytest
from featureflags.evaluations.clause import Clause
from featureflags.evaluations.constants import (CONTAINS_OPERATOR,
ENDS_WITH_OPERATOR,
EQUAL_OPERATOR,
EQUAL_SENSITIVE_OPERATOR,
GT_OPERATOR, IN_OPERATOR,
STARTS_WITH_OPERATOR)
from featureflags.ftypes import String
@pytest.mark.parametrize(
"op",
[
None,
"NOT_FOUND",
],
)
def test_evaluate_op(op):
clause = Clause(
attribute="email", id="", negate=False, op=op, values=["john@doe.com"]
)
got = clause.evaluate(None, None, String("john@doe.com"))
assert got is False
@pytest.mark.parametrize(
"op,method,expected",
[
(IN_OPERATOR, "in_list", True),
(EQUAL_OPERATOR, "equal", True),
(GT_OPERATOR, "greater_than", False),
(STARTS_WITH_OPERATOR, "starts_with", True),
(ENDS_WITH_OPERATOR, "ends_with", True),
(CONTAINS_OPERATOR, "contains", True),
(EQUAL_SENSITIVE_OPERATOR, "equal_sensitive", True),
],
)
def test_evaluate_string(mocker, op, method, expected):
clause = Clause(
attribute="email", id="", negate=False, op=op, values=["john@doe.com"]
)
m = mocker.patch.object(String, method, return_value=expected)
_string = String("john@doe.com")
got = clause.evaluate(None, None, _string)
assert got == expected
assert m.call_count == 1
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import torch.nn as nn
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None: residual = self.downsample(x)  # match the identity branch when shapes differ
out += residual
out = self.relu(out)
return out
class DepthNet(nn.Module):
def __init__(self):
super(DepthNet, self).__init__()
# conv1
self.conv1_1 = nn.Conv2d(3, 64, 3, padding=1)
self.bn1_1 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
self.relu1_1 = nn.ReLU(inplace=True)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.bn1_2 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
self.relu1_2 = nn.ReLU(inplace=True)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/2 2 layers
# conv2
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.bn2_1 = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
self.relu2_1 = nn.ReLU(inplace=True)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.bn2_2 = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
self.relu2_2 = nn.ReLU(inplace=True)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/4 2 layers
num_stages = 3
blocks = BasicBlock
num_blocks = [4, 4, 4]
num_channels = [32, 32, 128]
self.stage = self._make_stages(num_stages, blocks, num_blocks, num_channels)
self.transition1 = nn.Sequential(
nn.Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False),
nn.BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=True)
)
self.transition2 = nn.Sequential(
nn.Conv2d(32, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False),
nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=True)
)
def _make_one_stage(self, stage_index, block, num_blocks, num_channels):
layers = []
for i in range(0, num_blocks[stage_index]):
layers.append(
block(
num_channels[stage_index],
num_channels[stage_index]
)
)
return nn.Sequential(*layers)
def _make_stages(self, num_stages, block, num_blocks, num_channels):
branches = []
for i in range(num_stages):
branches.append(
self._make_one_stage(i, block, num_blocks, num_channels)
)
return nn.ModuleList(branches)
def forward(self, d):
#depth branch
d = self.relu1_1(self.bn1_1(self.conv1_1(d)))
d = self.relu1_2(self.bn1_2(self.conv1_2(d)))
d0 = self.pool1(d) # (128x128)*64
d = self.relu2_1(self.bn2_1(self.conv2_1(d0)))
d = self.relu2_2(self.bn2_2(self.conv2_2(d)))
d1 = self.pool2(d) # (64x64)*128
dt2 = self.transition1(d1)
d2 = self.stage[0](dt2)
d3 = self.stage[1](d2)
dt4 = self.transition2(d3)
d4 = self.stage[2](dt4)
return d0, d1, d2, d3, d4
def init_weights(self):
logger.info('=> Depth model init weights from normal distribution')
for m in self.modules():
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.normal_(m.weight, std=0.001)
for name, _ in m.named_parameters():
if name in ['bias']:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.normal_(m.weight, std=0.001)
for name, _ in m.named_parameters():
if name in ['bias']:
nn.init.constant_(m.bias, 0)
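# A quick smoke-test sketch (assuming a 1x3x256x256 input) that exercises DepthNet and prints
# the multi-scale output shapes described in forward(): d0 is 1/2 resolution with 64 channels,
# d1/d2/d3 are 1/4 resolution (128, 32 and 32 channels), and d4 is 1/4 resolution with 128 channels.
if __name__ == '__main__':
    import torch
    net = DepthNet()
    net.init_weights()
    with torch.no_grad():
        outputs = net(torch.randn(1, 3, 256, 256))
    for name, out in zip(['d0', 'd1', 'd2', 'd3', 'd4'], outputs):
        print(name, tuple(out.shape))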
|
# terrascript/data/mrcrilly/awx.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:12:44 UTC)
import terrascript
class awx_credential(terrascript.Data):
pass
class awx_credential_azure_key_vault(terrascript.Data):
pass
class awx_credentials(terrascript.Data):
pass
__all__ = [
"awx_credential",
"awx_credential_azure_key_vault",
"awx_credentials",
]
|
# -*- coding: utf-8 -*-
# file: main.py
# author: JinTian
# time: 11/03/2017 9:53 AM
# Copyright 2017 JinTian. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
import os, sys
import time
import tensorflow.compat.v1 as tf
from poems.model import RNNModel, opCollection
from poems.poems import process_poems, generate_batch, generate_add_mat
from progress.bar import Bar
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
if len(sys.argv) > 1: model_name = sys.argv[1]
else: model_name = input("select which corpus to use: ")
tf.app.flags.DEFINE_integer('batch_size', 128, 'batch size.')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'learning rate.')
# use relative path for portability.
tf.app.flags.DEFINE_string('model_dir', './model/%s' % model_name, 'model save path.')
tf.app.flags.DEFINE_string('file_path', './data/%s.txt' % model_name, 'file name of poems.')
tf.app.flags.DEFINE_string('log_path', "./log/train/%s" % model_name, 'training log path.')
tf.app.flags.DEFINE_string('model_prefix', model_name, 'model save prefix.')
tf.app.flags.DEFINE_integer('epochs', 30, 'train how many epochs.')
FLAGS = tf.app.flags.FLAGS
add_feature_dim = {
"sentense": {
"position": 9
},
"word": {
"vowel": 5,
"tune": 1
}
}
def easyTrain(session, endpoints, inputs, label, pos_data):
'''
One stochastic-gradient-descent step; the loss is recorded inside the returned summary.
* endpoints: list of `opCollection`s
* param inputs, label, pos_data: tuples of tensors: (placeholder, data)
* return a summary.
'''
_, _, summary = session.run(
[
endpoints[0].train_op, # train up_model
endpoints[1].train_op, # train down_model
endpoints[1].summary # get summary
], feed_dict = dict([inputs, label, pos_data])
# feed_dict: {
# input_data: batch_input,
# output_data: batch_output,
# pos_data: pos_mat,
# }
)
return summary
def run_training():
if not os.path.exists(FLAGS.model_dir):
os.makedirs(FLAGS.model_dir)
# poems_vector: 3-D ndarray, the corpus matrix; each layer is one poem line split into an
#   upper and a lower half-sentence (2 x ?), with every character encoded by its index
# word_to_int: pair of dicts mapping each character to its index
# vocabularies: pair of lists, the vocabulary sorted from most to least frequent
poems_vector, word_to_int, vocabularies = process_poems(FLAGS.file_path)
_, _, substr_len = poems_vector.shape
# The corpus matrix is split into chunks of batch_size samples.
# batches_inputs: 4-D ndarray, one chunk per block; each layer is one sample (2 * substr_len)
# batches_outputs: 4-D ndarray, batches_inputs shifted left by one position
batches_inputs, batches_outputs = generate_batch(FLAGS.batch_size, poems_vector, word_to_int)
graph = tf.Graph()
with graph.as_default():
# declare placeholders of shape of (batch_size, 2, substr_len)
input_data = tf.placeholder(tf.int32, [FLAGS.batch_size, 2, substr_len], name = "left_word")
output_targets = tf.placeholder(tf.int32, [FLAGS.batch_size, 2, substr_len], name = "right_word")
add_mat = tf.placeholder(tf.int32, [FLAGS.batch_size, 2, substr_len], name = "additional_feature")
# build the model
rnn = RNNModel(
model_name, num_layers=2, rnn_size=64, batch_size=FLAGS.batch_size, vocabularies=vocabularies,
add_dim = add_feature_dim, substr_len=substr_len
)
# get 2 endpoints
endpoints = rnn.train(
input_data=input_data, add_data=add_mat, label_data=output_targets, learning_rate=FLAGS.learning_rate
)
# keep only the most recent checkpoint file
saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
# session configuration
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config = config, graph = graph) as sess:
# init
sess.run(init_op)
# log
summary_writer = tf.summary.FileWriter(FLAGS.log_path, graph=graph)
# start_epoch: number of epochs already completed
start_epoch = 0
# look up the latest checkpoint
checkpoint = tf.train.latest_checkpoint(FLAGS.model_dir)
os.system('cls')
if checkpoint:
# restore from the checkpoint
saver.restore(sess, checkpoint)
print("## restore from checkpoint {0}".format(checkpoint))
start_epoch += int(checkpoint.split('-')[-1])
print('## start training...')
print("## run `tensorboard --logdir %s`, and view localhost:6006." % (os.path.abspath("./log/train/%s" % model_name)))
# n_chunk: number of chunks (batches) per epoch
n_chunk = len(poems_vector) // FLAGS.batch_size
tf.get_default_graph().finalize()
for epoch in range(start_epoch, FLAGS.epochs):
bar = Bar("epoch%d" % epoch, max=n_chunk)
for batch in range(n_chunk):
# train the both model
summary = easyTrain(
sess, endpoints,
inputs = (input_data, batches_inputs[batch]), label=(output_targets, batches_outputs[batch]),
pos_data = (add_mat, generate_add_mat(batches_inputs[batch], 'binary'))
)
# reduce IO
if batch % 16 == 0:
summary_writer.add_summary(summary, epoch * n_chunk + batch)
bar.next(16)
# save at the end of each epoch
saver.save(sess, os.path.join(FLAGS.model_dir, FLAGS.model_prefix), global_step=epoch)
bar.finish()
# save on exit
saver.save(sess, os.path.join(FLAGS.model_dir, FLAGS.model_prefix), global_step = epoch)
print('## Last epoch was saved; next run will start from epoch {}.'.format(epoch))
def main(argv=None):
start_time = time.time()
run_training()
elapsed = time.time() - start_time
print("%.1f minutes used." % (elapsed / 60))
if __name__ == '__main__':
tf.app.run()
|
import logging
from unittest.mock import patch
from dotenv import load_dotenv
from util.logger import init_logger, get_logger
load_dotenv()
job_id = 'job_123'
@patch.dict('os.environ', {'JOB_ID': job_id})
def test_init_logger():
assert job_id not in logging.root.manager.loggerDict, 'job id is not initialized'
init_logger()
assert job_id in logging.root.manager.loggerDict, 'job id is initialized'
@patch.dict('os.environ', {'JOB_ID': job_id})
def test_get_logger():
logger = get_logger()
assert logger == logging.getLogger('job_123'), 'logger instance is retrieved'
|
import requests
import json
from flask import Flask,jsonify
import os
import logging
app = Flask(__name__)
logger = logging.getLogger('Zabbix_integrator')
logger.setLevel({"INFO": logging.INFO,
"DEBUG": logging.DEBUG,
"WARNING": logging.WARNING,
"ERROR": logging.ERROR}.get(os.getenv("LOG_LEVEL", "INFO"))) # Default log level: INFO
stdout_handler = logging.StreamHandler()
stdout_handler.setFormatter(logging.Formatter('%(name)s - %(levelname)s - %(message)s'))
logger.addHandler(stdout_handler)
subscription = os.environ.get("SUBSCRIPTION")
sesam_jwt = os.environ.get("SESAM_JWT")
zabbix_server = os.environ.get("ZABBIX_SERVER")
zabbix_ip = os.environ.get("ZABBIX_IP")
user = os.environ.get("USER")
password = os.environ.get("PASSWORD")
host_name = os.environ.get("HOST_NAME")
@app.route("/notifications", methods=["GET","POST"])
def main():
login_data = get_login_data(user, password)
token = get_token(login_data)
notifications = get_notifications(sesam_jwt)
host_data = create_host_data(token, host_name)
host_id = get_host_id(host_data)
# getting node status from /health API
node_value = get_node_health()
node_item_data = create_item_data(token, "node-health", "node-health", host_id)
create_item(node_item_data)
push_data("node-health", node_value)
for notification in notifications:
if notification['_deleted'] == True:
continue
try:
item_name = notification['pipe_id']
req = get_extended_notification(notification['pipe_id'])
except KeyError:
item_name = "node-health"
continue
value = find_value(notification['status'], item_name, req)
item_data = create_item_data(token, item_name, item_name, host_id)
print(item_data)
create_item(item_data)
push_data(item_name, value)
return jsonify(notifications)
def get_node_health():
req = requests.get(url="https://sesam.bouvet.no/api/health".format(subscription), headers={'Authorization': 'bearer {}'.format(sesam_jwt)}, verify=False).json()
if req["status"] == "ok":
return 1
else:
logger.error("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
return 4
def find_value(status, item_name, notification = None):
if status == "ok":
return 1
if notification['name'] == "Read errors time":
return 2
if notification['name'] == "Write errors time":
return 2
if notification['name'] == "Pump failed":
return 3
def get_extended_notification(pipe_name):
req = requests.get(url="https://portal.sesam.io/api/subscriptions/{}/pipes/{}/notification-rules".format(subscription, pipe_name), timeout=180, headers={'Authorization': 'bearer {}'.format(sesam_jwt)}, verify=False)
if req.status_code != 200:
logger.error("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
raise AssertionError("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
return req.json()[0]
def push_data(item_key, value):
os.system('zabbix_sender -z {} -s "{}" -k {} -o {}'.format(zabbix_ip, host_name, item_key, value))
def get_host_id(host_data):
req = requests.get(url= "http://" + zabbix_server + "/zabbix/api_jsonrpc.php", data=json.dumps(host_data),headers={'Content-Type':'application/json'})
if req.status_code != 200:
logger.error("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
raise AssertionError("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
try:
return json.loads(req.text)['result'][0]['hostid']
except Exception as e:
logger.error("Unexpected error {}".format(e))
raise AssertionError("Unexpected error {}".format(e))
def create_host_data(token, host_name):
return {
"jsonrpc": "2.0",
"method": "host.get",
"params": {
"filter": {
"host": [
host_name
]
}
},
"auth": token,
"id": 1
}
req = requests.get("http://zabbix-test.bouvet.no/zabbix/api_jsonrpc.php",data=json.dumps(host_data),headers={'Content-Type':'application/json'})
def create_item_data(token, item_name, item_key, host_id):
return {
"jsonrpc": "2.0",
"method": "item.create",
"params": {
"name": item_name,
"key_": item_key,
"key": item_key,
"hostid": host_id,
"type": 2,
"value_type": 3,
"interfaceid": "1",
"delay": "10s"
},
"auth": token,
"id": 1
}
def get_login_data(user, password):
return {
"id":1,
"jsonrpc":"2.0",
"method":"user.login",
"auth": None,
"params":{"user":user,"password":password}
}
def get_token(login_data):
req = requests.get(url= "http://" + zabbix_server + "/zabbix/api_jsonrpc.php", data=json.dumps(login_data),headers={'Content-Type':'application/json'})
if req.status_code != 200:
logger.error("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
raise AssertionError("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
return json.loads(req.text)['result']
def get_notifications(sesam_jwt):
req = requests.get(url="https://portal.sesam.io/api/notifications-summary", timeout=180, headers={'Authorization': 'bearer {}'.format(sesam_jwt)}, verify=False)
if req.status_code != 200:
logger.error("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
raise AssertionError("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
return req.json()
def create_item(item_data):
req = requests.post(url = "http://" + zabbix_server + "/zabbix/api_jsonrpc.php",data=json.dumps(item_data),headers={'Content-Type':'application/json'})
if req.status_code != 200:
logger.error("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
raise AssertionError("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
return None
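# Illustrative refactor sketch (not wired into the code above): the post / status-check / parse
# pattern repeated in get_token, get_host_id and create_item could live in a single helper.
def zabbix_rpc(payload):
    url = "http://" + zabbix_server + "/zabbix/api_jsonrpc.php"
    req = requests.post(url, data=json.dumps(payload), headers={'Content-Type': 'application/json'})
    if req.status_code != 200:
        logger.error("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
        raise AssertionError("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
    return req.json()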
if __name__ == '__main__':
#main()
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.run(host='0.0.0.0', port=5000, debug=True, threaded=True)
|
import logging
import os, sys
import splunk
import splunk.entity
import splunk.Intersplunk
import json
logger = logging.getLogger(__name__)
splunkhome = os.environ['SPLUNK_HOME']
sys.path.append(os.path.join(splunkhome, 'etc', 'apps', 'trackme', 'lib'))
import rest_handler
import splunklib.client as client
class TrackMeHandlerLogicalGroups_v1(rest_handler.RESTHandler):
def __init__(self, command_line, command_arg):
super(TrackMeHandlerLogicalGroups_v1, self).__init__(command_line, command_arg, logger)
# Get the entire data sources collection as a Python array
def get_logical_groups_collection(self, request_info, **kwargs):
describe = False
# Retrieve from data
try:
resp_dict = json.loads(str(request_info.raw_args['payload']))
except Exception as e:
resp_dict = None
if resp_dict is not None:
try:
describe = resp_dict['describe']
if describe in ("true", "True"):
describe = True
except Exception as e:
describe = False
else:
# body is not required in this endpoint, if not submitted do not describe the usage
describe = False
if describe:
response = "{\"describe\": \"This endpoint retrieves the entire Logical Groups collection returned as a JSON array, it requires a GET call with no data required\"}"\
return {
"payload": json.dumps(json.loads(str(response)), indent=1),
'status': 200 # HTTP status code
}
# Get splunkd port
entity = splunk.entity.getEntity('/server', 'settings',
namespace='trackme', sessionKey=request_info.session_key, owner='-')
splunkd_port = entity['mgmtHostPort']
try:
collection_name = "kv_trackme_logical_group"
service = client.connect(
owner="nobody",
app="trackme",
port=splunkd_port,
token=request_info.session_key
)
collection = service.kvstore[collection_name]
# Render
return {
"payload": json.dumps(collection.data.query(), indent=1),
'status': 200 # HTTP status code
}
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
# Get group
def get_logical_groups_get_grp(self, request_info, **kwargs):
# By object_category and object
object_group_name = None
# query_string to find records
query_string = None
describe = False
# By object_category and object
key = None
# Retrieve from data
try:
resp_dict = json.loads(str(request_info.raw_args['payload']))
except Exception as e:
resp_dict = None
if resp_dict is not None:
try:
describe = resp_dict['describe']
if describe in ("true", "True"):
describe = True
except Exception as e:
describe = False
if not describe:
object_group_name = resp_dict['object_group_name']
else:
# body is required in this endpoint, if not submitted describe the usage
describe = True
if describe:
response = "{\"describe\": \"This endpoint retrieve a specific logial group record, it requires a GET call with the following information:\""\
+ ", \"options\" : [ { "\
+ "\"object_group_name\": \"name of the logical group\""\
+ " } ] }"
return {
"payload": json.dumps(json.loads(str(response)), indent=1),
'status': 200 # HTTP status code
}
# Define the KV query
query_string = '{ "object_group_name": "' + object_group_name + '" }'
# Get splunkd port
entity = splunk.entity.getEntity('/server', 'settings',
namespace='trackme', sessionKey=request_info.session_key, owner='-')
splunkd_port = entity['mgmtHostPort']
try:
collection_name = "kv_trackme_logical_group"
service = client.connect(
owner="nobody",
app="trackme",
port=splunkd_port,
token=request_info.session_key
)
collection = service.kvstore[collection_name]
# Get the current record
# Notes: the record is returned as an array, as we search for a specific record, we expect one record only
try:
record = collection.data.query(query=str(query_string))
key = record[0].get('_key')
except Exception as e:
key = None
# Render result
if key is not None and len(key)>2:
return {
"payload": json.dumps(collection.data.query_by_id(key), indent=1),
'status': 200 # HTTP status code
}
else:
return {
"payload": 'Warn: resource not found ' + str(key),
'status': 404 # HTTP status code
}
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
# Add a new group
def post_logical_groups_add_grp(self, request_info, **kwargs):
# By object_category and object
object_group_name = None
object_group_members = None
# object_group_min_green_percent is optional and set after data retrieve
# query_string to find records
query_string = None
describe = False
# By object_category and object
key = None
# Retrieve from data
try:
resp_dict = json.loads(str(request_info.raw_args['payload']))
except Exception as e:
resp_dict = None
if resp_dict is not None:
try:
describe = resp_dict['describe']
if describe in ("true", "True"):
describe = True
except Exception as e:
describe = False
if not describe:
object_group_name = resp_dict['object_group_name']
object_group_members = resp_dict['object_group_members']
# object_group_members is expected as a comma separated list of values
# We accept a comma with or without a space after the separator, let's remove any space after the separator
object_group_members = object_group_members.replace(", ", ",")
# Split by the separator
object_group_members = object_group_members.split(",")
# group min percentage is optional and set to 50% if not provided
try:
object_group_min_green_percent = resp_dict['object_group_min_green_percent']
except Exception as e:
object_group_min_green_percent = "50"
else:
# body is required in this endpoint, if not submitted describe the usage
describe = True
if describe:
response = "{\"describe\": \"This endpoint creates a new logical group, it requires a POST call with the following data required:\""\
+ ", \"options\" : [ { "\
+ "\"object_group_name\": \"name of the logical group to be created\", "\
+ "\"object_group_members\": \"comma separated list of the group members\", "\
+ "\"object_group_min_green_percent\": \"OPTIONAL: minimal percentage of hosts that need to be green for the logical group to be green, if unset defaults to 50. Recommended options for this value: 12.5 / 33.33 / 50\", "\
+ "\"update_comment\": \"OPTIONAL: a comment for the update, comments are added to the audit record, if unset will be defined to: API update\""\
+ " } ] }"
return {
"payload": json.dumps(json.loads(str(response)), indent=1),
'status': 200 # HTTP status code
}
# Retrieve from data
resp_dict = json.loads(str(request_info.raw_args['payload']))
# Update comment is optional and used for audit changes
try:
update_comment = resp_dict['update_comment']
except Exception as e:
update_comment = "API update"
# Define the KV query
query_string = '{ "object_group_name": "' + object_group_name + '" }'
# Get splunkd port
entity = splunk.entity.getEntity('/server', 'settings',
namespace='trackme', sessionKey=request_info.session_key, owner='-')
splunkd_port = entity['mgmtHostPort']
try:
collection_name = "kv_trackme_logical_group"
service = client.connect(
owner="nobody",
app="trackme",
port=splunkd_port,
token=request_info.session_key
)
collection = service.kvstore[collection_name]
# Audit collection
collection_name_audit = "kv_trackme_audit_changes"
service_audit = client.connect(
owner="nobody",
app="trackme",
port=splunkd_port,
token=request_info.session_key
)
collection_audit = service_audit.kvstore[collection_name_audit]
# update time for the object
import time
object_group_mtime = time.time()
# Get the current record
# Notes: the record is returned as an array, as we search for a specific record, we expect one record only
try:
record = collection.data.query(query=str(query_string))
key = record[0].get('_key')
except Exception as e:
key = None
record = json.dumps({"object_group_name": object_group_name,
"object_group_members": object_group_members,
"object_group_min_green_percent": str(object_group_min_green_percent),
"object_group_mtime": str(object_group_mtime)})
# Render result
if key is not None and len(key)>2:
# Update the record
collection.data.update(str(key), json.dumps({"object_group_name": object_group_name,
"object_group_members": object_group_members,
"object_group_min_green_percent": str(object_group_min_green_percent),
"object_group_mtime": str(object_group_mtime)}))
# Record an audit change
import time
current_time = int(round(time.time() * 1000))
user = request_info.user
try:
# Insert the record
collection_audit.data.insert(json.dumps({
"time": str(current_time),
"user": str(user),
"action": "success",
"change_type": "Logical group add",
"object": str(object_group_name),
"object_category": "logical_group",
"object_attrs": str(json.dumps(collection.data.query_by_id(key), indent=1)),
"result": "N/A",
"comment": str(update_comment)
}))
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
return {
"payload": json.dumps(collection.data.query_by_id(key), indent=1),
'status': 200 # HTTP status code
}
else:
# Insert the record
collection.data.insert(json.dumps({"object_group_name": object_group_name,
"object_group_members": object_group_members,
"object_group_min_green_percent": str(object_group_min_green_percent),
"object_group_mtime": str(object_group_mtime)}))
# Record an audit change
import time
current_time = int(round(time.time() * 1000))
user = request_info.user
try:
# Insert the record
collection_audit.data.insert(json.dumps({
"time": str(current_time),
"user": str(user),
"action": "success",
"change_type": "Logical group add",
"object": str(object_group_name),
"object_category": "logical_group",
"object_attrs": str(record),
"result": "N/A",
"comment": str(update_comment)
}))
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
return {
"payload": json.dumps(collection.data.query(query=str(query_string)), indent=1),
'status': 200 # HTTP status code
}
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
# Delete group
def delete_logical_groups_del_grp(self, request_info, **kwargs):
# By object_category and object
object_group_name = None
# query_string to find records
query_string = None
describe = False
# By object_category and object
key = None
# Retrieve from data
try:
resp_dict = json.loads(str(request_info.raw_args['payload']))
except Exception as e:
resp_dict = None
if resp_dict is not None:
try:
describe = resp_dict['describe']
if describe in ("true", "True"):
describe = True
except Exception as e:
describe = False
if not describe:
object_group_name = resp_dict['object_group_name']
else:
# body is required in this endpoint, if not submitted describe the usage
describe = True
if describe:
response = "{\"describe\": \"This endpoint deletes a logical group, it requires a DELETE call with the following data required:\""\
+ ", \"options\" : [ { "\
+ "\"object_group_name\": \"name of the logical group to be removed\", "\
+ "\"update_comment\": \"OPTIONAL: a comment for the update, comments are added to the audit record, if unset will be defined to: API update\""\
+ " } ] }"
return {
"payload": json.dumps(json.loads(str(response)), indent=1),
'status': 200 # HTTP status code
}
# Update comment is optional and used for audit changes
try:
update_comment = resp_dict['update_comment']
except Exception as e:
update_comment = "API update"
# Define the KV query
query_string = '{ "object_group_name": "' + object_group_name + '" }'
# Get splunkd port
entity = splunk.entity.getEntity('/server', 'settings',
namespace='trackme', sessionKey=request_info.session_key, owner='-')
splunkd_port = entity['mgmtHostPort']
try:
collection_name = "kv_trackme_logical_group"
service = client.connect(
owner="nobody",
app="trackme",
port=splunkd_port,
token=request_info.session_key
)
collection = service.kvstore[collection_name]
# Audit collection
collection_name_audit = "kv_trackme_audit_changes"
service_audit = client.connect(
owner="nobody",
app="trackme",
port=splunkd_port,
token=request_info.session_key
)
collection_audit = service_audit.kvstore[collection_name_audit]
# Get the current record
# Notes: the record is returned as an array, as we search for a specific record, we expect one record only
try:
record = collection.data.query(query=str(query_string))
key = record[0].get('_key')
except Exception as e:
key = None
# Render result
if key is not None and len(key)>2:
# Store the record for audit purposes
record = json.dumps(collection.data.query_by_id(key), indent=1)
# Remove the record
collection.data.delete(json.dumps({"_key":key}))
# Record an audit change
import time
current_time = int(round(time.time() * 1000))
user = request_info.user
try:
# Insert the record
collection_audit.data.insert(json.dumps({
"time": str(current_time),
"user": str(user),
"action": "success",
"change_type": "Logical group delete",
"object": str(object_group_name),
"object_category": "logical_group",
"object_attrs": str(record),
"result": "N/A",
"comment": str(update_comment)
}))
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
return {
"payload": "Record with _key " + str(key) + " was deleted from the logical groups collection.",
'status': 200 # HTTP status code
}
else:
return {
"payload": 'Warn: resource not found ' + str(key),
'status': 404 # HTTP status code
}
except Exception as e:
return {
'payload': 'Warn: exception2 encountered: ' + str(e) # Payload of the request.
}
# Associate a logical group with a member
def post_logical_groups_associate_group(self, request_info, **kwargs):
# By object
object_name = None
key = None
describe = False
# Retrieve from data
try:
resp_dict = json.loads(str(request_info.raw_args['payload']))
except Exception as e:
resp_dict = None
if resp_dict is not None:
try:
describe = resp_dict['describe']
if describe in ("true", "True"):
describe = True
except Exception as e:
describe = False
if not describe:
object_name = resp_dict['object']
key = resp_dict['key']
else:
# body is required in this endpoint, if not submitted describe the usage
describe = True
if describe:
response = "{\"describe\": \"This endpoint associates an object (data host or metric host) with an existing logical group (existing members"\
" of the logical groups are preserved and this object membership will be removed), "\
"it requires a POST call with the following data required:\""\
+ ", \"options\" : [ { "\
+ "\"object\": \"the name of the data host or the metric host\", "\
+ "\"key\": \"the KVstore unique key of the logical group\", "\
+ "\"update_comment\": \"OPTIONAL: a comment for the update, comments are added to the audit record, if unset will be defined to: API update\""\
+ " } ] }"
return {
"payload": json.dumps(json.loads(str(response)), indent=1),
'status': 200 # HTTP status code
}
# Retrieve from data
resp_dict = json.loads(str(request_info.raw_args['payload']))
# Update comment is optional and used for audit changes
try:
update_comment = resp_dict['update_comment']
except Exception as e:
update_comment = "API update"
# Get splunkd port
entity = splunk.entity.getEntity('/server', 'settings',
namespace='trackme', sessionKey=request_info.session_key, owner='-')
splunkd_port = entity['mgmtHostPort']
try:
collection_name = "kv_trackme_logical_group"
service = client.connect(
owner="nobody",
app="trackme",
port=splunkd_port,
token=request_info.session_key
)
collection = service.kvstore[collection_name]
# Audit collection
collection_name_audit = "kv_trackme_audit_changes"
service_audit = client.connect(
owner="nobody",
app="trackme",
port=splunkd_port,
token=request_info.session_key
)
collection_audit = service_audit.kvstore[collection_name_audit]
# Get the record
query_string = '{ "_key": "' + key + '" }'
try:
record = collection.data.query(query=str(query_string))
key = record[0].get('_key')
except Exception as e:
record = None
# Render result
if record is not None:
# get object_group_name
try:
object_group_name = record[0].get('object_group_name')
except Exception as e:
object_group_name = None
# get object_group_min_green_percent
try:
object_group_min_green_percent = record[0].get('object_group_min_green_percent')
except Exception as e:
object_group_min_green_percent = None
# update time for the object
import time
object_group_mtime = time.time()
# object: it can be defined, or not, if defined it can be a single item or a list
object_list = None
try:
object_list = record[0].get('object_group_members')
except Exception as e:
object_list = None
if object_list is None:
object_list = [ object_name ]
# Analyse the content
else:
# if is not a list, make it a list
if type(object_list) is not list:
# Split by the separator to convert as a list
object_list = object_list.split(",")
# append
object_list.append(object_name)
# this is a list, append if not in the list
else:
if object_name not in object_list:
# finally append the new object
object_list.append(object_name)
# define the new record
new_record = json.dumps({
"object_group_name": str(object_group_name),
"object_group_members": object_list,
"object_group_min_green_percent": str(object_group_min_green_percent),
"object_group_mtime": str(object_group_mtime)})
# Update the record
try:
collection.data.update(str(key), new_record)
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
# Record an audit change
import time
current_time = int(round(time.time() * 1000))
user = request_info.user
try:
# Insert the record
collection_audit.data.insert(json.dumps({
"time": str(current_time),
"user": str(user),
"action": "success",
"change_type": "associate logical group",
"object": str(object_name),
"object_category": "logical_groups",
"object_attrs": str(json.dumps(collection.data.query_by_id(key), indent=1)),
"result": "N/A",
"comment": str(update_comment)
}))
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
return {
"payload": json.dumps(collection.data.query_by_id(key), indent=1),
'status': 200 # HTTP status code
}
else:
return {
"payload": 'Warn: resource not found ' + str(key),
'status': 404 # HTTP status code
}
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
# Unassociate a member and a logical group
def post_logical_groups_unassociate(self, request_info, **kwargs):
# By object
object_name = None
describe = False
# Retrieve from data
try:
resp_dict = json.loads(str(request_info.raw_args['payload']))
except Exception as e:
resp_dict = None
if resp_dict is not None:
try:
describe = resp_dict['describe']
if describe in ("true", "True"):
describe = True
except Exception as e:
describe = False
if not describe:
object_name = resp_dict['object']
key = resp_dict['key']
else:
# body is required in this endpoint, if not submitted describe the usage
describe = True
if describe:
response = "{\"describe\": \"This endpoint unassociates an object (data host or metric host) from a logical group it is member of "\
"(existing associations of the logical groups are preserved), "\
"it requires a POST call with the following data required:\""\
+ ", \"options\" : [ { "\
+ "\"object\": \"the object name (data host or metric host) to remove association for\", "\
+ "\"key\": \"KVstore unique identifier of the logical group\", "\
+ "\"update_comment\": \"OPTIONAL: a comment for the update, comments are added to the audit record, if unset will be defined to: API update\""\
+ " } ] }"
return {
"payload": json.dumps(json.loads(str(response)), indent=1),
'status': 200 # HTTP status code
}
# Retrieve from data
resp_dict = json.loads(str(request_info.raw_args['payload']))
# Update comment is optional and used for audit changes
try:
update_comment = resp_dict['update_comment']
except Exception as e:
update_comment = "API update"
# Get splunkd port
entity = splunk.entity.getEntity('/server', 'settings',
namespace='trackme', sessionKey=request_info.session_key, owner='-')
splunkd_port = entity['mgmtHostPort']
try:
collection_name = "kv_trackme_logical_group"
service = client.connect(
owner="nobody",
app="trackme",
port=splunkd_port,
token=request_info.session_key
)
collection = service.kvstore[collection_name]
# Audit collection
collection_name_audit = "kv_trackme_audit_changes"
service_audit = client.connect(
owner="nobody",
app="trackme",
port=splunkd_port,
token=request_info.session_key
)
collection_audit = service_audit.kvstore[collection_name_audit]
# Get the record
query_string = '{ "_key": "' + key + '" }'
try:
record = collection.data.query(query=str(query_string))
key = record[0].get('_key')
except Exception as e:
record = None
# Render result
if record is not None and key is not None:
# get object_group_name
try:
object_group_name = record[0].get('object_group_name')
except Exception as e:
object_group_name = None
# get object_group_min_green_percent
try:
object_group_min_green_percent = record[0].get('object_group_min_green_percent')
except Exception as e:
object_group_min_green_percent = None
# update time for the object
import time
object_group_mtime = time.time()
# object: it can be defined, or not, if defined it can be a single item or a list
object_list = None
try:
object_list = record[0].get('object_group_members')
except Exception as e:
object_list = None
if object_list is not None and object_name in object_list:
# if is a list
if type(object_list) is not list:
# We accept a comma with or without a space after the separator, let's remove any space after the separator
object_list = object_list.replace(", ", ",")
# Split by the separator to convert as a list
object_list = object_list.split(",")
# remove the object from the member list
object_list.remove(object_name)
# define the new record
new_record = json.dumps({
"object_group_name": str(object_group_name),
"object_group_members": object_list,
"object_group_min_green_percent": str(object_group_min_green_percent),
"object_group_mtime": str(object_group_mtime)})
# Update the record
try:
collection.data.update(str(key), new_record)
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
# Record an audit change
import time
current_time = int(round(time.time() * 1000))
user = request_info.user
try:
# Insert the record
collection_audit.data.insert(json.dumps({
"time": str(current_time),
"user": str(user),
"action": "success",
"change_type": "unassociate logical group",
"object": str(object_name),
"object_category": "logical_groups",
"object_attrs": str(json.dumps(collection.data.query_by_id(key), indent=1)),
"result": "N/A",
"comment": str(update_comment)
}))
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
# end of work, return
return {
"payload": json.loads('{ "response": "object ' + str(object_name) + ' has been unassociated from logical group record key: ' + str(key) + "\" }"),
'status': 200 # HTTP status code
}
# no association, nothing to do
else:
return {
"payload": json.loads('{ "response": "object ' + str(object_name) + ' has no active association with logical group record key: ' + str(key) + "\" }"),
'status': 200 # HTTP status code
}
else:
return {
"payload": 'Warn: resource not found ' + str(key),
'status': 404 # HTTP status code
}
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
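# Illustrative sketch of the lookup pattern repeated in the handlers above: resolve the KVstore
# _key of a logical group from its name, returning None when no matching record exists.
def _get_logical_group_key(collection, object_group_name):
    query_string = '{ "object_group_name": "' + object_group_name + '" }'
    try:
        record = collection.data.query(query=str(query_string))
        return record[0].get('_key')
    except Exception:
        return None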
|
from typing import TYPE_CHECKING, Optional, Union, Dict, Any
from dis_snek.client.const import MISSING, Absent
from dis_snek.client.utils.attr_utils import define, field
from dis_snek.client.utils.converters import optional as optional_c
from dis_snek.client.utils.converters import timestamp_converter
from dis_snek.models.discord.application import Application
from dis_snek.models.discord.enums import InviteTargetTypes
from dis_snek.models.discord.guild import GuildPreview
from dis_snek.models.discord.snowflake import to_snowflake
from dis_snek.models.discord.stage_instance import StageInstance
from dis_snek.models.discord.timestamp import Timestamp
from .base import ClientObject
if TYPE_CHECKING:
from dis_snek.client import Snake
from dis_snek.models import TYPE_GUILD_CHANNEL
from dis_snek.models.discord.user import User
from dis_snek.models.discord.snowflake import Snowflake_Type
__all__ = ["Invite"]
@define()
class Invite(ClientObject):
code: str = field(repr=True)
"""the invite code (unique ID)"""
# metadata
uses: int = field(default=0, repr=True)
"""the guild this invite is for"""
max_uses: int = field(default=0)
"""max number of times this invite can be used"""
max_age: int = field(default=0)
"""duration (in seconds) after which the invite expires"""
created_at: Timestamp = field(default=MISSING, converter=optional_c(timestamp_converter), repr=True)
"""when this invite was created"""
temporary: bool = field(default=False, repr=True)
"""whether this invite only grants temporary membership"""
# target data
target_type: Optional[Union[InviteTargetTypes, int]] = field(
default=None, converter=optional_c(InviteTargetTypes), repr=True
)
"""the type of target for this voice channel invite"""
approximate_presence_count: Optional[int] = field(default=MISSING)
"""approximate count of online members, returned from the `GET /invites/<code>` endpoint when `with_counts` is `True`"""
approximate_member_count: Optional[int] = field(default=MISSING)
"""approximate count of total members, returned from the `GET /invites/<code>` endpoint when `with_counts` is `True`"""
scheduled_event: Optional["Snowflake_Type"] = field(default=None, converter=optional_c(to_snowflake), repr=True)
"""guild scheduled event data, only included if `guild_scheduled_event_id` contains a valid guild scheduled event id"""
expires_at: Optional[Timestamp] = field(default=None, converter=optional_c(timestamp_converter), repr=True)
"""the expiration date of this invite, returned from the `GET /invites/<code>` endpoint when `with_expiration` is `True`"""
stage_instance: Optional[StageInstance] = field(default=None)
"""stage instance data if there is a public Stage instance in the Stage channel this invite is for (deprecated)"""
target_application: Optional[dict] = field(default=None)
"""the embedded application to open for this voice channel embedded application invite"""
guild_preview: Optional[GuildPreview] = field(default=MISSING)
"""the guild this invite is for"""
# internal for props
_channel_id: "Snowflake_Type" = field(converter=to_snowflake, repr=True)
_inviter_id: Optional["Snowflake_Type"] = field(default=None, converter=optional_c(to_snowflake), repr=True)
_target_user_id: Optional["Snowflake_Type"] = field(default=None, converter=optional_c(to_snowflake))
@property
def channel(self) -> "TYPE_GUILD_CHANNEL":
"""The channel the invite is for."""
return self._client.cache.get_channel(self._channel_id)
@property
def inviter(self) -> Optional["User"]:
"""The user that created the invite or None."""
return self._client.cache.get_user(self._inviter_id) if self._inviter_id else None
@property
def target_user(self) -> Optional["User"]:
"""The user whose stream to display for this voice channel stream invite or None."""
return self._client.cache.get_user(self._target_user_id) if self._target_user_id else None
@classmethod
def _process_dict(cls, data: Dict[str, Any], client: "Snake") -> Dict[str, Any]:
if "stage_instance" in data:
data["stage_instance"] = StageInstance.from_dict(data, client)
if "target_application" in data:
data["target_application"] = Application.from_dict(data, client)
if "target_event_id" in data:
data["scheduled_event"] = data["target_event_id"]
if channel := data.pop("channel", None):
# invite metadata does not contain enough info to create a channel object
data["channel_id"] = channel["id"]
if guild := data.pop("guild", None):
data["guild_preview"] = GuildPreview.from_dict(guild, client)
if inviter := data.pop("inviter", None):
inviter = client.cache.place_user_data(inviter)
data["inviter_id"] = inviter.id
return data
def __str__(self) -> str:
return self.link
@property
def link(self) -> str:
"""The invite link."""
if self.scheduled_event:
return f"https://discord.gg/{self.code}?event={self.scheduled_event}"
return f"https://discord.gg/{self.code}"
async def delete(self, reason: Absent[str] = MISSING) -> None:
"""
Delete this invite.
!!! Note
You must have the `manage_channels` permission on the channel this invite belongs to.
With the `manage_guild` permission, you can delete any invite across the guild.
Args:
reason: The reason for deleting the invite.
"""
await self._client.http.delete_invite(self.code, reason=reason)
|
from amadeus.client.decorator import Decorator
from amadeus.safety.safety_rated_locations._by_square import BySquare
class SafetyRatedLocations(Decorator, object):
def __init__(self, client):
Decorator.__init__(self, client)
self.by_square = BySquare(client)
def get(self, **params):
'''
Returns the overall safety ranking and a detailed safety
ranking of all the districts within the given radius.
.. code-block:: python
amadeus.safety.safety_rated_locations.get(
longitude=2.160873,
latitude=41.397158
)
:param latitude: latitude of the location for the safety-ranking search.
For example: ``41.397158``
:param longitude: longitude of the location for the safety-ranking search.
For example: ``2.160873``
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed
'''
return self.client.get(
'/v1/safety/safety-rated-locations', **params)
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
#########################################################################
from flask_wtf.csrf import CSRFProtect
from flask import request, current_app
class _PGCSRFProtect(CSRFProtect):
def __init__(self, *args, **kwargs):
super(_PGCSRFProtect, self).__init__(*args, **kwargs)
def init_app(self, app):
super(_PGCSRFProtect, self).init_app(app)
self._pg_csrf_exempt(app)
def _pg_csrf_exempt(self, app):
"""Exempt some of the Views/blueprints from CSRF protection
"""
exempt_views = [
'flask.helpers.send_static_file',
'flask_security.views.login',
'flask_security.views.logout',
'pgadmin.tools.translations',
app.blueprints['redirects'],
'pgadmin.browser.server_groups.servers.supported_servers-js',
'pgadmin.tools.datagrid.initialize_query_tool',
'pgadmin.tools.datagrid.panel',
'pgadmin.tools.debugger.initialize_target',
'pgadmin.tools.debugger.direct_new',
'pgadmin.tools.schema_diff.panel',
'pgadmin.tools.schema_diff.ddl_compare',
'pgadmin.authenticate.login'
]
for exempt in exempt_views:
self.exempt(exempt)
pgCSRFProtect = _PGCSRFProtect()
|
#---------------------------------------------------------------------------------------------------
#
#---------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------
"""
Class: Parameter()
Parameter description used for modelling.
name - short name tag, need to keep unique to distinguish in dictionary
plot_name - axis label type of description
value - parameter value
std - parameter standard deviation
min - minimum value
max - maximum value
"""
class Parameter:
#-----------------------------------------------------------------------------------------------
# constructor
#-----------------------------------------------------------------------------------------------
def __init__(self,name,plot_name,value,std=0,min=0,max=0):
self.name = name
self.plot_name = plot_name
self.value = value
self.std = std
self.min = min
self.max = max
#-----------------------------------------------------------------------------------------------
# summary as a string
#-----------------------------------------------------------------------------------------------
def summary(self):
return " %s: %f +- %f [%f,%f] -- %s"%\
(self.name,self.value,self.std,self.min,self.max,self.plot_name)
#-----------------------------------------------------------------------------------------------
# range
#-----------------------------------------------------------------------------------------------
def range(self):
return self.min,self.max
#-----------------------------------------------------------------------------------------------
# set range
#-----------------------------------------------------------------------------------------------
def set_range(self,min,max):
self.min = min
self.max = max
return
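#-----------------------------------------------------------------------------------------------
# usage sketch (illustrative values only)
#-----------------------------------------------------------------------------------------------
if __name__ == '__main__':
    p = Parameter('xsec', 'cross section [pb]', 1.25, std=0.05, min=0.0, max=5.0)
    print(p.summary())
    print(p.range())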
|
# Read db.csv and split each line into its comma-separated fields.
# (A csv.reader would also handle quoted fields; the plain split keeps the original behaviour.)
arquivo = open('db.csv', 'r')
linhas = arquivo.readlines()
for linha in linhas:
palavras = linha.split(',')
arquivo.close()
|
from discord import Embed, Member
from discord.ext import commands
from discord.utils import get
from random import choice, randint
from requests import get as rget
from typing import Union
class Random(commands.Cog, name='Random'):
"""
Utilisable par tout le monde et contient des "jeux" et de l'aléatoire.
"""
def __init__(self, bot):
self.bot = bot
@commands.command(brief='!pof [pile/face]', description='Play heads or tails (pile ou face) against the bot')
async def pof(self, ctx, arg):
if arg.lower() in ('pile', 'face'):
piece = choice(['pile', 'face'])
if arg.lower() == piece:
await ctx.send(f':white_check_mark: {piece}! GG, you win.')
else:
await ctx.send(f':negative_squared_cross_mark: {piece}! You lose.')
else:
await ctx.send('❌ You must enter "pile" or "face"!')
@commands.command(brief='!ping [random/pseudo]', description="Mentionner quelqu'un")
    async def ping(self, ctx, arg: Union[Member, str]):
        # The Union converter tries Member first and falls back to the raw string,
        # so '!ping random' reaches the command instead of failing conversion.
        members = [x for x in ctx.guild.members if not x.bot]
        if isinstance(arg, str) and arg.lower() == 'random':
            await ctx.send(f'Hey {choice(members).mention} !')
        elif isinstance(arg, Member):
            await ctx.send(f'Hey {arg.mention} !')
@commands.command(aliases=['r'], brief='!roll [x]', description="Lancer un dé de [x] faces")
async def roll(self, ctx, faces: int):
number = randint(1, faces)
await ctx.send(f'🎲 Tu as obtenu un {number} !')
@commands.command(brief='!meme', description='Regarder un meme aléatoire')
async def meme(self, ctx):
data = rget('https://meme-api.herokuapp.com/gimme').json()
embed = (Embed(title=f":speech_balloon: r/{data['subreddit']} :", color=0x3498db)
.set_image(url=data['url'])
.set_footer(text=data['postLink']))
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Random(bot))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
import time
import ctypes
import numpy as np
import routines as myr
def fortranize(m):
return np.array(m,order='F')
def ccf(m,n_p):
(npixr,mean,var) = myr.make_standard(m,0)
m = fortranize(m)
(cor,vcor) = myr.cross_cf(m,m,n_p,0)
return cor
#def ppcf(m,th,nfmax,rmax,n_random=None):
# lg = m.shape[0]
# (npixr,mean,var) = myr.make_standard(m,0)
# (nf1,fl1) = myr.peak_f(m,th,100)
# if nf1>nfmax:
# rlist = np.random.randint(0,nf1,size=nfmax)
# nf1 = nfmax
# fl1 = fl1[:,rlist]
# if n_random is None:
# n_random = 5*nf1
# fl1 = fortranize(fl1[:,:nf1])
# (ksi,vksi) = myr.ffcf(1,lg,fl1,fl1,n_random,1,rmax)
# return ksi
def cccf(m,th,nfmax,rmax):
lg = m.shape[0]
(npixr,mean,var) = myr.make_standard(m,0)
(nf1,nf2,fl1,fl2) = myr.cross_f(m,th,th+0.01)
if nf1>nfmax:
rlist = np.random.randint(0,nf1,size=nfmax)
nf1 = nfmax
fl1 = fl1[:,rlist]
if nf2>nfmax:
rlist = np.random.randint(0,nf2,size=nfmax)
nf2 = nfmax
fl2 = fl2[:,rlist]
fl1 = fortranize(fl1[:,:nf1])
fl2 = fortranize(fl2[:,:nf2])
(ksi1,vksi) = myr.ffcf(1,lg,fl1,fl1,5*nf1,1,rmax)
(ksi2,vksi) = myr.ffcf(1,lg,fl2,fl2,5*nf2,1,rmax)
ksi = (ksi1+ksi2)/2
return ksi
def pccf(m,th,nfmax,rmax):
lg = m.shape[0]
(npixr,mean,var) = myr.make_standard(m,0)
(nf1,fl1) = myr.peak_f(m,th,100)
(nf2,nf3,fl2,fl3) = myr.cross_f(m,th,th+0.01)
if nf1>nfmax:
rlist = np.random.randint(0,nf1,size=nfmax)
nf1 = nfmax
fl1 = fl1[:,rlist]
if nf2>nfmax:
rlist = np.random.randint(0,nf2,size=nfmax)
nf2 = nfmax
fl2 = fl2[:,rlist]
if nf3>nfmax:
rlist = np.random.randint(0,nf3,size=nfmax)
nf3 = nfmax
fl3 = fl3[:,rlist]
fl1 = fortranize(fl1[:,:nf1])
fl2 = fortranize(fl2[:,:nf2])
fl3 = fortranize(fl3[:,:nf3])
(ksi1,vksi) = myr.ffcf(1,lg,fl1,fl2,5*nf1,1,rmax)
(ksi2,vksi) = myr.ffcf(1,lg,fl1,fl3,5*nf2,1,rmax)
ksi = (ksi1+ksi2)/2
return ksi
def der(data):
lx = data.shape[0]
ly = data.shape[1]
datap = np.zeros((lx+1,ly+1))
datap[:lx,:ly] = data
datap[:lx,ly:] = data[:,:1]
datap[lx:,:ly] = data[:1,:]
datap[lx,ly] = data[0,0]
dx = np.diff(datap,axis=0)
dy = np.diff(datap,axis=1)
strg = np.sqrt(dx[:,:ly]**2+dy[:lx,:]**2)
mstrg = np.mean(strg)
strg = np.where(strg>mstrg,strg,0)
# strg = np.where(strg<2*mstrg,1/2,0)
ornt = np.arctan2(dx[:,:ly],dy[:lx,:])
return [strg,ornt]
def canny(d,R,meth,edd):
if (R!=0):
        dt = np.fft.fft2(d)
        sz = d.shape[0]  # assumed square map; 'sz' was not defined in the original
if meth=='g':
for i in range(sz):
for j in range(sz):
k2 = 1.*(i*i+j*j)/d.shape[0]
dt[i,j]=dt[i,j]*np.exp(-k2*R*R/2)
if meth=='tp':
for i in range(sz):
for j in range(sz):
k = np.sqrt(0.001+i*i+j*j)/sz
dt[i,j]=dt[i,j]* 3*(np.sin(k*R)-k*R*np.cos(k*R))/(k*R)**3
d = np.fft.ifft2(dt)
d = abs(d)
if edd=='lap':
d = cv2.Laplacian(d,cv2.CV_64F)
if edd=='sob':
sobelx = cv2.Sobel(d,cv2.CV_64F,1,0,ksize=3)
sobely = cv2.Sobel(d,cv2.CV_64F,0,1,ksize=3)
d =np.sqrt(sobelx**2+sobely**2)
if edd=='sch':
scharrx = cv2.Scharr(d,cv2.CV_64F,1,0)
scharry = cv2.Scharr(d,cv2.CV_64F,0,1)
d =np.sqrt(scharrx**2+scharry**2)
# do = (d-np.min(d))/(np.max(d)-np.min(d))
# dt = np.where((do < tl) | (do > tu), 0, 1)
# if False:
# for i in range(1,dt.shape[0]-1):
# for j in range(1,dt.shape[1]-1):
# if dt[i,j]==1:
# if (dt[i+1,j]==0) and dt[i,j+1]==0 and dt[i,j-1]==0 and dt[i-1,j]==0:# and (dt[i+1,j+1]==0) and (dt[i-1,j+1]==0) and (dt[i+1,j-1]==0) and (dt[i-1,j-1]==0):
# dt[i,j]==1
return d
#def badcurvelet(name,c1,c2):
# np.savetxt('curvelet_input',name)
# crv_path='/home/gf/work/pakages/curvelet'
# crn_path=os.getcwd()
# os.system("cat <<EOF | matlab -nodesktop -nosplash -nodisplay\n"+"mn="+str(c1)+";\n"+"mx="+str(c2)+";\n"+"CC = dlmread('curvelet_input');\n"+"cd "+crv_path+";\n"+"C = fdct_wrapping(CC);\n"+"CC=C;\n"+"for m=mn:mx\n"+" C=CC;\n"+"%Making other components zero\n"+" for s=1:length(C)\n"+" for w=1:length(C{s})\n"+" C{s}{w}=C{s}{w}.*(s==m);\n"+" end\n"+" end\n"+" y=ifdct_wrapping(C);\n"+" out =['c' int2str(m) '_' 'curvelet_input'];\n"+" y=real(y);\n"+" cd "+crn_path+";\n"+" dlmwrite(out,y,' ');\n"+" cd "+crv_path+";\n"+"end % first\n"+"exit\n"+"EOF\n")
# print '\n'
# res = {i:None for i in range(c1,c2)}
# for i in range(c1,c2+1):
# res[i]=np.loadtxt('c'+str(i)+'_curvelet_input')
# os.remove('c'+str(i)+'_curvelet_input')
# return res
def imshow(ax,strg,tlt,rotation=False):
im = ax.imshow(strg, cmap='spectral')
# Titling
ax.set_title(tlt, y=1.04)
if rotation:
        ax.set_title(tlt, rotation='vertical',x=-0.1,y=0.5)
ax.axis('off')
# ax1.set_xticks([])
# ax1.set_yticks([])
def plot(ax,x,y,tlt,
clrs=['b','r','lime','c','indigo','gold','plum','k'],
xlab=[False,''],ylab=[False,''],
logx=False,logy=False,xlim=[False,0,1],
ylim=[False,0,1]):
    # x, y and tlt must be passed in as lists (one entry per curve)
for i in range(len(y)):
ax.plot(x[i],y[i],clrs[i],label=tlt[i])
if xlab[0]:
ax.set_xlabel(xlab[1],fontsize=25)
if ylab[0]:
ax.set_ylabel(ylab[1],fontsize=25)
if logx:
ax.set_xscale("log", nonposx='clip')
if logy:
        ax.set_yscale("log", nonposy='clip')
if xlim[0]:
ax.set_xlim(xlim[1],xlim[2])
if ylim[0]:
ax.set_ylim(ylim[1],ylim[2])
ax.tick_params(axis='both', which='major', labelsize=18)
def histax(ax,m,n_c,tlt=[False,''],fc='b',alpha=0.75):
n, bins, patches = ax.hist(m.flatten('F'), n_c, facecolor=fc,alpha=alpha)
# bin_centers = 0.05 * np.diff(bins) + bins[:-1]
# for count, x in zip(n, bin_centers):
# Label the raw counts
# ax1.annotate(str(count), xy=(x, 0), xycoords=('data', 'axes fraction'),
# xytext=(0, -18), textcoords='offset points', va='top', ha='center')
ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
tks=[round(x,1) for x in np.linspace(np.min(m),np.max(m),5)]
ax.set_xticks(tks)
ax.tick_params(axis='both', which='major', labelsize=18)
if tlt[0]:
ax.set_title(tlt[1], fontsize=30,x=0.5,y=1.1)
def pdf_ax(ax,m,n_c,color='b',tlt=[False,'']):
n, bins = np.histogram(m.flatten('F'), n_c)
dx = (bins[1]-bins[0])/2
bins = bins[:-1]+dx
nt = 0.0001*np.size(m)
bins = bins[n>nt]
n = n[n>nt]
ax.plot(bins,n,color=color)
ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
tks=[round(x,1) for x in np.linspace(bins[0],bins[-1],5)]
ax.set_xticks(tks)
ax.tick_params(axis='both', which='major', labelsize=18)
if tlt[0]:
ax.set_title(tlt[1], fontsize=30,x=0.5,y=1.1)
def pdf(m,n_c):
m = np.array(m)
n, bins = np.histogram(m.flatten('F'), n_c)
dx = (bins[1]-bins[0])/2
bins = bins[:-1]+dx
# nt = np.size(m)
# bins = bins[n>nt]
# n = n[n>nt]
return [bins,n]
def curvelet(m,n_scales,r_scale,n_wedges,ac):
dir_path = os.path.dirname(os.path.realpath(__file__))
curlib = ctypes.cdll.LoadLibrary(dir_path+"/curvelet.so")
m = np.array(m, dtype=np.double)
nx = m.shape[0]
ny = m.shape[1]
aptr = m.ctypes.data_as(ctypes.POINTER(ctypes.POINTER(ctypes.c_double)))
curlib.curvelet(aptr,nx,ny,n_scales,r_scale-1,n_wedges,ac)
return m
#def d_pdf(ax,m1,m2,n_c,tlt=[False,'']):
# n1, bins1 = np.histogram(m1.flatten('F'), n_c)
# dx1 = (bins1[1]-bins1[0])/2
# n2, bins2 = np.histogram(m2.flatten('F'), n_c)
# dx2 = (bins2[1]-bins2[0])/2
# ax.plot(bins1[:-1]+dx1,n1)
# ax.plot(bins2[:-1]+dx2,n2)
# ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
# ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
# tks=[round(x,1) for x in np.linspace(np.min(m),np.max(m),5)]
# ax.set_xticks(tks)
# ax.tick_params(axis='both', which='major', labelsize=18)
# if tlt[0]:
# ax.set_title(tlt[1], fontsize=30,x=0.5,y=1.1)
def TicTocGenerator():
# Generator that returns time differences
ti = 0 # initial time
tf = time.time() # final time
while True:
ti = tf
tf = time.time()
yield tf-ti # returns the time difference
TicToc = TicTocGenerator() # create an instance of the TicTocGen generator
# This will be the main function through which we define both tic() and toc()
def toc(tempBool=True):
# Prints the time difference yielded by generator instance TicToc
tempTimeInterval = next(TicToc)
if tempBool:
print( "Elapsed time: %f seconds.\n" %tempTimeInterval )
def tic():
# Records a time in TicToc, marks the beginning of a time interval
toc(False)
def stat_describe(m,m_max):
mean = np.mean(m)
std = np.std(m)
out = [mean,std]
m = m - mean
for n in range(3,m_max+1):
m_n = np.mean(m**n)
if m_n >= 0:
m_n = m_n**(1./n)
elif m_n < 0:
m_n = -(abs(m_n)**(1./n))
m_n = m_n/std
out.append(m_n)
return np.array(out)
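# Minimal usage sketch (not part of the original module and only runnable when the module's
# own imports, e.g. cv2 and routines, are available; the random field is made up for
# illustration): time der() with tic()/toc(), then summarise the gradient strength.
if __name__ == '__main__':
    demo_map = np.random.rand(64, 64)
    tic()
    strength, orientation = der(demo_map)
    toc()
    print(stat_describe(strength, 4))
    bins, counts = pdf(strength, 20)
    print(bins[:3], counts[:3])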
|
# -*- coding: utf-8 -*-
"""The task-based multi-process processing engine."""
import os
import shutil
import tempfile
import redis
from plaso.lib import definitions
from plaso.multi_process import engine
from plaso.storage import factory as storage_factory
from plaso.storage import merge_reader
from plaso.storage.redis import redis_store
class TaskMultiProcessEngine(engine.MultiProcessEngine):
"""Task-based multi-process engine base.
This class contains functionality to:
* manage task storage used to store task results.
"""
# pylint: disable=abstract-method
def __init__(self):
"""Initializes a task-based multi-process engine."""
super(TaskMultiProcessEngine, self).__init__()
self._merge_task_storage_path = None
self._processing_configuration = None
self._processed_task_storage_path = None
self._redis_client = None
self._storage_file_path = None
self._task_storage_path = None
# TODO: remove, currently only used by psort.
def _CheckTaskReadyForMerge(self, task_storage_format, task):
"""Checks if a task is ready for merging with this session storage.
Args:
task_storage_format (str): storage format used to store task results.
task (Task): task the storage changes are part of.
Returns:
bool: True if the task is ready to be merged.
Raises:
IOError: if the size of the SQLite task storage file cannot be determined.
OSError: if the size of the SQLite task storage file cannot be determined.
"""
if task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
processed_storage_file_path = self._GetProcessedStorageFilePath(task)
try:
stat_info = os.stat(processed_storage_file_path)
except (IOError, OSError):
return False
task.storage_file_size = stat_info.st_size
return True
return False
def _GetMergeTaskStorageRedisHashName(self, task):
"""Retrieves the Redis hash name of a task store that should be merged.
Args:
task (Task): task the storage changes are part of.
Returns:
str: Redis hash name of a task store.
"""
return '{0:s}-merge'.format(task.session_identifier)
def _GetMergeTaskStorageFilePath(self, task_storage_format, task):
"""Retrieves the path of a task storage file in the merge directory.
Args:
task_storage_format (str): storage format used to store task results.
task (Task): task the storage changes are part of.
Returns:
str: path of a task storage file file in the merge directory or None if
not set.
"""
if task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
filename = '{0:s}.plaso'.format(task.identifier)
return os.path.join(self._merge_task_storage_path, filename)
return None
def _GetProcessedRedisHashName(self, session_identifier):
"""Retrieves the Redis hash name of a processed task store.
Args:
session_identifier (str): the identifier of the session the tasks are
part of.
Returns:
str: Redis hash name of a task store.
"""
return '{0:s}-processed'.format(session_identifier)
def _GetProcessedStorageFilePath(self, task):
"""Retrieves the path of a task storage file in the processed directory.
Args:
task (Task): task the storage changes are part of.
Returns:
str: path of a task storage file in the processed directory.
"""
filename = '{0:s}.plaso'.format(task.identifier)
return os.path.join(self._processed_task_storage_path, filename)
def _GetProcessedTaskIdentifiers(
self, task_storage_format, session_identifier):
"""Identifiers for tasks which have been processed.
Args:
task_storage_format (str): storage format used to store task results.
session_identifier (str): the identifier of the session the tasks are
part of.
Returns:
list[str]: task identifiers that are processed.
Raises:
IOError: if the temporary path for the task storage does not exist.
OSError: if the temporary path for the task storage does not exist.
"""
if task_storage_format == definitions.STORAGE_FORMAT_REDIS:
redis_hash_name = self._GetProcessedRedisHashName(session_identifier)
try:
task_identifiers = self._redis_client.hkeys(redis_hash_name)
task_identifiers = [
identifier.decode('utf-8') for identifier in task_identifiers]
except redis.exceptions.TimeoutError:
# If there is a timeout fetching identifiers, we assume that there are
# no processed tasks.
task_identifiers = []
elif task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
if not self._processed_task_storage_path:
raise IOError('Missing processed task storage path.')
task_identifiers = [
path.replace('.plaso', '')
for path in os.listdir(self._processed_task_storage_path)]
return task_identifiers
def _PrepareMergeTaskStorage(self, task_storage_format, task):
"""Prepares a task storage for merging.
Moves the task storage file from the processed directory to the merge
directory.
Args:
task_storage_format (str): storage format used to store task results.
task (Task): task the storage changes are part of.
Raises:
IOError: if the SQLite task storage file cannot be renamed.
OSError: if the SQLite task storage file cannot be renamed.
"""
if task_storage_format == definitions.STORAGE_FORMAT_REDIS:
# TODO: use number of attribute containers instead of file size?
task.storage_file_size = 1000
redis_hash_name = self._GetProcessedRedisHashName(task.session_identifier)
number_of_results = self._redis_client.hdel(
redis_hash_name, task.identifier)
if number_of_results == 0:
raise IOError('Task identifier {0:s} was not processed'.format(
task.identifier))
redis_hash_name = self._GetMergeTaskStorageRedisHashName(task)
# TODO: set timestamp as value.
self._redis_client.hset(
redis_hash_name, key=task.identifier, value=b'true')
elif task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
merge_storage_file_path = self._GetMergeTaskStorageFilePath(
task_storage_format, task)
processed_storage_file_path = self._GetProcessedStorageFilePath(task)
task.storage_file_size = os.path.getsize(processed_storage_file_path)
try:
os.rename(processed_storage_file_path, merge_storage_file_path)
except OSError as exception:
raise IOError((
'Unable to rename task storage file: {0:s} with error: '
'{1!s}').format(processed_storage_file_path, exception))
def _RemoveMergeTaskStorage(self, task_storage_format, task):
"""Removes a merge task storage.
Args:
task_storage_format (str): storage format used to store task results.
task (Task): task the storage changes are part of.
Raises:
IOError: if a SQLite task storage file cannot be removed.
OSError: if a SQLite task storage file cannot be removed.
"""
if task_storage_format == definitions.STORAGE_FORMAT_REDIS:
redis_hash_pattern = '{0:s}-{1:s}-*'.format(
task.session_identifier, task.identifier)
for redis_hash_name in self._redis_client.keys(redis_hash_pattern):
self._redis_client.delete(redis_hash_name)
elif task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
merge_storage_file_path = self._GetMergeTaskStorageFilePath(
task_storage_format, task)
try:
os.remove(merge_storage_file_path)
except OSError as exception:
raise IOError((
'Unable to remove merge task storage file: {0:s} with error: '
'{1!s}').format(merge_storage_file_path, exception))
def _RemoveProcessedTaskStorage(self, task_storage_format, task):
"""Removes a processed task storage.
Args:
task_storage_format (str): storage format used to store task results.
task (Task): task the storage changes are part of.
Raises:
IOError: if a SQLite task storage file cannot be removed.
OSError: if a SQLite task storage file cannot be removed.
"""
if task_storage_format == definitions.STORAGE_FORMAT_REDIS:
redis_hash_pattern = '{0:s}-{1:s}-*'.format(
task.session_identifier, task.identifier)
for redis_hash_name in self._redis_client.keys(redis_hash_pattern):
self._redis_client.delete(redis_hash_name)
elif task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
processed_storage_file_path = self._GetProcessedStorageFilePath(task)
try:
os.remove(processed_storage_file_path)
except OSError as exception:
raise IOError((
'Unable to remove processed task storage file: {0:s} with error: '
'{1!s}').format(processed_storage_file_path, exception))
def _StartMergeTaskStorage(self, storage_writer, task_storage_format, task):
"""Starts a merge of a task store with the session storage.
Args:
storage_writer (StorageWriter): storage writer for a session storage.
task_storage_format (str): storage format used to store task results.
task (Task): task the storage changes are part of.
Returns:
StorageMergeReader: storage merge reader of the task storage.
Raises:
      IOError: if the temporary path for the task storage does not exist or
        if the temporary path for the task storage does not refer to a file.
      OSError: if the temporary path for the task storage does not exist or
        if the temporary path for the task storage does not refer to a file.
"""
merge_storage_file_path = self._GetMergeTaskStorageFilePath(
task_storage_format, task)
if task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
if not self._merge_task_storage_path:
raise IOError('Missing merge task storage path.')
if not os.path.isfile(merge_storage_file_path):
raise IOError('Merge task storage path is not a file.')
path = self._GetMergeTaskStorageFilePath(task_storage_format, task)
task_storage_reader = (
storage_factory.StorageFactory.CreateTaskStorageReader(
task_storage_format, task, path))
task_storage_reader.SetStorageProfiler(self._storage_profiler)
return merge_reader.StorageMergeReader(
storage_writer, task_storage_reader, task.identifier)
def _StartTaskStorage(self, task_storage_format):
"""Starts the task storage.
Args:
task_storage_format (str): storage format used to store task results.
Raises:
IOError: if the temporary path for the SQLite task storage already exists.
OSError: if the temporary path for the SQLite task storage already exists.
"""
if task_storage_format == definitions.STORAGE_FORMAT_REDIS:
url = redis_store.RedisStore.DEFAULT_REDIS_URL
self._redis_client = redis.from_url(url=url, socket_timeout=60)
self._redis_client.client_setname('task_engine')
elif task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
if self._task_storage_path:
raise IOError('SQLite task storage path already exists.')
output_directory = os.path.dirname(self._storage_file_path)
self._task_storage_path = tempfile.mkdtemp(dir=output_directory)
self._merge_task_storage_path = os.path.join(
self._task_storage_path, 'merge')
os.mkdir(self._merge_task_storage_path)
self._processed_task_storage_path = os.path.join(
self._task_storage_path, 'processed')
os.mkdir(self._processed_task_storage_path)
self._processing_configuration.task_storage_path = self._task_storage_path
def _StopTaskStorage(
self, task_storage_format, session_identifier, abort=False):
"""Stops the task storage.
The results of tasks will be lost on abort.
Args:
task_storage_format (str): storage format used to store task results.
session_identifier (str): the identifier of the session the tasks are
part of.
abort (Optional[bool]): True to indicate the stop is issued on abort.
"""
if task_storage_format == definitions.STORAGE_FORMAT_REDIS:
redis_hash_pattern = '{0:s}-*'.format(session_identifier)
for redis_hash_name in self._redis_client.keys(redis_hash_pattern):
self._redis_client.delete(redis_hash_name)
self._redis_client = None
elif task_storage_format == definitions.STORAGE_FORMAT_SQLITE:
if os.path.isdir(self._merge_task_storage_path):
if abort:
shutil.rmtree(self._merge_task_storage_path)
else:
os.rmdir(self._merge_task_storage_path)
if os.path.isdir(self._processed_task_storage_path):
if abort:
shutil.rmtree(self._processed_task_storage_path)
else:
os.rmdir(self._processed_task_storage_path)
if os.path.isdir(self._task_storage_path):
if abort:
shutil.rmtree(self._task_storage_path)
else:
os.rmdir(self._task_storage_path)
self._merge_task_storage_path = None
self._processed_task_storage_path = None
self._task_storage_path = None
self._processing_configuration.task_storage_path = None
|
from west.commands import WestCommand
from west import log
from west import util
from pathlib import Path
import os
import subprocess
class Yocto(WestCommand):
def __init__(self):
super().__init__(
"yocto",
"",
None)
self.top_dir = util.west_topdir()
self.project_dir = Path(os.path.dirname(os.path.realpath(__file__))).parent.parent
def do_add_parser(self, parser_adder):
return parser_adder.add_parser(self.name,
add_help=True,
description=self.description)
def do_run(self, args, unknown_args):
env = os.environ.copy()
        env.pop('ZEPHYR_BASE', None)  # ZEPHYR_BASE may not be set; drop it if present
args = [
os.path.join(self.project_dir, 'scripts/yoctoshell.sh'),
]
p = subprocess.Popen(args, cwd=self.top_dir, env=env)
p.communicate()
if p.returncode:
raise subprocess.CalledProcessError(p.returncode, args)
|
import sys
from copy import deepcopy as copy
from hqca.operators import *
def InverseJordanWignerTransform(op):
'''
transforms a Pauli string into a Fermionic Operator
'''
Nq = len(op.s)
pauli = ['I'*Nq]
new = Operator()
new+= FermiString(
coeff=op.c,s='i'*Nq)
# define paulis ops
for qi,o in enumerate(op.s[::-1]):
# reversed is because of the order in which we apply cre/ann ops
q = Nq-qi-1
if o=='I':
continue
if o in ['X','Y']:
# # #
s1 = 'i'*q +'+'+(Nq-q-1)*'i'
s2 = 'i'*q +'-'+(Nq-q-1)*'i'
if o=='X':
c1,c2 = 1,1
elif o=='Y':
c1,c2 = 1j,-1j
tem1 = Operator()
tem1+= FermiString(s=s1,coeff=c1)
tem1+= FermiString(s=s2,coeff=c2)
for qj in range(q):
r = Nq-qj-1-1
t1 = 'i'*r +'h'+(Nq-r-1)*'i'
t2 = 'i'*r +'p'+(Nq-r-1)*'i'
d1,d2 = 1,-1
tem2 = Operator()
tem2+= FermiString(s=t1,coeff=d1)
tem2+= FermiString(s=t2,coeff=d2)
tem1 = tem2*tem1
elif o in ['Z']:
s1 = 'i'*q+'h'+(Nq-q-1)*'i'
s2 = 'i'*q+'p'+(Nq-q-1)*'i'
c1,c2 = 1,-1
tem1 = Operator()
tem1+= FermiString(s=s1,coeff=c1)
tem1+= FermiString(s=s2,coeff=c2)
new = tem1*new
return new
def InverseJordanWigner(operator,
**kw
):
if isinstance(operator,type(QuantumString())):
return InverseJordanWignerTransform(operator)
else:
new = Operator()
for op in operator:
new+= InverseJordanWignerTransform(op)
return new
|
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import os
import time
import numpy as np
from google.protobuf import text_format
import tensorflow as tf
def load_graph(model_file):
graph = tf.Graph()
graph_def = tf.compat.v1.GraphDef()
file_ext = os.path.splitext(model_file)[1]
with open(model_file, "rb") as f:
if file_ext == '.pbtxt':
text_format.Merge(f.read(), graph_def)
else:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def, name='')
tf.io.write_graph(graph_def, '/tmp/', 'optimized_graph.pb',as_text=False)
return graph
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_graph", default=None,
help="graph/model to be executed")
parser.add_argument("--input_height", default=None,
type=int, help="input height")
parser.add_argument("--input_width", default=None,
type=int, help="input width")
parser.add_argument("--batch_size", default=32,
type=int, help="batch size")
parser.add_argument("--input_layer", default="input",
help="name of input layer")
parser.add_argument("--output_layer", default="resnet_v1_101/SpatialSqueeze",
help="name of output layer")
args = parser.parse_args()
if args.input_graph:
model_file = args.input_graph
else:
sys.exit("Please provide a graph file.")
if args.input_height:
input_height = args.input_height
else:
input_height = 224
if args.input_width:
input_width = args.input_width
else:
input_width = 224
batch_size = args.batch_size
input_layer = args.input_layer
output_layer = args.output_layer
graph = load_graph(model_file)
input_tensor = graph.get_tensor_by_name(input_layer + ":0")
output_tensor = graph.get_tensor_by_name(output_layer + ":0")
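  # Minimal smoke-test sketch (not part of the original script; assumes the graph expects an
  # NHWC float image batch): run one random batch to confirm the tensors resolve.
  with tf.compat.v1.Session(graph=graph) as sess:
    dummy_batch = np.random.rand(batch_size, input_height, input_width, 3).astype(np.float32)
    predictions = sess.run(output_tensor, {input_tensor: dummy_batch})
    print("output shape:", predictions.shape)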
|
# (C) unresolved-external@singu-lair.com released under the MIT license (see LICENSE)
import datetime
import sys
import time
from multiprocessing import Queue # hack
import common
class cmd_log:
def log(self, message, info = False):
print(message, file = sys.stdout if info else sys.stderr)
def log_ln(self, message, info = False):
self.log(message.rstrip('\r\n'), info)
def log_ts(self, message, info = False):
self.log_ln('{}: {}'.format(datetime.datetime.fromtimestamp(int(time.time())), message), info)
class cmd_app:
def __init__(self):
self.log = cmd_log()
def run(self):
env = common.load_env(self.log)
if env['game_dir'] is None:
return
ca_bundle_path = common.locate_ca_bundle(env['script_dir'])
if ca_bundle_path is None:
common.get_node({'src': 'https://curl.haxx.se/ca/cacert.pem', 'dest': 'cacert.pem'}, False, False, env['script_dir'], self.log)
self.log.log_ts('CA Bundle updated', info = True)
addons = common.list_addons(env, self.log)
update_context = {'launch': True, 'error': False}
common.update_addons(env, addons, self.log, False, update_context)
if update_context['error']:
pass
elif update_context['launch']:
common.launch(env['game_dir'], env['game_args'])
a = cmd_app()
a.run()
|
from numpy.core.fromnumeric import size
from text_models import Vocabulary
from collections import Counter
import numpy as np
# Collocations
date = dict(year=2022, month=1, day=10)
voc = Vocabulary(date, lang='En')
bigrams = Counter({k: v for k, v in voc.voc.items() if k.count("~")})
co_occurrence = np.zeros((5, 5))
index = {'the': 0, 'to': 1, 'of': 2, 'in': 3, 'and': 4}
for bigram, cnt in bigrams.most_common():
a, b = bigram.split('~')
if a in index and b in index:
co_occurrence[index[a], index[b]] = cnt
co_occurrence[index[b], index[a]] = cnt
keys = list(index.items())
keys.sort(key=lambda x: x[1])
print(' | ' + ' | '.join([k for k, _ in keys]) + ' | ')
for c, (k, _) in zip(co_occurrence, keys):
_ = " | ".join(map(lambda x: '{: 7d}'.format(int(x)), c))
print('{} | {} |'.format(k, _))
for bigram, cnt in bigrams.items():
a, b = bigram.split('~')
for x in [a, b]:
if x not in index:
index[x] = len(index)
len(index)
# Bernoulli Distribution
x = np.random.binomial(1, 0.3, size=1000)
hp = x.mean()
# Categorical distribution
X = np.random.multinomial(1, [1/6] * 6, size=100)
x = X.argmax(axis=1)
var, counts = np.unique(x, return_counts=True)
N = counts.sum()
p = counts / N
co_occurrence = np.zeros((len(index), len(index)))
for bigram, cnt in bigrams.most_common():
a, b = bigram.split('~')
if a in index and b in index:
co_occurrence[index[a], index[b]] = cnt
co_occurrence[index[b], index[a]] = cnt
co_occurrence = co_occurrence / co_occurrence.sum()
keys = list(index.items())
keys.sort(key=lambda x: x[1])
print(' | ' + ' | '.join([k for k, _ in keys[:5]]) + ' | ')
for c, (k, _) in zip(co_occurrence, keys[:5]):
_ = " | ".join(map(lambda x: '{: 0.5f}'.format(x), c[:5]))
print('{} | {} |'.format(k, _))
# Independent Random Variables
d = 6
R = np.random.multinomial(1, [1/d] * d, size=10000).argmax(axis=1)
C = np.random.multinomial(1, [1/d] * d, size=10000).argmax(axis=1)
Z = [[r, c] for r, c in zip(R, C)]
Z = [[r, c] for r, c in zip(R, C) if r != c]
Z = [[2 if c == 1 and np.random.rand() < 0.1 else r, c] for r, c in zip(R, C)]
W = np.zeros((d, d))
for r, c in Z:
W[r, c] += 1
W = W / W.sum()
for w in (W):
_ = " & ".join(map(lambda x: "{:0.4f}".format(x), w))
print(r"{} \\".format(_))
R_m = W.sum(axis=1)
C_m = W.sum(axis=0)
ind = np.dot(np.atleast_2d(R_m).T, np.atleast_2d(C_m))
for w in (W-ind):
_ = " & ".join(map(lambda x: "{:0.4f}".format(x), w))
print(r"{} \\".format(_))
|
"""
ptime.format
~~~~~~~~~~~~
:copyright: (c) 2013 by Marat Ibadinov.
:license: MIT, see LICENSE for more details.
"""
import re
class FormatError(Exception):
pass
class Format(object):
TEMPLATES = {
# day #
'd': (r'\d{2}', 'day'),
'D': (r'[a-z]{3}', 'weekday'),
'j': (r'(?:[1-9])|(?:[1-3][0-9])', 'day'),
'l': (r'[a-zа-я]+', 'weekday'),
'N': (r'[1-7]', 'weekday'),
'w': (r'[0-6]', 'weekday'),
'z': (r'\d{3}', 'yearday'),
# week #
# 'W': (r'\d{1,2}', None),
# month #
'F': (r'[a-zа-я]+', 'month_name'),
'm': (r'\d{2}', 'month'),
'M': (r'[a-zа-я]{3}', 'month_abbr'),
'n': (r'(?:[1-9])|(?:1[0-2])', 'month'),
# year #
# 'o': (r'\d{4}', 'year'), # should correlate with W
'Y': (r'\d{4}', 'year'),
'y': (r'\d{2}', 'year'),
'C': (r'\d{2}', 'century'),
# time #
'a': (r'(?:am)|(?:pm)', 'ampm'),
'A': (r'(?:am)|(?:pm)', 'ampm'),
'g': (r'\d{1,2}', 'hour'),
'G': (r'\d{1,2}', 'hour'),
'h': (r'\d{2}', 'hour'),
'H': (r'\d{2}', 'hour'),
'i': (r'\d{2}', 'minute'),
's': (r'\d{2}', 'second'),
'u': (r'\d{6}', 'microsecond'),
# timezones #
'e': (r'[a-z\/]+', 'timezone'),
'O': (r'[+-]\d{4}', 'offset_hours'),
'P': (r'[+-]\d{2}:\d{2}', 'offset_hours'),
'R': (r'[+-]\d{2}:?\d{2}', 'offset_hours'),
'T': (r'[a-z]+', 'timezone'),
        'Z': (r'(?:\+?)\d+', 'offset_seconds'),
# relative #
'L': (r'(?:[a-zа-яіїєґ]+\s?)+', 'relative_day'),
'K': (r'\d+\s+(?:[a-zа-я]+\s?)+', 'days_ago')
}
def __init__(self, template):
self.template = template
regexp, attributes = self.parse_template(template)
self.regexp = re.compile(r'^%s$' % regexp, re.IGNORECASE | re.UNICODE)
self.attributes = attributes
def parse_template(self, template):
regexp = []
attributes = []
had_percent = False
for character in template:
if character == '%':
if had_percent:
regexp.append(character)
had_percent = not had_percent
continue
if had_percent:
if character not in self.TEMPLATES:
raise FormatError(
f"'%{character}' is not a valid template specifier")
pattern, attribute = self.TEMPLATES[character]
regexp.extend(['(', pattern, ')'])
attributes.append(attribute)
had_percent = False
else:
regexp.append(character)
return ''.join(regexp), attributes
def __eq__(self, other):
if not isinstance(other, Format):
return False
return self.__dict__ == other.__dict__
@classmethod
def iso8601(cls):
# not all variations of ISO-8601 datetime are supported currently
return cls(r'%Y-%m-%d(?:T|\s)%H:%i(?::%s)?(?:%R)')
@classmethod
def rfc822(cls):
return cls(r'%D, %d %M %Y %H:%i:%s %O')
@classmethod
def rfc3339(cls):
return cls(r'%Y-%m-%dT%H:%i:%s(?:\.%u)?%P')
@classmethod
def rfc850(cls):
return cls(r'%l, %d-%M-%y %H:%i:%s %T')
@classmethod
def mysql(cls):
return cls(r'%Y-%m-%d %H:%i:%s')
# RFC 822 aliases
rfc1036 = rfc822
rfc1123 = rfc822
rfc2822 = rfc822
rss = rfc822
# RFC 850 aliases
cookie = rfc850
# RFC 3339 aliases
w3c = rfc3339
atom = rfc3339
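# Minimal usage sketch (not part of the original module): the compiled regexp and the
# attribute list of a Format can be combined to pick apart a MySQL-style datetime string.
if __name__ == '__main__':
    fmt = Format.mysql()
    match = fmt.regexp.match('2013-05-01 13:07:04')
    if match:
        print(dict(zip(fmt.attributes, match.groups())))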
|
# coding=utf-8
from sqlalchemy import create_engine, Column, String, DATETIME, DATE, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import redis
import pymysql
pymysql.install_as_MySQLdb()
# Initialize the database connection:
engine = create_engine('mysql+pymysql://root:12345678@localhost:3306/notice_information')
#=create_engine('sqlite:///universitys.db')
# Create the DBSession type:
DBSession = sessionmaker(bind=engine)
Base = declarative_base()
class Notification(Base):
"""the class map to table of crystal.notifications"""
__tablename__ = 'notifications'
    url = Column(String(100), primary_key=True)  # url of the full notice text
    title = Column(Text)  # lecture title
    college = Column(String(100))  # university hosting the lecture
    speaker = Column(String(150))  # lecture speaker
    venue = Column(String(100))  # lecture venue
    time = Column(String(50))  # lecture time
notify_time = Column(String(50))
class Seed(Base):
"""the class map to table of crystal.seeds"""
__tablename__ = 'seeds'
    start_url = Column(String(100), primary_key=True)  # academic-notice url of the school/college to crawl
    college = Column(String(100))  # name of the school/college to crawl
    url_xpath = Column(String(100))  # xpath of each notice hyperlink on the notice list page
    nextpage_xpath = Column(String(100))  # xpath of the next-page link on the notice list page
    title_word = Column(String(100))  # keywords used to pick out the notices we need
    notice_time_xpath = Column(String(150))  # xpath rule for the notice publication time
    title = Column(String(100))  # pattern for matching the notice title
    speaker = Column(String(100))  # pattern for matching the lecture speaker
    venue = Column(String(100))  # pattern for matching the lecture venue
    time = Column(String(300))  # pattern for matching the lecture time
    text_xpath = Column(String(100))  # xpath rule for the notice body on the detail page
Base.metadata.create_all(engine)
|
def splitInteger(num, parts):
""" split_integer == PEP8 (forced mixedCase by CodeWars) """
quo, rem = divmod(num, parts)
if rem == 0:
return [quo] * parts
    return [quo if a > rem else quo + 1 for a in range(parts, 0, -1)]
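# Minimal usage sketch (not part of the original kata solution): the parts differ by at most
# one and always sum back to num.
if __name__ == '__main__':
    print(splitInteger(10, 4))  # [2, 2, 3, 3]
    print(splitInteger(12, 3))  # [4, 4, 4]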
|
import logging
from zentral.core.events import event_cls_from_type, register_event_type
from zentral.core.events.base import BaseEvent, EventMetadata, EventRequest
logger = logging.getLogger('zentral.contrib.inventory.events')
ALL_EVENTS_SEARCH_DICT = {"tag": "machine"}
class EnrollmentSecretVerificationEvent(BaseEvent):
event_type = 'enrollment_secret_verification'
register_event_type(EnrollmentSecretVerificationEvent)
# Inventory update events
class InventoryHeartbeat(BaseEvent):
event_type = 'inventory_heartbeat'
namespace = "machine"
tags = ['heartbeat', 'machine']
register_event_type(InventoryHeartbeat)
class AddMachine(BaseEvent):
event_type = 'add_machine'
namespace = "machine"
tags = ['machine']
register_event_type(AddMachine)
for attr in ('link',
'business_unit',
'group',
'os_version',
'system_info',
'disk',
'network_interface',
'osx_app_instance',
'deb_package',
'program_instance',
'teamviewer',
'puppet_node',
'principal_user',
'certificate'):
for action in ("add", "remove"):
event_type = f"{action}_machine_{attr}"
event_class_name = "".join(s.title() for s in event_type.split('_'))
event_class = type(
event_class_name,
(BaseEvent,),
{'event_type': event_type,
'namespace': 'machine',
'tags': ['machine', 'machine_update']}
)
register_event_type(event_class)
def post_inventory_events(msn, events):
for index, (event_type, created_at, data) in enumerate(events):
event_cls = event_cls_from_type(event_type)
metadata = EventMetadata(event_cls.event_type,
namespace=event_cls.namespace,
machine_serial_number=msn,
index=index,
created_at=created_at,
tags=event_cls.tags)
event = event_cls(metadata, data)
event.post()
def post_enrollment_secret_verification_failure(model,
user_agent, public_ip_address, serial_number,
err_msg, enrollment_secret):
event_cls = EnrollmentSecretVerificationEvent
metadata = EventMetadata(event_cls.event_type,
machine_serial_number=serial_number,
request=EventRequest(user_agent, public_ip_address),
tags=event_cls.tags)
payload = {"status": "failure",
"reason": err_msg,
"type": model}
if enrollment_secret:
obj = getattr(enrollment_secret, model)
payload.update(obj.serialize_for_event())
event = event_cls(metadata, payload)
event.post()
def post_enrollment_secret_verification_success(request, model):
obj = getattr(request.enrollment_secret, model)
event_cls = EnrollmentSecretVerificationEvent
metadata = EventMetadata(event_cls.event_type,
machine_serial_number=request.serial_number,
request=EventRequest(request.user_agent, request.public_ip_address),
tags=event_cls.tags)
payload = {"status": "success",
"type": model}
payload.update(obj.serialize_for_event())
event = event_cls(metadata, payload)
event.post()
|
from .components import *
from .window import *
from .factory import *
|
from django.test import TestCase
from django.contrib.auth.models import User
from rest_framework import views
from author.models import *
from Followers.models import *
from posts.models import *
from comment.models import *
from logging import exception
from django.http import response
from django.http import request
from django.http.request import HttpRequest
# Create your tests here.
from django.contrib.auth import get_user_model
User = get_user_model()
class CommentTestCase(TestCase):
def setUp(self):
response = self.client.get("")
self.request = response.wsgi_request
self.usr1 = self.create_author("test_user1", "test_user1",
"test_user1@github", "", "test_password1")
self.usr2 = self.create_author("test_user2", "test_user2",
"test_user2@github", "", "test_password2")
self.usr3 = self.create_author("test_user3", "test_user3",
"test_user3@github", "", "test_password3")
self.post = self.create_post(self.usr1, "test_title", "test_content")
self.comment = self.create_comment(self.usr2, self.post, "test_content")
def create_author(self, username, display_name, github, profile_image, password):
author = Author.objects.create_superuser(userName=username, displayName=display_name, github=github,
profileImage=profile_image, password=password)
author.save()
return author
def create_post(self, author, title, content):
post = Post.objects.create(author=author, title=title, content=content)
post.save()
return post
def create_comment(self, author, post, content):
comment = Comment.objects.create(author=author, post=post, content=content)
comment.save()
return comment
def get_comments_of_post(self, post):
str_id1 = str(self.usr1.id)
post_id = str(self.post.id)
response = self.client.get(f"/api/author/{str_id1}/posts/{post_id}")
assert 100 < response.status_code < 300
def create_comment_through_request(self):
str_id1 = str(self.usr1.id)
post_id = str(self.post.id)
data = {
"type":"comment",
"author":{
"type":"author",
# ID of the Author (UUID)
"id":str_id1,
# url to the authors information
"url":str_id1,
"host": "https://project-api-404.herokuapp.com/api",
"displayName":str(self.usr1.displayName),
# HATEOS url for Github API
"github": str(self.usr1.github),
# Image from a public domain
"profileImage": str(self.usr1.github)
},
"comment":"Sick Olde English",
"contentType":"text/markdown",
# ISO 8601 TIMESTAMP
"published":"2015-03-09T13:07:04+00:00",
# ID of the Comment (UUID)
"id": self.comment.id,
}
response = self.client.post(f"/api/author/{str_id1}/posts/{post_id}/comments", data)
assert 100 < response.status_code < 300
|
from __future__ import print_function # For printing no newline
import sympy
from sympy import Rational
from sympy import factorial
import numpy as np
def Taylor( n, dx ):
"""Compyute n terms in the Taylor expansion for a function centered 'dx'
away from where the terms will be evaluated."""
return [ (dx)**j/factorial(j) for j in range(n) ]
def compute_derivs( u_stencil, index, dx ):
"""Compute all of the derivatives of u_stencil. Index refers to the
location where these values will be computed. For example,
u_stencil = [u0, u1, u2] and index = 0
will compute all of the derivatives at u0, u0_x, u0_xx.
However, index = 0.5 will compute the derivatives u1/2, u1/2_x and
u1/2_xx.
"""
# Set up matrix that needs to be inverted
n = len( u_stencil )
A = sympy.Matrix( n, n, lambda i,j : 0 )
for i in range(n):
t = Taylor( n, (i-index)*dx )
for j in range(n):
A[i,j] = t[j]
return A.inv()
def compute_poly_fit( u_stencil, x ):
"""Compute an expression for the polynomial approximation (in the variable
x) to the given stencil.
This polynomial fits points (x,p(x)) through
( 1, u[0] ), ( 2, u[1]), ( 3, u[2]), \dots
"""
from sympy.polys.polyfuncs import interpolate
return sympy.poly( interpolate( u_stencil, x ) )
def compute_h1_norm( u_stencil, indices ):
"""Compute the H1-norm of a given stencil. This routine computes the
integral
beta = \sum_l dx**(2l-1) \int_{xl}^{xr} d^l/dx^l p(x)\, dx.
that measures the smoothness of a polynomial that fits points
( x_0, u[0] ), (x_1, u[1]), (x_2, u[2]), \dots
    The parameters xl,xr = indices[:] are used to define the interval for
integration.
"""
from sympy.polys.polyfuncs import interpolate
from sympy.abc import xi
# Fit a polynomial through the whole stencil.
# This fits the points, (x=1, p[0]), (x=2, p[1]), ...
# p = compute_poly_fit( u_stencil, xi )
p = sympy.poly( interpolate( u_stencil, xi ), xi )
print(p)
# dp = sympy.diff( p, xi )
# dpsqd = dp**2
# d2p = sympy.diff( dp, xi )
# d2psqd = d2p**2
# print( ' ' )
# print( u_stencil )
# print( p.subs( xi, 1 ), p.subs( xi, 2 ), p.subs( xi, 3 ) )
# print('p = ', p )
# print('dp = ', dp )
# print('d2p = ', d2p )
# print(' ')
# tmp = dpsqd.integrate( (xi, indices[0], indices[1] ) )
# tmp = tmp + d2psqd.integrate( (xi, indices[0], indices[1] ) )
# return tmp
tmp = 0
for mp in range( len( u_stencil ) ):
tmp = tmp + p.integrate( xi ).eval( xi, indices[1] ) - p.integrate( xi ).eval( xi, indices[0] )
p = p.diff( xi )
return tmp
# (uniform) grid spacing
dx = sympy.symbols("dx")
uim3 = sympy.symbols("uim3")
uim2 = sympy.symbols("uim2")
uim1 = sympy.symbols("uim1")
ui = sympy.symbols("ui")
uip1 = sympy.symbols("uip1")
uip2 = sympy.symbols("uip2")
uip3 = sympy.symbols("uip3")
u_stencil = [ uim3, uim2, uim1, ui, uip1, uip2, uip3 ]
# Compute derivative using the whole stencil:
gamma = compute_derivs( u_stencil, 2, dx )
# Four sub-stencils (of length four)
u0 = [uim3, uim2, uim1, ui ]
u1 = [uim2, uim1, ui, uip1]
u2 = [uim1, ui, uip1, uip2]
u3 = [ui, uip1, uip2, uip3]
u = [u0, u1, u2, u3 ]
# Three Lagrange polynomials and their derivatives:
beta0 = compute_h1_norm( [uim2, uim1, ui], (Rational(5,2), Rational(7,2) ) )
beta1 = compute_h1_norm( [uim1, ui, uip1], (Rational(3,2), Rational(5,2) ) )
beta2 = compute_h1_norm( [ui, uip1, uip2], (Rational(1,2), Rational(3,2) ) )
print('beta0 = ', beta0 )
print('beta1 = ', beta1 )
print('beta2 = ', beta2 )
# Now, work out 2nd-derivative using larger stencil
# Smoothness indicators
# eps = sympy.symbols("epsilon")
# beta = [None]*3
# beta[0] = Rational(13,12)*(uim2-2*uim1+ui)**2 + Rational(1,4)*(uim2-4*uim1+3*ui)**2
# beta[1] = Rational(13,12)*(uim1-2*ui+uip1)**2 + Rational(1,4)*(uim1-uip1)**2
# beta[2] = Rational(13,12)*(ui-2*uip1+uip2)**2 + Rational(1,4)*(3*ui-4*uip1+uip2)**2
|
r"""
===============================================================================
Submodule -- throat_shape_factor
===============================================================================
"""
import scipy as _sp
def compactness(geometry, throat_perimeter='throat.perimeter',
throat_area='throat.area', **kwargs):
r"""
    Mortensen et al. have shown that the Hagen-Poiseuille hydraulic resistance
    is linearly dependent on the compactness, defined as perimeter^2/area.
    The dependence is not universal, as shapes with sharp corners provide more
    resistance than more elliptical ones, so the number of vertices is counted
    and the corresponding correction is applied.
"""
# Only apply to throats with an area
ts = geometry.throats()[geometry[throat_area] > 0]
P = geometry[throat_perimeter]
A = geometry[throat_area]
C = _sp.ones(geometry.num_throats())
C[ts] = P[ts]**2/A[ts]
verts = geometry['throat.offset_vertices']
alpha = _sp.ones_like(C)
for i in ts:
if len(verts[i]) == 3:
# Triangular Correction
alpha[i] = C[i]*(25/17) + (40*_sp.sqrt(3)/17)
elif len(verts[i]) == 4:
# Rectangular Correction
alpha[i] = C[i]*(22/7) - (65/3)
elif len(verts[i]) > 4:
# Approximate Elliptical Correction
alpha[i] = C[i]*(8/3) - (8*_sp.pi/3)
# For a perfect circle alpha = 8*pi so normalize by this
alpha /= 8*_sp.pi
# Very small throats could have values less than one
alpha[alpha < 1.0] = 1.0
return alpha
def mason_morrow(geometry, throat_perimeter='throat.perimeter',
throat_area='throat.area', **kwargs):
r"""
    Mason and Morrow relate the capillary pressure to the shape factor in a
    similar way to Mortensen, but for triangles.
Ref:
Mason, G. and Morrow, N.R., 1991. Capillary behavior of a perfectly wetting
liquid in irregular triangular tubes. Journal of Colloid and Interface
Science, 141(1), pp.262-274.
"""
    # Throats without an area fall back to the perfect-circle value below
ts = geometry.throats()[geometry[throat_area] <= 0]
P = geometry[throat_perimeter]
A = geometry[throat_area]
value = A/(P**2)
value[ts] = 1/(4*_sp.pi)
return value
def jenkins_rao(geometry, throat_perimeter='throat.perimeter',
throat_area='throat.area',
throat_diameter='throat.indiameter',
**kwargs):
r"""
    Jenkins and Rao relate the capillary pressure in an elliptical throat to
    the aspect ratio.
Ref:
Jenkins, R.G. and Rao, M.B., 1984. The effect of elliptical pores on
mercury porosimetry results. Powder technology, 38(2), pp.177-180.
"""
P = geometry[throat_perimeter]
A = geometry[throat_area]
r = geometry[throat_diameter]/2
# Normalized by value for perfect circle
value = (P/A)/(2/r)
return value
|
# coding=utf-8
import unittest
from mem_dixy.tag.alphabet import logical
from mem_dixy.Unicode.U0000 import *
from enum import Enum
class enum_comparison(Enum):
    lt = 0
    le = 1
    eq = 2
    ne = 3
    ge = 4
    gt = 5
    sa = 6
    sn = 7
class operator():
pass
class comparison(operator):
def __init__(self, symbol):
self.symbol = symbol
def __str__(self):
        return self.symbol
class lt(comparison):
def __init__(self):
super().__init__(str().join([LESS_THAN_SIGN]))
class le(comparison):
def __init__(self):
super().__init__(str().join([LESS_THAN_SIGN, EQUALS_SIGN]))
class eq(comparison):
def __init__(self):
super().__init__(str().join([EQUALS_SIGN]))
class ne(comparison):
def __init__(self):
super().__init__(str().join([EXCLAMATION_MARK, EQUALS_SIGN]))
class ge(comparison):
def __init__(self):
super().__init__(str().join([GREATER_THAN_SIGN, EQUALS_SIGN]))
class gt(comparison):
def __init__(self):
super().__init__(str().join([GREATER_THAN_SIGN]))
class sa(comparison):
def __init__(self):
super().__init__(str().join(
[LESS_THAN_SIGN, EQUALS_SIGN, GREATER_THAN_SIGN]))
class sn(comparison):
def __init__(self):
super().__init__(str().join([EXCLAMATION_MARK]))
final_encoding = {
enum_comparison.lt: str().join([LESS_THAN_SIGN]),
enum_comparison.le: str().join([LESS_THAN_SIGN, EQUALS_SIGN]),
enum_comparison.eq: str().join([EQUALS_SIGN]),
enum_comparison.ne: str().join([EXCLAMATION_MARK, EQUALS_SIGN]),
enum_comparison.ge: str().join([GREATER_THAN_SIGN, EQUALS_SIGN]),
enum_comparison.gt: str().join([GREATER_THAN_SIGN]),
enum_comparison.sa: str().join([LESS_THAN_SIGN, EQUALS_SIGN, GREATER_THAN_SIGN]),
enum_comparison.sn: str().join([EXCLAMATION_MARK]),
}
all_encoding = {
0x0: sa(), # ____
0x1: gt(), # ___>
0x2: eq(), # __=_
0x3: ge(), # __=>
0x4: lt(), # _<__
0x5: ne(), # _<_>
0x6: le(), # _<=_
0x7: sa(), # _<=>
0x8: sn(), # !___
0x9: le(), # !__>
0xA: ne(), # !_=_
0xB: lt(), # !_=>
0xC: ge(), # !<__
0xD: eq(), # !<_>
0xE: gt(), # !<=_
0xF: sn(), # !<=>
}
def add_token(array):
index = 0
index |= EXCLAMATION_MARK in array
index <<= 1
index |= LESS_THAN_SIGN in array
index <<= 1
index |= EQUALS_SIGN in array
index <<= 1
index |= GREATER_THAN_SIGN in array
return all_encoding.get(index)
class check_comparison(unittest.TestCase):
def test_EMPTY(self):
t = []
self.assertTrue(isinstance(add_token(t), sa))
def test_EQUALS_SIGN(self):
t = [EQUALS_SIGN]
self.assertTrue(isinstance(add_token(t), eq))
def test_EQUALS_SIGN__EXCLAMATION_MARK(self):
t = [EQUALS_SIGN, EXCLAMATION_MARK]
self.assertTrue(isinstance(add_token(t), ne))
def test_EQUALS_SIGN__EXCLAMATION_MARK__GREATER_THAN_SIGN(self):
t = [EQUALS_SIGN, EXCLAMATION_MARK, GREATER_THAN_SIGN]
self.assertTrue(isinstance(add_token(t), lt))
def test_EQUALS_SIGN__EXCLAMATION_MARK__GREATER_THAN_SIGN__LESS_THAN_SIGN(self):
t = [EQUALS_SIGN, EXCLAMATION_MARK,
GREATER_THAN_SIGN, LESS_THAN_SIGN]
self.assertTrue(isinstance(add_token(t), sn))
def test_EQUALS_SIGN__EXCLAMATION_MARK__LESS_THAN_SIGN(self):
t = [EQUALS_SIGN, EXCLAMATION_MARK, LESS_THAN_SIGN]
self.assertTrue(isinstance(add_token(t), gt))
def test_EQUALS_SIGN__GREATER_THAN_SIGN(self):
t = [EQUALS_SIGN, GREATER_THAN_SIGN]
self.assertTrue(isinstance(add_token(t), ge))
def test_EQUALS_SIGN__GREATER_THAN_SIGN__LESS_THAN_SIGN(self):
t = [EQUALS_SIGN, GREATER_THAN_SIGN, LESS_THAN_SIGN]
self.assertTrue(isinstance(add_token(t), sa))
def test_EQUALS_SIGN__LESS_THAN_SIGN(self):
t = [EQUALS_SIGN, LESS_THAN_SIGN]
self.assertTrue(isinstance(add_token(t), le))
def test_EXCLAMATION_MARK(self):
t = [EXCLAMATION_MARK]
self.assertTrue(isinstance(add_token(t), sn))
def test_EXCLAMATION_MARK__GREATER_THAN_SIGN(self):
t = [EXCLAMATION_MARK, GREATER_THAN_SIGN]
self.assertTrue(isinstance(add_token(t), le))
def test_EXCLAMATION_MARK__GREATER_THAN_SIGN__LESS_THAN_SIGN(self):
t = [EXCLAMATION_MARK, GREATER_THAN_SIGN, LESS_THAN_SIGN]
self.assertTrue(isinstance(add_token(t), eq))
def test_EXCLAMATION_MARK__LESS_THAN_SIGN(self):
t = [EXCLAMATION_MARK, LESS_THAN_SIGN]
self.assertTrue(isinstance(add_token(t), ge))
def test_GREATER_THAN_SIGN(self):
t = [GREATER_THAN_SIGN]
self.assertTrue(isinstance(add_token(t), gt))
def test_GREATER_THAN_SIGN__LESS_THAN_SIGN(self):
t = [GREATER_THAN_SIGN, LESS_THAN_SIGN]
self.assertTrue(isinstance(add_token(t), ne))
def test_LESS_THAN_SIGN(self):
t = [LESS_THAN_SIGN]
self.assertTrue(isinstance(add_token(t), lt))
def testEQUALS_SIGN__EXCLAMATION_MARK__GREATER_THAN_SIGN__LESS_THAN_SIGN(self):
array = [
EQUALS_SIGN, EXCLAMATION_MARK, GREATER_THAN_SIGN, LESS_THAN_SIGN
]
token = add_token(array)
self.assertTrue(isinstance(token, sn))
if __name__ == '__main__':
unittest.main()
|
from django import forms
from django.core.validators import MinValueValidator, MaxValueValidator
from orchids.models import Orchid, Greenhouse
class NumberForm(forms.Form):
number = forms.IntegerField(label='Number of days to simulate', initial=1,
validators=[MinValueValidator(0), MaxValueValidator(20)])
class OrchidForm(forms.ModelForm):
class Meta:
model = Orchid
fields = ['name', 'preferred_weather', 'original_days_in_andes',
'original_days_in_coast', 'number', 'min_temperature', 'max_temperature', ]
labels = {
"name": "Identifier",
"original_days_in_andes": "Number of days in andes",
"original_days_in_coast": "Number of days in coast"
}
help_texts = {
'name': 'alphanumeric identifier for the orchid batch',
'number': 'number of plants in the batch',
'min_temperature': 'min recommended temperature',
'max_temperature': 'max recommended temperature',
}
class GreenhouseForm(forms.ModelForm):
class Meta:
model = Greenhouse
fields = ['name', 'weather', 'temperature', 'capacity', ]
labels = {
"name": "Identifier",
}
help_texts = {
'name': 'alphanumeric identifier for the greenhouse',
'capacity': 'Number of plants that fit the greenhouse',
}
|
import pytorch_lightning as pl
import sys
sys.path.append('./models')
import os
import optuna
from optuna.integration import PyTorchLightningPruningCallback
from pytorch_lightning import Trainer, seed_everything
from argparse import ArgumentParser
from pytorch_lightning.loggers import TensorBoardLogger, TestTubeLogger
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import Callback
from models.fomm import FOMM, FOMMAtt
from models.ssm.ssm import SSM, SSMAtt
from models.ssm.sdmm import SDMM
from models.ssm.ssm_baseline import SSMBaseline
from models.rnn import GRU
from models.sfomm import SFOMM
from distutils.util import strtobool
'''
Name: main_trainer.py
Purpose: High-level training script
Usage: Not meant to use directly, but rather will be called by launch_run.py.
'''
class MetricsCallback(Callback):
"""PyTorch Lightning metric callback."""
def __init__(self):
super().__init__()
self.metrics = []
self.train_loss = []
def on_validation_end(self, trainer, pl_module):
self.metrics.append(trainer.callback_metrics['val_loss'].item())
self.train_loss.append(trainer.callback_metrics['loss'].item())
def objective(trial, args):
dict_args = vars(args)
# pick model
if args.model_name == 'fomm':
model = FOMM(trial, **dict_args)
elif args.model_name == 'fomm_att':
model = FOMMAtt(trial, **dict_args)
elif args.model_name == 'ssm':
model = SSM(trial, **dict_args)
elif args.model_name == 'ssm_att':
model = SSMAtt(trial, **dict_args)
elif args.model_name == 'ssm_baseline':
model = SSMBaseline(trial, **dict_args)
elif args.model_name == 'gru':
model = GRU(trial, **dict_args)
elif args.model_name == 'sfomm':
model = SFOMM(trial, **dict_args)
elif args.model_name == 'sdmm':
model = SDMM(trial, **dict_args)
metrics_callback = MetricsCallback()
if args.ckpt_path != 'none':
checkpoint_callback = ModelCheckpoint(filepath=args.ckpt_path + str(args.nsamples_syn) + str(args.fold) + str(args.dim_stochastic) + '_' + args.ttype + '_{epoch:05d}-{val_loss:.2f}')
else:
checkpoint_callback = False
trainer = Trainer.from_argparse_args(args,
deterministic=True,
logger=False,
gpus=[args.gpu_id],
checkpoint_callback=checkpoint_callback,
callbacks=[metrics_callback],
# early_stop_callback=PyTorchLightningPruningCallback(trial, monitor='val_loss')
early_stop_callback=False,
progress_bar_refresh_rate=1
)
trainer.fit(model)
return min([x for x in metrics_callback.metrics]), metrics_callback.metrics, metrics_callback.train_loss
if __name__ == '__main__':
parser = ArgumentParser()
# figure out which model to use and other basic params
parser.add_argument('--model_name', type=str, default='sdmm', help='fomm, ssm, gru or sdmm')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=strtobool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--gpu_id', default=1, type=int)
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='mm', type=str)
parser.add_argument('--loss_type', type=str, default='unsup')
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--nsamples_syn', default=1000, type=int, help='number of training samples for synthetic data')
parser.add_argument('--ss_missing', type=strtobool, default=False, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=False, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
parser.add_argument('--att_mask', type=strtobool, default=False, help='set to True for SSMAtt and FOMMAtt')
parser.add_argument('--bs', default=600, type=int, help='batch size')
parser.add_argument('--fold', default=1, type=int)
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--optuna', type=strtobool, default=False, help='whether to use optuna for optimization')
parser.add_argument('--num_optuna_trials', default=100, type=int)
parser.add_argument('--include_baseline', type=str, default='all')
parser.add_argument('--include_treatment', type=str, default='lines')
parser.add_argument('--ckpt_path', type=str, default='none')
parser.add_argument('--cluster_run', type=strtobool, default=True)
parser.add_argument('--data_dir', type=str, \
default='/afs/csail.mit.edu/u/z/zeshanmh/research/ief/data/ml_mmrf/ml_mmrf/output/cleaned_mm_fold_2mos.pkl')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
seed_everything(temp_args.seed)
# let the model add what it wants
if temp_args.model_name == 'fomm':
parser = FOMM.add_model_specific_args(parser)
elif temp_args.model_name == 'fomm_att':
parser = FOMMAtt.add_model_specific_args(parser)
elif temp_args.model_name == 'ssm':
parser = SSM.add_model_specific_args(parser)
elif temp_args.model_name == 'ssm_att':
parser = SSMAtt.add_model_specific_args(parser)
elif temp_args.model_name == 'ssm_baseline':
parser = SSMBaseline.add_model_specific_args(parser)
elif temp_args.model_name == 'gru':
parser = GRU.add_model_specific_args(parser)
elif temp_args.model_name == 'sfomm':
parser = SFOMM.add_model_specific_args(parser)
elif temp_args.model_name == 'sdmm':
parser = SDMM.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
args = parser.parse_args()
# train
if args.optuna:
pruner = optuna.pruners.MedianPruner()
study = optuna.create_study(direction='minimize', pruner=pruner, study_name=args.fname)
study.optimize(lambda trial: objective(trial, args), n_trials=args.num_optuna_trials)
print('Number of finished trials: {}'.format(len(study.trials)))
print('Best trial:')
trial = study.best_trial
print(' Value: {}'.format(trial.value))
print(' Params: ')
for key, value in trial.params.items():
print(' {}: {}'.format(key, value))
if not os.path.exists('./tbp_logs/optuna_logs'):
os.mkdir('./tbp_logs/optuna_logs')
with open(os.path.join('./tbp_logs/optuna_logs/', args.fname + '.txt'), 'a') as fi:
fi.write('[OPTUNA STUDY]\n')
fi.write('command line args: ' + ' '.join(sys.argv[1:]) + '\n\n')
fi.write(f'\tNumber of finished trials: {len(study.trials)}\n')
fi.write(f'\tBest trial:\n')
fi.write(f'\t\tValue: {trial.value}\n')
for k,v in trial.params.items():
fi.write(f'\t\t{k}: {v}\n')
else:
trial = optuna.trial.FixedTrial({'bs': args.bs, 'lr': args.lr, 'C': args.C, 'reg_all': args.reg_all, 'reg_type': args.reg_type, 'dim_stochastic': args.dim_stochastic})
best_nelbo, val_nelbos, train_losses = objective(trial, args)
# import pdb; pdb.set_trace()  # leftover debugging breakpoint, disabled so the script runs unattended
print(f'BEST_NELBO: {best_nelbo}')
## TODO for Linden: save val_nelbos and train_losses to .csv and plot them ##
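# --- Hedged sketch (not part of the original script) addressing the TODO above. ---
# It assumes `val_nelbos` and `train_losses` are plain lists of floats as returned
# by objective(); only pandas and matplotlib are used for saving and plotting.
def save_and_plot_curves(train_losses, val_nelbos, out_prefix='curves'):
    import pandas as pd
    import matplotlib.pyplot as plt
    # Align both curves on an epoch index; the shorter list is padded with NaN.
    df = pd.DataFrame({'train_loss': pd.Series(train_losses),
                       'val_nelbo': pd.Series(val_nelbos)})
    df.to_csv(out_prefix + '.csv', index_label='epoch')
    # Plot both curves on one axis and save the figure next to the csv.
    fig, ax = plt.subplots()
    df.plot(ax=ax)
    ax.set_xlabel('epoch')
    fig.savefig(out_prefix + '.png', bbox_inches='tight')
    plt.close(fig)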
|
import requests
from bs4 import BeautifulSoup
import re
import os
import pandas as pd
def get_useful_columns(result):
"""
Derive GD (goal difference = FTHG - FTAG) and keep only the useful columns:
Date, HomeTeam, AwayTeam, FTHG/FTAG (full-time home/away goals), GD,
HST/AST (home/away shots on target), B365H/B365D/B365A (Bet365 home/draw/away odds)
and FTR (full-time result).
"""
result["GD"] = result["FTHG"] - result["FTAG"]
return result[["Date", "HomeTeam", "AwayTeam",
"FTHG", "FTAG", "GD",
"HST", "AST",
"B365H", "B365D", "B365A", "FTR"
]]
def get_csv_files(from_year, to_year):
"""
Scrape football-data.co.uk for historic results (including recently played games),
using BeautifulSoup to locate the CSV files of the desired leagues;
the CSV files are then stored locally.
"""
base_url = "http://www.football-data.co.uk/"
competition_urls = {
base_url + "englandm.php": "E0",
base_url + "spainm.php": "SP1",
base_url + "germanym.php": "D1",
base_url + "italym.php": "I1",
base_url + "francem.php": "F1"
}
new_directory = "historic"
for url, competition in competition_urls.items():
request = requests.get(url)
soup = BeautifulSoup(request.content, features="html.parser")
allsearch = ""
complete_urls = []
# reading all the links on the selected page.
for link in soup.find_all('a'):
mysearch = link.get('href')
allsearch = allsearch + ' ' + mysearch
array = allsearch.split()
season_files = [
file for file in array
if re.search(
"^mmz.*.{}.csv$".format(competition), str(file)
)
]
for file in (season_files):
url = base_url + str(file)
complete_urls.append(url)
# keep the first (to_year - from_year) season files found on the page
chosen_urls = complete_urls[:to_year - from_year]
readings = pd.DataFrame()
for url in chosen_urls:
reader = pd.read_csv(url, sep=',', header=0, error_bad_lines=False)
readings = readings.append(reader)
if not os.path.exists(new_directory):
os.mkdir(new_directory)
readings = get_useful_columns(readings)
filename = new_directory + "/" + competition + ".csv"
readings.to_csv(filename, index=False)
if __name__ == "__main__":
get_csv_files(2010, 2022)
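# --- Hedged usage sketch (not part of the original script). ---
# Assumes get_csv_files() has already written historic/<competition>.csv with the
# columns chosen in get_useful_columns(); it simply reports the home-win rate.
def summarise_home_advantage(path="historic/E0.csv"):
    data = pd.read_csv(path)
    home_win_rate = (data["FTR"] == "H").mean()
    print("Home win rate in {}: {:.1%}".format(path, home_win_rate))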
|
import os
if os.name == 'nt':
os.environ['basedir_a'] = 'F:/Temp2/'
else:
os.environ['basedir_a'] = '/gpfs/home/cj3272/tmp/'
import luccauchon.data.C as C
import PIL
import keras
from samples.amateur.config import AmateurInference
from os import listdir
from os.path import isfile, join
from sklearn.model_selection import train_test_split
import luccauchon.data.dataset_util as dataset_util
import luccauchon.data.__MYENV__ as E  # Loads environment variables into memory.
import logging
E.APPLICATION_LOG_LEVEL = logging.DEBUG
if C.is_running_on_casir():
E.APPLICATION_LOG_LEVEL = logging.INFO
log = E.setup_logger(logger_name=__name__, _level=E.APPLICATION_LOG_LEVEL)
import mrcnn.model as model_lib
from mrcnn import utils
import numpy as np
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# set tf backend to allow memory to grow, instead of claiming everything
import tensorflow as tf
def get_session():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
def compute_batch_ap(model, dataset, image_ids, config, verbose=1):
assert isinstance(model, model_lib.MaskRCNN)
APs = []
buckets = [image_ids[i:i + config.BATCH_SIZE] for i in range(0, len(image_ids), config.BATCH_SIZE)]
for images_id in buckets:
if len(images_id) != config.BATCH_SIZE:
continue
images = []
images_meta = []
for image_id in images_id:
# Load image
log.debug('loading image %s' % image_id)
image, image_meta, gt_class_id, gt_bbox, gt_mask = model_lib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
images.append(image)
images_meta.append(image_meta)
# Run object detection
results = model.detect_molded(np.stack(images, axis=0), np.stack(images_meta, axis=0), verbose=0)
assert config.BATCH_SIZE == len(results)
# Compute AP over range 0.5 to 0.95
for r, image_id, image_meta in zip(results, images_id, images_meta):
ap = utils.compute_ap_range(gt_bbox, gt_class_id, gt_mask, r['rois'], r['class_ids'], r['scores'], r['masks'], verbose=0)
APs.append(ap)
if verbose:
info = dataset.image_info[image_id]
meta = model_lib.parse_image_meta(image_meta[np.newaxis, ...])
log.debug("{:3} {} AP: {:.2f}".format(meta["image_id"][0], meta["original_image_shape"][0], ap))
return APs
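# --- Hedged sketch (not part of the original script) of the bucketing used in
# compute_batch_ap() above: image ids are grouped into fixed-size batches and any
# incomplete trailing batch is skipped, so every detect_molded() call receives
# exactly config.BATCH_SIZE images.
def make_full_batches(ids, batch_size):
    buckets = [ids[i:i + batch_size] for i in range(0, len(ids), batch_size)]
    return [bucket for bucket in buckets if len(bucket) == batch_size]

# e.g. 10 ids with batch_size=4 keeps two full batches and drops the last two ids
assert make_full_batches(list(range(10)), 4) == [[0, 1, 2, 3], [4, 5, 6, 7]]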
def main():
# set the modified tf session as backend in keras
keras.backend.tensorflow_backend.set_session(get_session())
log.debug('keras.__version__=' + str(keras.__version__))
log.debug('tf.__version__=' + str(tf.__version__))
log.debug('PIL.__version__=' + str(PIL.__version__))
log.debug('Using GPU ' + str(os.environ["CUDA_VISIBLE_DEVICES"]) + ' Good luck...')
if C.is_running_on_casir():
# Root directory of the project
root_dir = os.path.abspath("./Mask_RCNN")
# Directory to save logs and trained model
model_dir = '/gpfs/home/cj3272/amateur/modeles/segmentation/pfe-ireq-master/mask-rcnn/Mask_RCNN/logsV2/hq20190619T1127/'
else:
model_dir = 'F:/AMATEUR/models_mask_rcnn/hq20190619T1127/'
config = AmateurInference()
if C.is_running_on_casir():
config.BASE_DIR = '/gpfs/groups/gc014a/AMATEUR/dataset/segmentation/30MAI2019/GEN_segmentation/'
config.IMAGES_PER_GPU = 12
else:
config.BASE_DIR = 'F:/AMATEUR/segmentation/13JUIN2019/GEN_segmentation/'
config.IMAGES_PER_GPU = 12
seed = 11
test_size = 0.99
image_ids = [f for f in listdir(config.BASE_DIR) if isfile(join(config.BASE_DIR, f)) and f.endswith('gz')]
_, test_list = train_test_split(image_ids, test_size=test_size, random_state=seed)
log.info('found %s files for test in %s' % (str(len(test_list)), config.BASE_DIR))
# Test dataset
# same dataset class on both environments
dataset_test = dataset_util.AmateurDatasetOnDiskMRCNN()
dataset_test.load(config.BASE_DIR, test_list, n=config.IMAGES_PER_GPU * 10)
dataset_test.prepare()
log.info(str(len(dataset_test.image_ids)) + ' images for testing.')
# Recreate the model in inference mode
model = model_lib.MaskRCNN(mode="inference", config=config, model_dir=model_dir)
for epoch in range(1, 150, 1):
model_path = model_dir + ('/mask_rcnn_hq_{:04d}.h5'.format(epoch))
if not os.path.isfile(model_path):
continue
log.debug('Loading weights of ' + model_path + '...')
model.load_weights(model_path, by_name=True)
log.debug('Computing for model %s...' % model_path)
aps_test = compute_batch_ap(model, dataset_test, dataset_test.image_ids, config, verbose=1)
log.info(
"[{}] Mean mAP over {} tests images: {:.4f} (computed mAP over a range of IoU thresholds [0.5,0.95,0.05])".format(model_path, len(aps_test), np.mean(aps_test)))
if __name__ == "__main__":
main()
|
#Import modules for main GUI program
import vienna_config_windows
from vienna_config_windows import gs_path, find_gs
import os, sys, subprocess, shutil, time
import tkinter
from tkinter import *
from tkinter import messagebox
from tkinter.filedialog import askopenfile
from PIL import Image, ImageTk, EpsImagePlugin
#Define functions for the GUI
#This function shows the widget
def display(widget1):
widget1.grid()
#This function hides the widget
def remove(widget1):
widget1.grid_remove()
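#Note (added): grid_remove() hides a widget but remembers its grid options, so a
#later plain grid() call (as display() does) restores it in the same row/column
#with the same padding; grid_forget() would discard those options instead.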
#This function opens the browse window and allows only fasta
#and text files to be used. When file selected add to the
#textbox
def browse():
file = askopenfile(mode='rb', title='Choose a file',
filetypes=(('fasta files','*.fa *.fasta *.fna *.ffn *.faa *.frn'),
('text files','*.txt')))
global filepath
filepath = ""
if file:
filepath = os.path.abspath(file.name)
#print (filepath)
browse_box.delete(0, 'end')
browse_box.insert(0, filepath)
#This function opens browse window for alignment files only.
#This is specific to RNAalifold
def browse_aln():
file = askopenfile(mode='rb', title='Choose a file',
filetypes=(('Clustal files','*.aln'), ('Stockholm', '*.sto *.stk'),
('fasta files','*.fa *.fasta *.fna *.ffn *.faa *.frn'),
('MAF files', '*.maf')))
global filepath
filepath = ""
if file:
filepath = os.path.abspath(file.name)
#print (filepath)
browse_box.delete(0, 'end')
browse_box.insert(0, filepath)
#This function dictates what happens when the checkbutton widget
#is checked. When checked, hide large textbox and show the browse
#box and the browse button. When unchecked, hide browse box and
#browse button
def isChecked():
if cb.get():
remove(txt_seq)
display(browse_box)
browse_box.delete(0, 'end')
display(browse_btn)
else:
remove(browse_box)
remove(browse_btn)
display(txt_seq)
#This function will display widgets for RNAfold program
def fold_pl_select():
if txt_seq.winfo_ismapped() == True and cb_file.winfo_ismapped() == True:
remove(browse_box)
remove(browse_btn2)
remove(browse_btn)
cb.set(0)
print("textbox present")
print("checkbox present")
elif browse_box.winfo_ismapped() == True:
remove(browse_box)
remove(browse_btn2)
remove(browse_btn)
display(txt_seq)
cb.set(0)
display(cb_file)
#cb_file.grid(row=6, column=1, columnspan=5,
# sticky=W, padx=5, pady=5)
else:
cb.set(0)
print("nothing to change")
#cb_file.grid(row=6, column=1, columnspan=5, sticky=W, padx=5, pady=5)
#This function will display widgets for RNAalifold program
def aln_select():
remove(txt_seq)
remove(cb_file)
display(browse_box)
browse_box.delete(0, 'end')
display(browse_btn2)
#This function will give output after the go button is pressed,
#depending on input from text box or opening file
def go_event():
if rbtn.get()==1:
#if txt_seq is showing do the following
if txt_seq.winfo_ismapped() == True:
with open ("input.txt", "w") as usr_inp:
usr_inp.write(txt_seq.get(1.0, "end-1c"))
subprocess.run(["RNAfold.exe", "input.txt"])
#find the ps file
find_file()
#open the ps file on canvas
open_file()
#else do this instead
else:
subprocess.run(["RNAfold.exe", filepath])
#find the ps file
find_file()
#open the ps file on canvas
open_file()
elif rbtn.get()==2:
subprocess.run(["RNAalifold.exe", filepath])
#find the ps file
find_file()
#display the output in terminal
#print (output)
#open the ps file on canvas
open_file()
else:
#if txt_seq is showing do the following
if txt_seq.winfo_ismapped() == True:
with open ("input.txt", "w") as usr_inp:
usr_inp.write(txt_seq.get(1.0, "end-1c"))
output = subprocess.run("RNAplfold.exe < input.txt", shell=True)
#find the ps file
find_file()
#display the output in terminal
#print (output)
#open the ps file on canvas
open_file()
#else do this instead
else:
subprocess.run("RNAplfold.exe < %s" %filepath,
shell=True)
#find the ps file
find_file()
#display the output in terminal
#print (output)
#open the ps file on canvas
open_file()
#This function finds all .ps files in the working directory and sorts them by modification time
def find_file():
#find all .ps files in tmp folder
global find_ps
find_ps = []
list_dir = os.listdir()
for x in list_dir:
if x.endswith(".ps"):
find_ps.append(x)
#for confirmation purpose
#print(find_ps)
#This will sort the list by ascending time
find_ps = sorted(find_ps, key=os.path.getmtime)
return find_ps
#This function opens the generated .ps files with PIL (via Ghostscript), each in the default image viewer
def open_file():
#ps_window = Toplevel(window)
#Toplevel window title and dimensions
#ps_window.title("Output")
#This block of code modified by Karina#
#empty variable and list
ps_loc = ""
ps_loc_list = []
#For debugging purposes
#print(find_ps)
#For loop that loops through list and gets
#path for the ps files in tmp folder
for y in find_ps:
ps_loc = os.path.join(os.getcwd(),y)
#For debugging purposes
#print(ps_loc)
#Adds the path to ps_loc_list
ps_loc_list.append(ps_loc)
#For debugging purposes to ensure all paths
#were added to list
print(ps_loc_list)
#For loop to go through the ps_loc_list and open
#ps files.
#Ana modification: if Ghostscript is not in
#the environment variables, edit the gs binary
for p in ps_loc_list:
if find_gs == None:
#Uncomment to check the path
#print (gs_path[0])
bin_gs = os.path.join(gs_path[0],'gswin64c')
#For debugging purpose, uncomment if needed
#print (bin_gs)
EpsImagePlugin.gs_windows_binary = bin_gs
img_open = Image.open(p)
#img_w, img_h = img_open.size
img_open.show()
#Code does this if gs is in environment variable
#for windows
else:
#Open the ps file
img_open = Image.open(p)
#img_w, img_h = img_open.size
img_open.show()
#Rest is commented out to check if the above works
#prior to removal
#global img
#img = ImageTk.PhotoImage(img_open)
#Create a blank canvas
#ps_canvas = Canvas(ps_window, width = img_w, height = img_h,
# bg= "white", highlightthickness=0)
#Paste the ps file onto the canvas
#ps_canvas.create_image(0, 0, anchor="nw", image=img)
#ps_canvas.grid()
#Function to quit the program and check if user is sure they want to quit
def quit_prg():
if messagebox.askokcancel("Quit",
"Quitting will delete files\nDo you want to quit?"):
#change out of directory
os.chdir('..')
#remove tmp directory
shutil.rmtree(os.path.join(os.getcwd(),'tmp'))
#remove main GUI window
window.destroy()
#Window title and dimensions
window = Tk()
window.title ("ViennaRNA Package")
#Variables for checkbutton and radio button
cb = IntVar()
rbtn = IntVar()
#Welcome and enter sequence labels
prg_title = Label(window, text="Welcome to Vienna RNA Program",
font=("Times New Roman", 14)).grid(
row=0, columnspan=15, padx=5, pady=5)
prg_choice1 = Radiobutton(window, text="RNAfold", variable=rbtn,
value=1, command=fold_pl_select)
prg_choice1.grid(row=1, column=3)
prg_choice2 = Radiobutton(window, text="RNAalifold", variable=rbtn,
value=2, command=aln_select)
prg_choice2.grid(row=1, column=4, padx=3, pady=3)
prg_choice3 = Radiobutton(window, text="RNAplfold", variable=rbtn,
value=3, command=fold_pl_select)
prg_choice3.grid(row=1, column=5)
rbtn.set(1)
lbl_seq = Label(window, text="Enter RNA sequence: ",
font=("Times New Roman", 12)).grid(
row=2, columnspan=15, padx=5, pady=5)
#Text box and go button on main GUI window
global txt_seq, go_btn, inp_seq, quit_btn
global cb_file, browse_box, browse_btn, browse_btn2
txt_seq = Text(window, width=40, height=10)
txt_seq.grid(row=4, column=1, columnspan=5, padx=5, pady=25)
inp_seq = txt_seq.get(1.0, "end-1c")
go_btn = Button(window, text="Go", command=go_event)
go_btn.grid(row=4, column=7, padx=5, pady=10)
#Checkbutton and browse on main GUI window
cb_file = Checkbutton(window, text="To upload file, check box", variable=cb,
command= isChecked)
cb_file.grid(row=6, column=1, columnspan=5, sticky=W, padx=5, pady=5)
cb.set(0)
browse_box = Entry(window, width = 40)
browse_box.grid(row=3, column=1, columnspan=6, padx=5, pady=5)
remove(browse_box)
browse_btn = Button(window, text="Browse", command=browse)
browse_btn.grid(row=3, column=7, sticky=W, padx=5, pady=5)
remove(browse_btn)
browse_btn2 = Button(window, text="Browse", command=browse_aln)
browse_btn2.grid(row=3, column=7, sticky=W, padx=5, pady=5)
remove(browse_btn2)
#Quit button on window
quit_btn = Button(window, text="Quit", command=quit_prg)
quit_btn.grid(row=6, column=7, padx=5, pady=5)
#Delete tmp and close program
window.protocol("WM_DELETE_WINDOW", quit_prg)
#The following is needed to keep window running
window.mainloop()
|
#coding:utf-8
import os
import requests
from PIL import Image
import math
def imagesget():
os.mkdir('images')
count=0
while True:
img=requests.get('http://wsxk.hust.edu.cn/randomImage.action').content
with open('images/%s.jpeg'%count,'wb') as imgfile:
imgfile.write(img)
count+=1
if(count==100):
break
def convert_image(image):
image=image.convert('L')
image2=Image.new('L',image.size,255)
for x in range(image.size[0]):
for y in range(image.size[1]):
pix=image.getpixel((x,y))
if pix<120:
image2.putpixel((x,y),0)
return image2
def cut_image(image):
inletter=False
foundletter=False
letters=[]
start=0
end=0
for x in range(image.size[0]):
for y in range(image.size[1]):
pix=image.getpixel((x,y))
if(pix==0):
inletter=True
if foundletter==False and inletter ==True:
foundletter=True
start=x
if foundletter==True and inletter==False:
end=x
letters.append((start,end))
foundletter=False
inletter=False
images=[]
for letter in letters:
img=image.crop((letter[0],0,letter[1],image.size[1]))
images.append(img)
return images
def buildvector(image):
result={}
count=0
for i in image.getdata():
result[count]=i
count+=1
return result
class CaptchaRecognize:
def __init__(self):
self.letters=['0','1','2','3','4','5','6','7','8','9']
self.loadSet()
def loadSet(self):
self.imgset=[]
for letter in self.letters:
temp=[]
for img in os.listdir('E:/PycharmProjects/NyPython-master/CaptchaRecognise/icon/%s'%(letter)):
temp.append(buildvector(Image.open('E:/PycharmProjects/NyPython-master/CaptchaRecognise/icon/%s/%s'%(letter,img))))
self.imgset.append({letter:temp})
# Compute the magnitude of a vector
def magnitude(self,concordance):
total = 0
for word,count in concordance.items():
total += count ** 2
return math.sqrt(total)
# Compute the cosine similarity between two vectors
def relation(self,concordance1, concordance2):
relevance = 0
topvalue = 0
for word, count in concordance1.items():
if word in concordance2:
topvalue += count * concordance2[word]
return topvalue / (self.magnitude(concordance1) * self.magnitude(concordance2))
def recognise(self,image):
image=convert_image(image)
images=cut_image(image)
vectors=[]
for img in images:
vectors.append(buildvector(img))
result=[]
for vector in vectors:
guess=[]
for image in self.imgset:
for letter,temp in image.items():
relevance=0
num=0
for img in temp:
relevance+=self.relation(vector,img)
num+=1
relevance=relevance/num
guess.append((relevance,letter))
guess.sort(reverse=True)
result.append(guess[0])
return result
if __name__=='__main__':
imageRecognize=CaptchaRecognize()
# Download a captcha image
with open('captcha.jpg', 'wb') as f:
f.write(requests.get('http://jwxt.upc.edu.cn/jwxt/verifycode.servlet').content)
image=Image.open('captcha.jpg')
result=imageRecognize.recognise(image)
print(result)  # raw (score, letter) guesses for each cut-out digit
string=[''.join(item[1]) for item in result]
print(string)
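# --- Hedged sketch (not part of the original script): the same cosine measure
# used by CaptchaRecognize.relation(), written for plain dicts of pixel values.
def cosine(a, b):
    dot = sum(v * b[k] for k, v in a.items() if k in b)
    magnitude = lambda d: math.sqrt(sum(v * v for v in d.values()))
    return dot / (magnitude(a) * magnitude(b))

# identical vectors give 1.0, vectors with no shared weight give 0.0
assert abs(cosine({0: 1, 1: 2}, {0: 1, 1: 2}) - 1.0) < 1e-9
assert cosine({0: 1, 1: 0}, {0: 0, 1: 5}) == 0.0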
|
from dictish import Dictish
AN_EMPTY_DICTISH = Dictish()
LETTER_TO_NUMBER = Dictish([("a", 1), ("b", 2), ("c", 3)])
def test_an_empty_dictish_is_falsey():
assert bool(AN_EMPTY_DICTISH) is False
def test_a_populated_dictish_is_truthy():
assert bool(LETTER_TO_NUMBER) is True
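# --- Hedged illustration (not the real Dictish, which is not shown here): any
# mapping-like class gets the truthiness behaviour exercised above for free once
# it defines __len__, because bool(obj) falls back to len(obj) != 0.
class MinimalDictish:
    def __init__(self, pairs=()):
        self._items = list(pairs)
    def __len__(self):
        return len(self._items)

def test_len_is_enough_for_truthiness():
    assert bool(MinimalDictish()) is False
    assert bool(MinimalDictish([("a", 1)])) is True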
|
# -*- coding: utf-8 -*-
"""
Created on 07 Mar 2021 14:02:18
@author: jiahuei
"""
from . import file
from . import image
from . import misc
from . import video
|
#!/usr/bin/env python3
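# LeetCode 457 (circular array loop): each nums[i] is a jump; a valid cycle must
# have length > 1 and move in a single direction. The solution below runs a
# Floyd-style walk from every start index: `slow` advances one jump and `fast`
# two jumps per iteration, the walk is abandoned as soon as a jump changes sign
# relative to the start (break 2) or a self-loop of length 1 is hit (break 1),
# and visited start positions are zeroed out so they are never re-explored.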
class Solution:
def circularArrayLoop(self, nums):
l = len(nums)
for i in range(l):
print(f'i = {i}')
head = slow = fast = i
while True:
slow = (slow+nums[slow])%l
fast1 = (fast+nums[fast])%l
fast2 = (fast1+nums[fast1])%l
if fast == fast1 or fast1 == fast2:
print(f'break 1 fast = nums[{fast}] = {nums[fast]}')
break
if (nums[head]*nums[fast] <= 0) or (nums[head]*nums[fast1] <= 0):
print(f'break 2, fast = nums[{fast}] = {nums[fast]}')
break
fast = fast2
print(f'slow = nums[{slow}] = {nums[slow]}, fast = nums[{fast}] = {nums[fast]}')
if slow == fast:
return True
while head != fast:
nums[head], head = 0, (head+nums[head])%l
print(f'nums = {nums}')
return False
sol = Solution()
nums = [-2, 1, -1, -2,-2]
nums = [2, -1, 1, 2,2]
nums = [-1, 2]
nums = [-1,-2,-3,-4,-5]
print(sol.circularArrayLoop(nums))
|
"""
Load pp, plot and save
8km difference
"""
import os, sys
#%matplotlib inline
#%pylab inline
import matplotlib
matplotlib.use('Agg')
# Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
#from matplotlib import figure
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.unit as unit
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
from dateutil import tz
#import multiprocessing as mp
import gc
import types
import pdb
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/unrotate_pole.py')
pp_file_contourf = 'tcwv_mean_by_date_range'
pp_file_contour ='408'
#plot_diags=['sp_hum']
plot_levels = [925]
#experiment_ids = ['dkmbq', 'dklyu']
experiment_ids = ['dklyu', 'dkmbq', 'djzny', 'djznw', 'djznq', 'djzns', 'dklwu', 'dklzq'] # All minus large 2
cb_label='mm'
min_contour=40.
max_contour=70.
tick_interval=5.
clevs = np.linspace(min_contour, max_contour,16)
cmap=plt.cm.jet
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
pp_file_path = '/nfs/a90/eepdw/Data/EMBRACE/'
degs_crop_top = 1.7
degs_crop_bottom = 2.5
from iris.coord_categorisation import add_categorised_coord
# def add_hour_of_day(cube, coord, name='hour'):
# add_categorised_coord(cube, name, coord,
# lambda coord, x: coord.units.num2date(x).hour)
figprops = dict(figsize=(8,8), dpi=100)
#cmap=cm.s3pcpn_l
u = unit.Unit('hours since 1970-01-01 00:00:00',calendar='gregorian')
dx, dy = 10, 10
divisor=10 # for lat/lon rounding
lon_high = 101.866
lon_low = 64.115
lat_high = 33.
lat_low =-6.79
lon_low_tick=lon_low -(lon_low%divisor)
lon_high_tick=math.ceil(lon_high/divisor)*divisor
lat_low_tick=lat_low - (lat_low%divisor)
lat_high_tick=math.ceil(lat_high/divisor)*divisor
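# Worked example of the rounding above (illustrative only): with divisor=10,
# lon_low=64.115 gives lon_low_tick = 64.115 - (64.115 % 10) = 60.0 and
# lon_high=101.866 gives lon_high_tick = ceil(101.866/10)*10 = 110.0, so the
# gridline locators set up below step through 60, 70, ..., 110 degrees.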
def main():
#clevs_col = np.arange(clev_min, clev_max)
for p_level in plot_levels:
# Set pressure height contour min/max
if p_level == 925:
clev_min = 680.
clev_max = 810.
elif p_level == 850:
clev_min = 1435.
clev_max = 1530.
elif p_level == 700:
clev_min = 3090.
clev_max = 3155.
elif p_level == 500:
clev_min = 5800.
clev_max = 5890.
else:
print 'Contour min/max not set for this pressure level'
# Set potential temperature min/max
if p_level == 925:
clevpt_min = 298.
clevpt_max = 310.
elif p_level == 850:
clevpt_min = 302.
clevpt_max = 312.
elif p_level == 700:
clevpt_min = 312.
clevpt_max = 320.
elif p_level == 500:
clevpt_min = 325.
clevpt_max = 332.
else:
print 'Potential temperature min/max not set for this pressure level'
# Set specific humidity min/max
if p_level == 925:
clevsh_min = 0.012
clevsh_max = 0.020
elif p_level == 850:
clevsh_min = 0.007
clevsh_max = 0.017
elif p_level == 700:
clevsh_min = 0.002
clevsh_max = 0.010
elif p_level == 500:
clevsh_min = 0.001
clevsh_max = 0.005
else:
print 'Specific humidity min/max not set for this pressure level'
clevs_lin = np.arange(clev_min, clev_max, 5)
p_level_constraint = iris.Constraint(pressure=p_level)
#for plot_diag in plot_diags:
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
pfile = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_%s.pp' % (expmin1, experiment_id, experiment_id, pp_file_contourf)
#pc = iris(pfile)
pcube_contourf = iris.load_cube(pfile)
pcube_contourf=iris.analysis.maths.multiply(pcube_contourf,-1000)
#pdb.set_trace()
height_pp_file = '%s_%s_on_p_levs_mean_by_date_range.pp' % (experiment_id, pp_file_contour)
height_pfile = '%s%s/%s/%s' % (pp_file_path, expmin1, experiment_id, height_pp_file)
pcube_contour = iris.load_cube(height_pfile, p_level_constraint)
time_coords = pcube_contourf.coord('time')
iris.coord_categorisation.add_day_of_year(pcube_contourf, time_coords, name='day_of_year')
time_coords = pcube_contour.coord('time')
iris.coord_categorisation.add_day_of_year(pcube_contour, time_coords, name='day_of_year')
for t, time_cube in enumerate(pcube_contourf.slices(['grid_latitude', 'grid_longitude'])):
#height_cube_slice = pcube_contour.extract(iris.Constraint(day_of_year=time_cube.coord('day_of_year').points))
height_cube_slice = pcube_contour[t]
#pdb.set_trace()
# Get time of averages for plot title
h = u.num2date(np.array(time_cube.coord('time').points, dtype=float)[0]).strftime('%d%b')
#Convert to India time
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('Asia/Kolkata')
h_utc = u.num2date(np.array(time_cube.coord('day_of_year').points, dtype=float)[0]).replace(tzinfo=from_zone)
h_local = h_utc.astimezone(to_zone).strftime('%H%M')
fig = plt.figure(**figprops)
#cmap=plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low+degs_crop_bottom,lat_high-degs_crop_top))
m =\
Basemap(llcrnrlon=lon_low,llcrnrlat=lat_low,urcrnrlon=lon_high,urcrnrlat=lat_high, rsphere = 6371229)
# lat = pcube_contourf.coord('grid_latitude').points
# lon = pcube_contourf.coord('grid_longitude').points
# cs = cube.coord_system('CoordSystem')
# lons, lats = np.meshgrid(lon, lat)
# lons, lats = iris.analysis.cartography.unrotate_pole\
# (lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
# x,y = m(lons,lats)
# if plot_diag=='temp':
# min_contour = clevpt_min
# max_contour = clevpt_max
# cb_label='K'
# main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), potential temperature (colours),\
# and wind (vectors) %s UTC %s IST' % (h, h_local)
# tick_interval=2
# clev_number=max_contour-min_contour+1
# elif plot_diag=='sp_hum':
# min_contour = clevsh_min
# max_contour = clevsh_max
# cb_label='kg/kg'
# main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), specific humidity (colours),\
# and wind (vectors) %s UTC %s IST' % (h, h_local)
# tick_interval=0.002
# clev_number=max_contour-min_contour+0.001
# clevs = np.linspace(min_contour, max_contour, clev_number)
# #clevs = np.linspace(-3, 3, 32)
# cont = plt.contourf(x,y,time_cube.data, clevs, cmap=cmap, extend='both')
cont = iplt.contourf(time_cube, clevs, cmap=cmap, extend='both')
#pdb.set_trace()
cs_lin = iplt.contour(height_cube_slice, clevs_lin,colors='#262626',linewidths=1.)
plt.clabel(cs_lin, fontsize=14, fmt='%d', color='black')
#del time_cube
#plt.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'#262626'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'#262626'}
# cbar = fig.colorbar(cont, orientation='horizontal', pad=0.05, extend='both')
# cbar.set_label('%s' % cb_label, fontsize=10, color='#262626')
# #cbar.set_label(time_cube.units, fontsize=10, color='#262626')
# cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
# ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
# cbar.set_ticklabels(['${%.1f}$' % i for i in ticks])
# cbar.ax.tick_params(labelsize=10, color='#262626')
#main_title='Mean Rainfall for EMBRACE Period -%s UTC (%s IST)' % (h, h_local)
#main_title=time_cube.standard_name.title().replace('_',' ')
#model_info = re.sub(r'[(\']', ' ', model_info)
#model_info = re.sub(r'[\',)]', ' ', model_info)
#print model_info
file_save_name = '%s_%s_and_%s_%s_hPa_and_geop_height_%s' % (experiment_id, pp_file_contour, pp_file_contourf, p_level, h)
save_dir = '%s%s/%s_and_%s' % (save_path, experiment_id, pp_file_contour, pp_file_contourf)
if not os.path.exists('%s' % save_dir): os.makedirs('%s' % (save_dir))
#plt.show()
#fig.savefig('%s/%s_notitle.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
plt.title('%s UTC' % (h))
fig.savefig('%s/%s_short_title.png' % (save_dir, file_save_name) , format='png', bbox_inches='tight')
#model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
#plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
#fig.savefig('%s/%s.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
fig.clf()
plt.close()
#del time_cube
gc.collect()
if __name__ == '__main__':
main()
#proc=mp.Process(target=worker)
#proc.daemon=True
#proc.start()
#proc.join()
|
# Generated by Django 2.0.5 on 2018-06-07 21:43
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('graduation_design', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='dissertation',
name='file_date',
field=models.DateField(default=django.utils.timezone.now, verbose_name='上传日期'),
),
migrations.AddField(
model_name='midtermreport',
name='file_date',
field=models.DateField(default=django.utils.timezone.now, verbose_name='上传日期'),
),
migrations.AddField(
model_name='openingreport',
name='file_date',
field=models.DateField(default=django.utils.timezone.now, verbose_name='上传日期'),
),
migrations.AlterField(
model_name='dissertation',
name='file_name',
field=models.CharField(blank=True, default='暂未命名', max_length=100, null=True, verbose_name='文档名称'),
),
migrations.AlterField(
model_name='dissertation',
name='file_url',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='文件路径'),
),
migrations.AlterField(
model_name='midtermreport',
name='file_name',
field=models.CharField(blank=True, default='暂未命名', max_length=100, null=True, verbose_name='文档名称'),
),
migrations.AlterField(
model_name='midtermreport',
name='file_url',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='文件路径'),
),
migrations.AlterField(
model_name='modelfile',
name='file_name',
field=models.CharField(blank=True, default='暂未命名', max_length=100, null=True, verbose_name='文件名称'),
),
migrations.AlterField(
model_name='openingreport',
name='file_name',
field=models.CharField(blank=True, default='暂未命名', max_length=100, null=True, verbose_name='文件名称'),
),
]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
class LogDestinationPolicy(pulumi.CustomResource):
"""
Provides a CloudWatch Logs destination policy resource.
"""
def __init__(__self__, __name__, __opts__=None, access_policy=None, destination_name=None):
"""Create a LogDestinationPolicy resource with the given unique name, props, and options."""
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, basestring):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if not access_policy:
raise TypeError('Missing required property access_policy')
elif not isinstance(access_policy, basestring):
raise TypeError('Expected property access_policy to be a basestring')
__self__.access_policy = access_policy
"""
The policy document. This is a JSON formatted string.
"""
__props__['accessPolicy'] = access_policy
if not destination_name:
raise TypeError('Missing required property destination_name')
elif not isinstance(destination_name, basestring):
raise TypeError('Expected property destination_name to be a basestring')
__self__.destination_name = destination_name
"""
A name for the subscription filter
"""
__props__['destinationName'] = destination_name
super(LogDestinationPolicy, __self__).__init__(
'aws:cloudwatch/logDestinationPolicy:LogDestinationPolicy',
__name__,
__props__,
__opts__)
def set_outputs(self, outs):
if 'accessPolicy' in outs:
self.access_policy = outs['accessPolicy']
if 'destinationName' in outs:
self.destination_name = outs['destinationName']
|
#!/usr/bin/env python
# import some utils.
import rospy
from std_msgs.msg import Float64
class InterpolateThrottle:
def __init__(self):
car_name = rospy.get_param("~car_name", "/car")
# Allow our topics to be dynamic.
self.rpm_input_topic = rospy.get_param(
"~rpm_input_topic",
"{}/vesc/commands/motor/unsmoothed_speed".format(car_name),
)
self.rpm_output_topic = rospy.get_param(
"~rpm_output_topic", "{}/vesc/commands/motor/speed".format(car_name)
)
self.servo_input_topic = rospy.get_param(
"~servo_input_topic",
"{}/vesc/commands/servo/unsmoothed_position".format(car_name),
)
self.servo_output_topic = rospy.get_param(
"~servo_output_topic", "{}/vesc/commands/servo/position".format(car_name)
)
self.max_acceleration = rospy.get_param(rospy.search_param("max_acceleration"))
self.max_rpm = rospy.get_param(rospy.search_param("vesc_driver/speed_max"))
self.min_rpm = rospy.get_param(rospy.search_param("vesc_driver/speed_min"))
self.throttle_smoother_rate = rospy.get_param(
rospy.search_param("throttle_smoother_rate")
)
self.speed_to_erpm_gain = rospy.get_param(
rospy.search_param("speed_to_erpm_gain")
)
self.max_servo_speed = rospy.get_param(rospy.search_param("max_servo_speed"))
self.steering_angle_to_servo_gain = rospy.get_param(
rospy.search_param("steering_angle_to_servo_gain")
)
self.servo_smoother_rate = rospy.get_param(
rospy.search_param("servo_smoother_rate")
)
self.max_servo = rospy.get_param(rospy.search_param("vesc_driver/servo_max"))
self.min_servo = rospy.get_param(rospy.search_param("vesc_driver/servo_min"))
# Variables
self.last_rpm = 0
self.desired_rpm = self.last_rpm
self.last_servo = rospy.get_param(
rospy.search_param("steering_angle_to_servo_offset")
)
self.desired_servo_position = self.last_servo
# Create topic subscribers and publishers
self.rpm_output = rospy.Publisher(self.rpm_output_topic, Float64, queue_size=1)
self.servo_output = rospy.Publisher(
self.servo_output_topic, Float64, queue_size=1
)
rospy.Subscriber(self.rpm_input_topic, Float64, self._process_throttle_command)
rospy.Subscriber(self.servo_input_topic, Float64, self._process_servo_command)
self.max_delta_servo = abs(
self.steering_angle_to_servo_gain
* self.max_servo_speed
/ self.servo_smoother_rate
)
rospy.Timer(
rospy.Duration(1.0 / self.servo_smoother_rate), self._publish_servo_command
)
self.max_delta_rpm = abs(
self.speed_to_erpm_gain
* self.max_acceleration
/ self.throttle_smoother_rate
)
rospy.Timer(
    # publish at the throttle smoother rate (mirrors the servo timer above)
    rospy.Duration(1.0 / self.throttle_smoother_rate), self._publish_throttle_command
)
# run the node
self._run()
# Keep the node alive
def _run(self):
rospy.spin()
def _publish_throttle_command(self, evt):
desired_delta = self.desired_rpm - self.last_rpm
clipped_delta = max(min(desired_delta, self.max_delta_rpm), -self.max_delta_rpm)
smoothed_rpm = self.last_rpm + clipped_delta
self.last_rpm = smoothed_rpm
# print self.desired_rpm, smoothed_rpm
self.rpm_output.publish(Float64(smoothed_rpm))
def _process_throttle_command(self, msg):
input_rpm = msg.data
# Do some sanity clipping
input_rpm = min(max(input_rpm, self.min_rpm), self.max_rpm)
self.desired_rpm = input_rpm
def _publish_servo_command(self, evt):
desired_delta = self.desired_servo_position - self.last_servo
clipped_delta = max(
min(desired_delta, self.max_delta_servo), -self.max_delta_servo
)
smoothed_servo = self.last_servo + clipped_delta
self.last_servo = smoothed_servo
self.servo_output.publish(Float64(smoothed_servo))
def _process_servo_command(self, msg):
input_servo = msg.data
# Do some sanity clipping
input_servo = min(max(input_servo, self.min_servo), self.max_servo)
# set the target servo position
self.desired_servo_position = input_servo
# Boilerplate node spin up.
if __name__ == "__main__":
try:
rospy.init_node("Throttle_Interpolator")
p = InterpolateThrottle()
except rospy.ROSInterruptException:
pass
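# --- Hedged sketch (not part of the node): the clamp-based smoothing used by both
# _publish_throttle_command and _publish_servo_command, reduced to a pure function.
# On each timer tick the output moves toward the target by at most max_delta.
def slew_limit(last, target, max_delta):
    delta = max(min(target - last, max_delta), -max_delta)
    return last + delta

# e.g. stepping from 0 toward 10 with max_delta=3 gives 3, 6, 9, 10
_value, _trace = 0, []
for _ in range(4):
    _value = slew_limit(_value, 10, 3)
    _trace.append(_value)
assert _trace == [3, 6, 9, 10]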
|
from django import template
from django_extras.utils import humanize
register = template.Library()
@register.filter(is_safe=True)
def describe_seconds(value):
"""
Convert a seconds value into a human-readable (i.e. weeks, days, hours) value.
:param value: integer value of the number of seconds.
:return: a string with the humanized value.
"""
return humanize.describe_seconds(value)
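# --- Hedged usage note (illustrative, not taken from the package docs). ---
# In a template the filter is applied like any other Django filter, e.g. with a
# hypothetical context variable `build_duration` holding a number of seconds:
#     {% load <this template library's name> %}   (the load name depends on this module's filename)
#     {{ build_duration|describe_seconds }}
# The exact wording of the output is whatever humanize.describe_seconds returns.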
|
# -*- coding: utf-8 -*-
# --------------------------------------
# @Time : 2021/5/12$ 12:12$
# @Author : Qian Li
# @Email : 1844857573@qq.com
# @File : network.py
# Description : network components (online encoder, projector, predictor, classifier, target encoder, target projector) for self-supervised learning
import torch
from functools import wraps
from torch import nn
import numpy as np
from utils import MLP,ResNet50,accuracy
import copy
from torch.nn import init
from torchvision import models
def weigth_init(model,path):
from collections import OrderedDict
new_state_dict=OrderedDict()
state_dict=torch.load(path)["model"]
for k,v in state_dict.items():
if "target_" in k:
continue
new_state_dict[k]=v
model.load_state_dict(new_state_dict)
class VGG(nn.Module):
def __init__(self,num_classes=10,
projector_hidden_size=4096,
projector_output_size=256,
predictor_hidden_size=4096,
moving_average_decay=.9999,
eps=1e-5,use_momentum = True,mode="pre-train"):
## mode: "pre-train", "fine-tune" or "test"
super(VGG,self).__init__()
model=models.vgg16(pretrained=False)
print(model)
model.classifier=MLP(input_size=512,hidden_size=projector_hidden_size,output_size=projector_output_size)
model.avgpool=nn.Sequential()
self.mode=mode
model.classifier=nn.Sequential()
self.model=model
self.classifier=nn.Sequential(nn.Linear(512,4096),
nn.BatchNorm1d(4096),
nn.ReLU(inplace=True),
nn.Linear(4096,4096),
nn.BatchNorm1d(4096),
nn.ReLU(inplace=True),
nn.Linear(4096,num_classes)
)
self.model=model
self.cls_loss=nn.CrossEntropyLoss()
if self.classifier is not None:
for m in self.classifier.modules():
if isinstance(m,nn.Conv2d):
nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')
elif isinstance(m,nn.Linear):
init.normal_(m.weight, std=1e-3)
elif isinstance(m, nn.BatchNorm2d):
    init.constant_(m.weight, 1)
    init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
    init.constant_(m.weight, 1)
    init.constant_(m.bias, 0)
def forward(self,image_one=None,image_two=None,labels=None):
#if not image_two:
if self.mode == "test":
feature_view1=self.model(image_one)
logits_view1=nn.Softmax(dim=1)(self.classifier(feature_view1))
return logits_view1.argmax(dim=1),None,None
feature=self.model(image_one)
logit_view1=self.classifier(feature)
classifier_loss=self.cls_loss(logit_view1,labels)
logit_view1=nn.Softmax(dim=1)(logit_view1)
top1_acc,top5_acc=accuracy(logit_view1.data,labels, topk=(1, 5))
return classifier_loss.mean(),top1_acc.data.mean(),top5_acc.data.mean()
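# --- Hedged sketch: utils.accuracy is not shown in this excerpt; the calls above
# are consistent with the common torchvision-style top-k helper sketched below.
# Treat the name and exact behaviour as an assumption, not the project's utils code.
def topk_accuracy(output, target, topk=(1,)):
    """Return top-k accuracies (in percent) for a batch of logits."""
    maxk = max(topk)
    batch_size = target.size(0)
    # indices of the maxk highest logits per sample, shape (maxk, batch)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    return [correct[:k].reshape(-1).float().sum(0) * (100.0 / batch_size) for k in topk]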
|
from datetime import datetime
from PIL import Image
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
# MySQL variants (overridden below by the EXTRACT forms):
# SQL_MONTH = 'MONTH({})'
# SQL_DAY = 'DAY({})'
SQL_MONTH = 'EXTRACT(MONTH FROM {})'
SQL_DAY = 'EXTRACT(DAY FROM {})'
WIDTH = 150
HEIGHT = 165
def parse_month_day(month_day=None):
m = 0
d = 0
if month_day is not None:
if month_day == '99':
month_day = datetime.now().isoformat()[5:7]
if '-' in month_day:
m, d = month_day.split('-')
m = int(m)
d = int(d)
else:
d = 0
m = int(month_day)
return m, d
def select_by_month_day(datename, m, d):
if d + m == 0:
if datename == 'birthdate':
return User.objects.filter(is_checked_by_admin=True)
return User.objects.filter(is_checked_by_admin=True, anniversary_alert=True)
if datename == 'birthdate':
if d == 0:
return User.objects.filter(is_checked_by_admin=True, birthdate__month=m)
return User.objects.filter(is_checked_by_admin=True, birthdate__month=m, birthdate__day=d)
elif datename == 'anniversary':
if d == 0:
return User.objects.filter(is_checked_by_admin=True, anniversary_alert=True, anniversary__month=m)
return User.objects.filter(is_checked_by_admin=True, anniversary_alert=True, anniversary__month=m, anniversary__day=d)
def user_ordered_by_month_day(datename, month_day=None):
month, day = parse_month_day(month_day)
selection = select_by_month_day(datename, month, day)
return selection.extra(
select={'month': SQL_MONTH.format(datename),
'day': SQL_DAY.format(datename)},
order_by=['month', 'day']
)
def get_sysadmin_users():
return [user for user in User.objects.filter(is_superuser=True)]
@python_2_unicode_compatible
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = models.CharField(_('Name of User'), blank=True, max_length=255)
birthdate = models.DateField(_('Data de nascimento (DD/MM/AAAA)'), default=now)
anniversary = models.DateField('Data de admissão (DD/MM/AAAA)', default=now)
photo = models.ImageField(_('Foto'), default=settings.MEDIA_ROOT +'/perfil.png')
is_checked = models.BooleanField(_('Confirmo que estes dados estão atualizados'), default=False)
is_checked_by_admin = models.BooleanField(_('Validado'), default=False)
anniversary_alert = models.BooleanField(_('Alertar tempo de SciELO'), default=True)
def __str__(self):
return self.username
def get_absolute_url(self):
return reverse('users:detail', kwargs={'username': self.username})
def fullname(self):
if all([self.first_name, self.last_name]):
if self.first_name + self.last_name != '':
return ' '.join([self.first_name, self.last_name])
if self.email != '':
return self.email[:self.email.find('@')].replace('.', ' ').title()
if self.username:
return self.username.replace('.', ' ').title()
def status(self):
if self.is_checked and self.is_checked_by_admin:
return 'valid'
if self.is_checked:
return 'updated'
return 'pending'
def registration_status(self):
if self.is_checked and self.is_checked_by_admin:
return '100'
if self.is_checked:
return '60'
return '30'
def years(self):
current = datetime.now().isoformat()[:4]
d = self.anniversary.isoformat()[:4]
return int(current) - int(d)
def display_years(self):
y = self.years()
if y > 1:
return '{} anos de SciELO'.format(y)
if y == 1:
return '{} ano de SciELO'.format(y)
return ''
def save(self, *args, **kwargs):
super(AbstractUser, self).save(*args, **kwargs)
self.resize_photo()
def resize_photo(self):
if self.photo is not None:
image = Image.open(self.photo)
(width, height) = image.size
fixed_w = width
fixed_h = height
fixed_w = WIDTH
r = float(WIDTH) / width
fixed_h = int(r * height)
size = (fixed_w, fixed_h)
image = image.resize(size, Image.ANTIALIAS)
if fixed_h > HEIGHT:
y = (fixed_h - HEIGHT) // 2
image = image.crop((0, y, WIDTH, y + HEIGHT))  # centre-crop to a WIDTH x HEIGHT box
image.save(self.photo.path)
|
from __future__ import absolute_import
from tastypie.authentication import Authentication
from tastypie.authorization import Authorization
from tastypie.resources import ModelResource
from tastypie_sorl_thumbnail.fields import ThumbnailField
from .models import Photo
class PhotoResource(ModelResource):
thumbnail = ThumbnailField('image', '120', quality=80)
class Meta(object):
queryset = Photo.objects.all()
resource_name = 'photo'
authentication = Authentication()
authorization = Authorization()
|
"""
Settings.
For the database configuration, see ../sql_orm/database.py
"""
#DEBUG/TEST MODE: This allows HTTP connection instead of HTTPS
TEST_MODE = True
API_PORT = 8000
# These are used only if the TEST_MODE is disabled
SSL_CERT_LOCATION = "path"
SSL_KEY_LOCATION = "path"
SSL_CA_LOCATION = "path"
# JWT access settings: Secret key is randomly generated on the first instance and saved to the database (see main)
# The algorithm HS256 should not be changed!! Token expiration should be the same on every server
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
# How often the server checks for expired ssh authkeys
CLEANING_LADY_INTERVAL_SECONDS = 60
# SSH settings for this particular server. If you are using sish, please use the
# fork at github.com/santomet, as it is currently the one that supports blocking ports other than the allowed ones
PORT_LIST = list(range(9000, 9101))
SSH_AUTH_KEYS_FILE_PATH = "/opt/INSTALL/sish/deploy/pubkeys/authorized_keys"
SSH_PORT = "2222"
SSH_SERVER = "" # If kept empty, agents will use the same IP/DOMAIN that they use to connect the API
SSH_SERVER_PUBLIC_FINGERPRINT = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIADBJiJyOZq7Goad/J4IQZaCx70cUjVcMSoIWyzBlKtc"
SSH_SERVER_USERNAME = "" # A classic SSH server needs this, SISH does not
|
from django.contrib import admin
from django.urls import path
from pag.views import home
urlpatterns = [
path('admin/', admin.site.urls),
path('', home, name='home')
]
|
import glob
import os
from mkgen.utils import flat
default_config = {
"languages": [
{"name": "python", "extensions": [".py"], "interpreter": "$(PYTHON)"},
{"name": "R", "extensions": [".R", ".r"], "interpreter": "$(R)"},
],
"src_paths": ["src"],
}
def get_interpreter(config, file):
config_ext = [x["extensions"] for x in config["languages"]]
file_ext = "." + file.split(".")[-1]
language_index = [i for i, x in enumerate(config_ext) if file_ext in x][0]
return config["languages"][language_index]["interpreter"]
def get_code_files(config):
extensions = flat([x["extensions"] for x in config["languages"]])
code_files = []
for src_path in config["src_paths"]:
for ext in extensions:
fns = glob.glob(os.getcwd() + "/" + src_path + "/*" + ext, recursive=True)
code_files.append(fns)
return flat(code_files)
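# --- Hedged usage sketch (not part of mkgen): with the default_config above,
# get_interpreter maps a source file to the make variable of its language.
# The file names below are hypothetical.
if __name__ == "__main__":
    assert get_interpreter(default_config, "src/clean_data.py") == "$(PYTHON)"
    assert get_interpreter(default_config, "src/fit_model.R") == "$(R)"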
|
## https://leetcode.com/problems/find-first-and-last-position-of-element-in-sorted-array/
## given a list that's already sorted, find the indices that bound
## a given element (or -1, -1 if that element is not in the list).
## goal is to do this in O(log(n)), i.e. to take advantage of the fact
## that the list is sorted.
## python's bisect library does this, so we can use bisect_left and
## bisect_right to solve this problem (i.e. do binary searches under
## the hood). that solution comes in at 96th percentile in runtime and
## 83rd in terms of memory
from typing import List

class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
## super base case -- no number in my list
if not len(nums):
return [-1, -1]
## slightly less base case -- is the number not in the list?
if target < nums[0] or target > nums[-1]:
return [-1, -1]
## otherwise, take advantage of the fact that it's sorted:
import bisect
l = bisect.bisect_left(nums, target)
## check if we found the number, or if we find the closest number
if nums[l] != target:
return [-1, -1]
## if we found the number itself, then bisect_right will find it too
return [l, bisect.bisect_right(nums, target)-1]
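## Worked example (the classic LeetCode case): nums = [5,7,7,8,8,10], target = 8.
## bisect_left finds index 3 (the first 8) and bisect_right finds index 5 (one
## past the last 8), so the method returns [3, 4].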
|
# Mirror JNT & orient L>R
import maya.cmds as mc
def doMirrorJnt(side='L'):
sel = mc.ls(sl=1)
if side=='R':
side=['R_','L_']
else:
side=['L_','R_']
for each in sel:
right = each.replace(side[0], side[1], 1)
mc.duplicate(each, n=right)
try:
mc.parent(right, w=1)
except:
pass
mc.setAttr(right + '.translateX', mc.getAttr(right + '.translateX') * -1)
X = mc.getAttr(right + '.jointOrientX')
Y = mc.getAttr(right + '.jointOrientY')
Z = mc.getAttr(right + '.jointOrientZ')
mc.setAttr(right + '.jointOrientX', X)
mc.setAttr(right + '.jointOrientY', Y * -1)
mc.setAttr(right + '.jointOrientZ', Z * -1)
try:
if mc.listRelatives(each, ap=1):
mc.parent(right, mc.listRelatives(each, ap=1)[0].replace(side[0], side[1], 1))
except:
pass
|
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT
from lsst.ip.isr import Defects
from .eoCalibBase import EoDetRunCalibTaskConfig, EoDetRunCalibTaskConnections, EoDetRunCalibTask
__all__ = ["EoDefectsTaskConfig", "EoDefectsTask"]
class EoDefectsTaskConnections(EoDetRunCalibTaskConnections):
brightPixels = cT.Input(
name="eoBrightPixels",
doc="Electrial Optical Calibration Output",
storageClass="Defects",
dimensions=("instrument", "detector"),
isCalibration=True,
)
darkPixels = cT.Input(
name="eoDarkPixels",
doc="Electrial Optical Calibration Output",
storageClass="Defects",
dimensions=("instrument", "detector"),
isCalibration=True,
)
defects = cT.Output(
name='defects',
doc="Output defect tables.",
storageClass="Defects",
dimensions=("instrument", "detector"),
isCalibration=True,
)
class EoDefectsTaskConfig(EoDetRunCalibTaskConfig,
pipelineConnections=EoDefectsTaskConnections):
def setDefaults(self):
self.connections.brightPixels = "eoBrightPixels"
self.connections.darkPixels = "eoDarkPixels"
self.connections.defects = "defects"
class EoDefectsTask(EoDetRunCalibTask):
"""Combines Defect sets from other tasks
Summary output is stored as `lsst.eotask_gen3.EoDefectsData`
Defect sets are stored as `lsst.ip.isr.Defects`
Currently combines defects from EoBrightPixelsTask and EoDarkPixelsTask.
To Do: add edge rolloff masking
"""
ConfigClass = EoDefectsTaskConfig
_DefaultName = "eoDefects"
def run(self, brightPixels, darkPixels, **kwargs):
""" Run method
Parameters
----------
brightPixels : `lsst.ip.isr.Defects`
Bright Pixel defect set
darkPixels : `lsst.ip.isr.Defects`
Dark Pixel defect set
Returns
-------
defects : `lsst.ip.isr.Defects`
Output defect list
"""
outDefects = Defects()
with outDefects.bulk_update():
for inputDefects in [brightPixels, darkPixels]:
for d in inputDefects:
outDefects.append(d)
return pipeBase.Struct(defects=outDefects)
|
import unittest
import sys
sys.path.append('./')
solutions = __import__('solutions.141_linked_list_cycle', fromlist='*')
helper = __import__('utils.helper', fromlist='*')
class Test(unittest.TestCase):
def test_hasCycle(self):
s = solutions.Solution()
head = helper.constructListNode([1, 2])
self.assertEqual(s.hasCycle(head), False)
head = helper.constructListNode([1, 2, 3, 4])
self.assertEqual(s.hasCycle(head), False)
head = helper.constructListNode([1])
head.next = head
self.assertEqual(s.hasCycle(head), True)
head = helper.constructListNode([1, 2])
head.next.next = head
self.assertEqual(s.hasCycle(head), True)
self.assertEqual(s.hasCycle(None), False)
head = helper.constructListNode([1, 2, 3, 4])
head.next.next.next = head.next
self.assertEqual(s.hasCycle(head), True)
if __name__ == '__main__':
unittest.main()
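# --- Hedged sketch: utils.helper is not shown in this excerpt. A minimal
# constructListNode consistent with how it is used above would build a singly
# linked list from a Python list and return its head; the code below is an
# assumption for illustration, not the project's actual helper.
class _ListNode:
    def __init__(self, val=0, next=None):
        self.val, self.next = val, next

def _construct_list_node(values):
    head = tail = None
    for v in values:
        node = _ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head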
|
import logging
from bglib.flow.decision import Decision
class PickBetweenPlayers:
UntilEmpty = 1
OneEach = 2
def __init__(self, choice_pool, pick_order, mode, remove_picked_choices=True):
if len(choice_pool) < len(pick_order):
raise ValueError('Not enough choices ({}) to make {} picks.'.format(
len(choice_pool), len(pick_order)))
if not remove_picked_choices and mode == PickBetweenPlayers.UntilEmpty:
raise ValueError("'UntilEmpty' cannot be used if picked choices are not removed.")
self.choice_pool = choice_pool
self.mode = mode
self.pick_order = pick_order
self.remove_picked_choices = remove_picked_choices
self._already_picked = []
def next_pick(self):
if not self.pick_order:
raise ValueError('No picks left.')
logging.info('Still picking: ' + ', '.join([str(picker) for picker in self.pick_order]))
decision_maker = self.pick_order.pop(0)
if self.mode == PickBetweenPlayers.UntilEmpty:
self.pick_order.append(decision_maker)
decision = Decision(owner=decision_maker, options=self.choice_pool,
callback=self.pick_made)
return decision
def pick_made(self, decision):
assert decision.status == Decision.Decided
logging.info('{} picked option {}'.format(decision.owner, decision.picked_option))
self._already_picked.append((decision.owner, decision.picked_option))
if self.remove_picked_choices:
self.choice_pool.remove(decision.picked_option)
def run_commandline(self):
while self.choice_pool and self.pick_order:
self.next_pick().run_commandline()
|
import unittest
import datetime
import numpy as np
import pandas as pd
import pytz
import time
from ibis.impala.tests.common import IbisTestEnv, ImpalaE2E, connect_test
from ibis.tests.util import assert_equal
import ibis
import ibis.common as com
import ibis.config as config
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
import ibis.util as util
import pytest
pytest.importorskip('sqlalchemy')
pytest.importorskip('hdfs')
pytest.importorskip('impala.dbapi')
ENV = IbisTestEnv()
class TestImpalaClient(ImpalaE2E, unittest.TestCase):
def test_execute_exprs_default_backend(self):
cases = [
(ibis.literal(2), 2)
]
ibis.options.default_backend = None
client = connect_test(ENV, with_hdfs=False)
assert ibis.options.default_backend is client
for expr, expected in cases:
result = expr.execute()
assert result == expected
def test_cursor_garbage_collection(self):
for i in range(5):
self.con.raw_sql('select 1', True).fetchall()
self.con.raw_sql('select 1', True).fetchone()
def test_raise_ibis_error_no_hdfs(self):
# #299
client = connect_test(ENV, with_hdfs=False)
self.assertRaises(com.IbisError, getattr, client, 'hdfs')
def test_get_table_ref(self):
table = self.db.functional_alltypes
assert isinstance(table, ir.TableExpr)
table = self.db['functional_alltypes']
assert isinstance(table, ir.TableExpr)
def test_run_sql(self):
query = """SELECT li.*
FROM {0}.tpch_lineitem li
""".format(self.test_data_db)
table = self.con.sql(query)
li = self.con.table('tpch_lineitem')
assert isinstance(table, ir.TableExpr)
assert_equal(table.schema(), li.schema())
expr = table.limit(10)
result = expr.execute()
assert len(result) == 10
def test_sql_with_limit(self):
query = """\
SELECT *
FROM functional_alltypes
LIMIT 10"""
table = self.con.sql(query)
ex_schema = self.con.get_schema('functional_alltypes')
assert_equal(table.schema(), ex_schema)
def test_raw_sql(self):
query = 'SELECT * from functional_alltypes limit 10'
cur = self.con.raw_sql(query, results=True)
rows = cur.fetchall()
cur.release()
assert len(rows) == 10
def test_explain(self):
t = self.con.table('functional_alltypes')
expr = t.group_by('string_col').size()
result = self.con.explain(expr)
assert isinstance(result, str)
def test_get_schema(self):
t = self.con.table('tpch_lineitem')
schema = self.con.get_schema('tpch_lineitem',
database=self.test_data_db)
assert_equal(t.schema(), schema)
def test_result_as_dataframe(self):
expr = self.alltypes.limit(10)
ex_names = expr.schema().names
result = self.con.execute(expr)
assert isinstance(result, pd.DataFrame)
assert list(result.columns) == ex_names
assert len(result) == 10
def test_adapt_scalar_array_results(self):
table = self.alltypes
expr = table.double_col.sum()
result = self.con.execute(expr)
assert isinstance(result, float)
with config.option_context('interactive', True):
result2 = expr.execute()
assert isinstance(result2, float)
expr = (table.group_by('string_col')
.aggregate([table.count().name('count')])
.string_col)
result = self.con.execute(expr)
assert isinstance(result, pd.Series)
def test_time_to_int_cast(self):
now = pytz.utc.localize(datetime.datetime.now())
d = ibis.literal(now)
result = self.con.execute(d.cast('int64'))
assert result == int(time.mktime(now.timetuple())) * 1000000
def test_interactive_repr_call_failure(self):
t = self.con.table('tpch_lineitem').limit(100000)
t = t[t, t.l_receiptdate.cast('timestamp').name('date')]
keys = [t.date.year().name('year'), 'l_linestatus']
filt = t.l_linestatus.isin(['F'])
expr = (t[filt]
.group_by(keys)
.aggregate(t.l_extendedprice.mean().name('avg_px')))
w2 = ibis.trailing_window(9, group_by=expr.l_linestatus,
order_by=expr.year)
metric = expr['avg_px'].mean().over(w2)
enriched = expr[expr, metric]
with config.option_context('interactive', True):
repr(enriched)
def test_array_default_limit(self):
t = self.alltypes
result = self.con.execute(t.float_col, limit=100)
assert len(result) == 100
def test_limit_overrides_expr(self):
# #418
t = self.alltypes
result = self.con.execute(t.limit(10), limit=5)
assert len(result) == 5
def test_limit_equals_none_no_limit(self):
t = self.alltypes
with config.option_context('sql.default_limit', 10):
result = t.execute(limit=None)
assert len(result) > 10
def test_verbose_log_queries(self):
queries = []
def logger(x):
queries.append(x)
with config.option_context('verbose', True):
with config.option_context('verbose_log', logger):
self.con.table('tpch_orders', database=self.test_data_db)
assert len(queries) == 1
expected = 'DESCRIBE {0}.`tpch_orders`'.format(self.test_data_db)
assert queries[0] == expected
def test_sql_query_limits(self):
table = self.con.table('tpch_nation', database=self.test_data_db)
with config.option_context('sql.default_limit', 100000):
# table has 25 rows
assert len(table.execute()) == 25
# comply with limit arg for TableExpr
assert len(table.execute(limit=10)) == 10
# state hasn't changed
assert len(table.execute()) == 25
# non-TableExpr ignores default_limit
assert table.count().execute() == 25
# non-TableExpr doesn't observe limit arg
assert table.count().execute(limit=10) == 25
with config.option_context('sql.default_limit', 20):
# TableExpr observes default limit setting
assert len(table.execute()) == 20
# explicit limit= overrides default
assert len(table.execute(limit=15)) == 15
assert len(table.execute(limit=23)) == 23
# non-TableExpr ignores default_limit
assert table.count().execute() == 25
# non-TableExpr doesn't observe limit arg
assert table.count().execute(limit=10) == 25
# eliminating default_limit doesn't break anything
with config.option_context('sql.default_limit', None):
assert len(table.execute()) == 25
assert len(table.execute(limit=15)) == 15
assert len(table.execute(limit=10000)) == 25
assert table.count().execute() == 25
assert table.count().execute(limit=10) == 25
def test_expr_compile_verify(self):
table = self.db.functional_alltypes
expr = table.double_col.sum()
assert isinstance(expr.compile(), str)
assert expr.verify()
def test_api_compile_verify(self):
t = self.db.functional_alltypes
s = t.string_col
supported = s.lower()
unsupported = s.replace('foo', 'bar')
assert ibis.impala.verify(supported)
assert not ibis.impala.verify(unsupported)
def test_database_repr(self):
assert self.test_data_db in repr(self.db)
def test_database_drop(self):
tmp_name = '__ibis_test_{0}'.format(util.guid())
self.con.create_database(tmp_name)
db = self.con.database(tmp_name)
self.temp_databases.append(tmp_name)
db.drop()
assert not self.con.exists_database(tmp_name)
def test_database_default_current_database(self):
db = self.con.database()
assert db.name == self.con.current_database
def test_namespace(self):
ns = self.db.namespace('tpch_')
assert 'tpch_' in repr(ns)
table = ns.lineitem
expected = self.db.tpch_lineitem
attrs = dir(ns)
assert 'lineitem' in attrs
assert 'functional_alltypes' not in attrs
assert_equal(table, expected)
def test_close_drops_temp_tables(self):
from posixpath import join as pjoin
hdfs_path = pjoin(self.test_data_dir, 'parquet/tpch_region')
client = connect_test(ENV)
table = client.parquet_file(hdfs_path)
name = table.op().name
assert self.con.exists_table(name) is True
client.close()
assert not self.con.exists_table(name)
def test_execute_async_simple(self):
t = self.db.functional_alltypes
expr = t.double_col.sum()
q = expr.execute(async=True)
result = q.get_result()
expected = expr.execute()
assert np.allclose(result, expected)
@pytest.mark.xfail(
raises=NotImplementedError,
reason='_collect_Union not implemented'
)
def test_query_cancel(self):
import time
t = self.db.functional_alltypes
t2 = t.union(t).union(t)
# WM: this query takes about 90 seconds to execute for me locally, so
# I'm eyeballing an acceptable time frame for the cancel to work
expr = t2.join(t2).count()
start = time.clock()
q = expr.execute(async=True)
q.cancel()
end = time.clock()
elapsed = end - start
assert elapsed < 5
assert q.is_finished()
def test_set_compression_codec(self):
old_opts = self.con.get_options()
assert old_opts['COMPRESSION_CODEC'].upper() == 'NONE'
self.con.set_compression_codec('snappy')
opts = self.con.get_options()
assert opts['COMPRESSION_CODEC'].upper() == 'SNAPPY'
self.con.set_compression_codec(None)
opts = self.con.get_options()
assert opts['COMPRESSION_CODEC'].upper() == 'NONE'
def test_disable_codegen(self):
self.con.disable_codegen(False)
opts = self.con.get_options()
assert opts['DISABLE_CODEGEN'] == '0'
self.con.disable_codegen()
opts = self.con.get_options()
assert opts['DISABLE_CODEGEN'] == '1'
impala_con = self.con.con
cur1 = impala_con.execute('SET')
cur2 = impala_con.execute('SET')
opts1 = dict(cur1.fetchall())
cur1.release()
opts2 = dict(cur2.fetchall())
cur2.release()
assert opts1['DISABLE_CODEGEN'] == '1'
assert opts2['DISABLE_CODEGEN'] == '1'
def test_attr_name_conflict(self):
LEFT = 'testing_{0}'.format(util.guid())
RIGHT = 'testing_{0}'.format(util.guid())
schema = ibis.schema([('id', 'int32'), ('name', 'string'),
('files', 'int32')])
db = self.con.database(self.tmp_db)
for tablename in (LEFT, RIGHT):
db.create_table(tablename, schema=schema,
format='parquet')
left = db[LEFT]
right = db[RIGHT]
left.join(right, ['id'])
left.join(right, ['id', 'name'])
left.join(right, ['id', 'files'])
def test_rerelease_cursor(self):
con = connect_test(self.env)
with con.raw_sql('select 1', True) as cur1:
pass
cur1.release()
with con.raw_sql('select 1', True) as cur2:
pass
cur2.release()
with con.raw_sql('select 1', True) as cur3:
pass
assert cur1 == cur2
assert cur2 == cur3
def test_set_option_with_dot(self):
con = self.con
con.set_options({'request_pool': 'baz.quux'})
result = dict(con.raw_sql('set', True).fetchall())
assert result['REQUEST_POOL'] == 'baz.quux'
def test_day_of_week(self):
date_var = ibis.literal(datetime.date(2017, 1, 1), type=dt.date)
expr_index = date_var.day_of_week.index()
result = self.con.execute(expr_index)
assert result == 6
expr_name = date_var.day_of_week.full_name()
result = self.con.execute(expr_name)
assert result == 'Sunday'
|
# pylint: disable=C0111,R0903
"""Displays APT package update information (<to upgrade>/<to remove >)
Requires the following packages:
* aptitude
contributed by `qba10 <https://github.com/qba10>`_ - many thanks!
"""
import re
import threading
import core.module
import core.widget
import core.decorators
import core.event
import util.cli
PATTERN = "{} packages upgraded, {} newly installed, {} to remove and {} not upgraded."
def parse_result(to_parse):
# We want the line with the package upgrade information
line_to_parse = to_parse.split("\n")[-4]
result = re.search(
r"(.+) packages upgraded, (.+) newly installed, (.+) to remove", line_to_parse
)
return int(result.group(1)), int(result.group(3))
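# Illustrative example (made-up numbers): the simulated aptitude run ends with a
# summary line such as
#   "12 packages upgraded, 0 newly installed, 3 to remove and 0 not upgraded."
# which parse_result() reduces to the tuple (12, 3).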
def get_apt_check_info(module):
widget = module.widget()
try:
res = util.cli.execute("aptitude full-upgrade --simulate --assume-yes")
widget.set("error", None)
except (RuntimeError, FileNotFoundError) as e:
widget.set("error", "unable to query APT: {}".format(e))
return
to_upgrade = 0
to_remove = 0
try:
to_upgrade, to_remove = parse_result(res)
widget.set("to_upgrade", to_upgrade)
widget.set("to_remove", to_remove)
except Exception as e:
widget.set("error", "parse error: {}".format(e))
core.event.trigger("update", [module.id], redraw_only=True)
class Module(core.module.Module):
@core.decorators.every(minutes=30)
def __init__(self, config, theme):
super().__init__(config, theme, core.widget.Widget(self.updates))
self.__thread = None
def updates(self, widget):
if widget.get("error"):
return widget.get("error")
return "{} to upgrade, {} to remove".format(
widget.get("to_upgrade", 0), widget.get("to_remove", 0)
)
def update(self):
if self.__thread and self.__thread.is_alive():
return
self.__thread = threading.Thread(target=get_apt_check_info, args=(self,))
self.__thread.start()
def state(self, widget):
cnt = 0
ret = "good"
for t in ["to_upgrade", "to_remove"]:
cnt += widget.get(t, 0)
if cnt > 50:
ret = "critical"
elif cnt > 0:
ret = "warning"
if widget.get("error"):
ret = "critical"
return ret
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
from couchbase_helper.documentgenerator import SDKDataLoader
from lib.membase.api.rest_client import RestConnection
from lib.testconstants import STANDARD_BUCKET_PORT
from pytests.eventing.eventing_constants import HANDLER_CODE
from pytests.eventing.eventing_base import EventingBaseTest, log
from membase.helper.cluster_helper import ClusterOperationHelper
class EventingSanity(EventingBaseTest):
def setUp(self):
super(EventingSanity, self).setUp()
self.rest.set_service_memoryQuota(service='memoryQuota', memoryQuota=700)
if self.create_functions_buckets:
self.bucket_size = 100
log.info(self.bucket_size)
bucket_params = self._create_bucket_params(server=self.server, size=self.bucket_size,
replicas=0)
self.cluster.create_standard_bucket(name=self.src_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.src_bucket = RestConnection(self.master).get_buckets()
self.cluster.create_standard_bucket(name=self.dst_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.cluster.create_standard_bucket(name=self.metadata_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.buckets = RestConnection(self.master).get_buckets()
self.gens_load = self.generate_docs(self.docs_per_day)
self.expiry = 3
query = "create primary index on {}".format(self.src_bucket_name)
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
query = "create primary index on {}".format(self.dst_bucket_name)
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
query = "create primary index on {}".format(self.metadata_bucket_name)
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
if self.non_default_collection:
self.create_scope_collection(bucket=self.src_bucket_name,scope=self.src_bucket_name,collection=self.src_bucket_name)
self.create_scope_collection(bucket=self.metadata_bucket_name,scope=self.metadata_bucket_name,collection=self.metadata_bucket_name)
self.create_scope_collection(bucket=self.dst_bucket_name,scope=self.dst_bucket_name,collection=self.dst_bucket_name)
def tearDown(self):
super(EventingSanity, self).tearDown()
def test_create_mutation_for_dcp_stream_boundary_from_beginning(self):
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket")
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE)
self.deploy_function(body)
# Wait for eventing to catch up with all the create mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", self.docs_per_day * self.num_docs)
else:
self.verify_doc_count_collections("dst_bucket._default._default", self.docs_per_day * self.num_docs)
self.undeploy_and_delete_function(body)
def test_delete_mutation_for_dcp_stream_boundary_from_beginning(self):
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket")
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_DELETE)
self.deploy_function(body)
# delete all documents
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket",is_delete=True)
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default",is_delete=True)
# Wait for eventing to catch up with all the delete mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", self.docs_per_day * self.num_docs)
else:
self.verify_doc_count_collections("dst_bucket._default._default", self.docs_per_day * self.num_docs)
self.undeploy_and_delete_function(body)
def test_expiry_mutation_for_dcp_stream_boundary_from_now(self):
# set expiry pager interval
ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 1, bucket=self.src_bucket_name)
body = self.create_save_function_body(self.function_name,"handler_code/ABO/insert_exp_delete_only.js",
dcp_stream_boundary="from_now")
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket")
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
self.deploy_function(body)
if self.non_default_collection:
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", 0)
else:
self.verify_doc_count_collections("dst_bucket._default._default", 0)
### update all the documents with expiry
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket",is_update=True,expiry=10)
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default",is_update=True,expiry=10)
# Wait for eventing to catch up with all the expiry mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", self.docs_per_day * self.num_docs)
else:
self.verify_doc_count_collections("dst_bucket._default._default", self.docs_per_day * self.num_docs)
self.undeploy_and_delete_function(body)
def test_update_mutation_for_dcp_stream_boundary_from_now(self):
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket")
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OP_WITH_RAND,
dcp_stream_boundary="from_now")
self.deploy_function(body)
if self.non_default_collection:
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", 0)
else:
self.verify_doc_count_collections("dst_bucket._default._default", 0)
# update all documents
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket",is_update=True)
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default",is_update=True)
# Wait for eventing to catch up with all the update mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", self.docs_per_day * self.num_docs)
else:
self.verify_doc_count_collections("dst_bucket._default._default", self.docs_per_day * self.num_docs)
self.undeploy_and_delete_function(body)
def test_n1ql_query_execution_from_handler_code(self):
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket")
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
body = self.create_save_function_body(self.function_name, 'handler_code/collections/n1ql_insert_on_update.js')
# Enable this after MB-26527 is fixed
# sock_batch_size=10, worker_count=4, cpp_worker_thread_count=4)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", self.docs_per_day * self.num_docs)
else:
self.verify_doc_count_collections("dst_bucket._default._default", self.docs_per_day * self.num_docs)
self.undeploy_and_delete_function(body)
def test_timer_events_from_handler_code_with_n1ql(self):
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket")
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
body = self.create_save_function_body(self.function_name,'handler_code/collections/n1ql_insert_with_timer.js')
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", self.docs_per_day * self.num_docs)
else:
self.verify_doc_count_collections("dst_bucket._default._default", self.docs_per_day * self.num_docs)
self.undeploy_and_delete_function(body)
def test_timer_events_from_handler_code_with_bucket_ops(self):
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket")
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_WITH_DOC_TIMER)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", self.docs_per_day * self.num_docs)
else:
self.verify_doc_count_collections("dst_bucket._default._default", self.docs_per_day * self.num_docs)
self.undeploy_and_delete_function(body)
def test_delete_bucket_operation_from_handler_code(self):
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket")
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
body = self.create_save_function_body(self.function_name, HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", self.docs_per_day * self.num_docs)
else:
self.verify_doc_count_collections("dst_bucket._default._default", self.docs_per_day * self.num_docs)
# delete all documents
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket",is_delete=True)
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default",is_delete=True)
# Wait for eventing to catch up with all the delete mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", 0)
else:
self.verify_doc_count_collections("dst_bucket._default._default", 0)
self.undeploy_and_delete_function(body)
def test_timers_without_context(self):
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket")
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_WITH_TIMER_WITHOUT_CONTEXT)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", self.docs_per_day * self.num_docs)
else:
self.verify_doc_count_collections("dst_bucket._default._default", self.docs_per_day * self.num_docs)
self.undeploy_and_delete_function(body)
def test_cancel_timers_with_timers_being_overwritten(self):
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket")
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OP_WITH_TIMER_OVERWRITTEN)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", self.docs_per_day * self.num_docs)
else:
self.verify_doc_count_collections("dst_bucket._default._default", self.docs_per_day * self.num_docs)
self.undeploy_and_delete_function(body)
def test_source_doc_mutations(self):
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket")
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OP_SOURCE_DOC_MUTATION)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
if self.non_default_collection:
self.verify_source_bucket_mutation(self.docs_per_day * 2016,bucket='src_bucket.src_bucket.src_bucket')
else:
self.verify_source_bucket_mutation(self.docs_per_day * 2016,bucket='src_bucket._default._default')
# delete all documents
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket",is_delete=True)
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default",is_delete=True)
if self.non_default_collection:
self.verify_source_bucket_mutation(self.docs_per_day * 2016, deletes=True, timeout=1200,
bucket='src_bucket.src_bucket.src_bucket')
else:
self.verify_source_bucket_mutation(self.docs_per_day * 2016, deletes=True, timeout=1200,
bucket='src_bucket._default._default')
self.undeploy_and_delete_function(body)
def test_source_doc_mutations_with_timers(self):
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket")
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OP_SOURCE_DOC_MUTATION_WITH_TIMERS)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
if self.non_default_collection:
self.verify_source_bucket_mutation(self.docs_per_day * 2016,bucket='src_bucket.src_bucket.src_bucket')
else:
self.verify_source_bucket_mutation(self.docs_per_day * 2016,bucket='src_bucket._default._default')
# delete all documents
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket",is_delete=True)
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default",is_delete=True)
if self.non_default_collection:
self.verify_source_bucket_mutation(self.docs_per_day * 2016, deletes=True, timeout=1200,
bucket='src_bucket.src_bucket.src_bucket')
else:
self.verify_source_bucket_mutation(self.docs_per_day * 2016, deletes=True, timeout=1200,
bucket='src_bucket._default._default')
self.undeploy_and_delete_function(body)
def test_source_bucket_mutations(self):
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket")
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OP_WITH_SOURCE_BUCKET_MUTATION)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("src_bucket.src_bucket.src_bucket", self.docs_per_day * self.num_docs*2)
else:
self.verify_doc_count_collections("src_bucket._default._default", self.docs_per_day * self.num_docs*2)
# delete all documents
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket",is_delete=True)
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default",is_delete=True)
if self.non_default_collection:
self.verify_doc_count_collections("src_bucket.src_bucket.src_bucket", self.docs_per_day * self.num_docs)
else:
self.verify_doc_count_collections("src_bucket._default._default", self.docs_per_day * self.num_docs)
self.undeploy_and_delete_function(body)
def test_source_bucket_mutations_with_timers(self):
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket")
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OP_SOURCE_BUCKET_MUTATION_WITH_TIMERS)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("src_bucket.src_bucket.src_bucket", self.docs_per_day * self.num_docs*2)
else:
self.verify_doc_count_collections("src_bucket._default._default", self.docs_per_day * self.num_docs*2)
# delete all documents
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket",is_delete=True)
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default",is_delete=True)
if self.non_default_collection:
self.verify_doc_count_collections("src_bucket.src_bucket.src_bucket", self.docs_per_day * self.num_docs)
else:
self.verify_doc_count_collections("src_bucket._default._default", self.docs_per_day * self.num_docs)
self.undeploy_and_delete_function(body)
def test_pause_resume_execution(self):
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE)
self.deploy_function(body)
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket.src_bucket.src_bucket",wait_for_loading=False)
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default",wait_for_loading=False)
self.pause_function(body)
# intentionally added, as it takes some time for the eventing-consumers to shut down
self.sleep(60)
self.assertTrue(self.check_if_eventing_consumers_are_cleaned_up(),
msg="eventing-consumer processes are not cleaned up even after pausing the function")
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day * self.num_docs*2, "src_bucket.src_bucket.src_bucket",wait_for_loading=False)
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs*2, "src_bucket._default._default",wait_for_loading=False)
self.resume_function(body)
# Wait for eventing to catch up with all the create mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("src_bucket.src_bucket.src_bucket", self.docs_per_day * self.num_docs*2)
else:
self.verify_doc_count_collections("src_bucket._default._default", self.docs_per_day * self.num_docs*2)
self.undeploy_and_delete_function(body)
def test_source_bucket_mutation_for_dcp_stream_boundary_from_now(self):
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket.src_bucket.src_bucket")
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
body = self.create_save_function_body(self.function_name,HANDLER_CODE.BUCKET_OP_WITH_SOURCE_BUCKET_MUTATION ,
dcp_stream_boundary="from_now")
self.deploy_function(body)
# update all documents
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket.src_bucket.src_bucket",is_update=True)
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default",is_update=True)
# Wait for eventing to catch up with all the update mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("src_bucket.src_bucket.src_bucket", self.docs_per_day * self.num_docs*2)
else:
self.verify_doc_count_collections("src_bucket._default._default", self.docs_per_day * self.num_docs*2)
self.undeploy_and_delete_function(body)
def test_compress_handler(self):
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket.src_bucket.src_bucket")
body = self.create_save_function_body(self.function_name,"handler_code/compress.js")
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", self.docs_per_day * self.num_docs)
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket.src_bucket.src_bucket",is_delete=True)
# Wait for eventing to catch up with all the delete mutations and verify results
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", 0)
self.undeploy_and_delete_function(body)
def test_expired_mutation(self):
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket",expiry=10)
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default",expiry=10)
# set expiry pager interval
ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 1, bucket=self.src_bucket_name)
body = self.create_save_function_body(self.function_name, "handler_code/bucket_op_expired.js")
self.deploy_function(body)
# Wait for eventing to catch up with all the expiry mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("src_bucket.src_bucket.src_bucket", 0)
else:
self.verify_doc_count_collections("src_bucket._default._default", 0)
self.undeploy_and_delete_function(body)
def test_cancel_timer(self):
bucket_params = self._create_bucket_params(server=self.server, size=self.bucket_size, replicas=0)
self.cluster.create_standard_bucket(name=self.dst_bucket_name1, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
query = "create primary index on {}".format(self.dst_bucket_name1)
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
if self.non_default_collection:
self.collection_rest.create_scope_collection(bucket=self.dst_bucket_name1,scope=self.dst_bucket_name1,
collection=self.dst_bucket_name1)
self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket")
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
body = self.create_save_function_body(self.function_name, "handler_code/cancel_timer.js")
if self.non_default_collection:
body['depcfg']['buckets'].append({"alias": self.dst_bucket_name1, "bucket_name": self.dst_bucket_name1,
"scope_name":self.dst_bucket_name1,"collection_name":self.dst_bucket_name1,
"access": "rw"})
else:
body['depcfg']['buckets'].append({"alias": self.dst_bucket_name1, "bucket_name": self.dst_bucket_name1})
self.rest.create_function(body['appname'], body)
self.deploy_function(body)
# print timer context and alarm
self.print_timer_alarm_context()
# Wait for eventing to catch up with all the update mutations and verify results
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket.src_bucket.src_bucket",is_delete=True)
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default",is_delete=True)
# Wait for eventing to catch up with all the delete mutations and verify results
if self.non_default_collection:
self.verify_doc_count_collections("dst_bucket1.dst_bucket1.dst_bucket1", self.docs_per_day * self.num_docs)
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", 0)
else:
self.verify_doc_count_collections("dst_bucket1._default._default", self.docs_per_day * self.num_docs)
self.verify_doc_count_collections("dst_bucket._default._default", 0)
self.assertEqual(self.get_stats_value(self.function_name,"execution_stats.timer_cancel_counter"),self.docs_per_day * 2016)
self.undeploy_and_delete_function(body)
def test_advance_bucket_op(self):
if self.non_default_collection:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket.src_bucket.src_bucket",
expiry=300)
else:
self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default", expiry=300)
# set expiry pager interval
ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 3, bucket=self.src_bucket_name)
body = self.create_save_function_body(self.function_name, "handler_code/ABO/curl_timer_insert.js")
if self.non_default_collection:
body['depcfg']['buckets'].append({"alias": self.src_bucket_name, "bucket_name": self.src_bucket_name,
"scope_name":self.src_bucket_name,"collection_name":self.src_bucket_name})
else:
body['depcfg']['buckets'].append({"alias": self.src_bucket_name, "bucket_name": self.src_bucket_name})
self.rest.create_function(body['appname'], body)
self.deploy_function(body)
if self.non_default_collection:
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", self.docs_per_day * self.num_docs)
self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", 0)
else:
self.verify_doc_count_collections("dst_bucket._default._default", self.docs_per_day * self.num_docs)
self.verify_doc_count_collections("dst_bucket._default._default", 0)
self.undeploy_and_delete_function(body)
|
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import pprint
from multiprocessing import Queue
import dragon.core.mpi as mpi
import dragon.core.logging as logging
from .data_reader import DataReader
from .data_transformer import DataTransformer
from .blob_fetcher import BlobFetcher
class DataBatch(object):
"""DataBatch aims to prefetch data by *Triple-Buffering*.
It takes full advantage of Python processes and threads,
which provides a remarkable I/O speed-up for scalable distributed training.
"""
def __init__(self, **kwargs):
"""Construct a ``DataBatch``.
Parameters
----------
source : str
The path of database.
multiple_nodes : bool, optional, default=False
Whether to split data for multiple parallel nodes.
shuffle : bool, optional, default=False
Whether to shuffle the data.
num_chunks : int, optional, default=2048
The number of chunks to split.
chunk_size : int, optional, default=-1
The size (MB) of each chunk.
padding : int, optional, default=0
The zero-padding size.
fill_value : int, optional, default=127
The value to fill when padding is valid.
crop_size : int, optional, default=0
The cropping size.
mirror : bool, optional, default=False
Whether to mirror (flip horizontally) images.
color_augmentation : bool, optional, default=False
Whether to use color distortion.
min_random_scale : float, optional, default=1.
The min scale of the input images.
max_random_scale : float, optional, default=1.
The max scale of the input images.
force_gray : bool, optional, default=False
If True, do not duplicate the channel for grayscale images.
phase : {'TRAIN', 'TEST'}, optional
The optional running phase.
batch_size : int, optional, default=128
The size of a mini-batch.
partition : bool, optional, default=False
Whether to partition batch for parallelism.
prefetch : int, optional, default=5
The prefetch count.
"""
super(DataBatch, self).__init__()
# Init mpi
global_rank, local_rank, group_size = 0, 0, 1
if mpi.Is_Init():
idx, group = mpi.AllowParallel()
if idx != -1: # DataParallel
global_rank = mpi.Rank()
group_size = len(group)
for i, node in enumerate(group):
if global_rank == node: local_rank = i
kwargs['group_size'] = group_size
# Configuration
self._prefetch = kwargs.get('prefetch', 5)
self._num_readers = kwargs.get('num_readers', 1)
self._num_transformers = kwargs.get('num_transformers', -1)
self._max_transformers = kwargs.get('max_transformers', 3)
self._num_fetchers = kwargs.get('num_fetchers', 1)
# Io-Aware Policy
if self._num_transformers == -1:
self._num_transformers = 1
# Add 1 transformer for color augmentation
if kwargs.get('color_augmentation', False):
self._num_transformers += 1
# Add 1 transformer for random scale
if kwargs.get('max_random_scale', 1.0) - \
kwargs.get('min_random_scale', 1.0) != 0:
self._num_transformers += 1
# Add 1 transformer for random crop
if kwargs.get('crop_size', 0) > 0 and \
kwargs.get('phase', 'TEST') == 'TRAIN':
self._num_transformers += 1
self._num_transformers = min(self._num_transformers, self._max_transformers)
self._batch_size = kwargs.get('batch_size', 128)
self._partition = kwargs.get('partition', False)
if self._partition:
self._batch_size = int(self._batch_size / kwargs['group_size'])
# Init queues
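# Triple-buffering layout: DataReader -> Q_level_1 -> DataTransformer
# -> Q_level_2 -> BlobFetcher -> Q_level_3 -> get()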
self.Q_level_1 = Queue(self._prefetch * self._num_readers * self._batch_size)
self.Q_level_2 = Queue(self._prefetch * self._num_readers * self._batch_size)
self.Q_level_3 = Queue(self._prefetch * self._num_readers)
# Init readers
self._readers = []
for i in range(self._num_readers):
self._readers.append(DataReader(**kwargs))
self._readers[-1].Q_out = self.Q_level_1
for i in range(self._num_readers):
num_parts = self._num_readers
part_idx = i
if self._readers[i]._multiple_nodes or \
self._readers[i]._use_shuffle:
num_parts *= group_size
part_idx += local_rank * self._num_readers
self._readers[i]._num_parts = num_parts
self._readers[i]._part_idx = part_idx
self._readers[i]._random_seed += part_idx
self._readers[i].start()
time.sleep(0.1)
# Init transformers
self._transformers = []
for i in range(self._num_transformers):
transformer = DataTransformer(**kwargs)
transformer._random_seed += (i + local_rank * self._num_transformers)
transformer.Q_in = self.Q_level_1
transformer.Q_out = self.Q_level_2
transformer.start()
self._transformers.append(transformer)
time.sleep(0.1)
# Init blob fetchers
self._fetchers = []
for i in range(self._num_fetchers):
fetcher = BlobFetcher(**kwargs)
fetcher.Q_in = self.Q_level_2
fetcher.Q_out = self.Q_level_3
fetcher.start()
self._fetchers.append(fetcher)
time.sleep(0.1)
def cleanup():
def terminate(processes):
for process in processes:
process.terminate()
process.join()
terminate(self._fetchers)
if local_rank == 0: logging.info('Terminating BlobFetcher ......')
terminate(self._transformers)
if local_rank == 0: logging.info('Terminating DataTransformer ......')
terminate(self._readers)
if local_rank == 0: logging.info('Terminating DataReader......')
import atexit
atexit.register(cleanup)
def get(self):
"""Get a batch.
Returns
-------
tuple
The batch, representing data and labels respectively.
"""
return self.Q_level_3.get()
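# Minimal usage sketch (illustrative only; the database path and the argument
# values are placeholders, not part of this module):
#
#   batch = DataBatch(source='/data/train_lmdb', shuffle=True,
#                     batch_size=64, prefetch=5, phase='TRAIN')
#   data, labels = batch.get()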
|
# SPDX-License-Identifier: MIT
# (c) 2019 The TJHSST Director 4.0 Development Team & Contributors
from django.contrib import admin
from .models import SiteRequest
# Register your models here.
admin.site.register(SiteRequest)
|
import os
from github import Github
import yaml
g = Github(os.getenv('GITHUB_TOKEN'))
GITHUB_USER = 'MachineUserHallicopter/'
def push_file_to_github(filename, content, repo):
repo = g.get_repo(GITHUB_USER + repo)
print(repo.create_file(path=filename, content=content, message="adds: autocommit new post", branch="gh-pages"))
def update_template(repo):
file_name = 'template/_config.yml'
print("title: "+repo+"\nbaseurl: '/"+repo+"/' # name of the repository")
file = open(file_name, "a") # append mode
file.write("title: "+repo+"\nbaseurl: '/"+repo+"/' # name of the repository")
file.close()
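# After this call, template/_config.yml ends with the two Jekyll settings
# written above; for repo "yamu" that is:
#   title: yamu
#   baseurl: '/yamu/' # name of the repository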
# Create and push the template
os.system('./create_repo.sh ' + repo + ' ' + os.getenv('GITHUB_TOKEN'))
update_template("yamu")
|
import gym
import numpy as np
import matplotlib.pyplot as plt
def policy(state, theta):
"""
Parameters
----------
state : numpy array
contains state of cartpole environment.
theta : numpy array
contains parameters of linear features
Returns
-------
numpy array
return output of softmax function
"""
z = state.dot(theta)
exp = np.exp(z)
return exp/np.sum(exp)
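# Example: with theta = np.zeros((4, 2)) both action logits are equal for any
# state, so policy() returns the uniform distribution [0.5, 0.5].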
def generate_episode(env, theta, display=False):
""" enerates one episode and returns the list of states, the list of rewards and the list of actions of that episode """
state = env.reset()
states = [state]
actions = []
rewards = []
for t in range(500):
if display:
env.render()
p = policy(state, theta)
action = np.random.choice(len(p), p=p)
state, reward, done, info = env.step(action)
rewards.append(reward)
actions.append(action)
if done:
break
states.append(state)
return states, rewards, actions
def REINFORCE(env):
# policy parameters
alpha = 0.025
gamma = 0.99
n_episodes = 800
theta = np.random.rand(4, 2)
# init lists to store rewards of each episode and means of last 100 episodes
last_100_episodes = []
episodes = []
means = []
for e in range(n_episodes):
# render the environment every 100 episodes
if e % 100 == 0:
states, rewards, actions = generate_episode(env, theta, True)
else:
states, rewards, actions = generate_episode(env, theta, False)
# keep track of previous 100 episode lengths
if e < 100:
last_100_episodes.append(sum(rewards))
else:
last_100_episodes.append(sum(rewards))
last_100_episodes.pop(0)
# compute mean
mean = np.mean(last_100_episodes)
means.append(mean)
episodes.append(e)
# learning rate decay
if e % 200 == 0:
# alpha = alpha/2
if mean > 495:
alpha = 0.00001 # slow down learning if mean of last 100 episodes is 500
if mean < 495:
alpha = 0.025
# print mean every 100 episodes
if e % 100 == 0 or e == (n_episodes - 1):
print("episode: " + str(e) + " Mean of last 100 episodes: " + str(mean))
# REINFORCE Algorithm
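# Update applied below (REINFORCE with a discounted return):
#   G_t = sum_{k=t+1}^{T} gamma^(k-t-1) * r_k
#   theta <- theta + alpha * gamma^t * G_t * grad_theta log pi(a_t | s_t)
# For this linear softmax policy, the partial derivative of log pi with respect
# to the chosen action's parameter column is states[t] * (1 - pi[action]),
# which is exactly the factor used in the update rule at the end of this loop.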
steps = len(states) # length of episode
G_t = np.zeros([steps]) # init G_t
for t in range(steps):
# MC sampling of G_t
for k in range(t+1,steps+1):
G_t[t] += np.power(gamma,k-t-1) * rewards[k-1]
pi = policy(states[t], theta)
action = actions[t]
# update rule
theta[:,action] = theta[:,action] + alpha * np.power(gamma, t) * G_t[t] * (states[t] * (1 - pi[action]))
# create plot
plt.plot(episodes,means,'b')
plt.xlabel("Episodes")
plt.ylabel("Mean of last 100 episodes")
plt.title("REINFORCE")
def main():
env = gym.make('CartPole-v1')
REINFORCE(env)
env.close()
if __name__ == "__main__":
main()
|
from django.urls import path
from . import views
app_name = 'posts'
urlpatterns = [
path('', views.PostListView.as_view(), name='post_list'),
path('page/<int:page>/', views.PostListView.as_view(), name='post_list'),
path('category/<slug:category>/', views.PostCategoryListView.as_view(), name='post_category_list'),
path('category/<slug:category>/page/<int:page>/', views.PostCategoryListView.as_view(), name='post_category_list'),
path('category/<slug:category>/feed/', views.PostCategoryFeedView.as_view(), name='post_category_feed'),
path('feed/', views.PostFeedView.as_view(), name='post_feed'),
path('post/<int:identifier>/', views.PostDetailView.as_view(), name='post_detail'),
path('post/<int:identifier>/<slug:slug>/', views.PostDetailView.as_view(), name='post_detail'),
# Media upload stuff for the admin.
path('image-upload/', views.ImageUploadView.as_view(), name='image_upload'),
path('image-redirect/<int:pk>/', views.ImageRedirectView.as_view(), name='image_redirect'),
]
|
import numpy as np
from emulator.main import Account
from agent.agent import Agent
env = Account()
state = env.reset()
print(state.shape)
agent = Agent([5, 50, 58], 3)
# state = np.transpose(state, [2, 0, 1])
# state = np.expand_dims(state, 0)
# action = agent.get_epsilon_policy(state)
# reward, next_state, done = env.step(action)
# print(reward)
for i in range(1440):
state = np.transpose(state, [2, 0, 1])
state = np.expand_dims(state, 0)
action = agent.get_epsilon_policy(state)
reward, state, done = env.step(action)
print(done, reward)
if done:
state = env.reset()
break
|
from PySide2.QtCore import Signal, Slot, Qt
from PySide2.QtWidgets import QMessageBox, QTabWidget, QHBoxLayout
import numpy as np
from hexrd.ui.constants import OverlayType, PAN, ViewType, ZOOM
from hexrd.ui.hexrd_config import HexrdConfig
from hexrd.ui.image_canvas import ImageCanvas
from hexrd.ui.image_series_toolbar import ImageSeriesToolbar
from hexrd.ui.navigation_toolbar import NavigationToolbar
from hexrd.ui import utils
class ImageTabWidget(QTabWidget):
# Tell the main window that an update is needed
update_needed = Signal()
# Emitted when the mouse is moving on the canvas, but outside
# an image/plot. Intended to clear the status bar.
clear_mouse_position = Signal()
# Emitted when the mouse moves on top of an image/plot
# Arguments are: x, y, xdata, ydata, intensity
new_mouse_position = Signal(dict)
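# Illustrative connection (the slot name is hypothetical, not part of this file):
#   image_tab_widget.new_mouse_position.connect(update_status_bar)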
def __init__(self, parent=None):
super(ImageTabWidget, self).__init__(parent)
self.image_canvases = [ImageCanvas(self)]
# Set up a mouse move connection to use with the status bar
cid = self.image_canvases[0].mpl_connect(
'motion_notify_event',
self.on_motion_notify_event)
self.mpl_connections = [cid]
self.image_names = []
self.current_index = 0
# These will get set later
self.cmap = None
self.norm = None
self.toolbars = []
self.toolbar_visible = True
self.setup_connections()
def setup_connections(self):
self.tabBarClicked.connect(self.switch_toolbar)
HexrdConfig().tab_images_changed.connect(self.load_images)
HexrdConfig().detectors_changed.connect(self.reset_index)
def clear(self):
# This removes all canvases except the first one,
# and it calls super().clear()
for canvas, cid in zip(self.image_canvases[1:],
self.mpl_connections[1:]):
canvas.mpl_disconnect(cid)
canvas.deleteLater()
del self.image_canvases[1:]
del self.toolbars[1:]
del self.mpl_connections[1:]
super().clear()
def reset_index(self):
self.current_index = 0
def allocate_canvases(self):
while len(self.image_canvases) < len(self.image_names):
self.image_canvases.append(ImageCanvas(self))
# Make connections to use with the status bar
while len(self.mpl_connections) < len(self.image_canvases):
ind = len(self.mpl_connections)
cid = self.image_canvases[ind].mpl_connect(
'motion_notify_event',
self.on_motion_notify_event)
self.mpl_connections.append(cid)
def load_images_tabbed(self):
self.clear()
self.allocate_canvases()
self.allocate_toolbars()
for i, name in enumerate(self.image_names):
self.image_canvases[i].load_images(image_names=[name])
self.addTab(self.image_canvases[i], name)
self.update_canvas_cmaps()
self.update_canvas_norms()
self.tabBar().show()
self.setCurrentIndex(self.current_index)
def load_images_untabbed(self):
self.clear()
self.image_canvases[0].load_images(
image_names=self.image_names)
self.allocate_toolbars()
self.addTab(self.image_canvases[0], '')
self.update_canvas_cmaps()
self.update_canvas_norms()
self.tabBar().hide()
def update_image_names(self):
if self.image_names != list(HexrdConfig().imageseries_dict.keys()):
self.image_names = list(HexrdConfig().imageseries_dict.keys())
def load_images(self):
self.update_image_names()
self.update_ims_toolbar()
if HexrdConfig().tab_images:
self.load_images_tabbed()
else:
self.load_images_untabbed()
self.switch_toolbar(self.currentIndex())
def change_ims_image(self, pos):
HexrdConfig().current_imageseries_idx = pos
self.update_needed.emit()
if not HexrdConfig().has_omega_ranges:
return
# For rotation series, changing the image series index may require
# a re-draw of the overlays. The rotation series overlays are designed
# so that on an image series index change, the data does not have to
# be re-generated, only the overlay needs to be redrawn.
for overlay in HexrdConfig().overlays:
redraw = (
overlay['type'] == OverlayType.rotation_series and
overlay.get('options', {}).get('aggregated', True) is False
)
if redraw:
for canvas in self.active_canvases:
canvas.redraw_overlay(overlay)
@Slot(bool)
def show_toolbar(self, b):
self.toolbar_visible = b
if self.current_index < 0 or not self.toolbars:
return
self.toolbars[self.current_index]['tb'].setVisible(b)
self.toolbars[self.current_index]['sb'].set_visible(b)
def allocate_toolbars(self):
parent = self.parent()
while len(self.toolbars) != len(self.image_canvases):
# The new one to add
idx = len(self.toolbars)
tb = NavigationToolbar(self.image_canvases[idx], parent, False)
# Current detector
name = self.image_names[idx]
sb = ImageSeriesToolbar(name, self)
# This will put it at the bottom of the central widget
toolbar = QHBoxLayout()
toolbar.addWidget(tb)
toolbar.addWidget(sb.widget)
parent.layout().addLayout(toolbar)
parent.layout().setAlignment(toolbar, Qt.AlignCenter)
self.toolbars.append({'tb': tb, 'sb': sb})
def switch_toolbar(self, idx):
if idx < 0:
return
self.current_index = idx
# None should be visible except the current one
for i, toolbar in enumerate(self.toolbars):
status = self.toolbar_visible if idx == i else False
toolbar['tb'].setVisible(status)
toolbar['sb'].set_visible(status)
self.update_ims_toolbar()
def update_ims_toolbar(self):
idx = self.current_index
if self.toolbars:
self.toolbars[idx]['sb'].update_name(self.image_names[idx])
self.toolbars[idx]['sb'].update_range(True)
def toggle_off_toolbar(self):
toolbars = [bars['tb'] for bars in self.toolbars]
for tb in toolbars:
if tb.mode == ZOOM:
tb.zoom()
if tb.mode == PAN:
tb.pan()
def show_cartesian(self):
self.update_image_names()
self.update_ims_toolbar()
# Make sure we actually have images
if len(self.image_names) == 0:
msg = 'Cannot show Cartesian view without images!'
QMessageBox.warning(self, 'HEXRD', msg)
return
self.clear()
self.image_canvases[0].show_cartesian()
self.addTab(self.image_canvases[0], '')
self.tabBar().hide()
self.switch_toolbar(self.currentIndex())
def show_polar(self):
self.update_image_names()
self.update_ims_toolbar()
# Make sure we actually have images
if len(self.image_names) == 0:
msg = 'Cannot show Polar view without images!'
QMessageBox.warning(self, 'HEXRD', msg)
return
self.clear()
self.image_canvases[0].show_polar()
self.addTab(self.image_canvases[0], '')
self.tabBar().hide()
self.switch_toolbar(self.currentIndex())
@property
def active_canvases(self):
"""Get the canvases that are actively being used"""
if not HexrdConfig().tab_images:
return [self.image_canvases[0]]
return self.image_canvases[:len(self.image_names)]
def update_canvas_cmaps(self):
if self.cmap is not None:
for canvas in self.active_canvases:
canvas.set_cmap(self.cmap)
def update_canvas_norms(self):
if self.norm is not None:
for canvas in self.active_canvases:
canvas.set_norm(self.norm)
def set_cmap(self, cmap):
self.cmap = cmap
self.update_canvas_cmaps()
def set_norm(self, norm):
self.norm = norm
self.update_canvas_norms()
def on_motion_notify_event(self, event):
# Clear the info if the mouse leaves a plot
if event.inaxes is None:
self.clear_mouse_position.emit()
return
mode = self.image_canvases[0].mode
if mode is None:
mode = ViewType.raw
info = {
'x': event.x,
'y': event.y,
'x_data': event.xdata,
'y_data': event.ydata,
'mode': mode
}
# TODO: we are currently calculating the pixel intensity
# mathematically, because I couldn't find any other way
# to obtain it. If we find a better way, let's do it.
if event.inaxes.get_images():
# Image was created with imshow()
artist = event.inaxes.get_images()[0]
i, j = utils.coords2index(artist, info['x_data'], info['y_data'])
intensity = artist.get_array().data[i, j]
else:
# This is probably just a plot. Do not calculate intensity.
intensity = None
info['intensity'] = intensity
# intensity being None implies here that the mouse is on top of the
# azimuthal integration plot in the polar view.
if intensity is not None:
iviewer = self.image_canvases[0].iviewer
if mode in (ViewType.cartesian, ViewType.raw):
if mode == ViewType.cartesian:
dpanel = iviewer.dpanel
else:
# The title is the name of the detector
key = event.inaxes.get_title()
dpanel = iviewer.instr.detectors[key]
xy_data = dpanel.pixelToCart(np.vstack([i, j]).T)
ang_data, gvec = dpanel.cart_to_angles(xy_data)
tth = ang_data[:, 0][0]
eta = ang_data[:, 1][0]
else:
tth = np.radians(info['x_data'])
eta = np.radians(info['y_data'])
# We will only display the active material's hkls
plane_data = HexrdConfig().active_material.planeData
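# Bragg's law with n = 1: lambda = 2 * d * sin(theta); since tth = 2 * theta,
# the d-spacing below is d = lambda / (2 * sin(tth / 2)).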
dsp = 0.5 * plane_data.wavelength / np.sin(0.5 * tth)
hkl = str(plane_data.getHKLs(asStr=True, allHKLs=True,
thisTTh=tth))
info['tth'] = np.degrees(tth)
info['eta'] = np.degrees(eta)
info['dsp'] = dsp
info['hkl'] = hkl
self.new_mouse_position.emit(info)
def export_current_plot(self, filename):
self.image_canvases[0].export_current_plot(filename)
def polar_show_snip1d(self):
self.image_canvases[0].polar_show_snip1d()
if __name__ == '__main__':
import sys
from PySide2.QtWidgets import QApplication
app = QApplication(sys.argv)
# This will just test for __init__ errors
ImageTabWidget()
|
# -*- coding: utf-8 -*-
import pandas as pd
import pytest
from bio_hansel.qc import QC
from bio_hansel.subtype import Subtype
from bio_hansel.subtype_stats import SubtypeCounts
from bio_hansel.subtyper import absent_downstream_subtypes, sorted_subtype_ints, empty_results, \
get_missing_internal_subtypes
from bio_hansel.utils import find_inconsistent_subtypes, expand_degenerate_bases
def test_absent_downstream_subtypes():
assert absent_downstream_subtypes(subtype='1',
subtypes=pd.Series(['1.1', '1.2', '1.3', '1']),
scheme_subtypes=['1.1', '1.2', '1', '1.3']) is None
assert absent_downstream_subtypes(subtype='1',
subtypes=pd.Series(['1.1', '1.2', '1']),
scheme_subtypes=['1.1', '1.2', '1', '1.3']) == ['1.3']
assert absent_downstream_subtypes(subtype='1',
subtypes=pd.Series(['1']),
scheme_subtypes=['1.1', '1.2', '1', '1.3']) == ['1.1', '1.2', '1.3']
def test_sorted_subtype_ints():
assert sorted_subtype_ints(pd.Series([], dtype=object)) == []
exp_subtype_ints = [
[1],
[1, 1],
[1, 1, 1],
[1, 1, 1, 99]
]
assert sorted_subtype_ints(pd.Series(['1', '1.1', '1.1.1', '1.1.1.99'])) == exp_subtype_ints
series = pd.Series(['1', '1.1', '1.1.1', '1.1.1.99', '1.1', '1.1.1'])
assert sorted_subtype_ints(series) == exp_subtype_ints
def test_empty_results():
st = Subtype(sample='test',
file_path='tests/data/Retro1000data/10-1358.fastq',
scheme='enteritidis',
scheme_version='1.0.5',
subtype=None,
non_present_subtypes=None,
all_subtypes=None,
qc_status=QC.FAIL,
qc_message=QC.NO_TARGETS_FOUND)
df_empty = empty_results(st)
df_expected_empty = pd.DataFrame(
{
0: dict(
sample='test',
file_path='tests/data/Retro1000data/10-1358.fastq',
subtype=None,
refposition=None,
is_pos_kmer=None,
scheme='enteritidis',
scheme_version='1.0.5',
qc_status=QC.FAIL,
qc_message=QC.NO_TARGETS_FOUND)}).transpose()
assert ((df_empty == df_expected_empty) | (df_empty.isnull() == df_expected_empty.isnull())).values.all(), \
f'Empty result DataFrame should equal df_expected_empty: {df_expected_empty}'
def test_find_inconsistent_subtypes():
subtype_list = ['1',
'1.1',
'1.1.1',
'1.1.1.1', ]
consistent_subtypes = sorted_subtype_ints(pd.Series(subtype_list))
assert find_inconsistent_subtypes(consistent_subtypes) == [], \
'Expecting all subtypes to be consistent with each other'
subtype_list = ['1',
'1.1',
'1.1.1',
'1.1.1.1',
'1.1.1.2',
'1.1.1.3', ]
inconsistent_subtypes = sorted_subtype_ints(pd.Series(subtype_list))
exp_incon_subtypes = ['1.1.1.1',
'1.1.1.2',
'1.1.1.3', ]
assert find_inconsistent_subtypes(inconsistent_subtypes) == exp_incon_subtypes, \
f'Expecting subtypes {exp_incon_subtypes} to be inconsistent with each other'
subtypes_list = ['1',
'1.1',
'1.1.1',
'1.1.1.1',
'1.1.1.2',
'1.1.1.3',
'1.1.2',
'2', ]
inconsistent_subtypes = sorted_subtype_ints(pd.Series(subtypes_list))
assert set(find_inconsistent_subtypes(inconsistent_subtypes)) == set(subtypes_list), \
f'All subtypes should be inconsistent with each other in {subtypes_list}'
def test_subtype_regex():
good_values = ['1.1.1.1', '10', '77.10.1.9', '17.1.1.1.1.12.4', ]
for good_value in good_values:
assert SubtypeCounts._check_subtype(None, None, good_value) == good_value
bad_values = [
'1..',
'1..1',
'1.1..1.1',
'1....',
'100.',
'',
' ',
'a1.1.1',
'1.11.1a',
'a',
'not.a.valid.subtype',
'B.1.1.7'
]
for bad_value in bad_values:
with pytest.raises(ValueError):
assert SubtypeCounts._check_subtype(None, None, bad_value) == ''
def test_get_missing_internal_subtypes():
st_vals = ['1', '1', '1', '1']
pos_subtypes_set = {
'1',
'1.1',
'1.1.1',
'1.1.1.1'
}
exp_missing_internal_subtypes = set()
assert get_missing_internal_subtypes(st_vals, pos_subtypes_set) == exp_missing_internal_subtypes
st_vals = ['2', '22', '222', '2222', '22222']
pos_subtypes_set = {'2', '2.22.222.2222.22222'}
exp_missing_internal_subtypes = {
'2.22',
'2.22.222',
'2.22.222.2222'
}
assert get_missing_internal_subtypes(st_vals, pos_subtypes_set) == exp_missing_internal_subtypes
def test_expand_degenerate_bases():
assert len(expand_degenerate_bases('NNNNN')) == 1024
with open('tests/data/expand_degenerate_bases_DARTHVADR.txt') as f:
assert expand_degenerate_bases('DARTHVADR') == f.read().split('\n')
|
import os
import sys
import argparse
import time
import schedule
import json
from mailattachmentsarchiver import mailattachmentsarchiver as maa_app
def generateIMAP(file,addr,port,user,pwd):
"""Generates IMAP credentials from sensitive environmental values.
:param file: File to output.
:param addr: Email server address, ex imap.gmail.com
:param port: Email server port.
:param user: Username used to authenticate.
:param pwd: Password used to authenticate.
"""
credentials = {}
credentials['server']=addr
credentials['user']=user
credentials['password']=pwd
print('connecting to server %s with username %s'%(addr,user))
with open(file, 'w') as fp:
json.dump(credentials,fp,indent=2,separators=(',', ': '),sort_keys=True)
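# A minimal usage sketch (hypothetical server, account and path): writes /tmp/imap.json
# containing the 'server', 'user' and 'password' keys assembled above.
def _example_generate_imap():
    generateIMAP('/tmp/imap.json', 'imap.example.com', 993, 'alice', 'secret')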
#this is for scheduling
def getmail(maa):
"""Gets the mail."""
maa.get_mail()
def main(maa, hours):
"""
    Retrieves the mail every X hours.
:param maa: Mailattachmentarchiver used for getting email attachments.
:param hours: Interval between checking email in hours, default 1.
"""
    #if we don't define it, it's an hour interval
if hours is None or hours == '' or not hours.isdigit(): hours = 1
print('Script started, initial mail check commencing')
maa.get_mail()
    #schedule a recurring mail check every 'hours' hours
schedule.every(int(hours)).hours.do(getmail,maa)
print(f'Will check mail every {hours} hour(s) from now on.')
while True:
schedule.run_pending()
time.sleep(60)
if __name__ == '__main__':
#generate IMAP from docker .env
generateIMAP('/tmp/imap.json',os.getenv('IMAP_ADDR'),os.getenv('IMAP_PORT'),os.getenv('IMAP_USER'),os.getenv('IMAP_PASS'))
maa = maa_app('/tmp/imap.json','/usr/share/config.json')
main(maa, os.getenv('SYNCTIME'))
|
import os
from PIL import Image
def alpha_to_color(image):
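    """Flatten an RGBA image onto a white background and return the result as an RGB image."""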
image = image.convert('RGBA')
image.load()
background = Image.new('RGB', image.size, (255, 255, 255))
background.paste(image, mask=image.split()[3])
return background
if __name__ == '__main__':
for dir_name in ['dark chocolate', 'white chocolate']:
dir_path = './downloads/{}'.format(dir_name)
for image_path in os.listdir(dir_path):
image_path = '{}/{}'.format(dir_path, image_path)
with open(image_path, 'rb') as image_file:
if bytes('<!DOCTYPE html>', 'UTF-8') in image_file.read():
print('[*] Found wrong image file: {}'.format(image_path))
                    os.remove(image_path)
                    continue  # the file was just deleted, so skip the PNG conversion below
if '.png' in image_path:
png = Image.open(image_path)
background = alpha_to_color(png)
image_name = image_path.replace('png', 'jpg')
background.save(image_name, 'JPEG', quality=80)
os.remove(image_path)
print('{} ===> {}'.format(image_path, image_name))
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\situations\complex\roommate_situation.py
# Compiled at: 2019-02-13 18:34:49
# Size of source mod 2**32: 2516 bytes
from sims4.tuning.instances import lock_instance_tunables
from sims4.tuning.tunable_base import GroupNames
from sims4.utils import classproperty
from situations.bouncer.bouncer_types import BouncerExclusivityCategory
from situations.situation import Situation
from situations.situation_complex import SituationComplexCommon, TunableSituationJobAndRoleState, SituationStateData, SituationState
from situations.situation_types import SituationSerializationOption, SituationCreationUIOption
class _RoommateSituationState(SituationState):
pass
class RoommateSituation(SituationComplexCommon):
INSTANCE_TUNABLES = {'roommate_situation_job_and_role_state': TunableSituationJobAndRoleState(description='\n The Situation Job and Role State for the Roommate Sim..\n ',
tuning_group=(GroupNames.ROLES))}
REMOVE_INSTANCE_TUNABLES = ('recommended_job_object_notification', 'recommended_job_object_text',
'targeted_situation', '_resident_job', '_relationship_between_job_members') + Situation.SITUATION_SCORING_REMOVE_INSTANCE_TUNABLES + Situation.SITUATION_START_FROM_UI_REMOVE_INSTANCE_TUNABLES
DOES_NOT_CARE_MAX_SCORE = -1
@classproperty
def situation_serialization_option(cls):
return SituationSerializationOption.DONT
@classmethod
def _states(cls):
return (SituationStateData(1, _RoommateSituationState),)
@classmethod
def default_job(cls):
return cls.roommate_situation_job_and_role_state.job
@classmethod
def _get_tuned_job_and_default_role_state_tuples(cls):
return [(cls.roommate_situation_job_and_role_state.job, cls.roommate_situation_job_and_role_state.role_state)]
def start_situation(self):
super().start_situation()
self._change_state(_RoommateSituationState())
lock_instance_tunables(RoommateSituation, exclusivity=(BouncerExclusivityCategory.ROOMMATE),
creation_ui_option=(SituationCreationUIOption.NOT_AVAILABLE),
duration=0,
force_invite_only=True,
_implies_greeted_status=True)
|
"""
MIT License
Sugaroid Artificial Intelligence
Chatbot Core
Copyright (c) 2020-2021 Srevin Saju
Copyright (c) 2021 The Sugaroid Project
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
from chatterbot.logic import LogicAdapter
from chatterbot.trainers import ListTrainer
from nltk import word_tokenize
from sugaroid.sugaroid import SugaroidStatement
from sugaroid.brain.ooo import Emotion
from sugaroid.brain.preprocessors import normalize
class LearnAdapter(LogicAdapter):
"""
a specific adapter for learning responses
"""
def __init__(self, chatbot, **kwargs):
super().__init__(chatbot, **kwargs)
def can_process(self, statement):
normalized = word_tokenize(str(statement).lower())
try:
last_type = self.chatbot.globals['history']['types'][-1]
except IndexError:
last_type = False
logging.info(
'LearnAdapter: can_process() last_adapter was {}'.format(last_type))
if 'learn' in normalized and 'not' not in normalized and 'to' not in normalized:
return True
elif self.chatbot.globals['learn'] and (last_type == 'LearnAdapter'):
return True
else:
if self.chatbot.globals['learn']:
self.chatbot.globals['learn'] = False
return False
def process(self, statement, additional_response_selection_parameters=None):
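        # The learn flow is a small state machine driven by chatbot.globals['learn']:
        #   falsy -> prompt for the statement to learn and set the counter to 2,
        #   2     -> record that statement and ask what the response should be,
        #   1     -> record the response and train the ListTrainer on the recorded pair.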
response = None
if not self.chatbot.globals['learn']:
            response = 'Enter something you want to teach me. What is the statement that you want me to learn?'
self.chatbot.globals['learn'] = 2
elif self.chatbot.globals['learn'] == 2:
response = 'What should I respond to the above statement?'
self.chatbot.globals['learn_last_conversation'].append(
str(statement))
self.chatbot.globals['learn'] -= 1
elif self.chatbot.globals['learn'] == 1:
response = 'Thanks for teaching me something new. I will always try to remember that'
self.chatbot.globals['learn_last_conversation'].append(
str(statement))
self.chatbot.globals['learn'] -= 1
list_trainer = ListTrainer(self.chatbot)
list_trainer.train(self.chatbot.globals['learn_last_conversation'])
selected_statement = SugaroidStatement(response, chatbot=True)
selected_statement.confidence = 9
selected_statement.adapter = 'LearnAdapter'
emotion = Emotion.lol
selected_statement.emotion = emotion
return selected_statement
|
import numpy as np
def feature_normalize(X, y):
    # X and y are expected to be pandas DataFrames here (the means/stds below are indexed by column label)
x_mean = X.mean()
y_mean = y.mean()
x_std = X.std()
y_std = y.std()
X_norm = (X - x_mean) / x_std
y_norm = (y - y_mean) / y_std
mu = np.array([x_mean['Size'], x_mean['Bedrooms'], y_mean['Price']])
sigma = np.array([x_std['Size'], x_std['Bedrooms'], y_std['Price']])
# ===================== Your Code Here =====================
# Instructions : First, for each feature dimension, compute the mean
# of the feature and subtract it from the dataset,
# storing the mean value in mu. Next, compute the
# standard deviation of each feature and divide
# each feature by its standard deviation, storing
# the standard deviation in sigma
#
# Note that X is a 2D array where each column is a
# feature and each row is an example. You need
# to perform the normalization separately for
# each feature.
#
# Hint: You might find the 'np.mean' and 'np.std' functions useful.
# To get the same result as Octave 'std', use np.std(X, 0, ddof=1)
#
# ===========================================================
return X_norm, y_norm, mu, sigma
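# A minimal usage sketch (toy, hypothetical values): pandas DataFrames are assumed, because the
# code above indexes the means/stds by the column labels 'Size', 'Bedrooms' and 'Price'.
def _example_feature_normalize():
    import pandas as pd
    X = pd.DataFrame({'Size': [2104.0, 1600.0, 2400.0], 'Bedrooms': [3.0, 2.0, 4.0]})
    y = pd.DataFrame({'Price': [399900.0, 329900.0, 369000.0]})
    X_norm, y_norm, mu, sigma = feature_normalize(X, y)
    print(mu, sigma)
    return X_norm, y_norm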
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) Ostap developers.
# =============================================================================
# @file test_fitting_components.py
# Test module
# - It tests various multicomponents models
# =============================================================================
""" Test module
- It tests various multicomponents models
"""
# =============================================================================
__author__ = "Ostap developers"
__all__ = () ## nothing to import
# =============================================================================
import ROOT, random
import ostap.fitting.roofit
import ostap.fitting.models as Models
from ostap.core.core import cpp, VE, dsID
from ostap.logger.utils import rooSilent
from builtins import range
from ostap.fitting.background import make_bkg
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ or '__builtin__' == __name__ :
logger = getLogger ( 'test_fitting_components' )
else :
logger = getLogger ( __name__ )
# =============================================================================
## make simple test mass
m_x = ROOT.RooRealVar ( 'mass_x' , 'Some test mass(X)' , 0 , 10 )
m_y = ROOT.RooRealVar ( 'mass_y' , 'Some test mass(Y)' , 0 , 10 )
m_z = ROOT.RooRealVar ( 'mass_z' , 'Some test mass(z)' , 0 , 10 )
## book very simple data set
varset = ROOT.RooArgSet ( m_x , m_y,m_z )
dataset = ROOT.RooDataSet ( dsID() , 'Test Data set-1' , varset )
m1 = VE(3,0.10**2)
m2 = VE(7,0.20**2)
## fill it with three Gaussians, 5k events each
N_sss = 5000
N_ssb = 500
N_sbs = 500
N_sbb = 1000
N_bss = 500
N_bsb = 100
N_bbs = 100
N_bbb = 250
random.seed(0)
## fill it : 5000 events Gauss * Gauss *Gauss
for m in (m1,m2) :
for i in range(0,N_sss) :
m_x.value = m.gauss()
m_y.value = m.gauss()
m_z.value = m.gauss()
dataset.add ( varset )
## fill it : 500 events Gauss * const * Gauss
for i in range(0,N_ssb) :
m_x.value = m.gauss()
m_y.value = random.uniform ( *m_y.minmax() )
m_z.value = m.gauss()
dataset.add ( varset )
## fill it : 500 events const * Gauss * Gauss
for i in range(0,N_sbs) :
m_x.value = random.uniform ( *m_x.minmax() )
m_y.value = m.gauss()
m_z.value = m.gauss()
dataset.add ( varset )
## fill it : 1000 events const * const *Gauss
for i in range(0,N_sbb) :
m_x.value = random.uniform ( *m_x.minmax() )
m_y.value = random.uniform ( *m_y.minmax() )
m_z.value = m.gauss()
dataset.add ( varset )
## fill it : 500 events Gauss * Gauss * const
for i in range(0,N_bss) :
m_x.value = m.gauss()
m_y.value = m.gauss()
m_z.value = random.uniform ( *m_z.minmax() )
dataset.add ( varset )
## fill it : 100 events Gauss * const * const
for i in range(0,N_bsb) :
m_x.value = m.gauss()
m_y.value = random.uniform ( *m_y.minmax() )
        m_z.value = random.uniform ( *m_z.minmax() )
dataset.add ( varset )
## fill it : 100 events const * Gauss * const
for i in range(0,N_bbs) :
m_x.value = random.uniform ( *m_x.minmax() )
m_y.value = m.gauss()
        m_z.value = random.uniform ( *m_z.minmax() )
dataset.add ( varset )
## fill it : 250 events const * const * const
for i in range(0,N_bbb) :
m_x.value = random.uniform ( *m_x.minmax() )
m_y.value = random.uniform ( *m_y.minmax() )
        m_z.value = random.uniform ( *m_z.minmax() )
dataset.add ( varset )
logger.info ('Dataset: %s' % dataset )
## various fit components
signal_x1 = Models.Gauss_pdf ( 'G1x' , xvar = m_x , mean = m1.value() , sigma = m1.error() )
signal_y1 = signal_x1.clone ( name='G1y' , xvar = m_y )
signal_z1 = signal_x1.clone ( name='G1z' , xvar = m_z )
signal_x2 = Models.Gauss_pdf ( name='G2x' , xvar = m_x , mean = m2.value() , sigma = m2.error() )
signal_y2 = signal_x2.clone ( name='G2y' , xvar = m_y )
signal_z2 = signal_x2.clone ( name='G2z' , xvar = m_z )
bkg_x= make_bkg ( -1 , 'Bx' , m_x )
bkg_y= bkg_x.clone ( name= 'By' , xvar =m_y )
bkg_z= bkg_x.clone ( name='Bz' , xvar =m_z )
# S(x)*S(y) component
ss_cmp=signal_x2*signal_y2
# S(x)*B(y) component
sb_cmp=signal_x2*bkg_y
# B(x)*S(y) component
bs_cmp= bkg_x*signal_y2
# B(x)*B(y) component
bb_cmp=bkg_x*bkg_y
# S(x)*S(y)*S(z) component
sss_cmp=ss_cmp*signal_z2
# S(x)*B(y)*S(z)+ B(x)*S(y)*S(z) component
ssb=sb_cmp*signal_z2
sbs=bs_cmp*signal_z2
ssb_cmp=ssb+sbs
# B(x)*B(y)*S(z) component
sbb_cmp=bb_cmp*signal_z2
# S(x)*S(y)*B(z) component
bss_cmp=ss_cmp*bkg_z
# S(x)*B(y)*B(z) + B(x)*S(y)*B(z) component
sbb=bb_cmp*signal_z2
bsb=sb_cmp*bkg_z
bbs=bs_cmp*bkg_z
bsb_cmp=bsb+bbs
# B(x)*B(y)*B(z) component
bbb_cmp=bb_cmp*bkg_z
def test_comp_3dMixfit () :
logger.info ('Test multi-component 3d Sym fit')
model = Models.Fit3DMix (
name = 'fitSym_comp',
signal_x = signal_x1,
signal_y = signal_y1,
signal_z = signal_z1,
bkg_1x = bkg_x ,
bkg_1y= bkg_y,
bkg_2x = 'clone' ,
bkg_2y= 'clone',
components=[sss_cmp,ssb_cmp,sbb_cmp,bss_cmp,bsb_cmp,bbb_cmp]
)
with rooSilent() :
## components
model.SSS.fix ( 5000 )
model.SSB.fix ( 1000 )
model.SBB.fix ( 1000 )
model.BSS.fix ( 500 )
model.BBS.fix ( 200 )
model.BBB.fix ( 250 )
model.C[0].fix ( 5000 )
model.C[1].fix ( 1000 )
model.C[2].fix ( 1000 )
model.C[3].fix ( 500 )
model.C[4].fix ( 200 )
model.C[5].fix ( 250 )
r = model.fitTo ( dataset , ncpu=8 )
model.SSS.release ( )
model.SSB.release ( )
model.SBB.release ( )
model.BSS.release ( )
model.BBS.release ( )
model.BBB.release ( )
model.C[0].release ( )
model.C[1].release ( )
model.C[2].release ( )
model.C[3].release ( )
model.C[4].release ( )
model.C[5].release ( )
r = model.fitTo ( dataset , ncpu=8 )
logger.info ( 'Model %s Fit result \n#%s ' % ( model.name , r ) )
# =============================================================================
if '__main__' == __name__ :
test_comp_3dMixfit ()
# =============================================================================
# The END
# =============================================================================
|
#!/usr/bin/env python
# encoding: utf-8
"""
test_api
----------------------------------
Tests for `dibs.api` module.
"""
from dibs.models import Item
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
# from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
import pytest
pytestmark = pytest.mark.django_db
UserModel = get_user_model()
class SimpleItemAPITestCase(APITestCase):
def setUp(self):
# create a user that can add items
# and one test item and other user
# both users can lock items
perm_add = Permission.objects.get(codename='add_item')
perm_lock = Permission.objects.get(codename='lock_item')
self.user = UserModel.objects.create(username='testuser', password='password')
self.user.user_permissions.add(perm_add, perm_lock)
self.client.force_authenticate(user=self.user)
self.user2 = UserModel.objects.create(username='otheruser', password='password')
self.user2.user_permissions.add(perm_lock)
self.item = Item.objects.create(name="simple testing item")
def test_api_creation(self):
url = '/api/v1/items/'
res = self.client.post(url, {'name' : 'test item'})
data = res.data
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertEqual(data.get('name'), 'test item')
def test_lock_unlock_api_item(self):
url = '/api/v1/items/{0}/lock/'.format(self.item.pk)
res = self.client.post(url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data.get('status'), 'item locked')
# try lock or unlock the same item as another user
self.client.force_authenticate(user=None)
self.client.force_authenticate(user=self.user2)
res2 = self.client.post(url)
self.assertEqual(res2.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(res2.data.get('status'), 'item not locked')
unlock_url = '/api/v1/items/{0}/unlock/'.format(self.item.pk)
res3 = self.client.post(unlock_url)
self.assertEqual(res3.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(res3.data.get('status'), 'item not unlocked')
# try to unlock the same item as the user who locked it
self.client.force_authenticate(user=None)
self.client.force_authenticate(user=self.user)
res4 = self.client.post(unlock_url)
self.assertEqual(res4.status_code, status.HTTP_200_OK)
self.assertEqual(res4.data.get('status'), 'item unlocked')
|
#!/usr/bin/env python
# Copyright 2018 Battelle Energy Alliance, LLC
import subprocess
import getpass
import argparse
import sys
import re
import textwrap
from argparse import RawTextHelpFormatter
import os
import hashlib
from base64 import encodebytes as encode
from base64 import decodebytes as decode
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../lib'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../SSH/lib'))
import myldap
import myssh
### Arguments ######################################################################################################################
parser = argparse.ArgumentParser(
description='Create password hash that is LDAP compatible',
epilog=textwrap.dedent('''
Examples:
%(prog)s -p secretpassword
'''),
formatter_class=RawTextHelpFormatter
)
parser.add_argument('-p', '--password', help="Password you would like to hash")
#def create_random_passord_hash(self, password):
def create_random_password_hash(password):
    new_pass = password[:]
    salt = os.urandom(4)
    # {SSHA}: base64( SHA1(password_bytes + salt) + salt ), with the scheme prefix prepended
    hashed = hashlib.sha1(new_pass.encode('utf-8'))
    hashed.update(salt)
    salted = "{SSHA}" + encode(hashed.digest() + salt)[:-1].decode('ascii')
    return new_pass, salted
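# A minimal verification sketch (not part of the original tool): an {SSHA} value is checked by
# base64-decoding it, splitting the 4-byte salt off the end of the 20-byte SHA-1 digest, and
# re-hashing the candidate password with that salt.
def verify_ssha(password, ssha_value):
    from base64 import b64decode
    raw = b64decode(ssha_value[len("{SSHA}"):])
    digest, salt = raw[:20], raw[20:]
    candidate = hashlib.sha1(password.encode('utf-8'))
    candidate.update(salt)
    return candidate.digest() == digest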
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
new_pass, salted = create_random_password_hash(args.password)
print(new_pass, salted)
|
""" by Luigi Acerbi, Shan Shen and Anne Urai
International Brain Laboratory, 2019
#TODO: Anne
"""
import numpy as np
class DataSet:
""" Object containing data that can be preprocessed.
Inputs: dataframe # pandas
Methods: preprocess
"""
class TrialData:
def __init__(self, data, meta_data=dict()):
"""data should contain the following columns:
stimulus_side : values to be -1 or 1
stimulus_strength : non-negative contrast
choice : -1 and 1, nan for missed trials
rewarded : 0 and 1, including 0 contrast trials
---
optional columns:
correct : 1 for correct, 0 for incorrect, nan for 0 contrast or missed trials
reaction_time : time diff of response time - stim on time
prob_left_block : probability (in block structure) of stimulus_side == -1
trial_id :
session_id :
"""
self.meta_data = meta_data
self.trials_df = self.fill_dataframe(data)
def fill_dataframe(self, data):
data['signed_stimulus'] = data['stimulus_strength'] * data['stimulus_side']
data['choice_right'] = data['choice'].replace([-1, 0, 1], [0, np.nan, 1])
return data
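# A minimal construction sketch (toy, hypothetical values, not real IBL data) using the required
# columns from the docstring above; the optional columns are omitted.
def _example_trial_data():
    import pandas as pd
    df = pd.DataFrame({
        'stimulus_side': [-1, 1, 1],
        'stimulus_strength': [0.25, 0.125, 0.0],
        'choice': [-1, 1, np.nan],
        'rewarded': [1, 0, 0],
    })
    return TrialData(df, meta_data={'subject': 'example_mouse'})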
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file makes Supplementary Figure 5; it needs the filter_SRAG.py
results to run.
"""
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
data_init = pd.read_csv('../Data/SRAG_filtered_morb.csv')
data_init = data_init[(data_init.EVOLUCAO==1)|(data_init.EVOLUCAO==2)]
for col in data_init.columns:
if (col[:2] == 'DT') or (col[:4] == 'DOSE'):
data_init.loc[:,col] = pd.to_datetime(data_init[col], format='%Y/%m/%d', errors='coerce')
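# Length of hospital stay in days: outcome date (DT_EVOLUCA) minus admission date (DT_INTERNA)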
data_init['ti'] = (data_init.DT_EVOLUCA - data_init.DT_INTERNA).dt.days
cases, td = np.histogram(data_init.ti, bins=np.arange(0, 90))
deaths, td = np.histogram(data_init.ti[data_init.EVOLUCAO==2], bins=np.arange(0, 90))
td = td[:-1]
plt.figure()
plt.plot(td, deaths/cases)
plt.ylabel('Mortality')
plt.xlabel('Stay Duration (days)')
plt.xlim([-0.5,89])
plt.ylim([0.2,0.7])
plt.grid()
plt.tight_layout()
s = {'days': td, 'mortality': deaths/cases}
s = pd.DataFrame(s)
s.to_csv('../Results/mort_dur_hosp.csv', index=False)
ts = [4, 12, 40]
for t in ts:
plt.plot([t,t], [0.2, 0.7], '--r')
plt.savefig('../Figures/SFig5.png')
|
from decimal import Decimal
from tests import BaseXchangeTestCase
from xchange.models.base import (
Ticker, OrderBook
)
class TickerTestCase(BaseXchangeTestCase):
def test_ticker(self):
ticker = Ticker({
'ask': '9314.65',
'bid': '100.51',
'high': '9480',
'last': '9312.34',
'low': '8800',
'volume': '16185076',
})
self.assertEqual(ticker.ask, Decimal('9314.65'))
self.assertEqual(ticker.bid, Decimal('100.51'))
self.assertEqual(ticker.high, Decimal('9480'))
self.assertEqual(ticker.last, Decimal('9312.34'))
self.assertEqual(ticker.low, Decimal('8800'))
self.assertEqual(ticker.volume, Decimal('16185076'))
with self.assertRaisesRegexp(AttributeError,
'Object Ticker has not attribute "foobar"'):
ticker.foobar
def test_ticker_invalid_data(self):
with self.assertRaisesRegexp(ValueError,
'Unknown field "foo" for class Ticker'):
Ticker({
'foo': 'bar'
})
class OrderBookTestCase(BaseXchangeTestCase):
def test_order_book(self):
order_book = OrderBook({
"asks": [
# (price_in_btc, amount_in_btc),
(Decimal('4630.12300'), Decimal('0.014')),
(Decimal('4620.23450'), Decimal('0.456')),
],
"bids": [
# (price_in_btc, amount_in_btc),
(Decimal('4610.54856'), Decimal('0.078')),
(Decimal('4600.78952'), Decimal('0.125')),
]
})
self.assertEqual(type(order_book.asks), list)
self.assertEqual(type(order_book.bids), list)
with self.assertRaisesRegexp(AttributeError,
'Object OrderBook has not attribute "foobar"'):
order_book.foobar
def test_order_book_invalid_data(self):
with self.assertRaisesRegexp(ValueError,
'Unknown field "foo" for class OrderBook'):
OrderBook({
'foo': 'bar'
})
|
"""
turbine_costsse.py
Created by Katherine Dykes 2012.
Copyright (c) NREL. All rights reserved.
"""
from openmdao.main.api import Component, Assembly
from openmdao.main.datatypes.api import Array, Float, Bool, Int, Enum
import numpy as np
from fusedwind.plant_cost.fused_tcc import FullTurbineCostModel, FullTCCAggregator, configure_full_tcc
from fusedwind.interface import implement_base
from rotor_costsse import Rotor_CostsSE
from nacelle_costsse import Nacelle_CostsSE
from tower_costsse import Tower_CostsSE
#-------------------------------------------------------------------------------
@implement_base(FullTurbineCostModel)
class Turbine_CostsSE(Assembly):
# variables
blade_mass = Float(iotype='in', units='kg', desc='component mass [kg]')
hub_mass = Float(iotype='in', units='kg', desc='component mass [kg]')
pitch_system_mass = Float(iotype='in', units='kg', desc='component mass [kg]')
spinner_mass = Float(iotype='in', units='kg', desc='component mass [kg]')
low_speed_shaft_mass = Float(iotype='in', units='kg', desc='component mass')
main_bearing_mass = Float(iotype='in', units='kg', desc='component mass')
second_bearing_mass = Float(iotype='in', units='kg', desc='component mass')
gearbox_mass = Float(iotype='in', units='kg', desc='component mass')
high_speed_side_mass = Float(iotype='in', units='kg', desc='component mass')
generator_mass = Float(iotype='in', units='kg', desc='component mass')
bedplate_mass = Float(iotype='in', units='kg', desc='component mass')
yaw_system_mass = Float(iotype='in', units='kg', desc='component mass')
tower_mass = Float(iotype='in', units='kg', desc='tower mass [kg]')
machine_rating = Float(iotype='in', units='kW', desc='machine rating')
# parameters
blade_number = Int(iotype='in', desc='number of rotor blades')
advanced_blade = Bool(True, iotype='in', desc='advanced (True) or traditional (False) blade design')
drivetrain_design = Enum('geared', ('geared', 'single_stage', 'multi_drive', 'pm_direct_drive'), iotype='in')
crane = Bool(iotype='in', desc='flag for presence of onboard crane')
offshore = Bool(iotype='in', desc='flag for offshore site')
year = Int(iotype='in', desc='Current Year')
month = Int(iotype='in', desc='Current Month')
assemblyCostMultiplier = Float(0.0, iotype='in', desc='multiplier for assembly cost in manufacturing')
overheadCostMultiplier = Float(0.0, iotype='in', desc='multiplier for overhead')
profitMultiplier = Float(0.0, iotype='in', desc='multiplier for profit markup')
transportMultiplier = Float(0.0, iotype='in', desc='multiplier for transport costs')
# Outputs
    turbine_cost = Float(0.0, iotype='out', desc='Overall wind turbine capital costs including transportation costs')
def configure(self):
configure_full_tcc(self)
# select components
self.replace('rotorCC', Rotor_CostsSE())
self.replace('nacelleCC', Nacelle_CostsSE())
self.replace('towerCC', Tower_CostsSE())
self.replace('tcc', TurbineCostAdder())
# connect inputs
self.connect('blade_mass', 'rotorCC.blade_mass')
self.connect('blade_number', 'rotorCC.blade_number')
self.connect('hub_mass', 'rotorCC.hub_mass')
self.connect('pitch_system_mass', 'rotorCC.pitch_system_mass')
self.connect('spinner_mass', 'rotorCC.spinner_mass')
self.connect('advanced_blade', 'rotorCC.advanced')
self.connect('low_speed_shaft_mass', 'nacelleCC.low_speed_shaft_mass')
self.connect('main_bearing_mass', 'nacelleCC.main_bearing_mass')
self.connect('second_bearing_mass', 'nacelleCC.second_bearing_mass')
self.connect('gearbox_mass', 'nacelleCC.gearbox_mass')
self.connect('high_speed_side_mass', 'nacelleCC.high_speed_side_mass')
self.connect('generator_mass', 'nacelleCC.generator_mass')
self.connect('bedplate_mass', ['nacelleCC.bedplate_mass'])
self.connect('yaw_system_mass', 'nacelleCC.yaw_system_mass')
self.connect('machine_rating', ['nacelleCC.machine_rating'])
self.connect('drivetrain_design', ['nacelleCC.drivetrain_design'])
self.connect('crane', 'nacelleCC.crane')
self.connect('offshore', ['nacelleCC.offshore', 'tcc.offshore'])
self.connect('tower_mass', 'towerCC.tower_mass')
self.connect('year', ['rotorCC.year', 'nacelleCC.year', 'towerCC.year'])
self.connect('month', ['rotorCC.month', 'nacelleCC.month', 'towerCC.month'])
self.connect('assemblyCostMultiplier','tcc.assemblyCostMultiplier')
self.connect('overheadCostMultiplier','tcc.overheadCostMultiplier')
self.connect('profitMultiplier','tcc.profitMultiplier')
self.connect('transportMultiplier','tcc.transportMultiplier')
#-------------------------------------------------------------------------------
@implement_base(FullTCCAggregator)
class TurbineCostAdder(Component):
# Variables
rotor_cost = Float(iotype='in', units='USD', desc='rotor cost')
nacelle_cost = Float(iotype='in', units='USD', desc='nacelle cost')
tower_cost = Float(iotype='in', units='USD', desc='tower cost')
# parameters
offshore = Bool(iotype='in', desc='flag for offshore site')
assemblyCostMultiplier = Float(0.0, iotype='in', desc='multiplier for assembly cost in manufacturing')
overheadCostMultiplier = Float(0.0, iotype='in', desc='multiplier for overhead')
profitMultiplier = Float(0.0, iotype='in', desc='multiplier for profit markup')
transportMultiplier = Float(0.0, iotype='in', desc='multiplier for transport costs')
# Outputs
    turbine_cost = Float(0.0, iotype='out', desc='Overall wind turbine capital costs including transportation costs')
def __init__(self):
Component.__init__(self)
#controls what happens if derivatives are missing
self.missing_deriv_policy = 'assume_zero'
def execute(self):
partsCost = self.rotor_cost + self.nacelle_cost + self.tower_cost
self.turbine_cost = (1 + self.transportMultiplier + self.profitMultiplier) * ((1+self.overheadCostMultiplier+self.assemblyCostMultiplier)*partsCost)
# derivatives
self.d_cost_d_rotor_cost = (1 + self.transportMultiplier + self.profitMultiplier) * (1+self.overheadCostMultiplier+self.assemblyCostMultiplier)
self.d_cost_d_nacelle_cost = (1 + self.transportMultiplier + self.profitMultiplier) * (1+self.overheadCostMultiplier+self.assemblyCostMultiplier)
self.d_cost_d_tower_cost = (1 + self.transportMultiplier + self.profitMultiplier) * (1+self.overheadCostMultiplier+self.assemblyCostMultiplier)
if self.offshore:
self.turbine_cost *= 1.1
# derivatives
self.d_cost_d_rotor_cost *= 1.1
self.d_cost_d_nacelle_cost *= 1.1
self.d_cost_d_tower_cost *= 1.1
def list_deriv_vars(self):
inputs = ['rotor_cost', 'nacelle_cost', 'tower_cost']
outputs = ['turbine_cost']
return inputs, outputs
def provideJ(self):
# Jacobian
self.J = np.array([[self.d_cost_d_rotor_cost, self.d_cost_d_nacelle_cost, self.d_cost_d_tower_cost]])
return self.J
#-------------------------------------------------------------------------------
def example():
# simple test of module
turbine = Turbine_CostsSE()
turbine.blade_mass = 17650.67 # inline with the windpact estimates
turbine.hub_mass = 31644.5
turbine.pitch_system_mass = 17004.0
turbine.spinner_mass = 1810.5
turbine.low_speed_shaft_mass = 31257.3
#bearingsMass = 9731.41
turbine.main_bearing_mass = 9731.41 / 2
turbine.second_bearing_mass = 9731.41 / 2
turbine.gearbox_mass = 30237.60
turbine.high_speed_side_mass = 1492.45
turbine.generator_mass = 16699.85
turbine.bedplate_mass = 93090.6
turbine.yaw_system_mass = 11878.24
turbine.tower_mass = 434559.0
turbine.machine_rating = 5000.0
    turbine.advanced_blade = True
turbine.blade_number = 3
turbine.drivetrain_design = 'geared'
turbine.crane = True
turbine.offshore = True
turbine.year = 2010
turbine.month = 12
turbine.run()
print "The results for the NREL 5 MW Reference Turbine in an offshore 20 m water depth location are:"
print
print "Overall rotor cost with 3 advanced blades is ${0:.2f} USD".format(turbine.rotorCC.cost)
print "Blade cost is ${0:.2f} USD".format(turbine.rotorCC.bladeCC.cost)
print "Hub cost is ${0:.2f} USD".format(turbine.rotorCC.hubCC.cost)
print "Pitch system cost is ${0:.2f} USD".format(turbine.rotorCC.pitchSysCC.cost)
print "Spinner cost is ${0:.2f} USD".format(turbine.rotorCC.spinnerCC.cost)
print
print "Overall nacelle cost is ${0:.2f} USD".format(turbine.nacelleCC.cost)
print "LSS cost is ${0:.2f} USD".format(turbine.nacelleCC.lssCC.cost)
print "Main bearings cost is ${0:.2f} USD".format(turbine.nacelleCC.bearingsCC.cost)
print "Gearbox cost is ${0:.2f} USD".format(turbine.nacelleCC.gearboxCC.cost)
print "High speed side cost is ${0:.2f} USD".format(turbine.nacelleCC.hssCC.cost)
print "Generator cost is ${0:.2f} USD".format(turbine.nacelleCC.generatorCC.cost)
print "Bedplate cost is ${0:.2f} USD".format(turbine.nacelleCC.bedplateCC.cost)
print "Yaw system cost is ${0:.2f} USD".format(turbine.nacelleCC.yawSysCC.cost)
print
print "Tower cost is ${0:.2f} USD".format(turbine.towerCC.cost)
print
print "The overall turbine cost is ${0:.2f} USD".format(turbine.turbine_cost)
print
if __name__ == "__main__":
example()
|
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.translation import gettext_lazy as _
from .managers import UserManager
class User(AbstractUser):
"""Custom User model for authentication with email as identifier."""
USERNAME_FIELD = "email"
REQUIRED_FIELDS = [f for f in AbstractUser.REQUIRED_FIELDS if f != "email"]
email = models.EmailField(_("email address"), unique=True)
first_name = models.CharField(_("first name"), max_length=150, blank=False)
last_name = models.CharField(_("last name"), max_length=150, blank=False)
username = None
invitation_completed_at = models.DateTimeField(
_("invitation completed at"), blank=True, null=True
)
is_lab_admin = models.BooleanField(_("is Euphrosyne admin"), default=False)
objects = UserManager()
def __str__(self):
return (
f"{self.last_name}, {self.first_name}<{self.email}>"
if (self.last_name and self.first_name)
else self.email
)
def delete(self, *_):
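        """Soft delete: deactivate the account instead of removing the database row."""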
self.is_active = False
self.save()
class UserInvitation(User):
class Meta:
proxy = True
verbose_name = _("User invitation")
verbose_name_plural = _("User invitations")
def clean(self):
if not self.pk:
self.is_staff = True
|
import os
from typing import Any
from typing import cast
from typing import Dict
from typing import Iterable
from ...exceptions import InvalidFile
from ..plugins.util import get_plugins_from_file
def upgrade(baseline: Dict[str, Any]) -> None:
for function in [
_migrate_filters,
_rename_high_entropy_string_arguments,
_migrate_custom_plugins,
]:
function(baseline)
def _migrate_filters(baseline: Dict[str, Any]) -> None:
"""
In v1.0.0, we introduced the idea of `filters`. This consolidated a variety of different
false positive filters into a configurable layout. To reduce upgrade friction, this will
contain the default filters used before this version upgrade.
"""
baseline['filters_used'] = [
{
'path': 'detect_secrets.filters.allowlist.is_line_allowlisted',
},
{
'path': 'detect_secrets.filters.heuristic.is_sequential_string',
},
{
'path': 'detect_secrets.filters.heuristic.is_potential_uuid',
},
{
'path': 'detect_secrets.filters.heuristic.is_likely_id_string',
},
{
'path': 'detect_secrets.filters.heuristic.is_templated_secret',
},
{
'path': 'detect_secrets.filters.heuristic.is_prefixed_with_dollar_sign',
},
{
'path': 'detect_secrets.filters.heuristic.is_indirect_reference',
},
{
'path': 'detect_secrets.filters.common.is_ignored_due_to_verification_policies',
# Hard-code this, just in case VerifiedResult enum values changes.
# This corresponds to VerifiedResult.UNVERIFIED
'min_level': 2,
},
]
if baseline.get('exclude'):
if baseline['exclude'].get('files'):
baseline['filters_used'].append({
'path': 'detect_secrets.filters.regex.should_exclude_file',
'pattern': [
baseline['exclude']['files'],
],
})
if baseline['exclude'].get('lines'):
baseline['filters_used'].append({
'path': 'detect_secrets.filters.regex.should_exclude_line',
'pattern': [
baseline['exclude']['lines'],
],
})
baseline.pop('exclude')
if baseline.get('word_list'):
if baseline['word_list']['file']:
baseline['filters_used'].append({
'path': 'detect_secrets.filters.wordlist.should_exclude_secret',
'min_length': 3,
'file_name': baseline['word_list']['file'],
'file_hash': baseline['word_list']['hash'],
})
baseline.pop('word_list')
def _rename_high_entropy_string_arguments(baseline: Dict[str, Any]) -> None:
"""
During the great refactor for v1.0.0, we also decided to rename these arguments for
consistency and simplicity.
"""
for plugin in baseline['plugins_used']:
if plugin['name'] == 'Base64HighEntropyString':
plugin['limit'] = plugin.pop('base64_limit')
elif plugin['name'] == 'HexHighEntropyString':
plugin['limit'] = plugin.pop('hex_limit')
# TODO: KeywordDetector?
def _migrate_custom_plugins(baseline: Dict[str, Any]) -> None:
if 'custom_plugin_paths' not in baseline:
return
for path in baseline['custom_plugin_paths']:
try:
# NOTE: We don't want to use `detect_secrets.core.plugins.initialize.from_file`
# since we don't want to *initialize* these plugins. That will pollute our global
# settings object. Instead, we're merely "parsing" this file, and applying changes
# as necessary.
custom_plugins = cast(Iterable, get_plugins_from_file(path))
except InvalidFile:
# Best effort upgrade. Don't break if invalid file.
continue
for plugin in custom_plugins:
baseline['plugins_used'].append({
'name': plugin.__name__,
'path': f'file://{os.path.abspath(path)}',
})
del baseline['custom_plugin_paths']
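# A minimal usage sketch (hypothetical baseline contents, not a real scan result): upgrading in
# place adds 'filters_used', renames the entropy-limit arguments and folds the old 'exclude'
# section into regex filters.
def _example_upgrade() -> Dict[str, Any]:
    baseline: Dict[str, Any] = {
        'plugins_used': [{'name': 'Base64HighEntropyString', 'base64_limit': 4.5}],
        'exclude': {'files': r'tests/.*', 'lines': None},
        'results': {},
    }
    upgrade(baseline)
    return baseline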
|
somaidade=0
mediaidade=0
maioridadehomem=0
nomevelho=''
totmulher20=0
for p in range(1, 5):
print('------{}°pessoa------'.format(p))
nome=str(input('Nome:')).strip()
idade=int(input('Idade:'))
sexo=str(input('Sexo[M/F]:')).strip().upper()
somaidade+=idade
if p==1 and sexo in 'M':
maioridadehomem=idade
nomevelho=nome
if sexo in 'Mm' and idade>maioridadehomem:
maioridadehomem=idade
nomevelho=nome
if sexo in 'Ff' and idade<21:
totmulher20+=1
mediaidade=somaidade/4
print('A média de idade do grupo é de {} anos'.format(mediaidade))
print('O homem mais velho tem {} anos e se chama {}.'.format(maioridadehomem, nomevelho))
print('Ao todo são {} mulheres com 20 anos.'.format(totmulher20))
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import image as mpimg
img = cv2.imread('input/forest.jpeg')
cv2.imshow('foreste',img)
# Histogram of the image in grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
plt.hist(gray.ravel(),256,[0,256])
plt.xlabel('Tonalidade de Cinza')
plt.ylabel('Quantidade de Pixels')
plt.suptitle('Histograma de tons de cinza')
plt.show()
# Per-channel histogram of the color image
histograma = cv2.calcHist(images=[img],channels=[0],mask=None,histSize=[256],ranges=[0,256])
cores = ('b','g','r')
for i, col in enumerate(cores):
histograma = cv2.calcHist([img],[i],None,[256],[0,256])
plt.plot(histograma,color = col)
plt.xlabel(f'Tonalidade de {col}')
plt.ylabel('Quantidade de Pixels')
plt.suptitle('Histograma dos canais RGB')
plt.xlim([0,256])
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
|
"""This is an auto-generated file. Modify at your own risk"""
from typing import Awaitable, Any, Callable, Dict, List, Optional, Union, TYPE_CHECKING
if TYPE_CHECKING:
from cripy import ConnectionType, SessionType
__all__ = ["Overlay"]
class Overlay:
"""
This domain provides various functionality related to drawing atop the inspected page.
Domain Dependencies:
* DOM
* Page
* Runtime
Status: Experimental
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay`
"""
__slots__ = ["client"]
def __init__(self, client: Union["ConnectionType", "SessionType"]) -> None:
"""Initialize a new instance of Overlay
:param client: The client instance to be used to communicate with the remote browser instance
"""
self.client: Union["ConnectionType", "SessionType"] = client
def disable(self) -> Awaitable[Dict]:
"""
Disables domain notifications.
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-disable`
:return: The results of the command
"""
return self.client.send("Overlay.disable", {})
def enable(self) -> Awaitable[Dict]:
"""
Enables domain notifications.
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-enable`
:return: The results of the command
"""
return self.client.send("Overlay.enable", {})
def getHighlightObjectForTest(self, nodeId: int) -> Awaitable[Dict]:
"""
For testing.
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-getHighlightObjectForTest`
:param nodeId: Id of the node to get highlight object for.
:return: The results of the command
"""
return self.client.send("Overlay.getHighlightObjectForTest", {"nodeId": nodeId})
def hideHighlight(self) -> Awaitable[Dict]:
"""
Hides any highlight.
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-hideHighlight`
:return: The results of the command
"""
return self.client.send("Overlay.hideHighlight", {})
def highlightFrame(
self,
frameId: str,
contentColor: Optional[Dict[str, Any]] = None,
contentOutlineColor: Optional[Dict[str, Any]] = None,
) -> Awaitable[Dict]:
"""
Highlights owner element of the frame with given id.
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-highlightFrame`
:param frameId: Identifier of the frame to highlight.
:param contentColor: The content box highlight fill color (default: transparent).
:param contentOutlineColor: The content box highlight outline color (default: transparent).
:return: The results of the command
"""
msg = {"frameId": frameId}
if contentColor is not None:
msg["contentColor"] = contentColor
if contentOutlineColor is not None:
msg["contentOutlineColor"] = contentOutlineColor
return self.client.send("Overlay.highlightFrame", msg)
def highlightNode(
self,
highlightConfig: Dict[str, Any],
nodeId: Optional[int] = None,
backendNodeId: Optional[int] = None,
objectId: Optional[str] = None,
selector: Optional[str] = None,
) -> Awaitable[Dict]:
"""
Highlights DOM node with given id or with the given JavaScript object wrapper. Either nodeId or
objectId must be specified.
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-highlightNode`
:param highlightConfig: A descriptor for the highlight appearance.
:param nodeId: Identifier of the node to highlight.
:param backendNodeId: Identifier of the backend node to highlight.
:param objectId: JavaScript object id of the node to be highlighted.
:param selector: Selectors to highlight relevant nodes.
:return: The results of the command
"""
msg = {"highlightConfig": highlightConfig}
if nodeId is not None:
msg["nodeId"] = nodeId
if backendNodeId is not None:
msg["backendNodeId"] = backendNodeId
if objectId is not None:
msg["objectId"] = objectId
if selector is not None:
msg["selector"] = selector
return self.client.send("Overlay.highlightNode", msg)
def highlightQuad(
self,
quad: List[Union[int, float]],
color: Optional[Dict[str, Any]] = None,
outlineColor: Optional[Dict[str, Any]] = None,
) -> Awaitable[Dict]:
"""
Highlights given quad. Coordinates are absolute with respect to the main frame viewport.
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-highlightQuad`
:param quad: Quad to highlight
:param color: The highlight fill color (default: transparent).
:param outlineColor: The highlight outline color (default: transparent).
:return: The results of the command
"""
msg = {"quad": quad}
if color is not None:
msg["color"] = color
if outlineColor is not None:
msg["outlineColor"] = outlineColor
return self.client.send("Overlay.highlightQuad", msg)
def highlightRect(
self,
x: int,
y: int,
width: int,
height: int,
color: Optional[Dict[str, Any]] = None,
outlineColor: Optional[Dict[str, Any]] = None,
) -> Awaitable[Dict]:
"""
Highlights given rectangle. Coordinates are absolute with respect to the main frame viewport.
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-highlightRect`
:param x: X coordinate
:param y: Y coordinate
:param width: Rectangle width
:param height: Rectangle height
:param color: The highlight fill color (default: transparent).
:param outlineColor: The highlight outline color (default: transparent).
:return: The results of the command
"""
msg = {"x": x, "y": y, "width": width, "height": height}
if color is not None:
msg["color"] = color
if outlineColor is not None:
msg["outlineColor"] = outlineColor
return self.client.send("Overlay.highlightRect", msg)
def setInspectMode(
self, mode: str, highlightConfig: Optional[Dict[str, Any]] = None
) -> Awaitable[Dict]:
"""
Enters the 'inspect' mode. In this mode, elements that user is hovering over are highlighted.
Backend then generates 'inspectNodeRequested' event upon element selection.
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-setInspectMode`
:param mode: Set an inspection mode.
:param highlightConfig: A descriptor for the highlight appearance of hovered-over nodes. May be omitted if `enabled
== false`.
:return: The results of the command
"""
msg = {"mode": mode}
if highlightConfig is not None:
msg["highlightConfig"] = highlightConfig
return self.client.send("Overlay.setInspectMode", msg)
def setShowAdHighlights(self, show: bool) -> Awaitable[Dict]:
"""
Highlights owner element of all frames detected to be ads.
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-setShowAdHighlights`
:param show: True for showing ad highlights
:return: The results of the command
"""
return self.client.send("Overlay.setShowAdHighlights", {"show": show})
def setPausedInDebuggerMessage(
self, message: Optional[str] = None
) -> Awaitable[Dict]:
"""
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-setPausedInDebuggerMessage`
:param message: The message to display, also triggers resume and step over controls.
:return: The results of the command
"""
msg = {}
if message is not None:
msg["message"] = message
return self.client.send("Overlay.setPausedInDebuggerMessage", msg)
def setShowDebugBorders(self, show: bool) -> Awaitable[Dict]:
"""
Requests that backend shows debug borders on layers
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-setShowDebugBorders`
:param show: True for showing debug borders
:return: The results of the command
"""
return self.client.send("Overlay.setShowDebugBorders", {"show": show})
def setShowFPSCounter(self, show: bool) -> Awaitable[Dict]:
"""
Requests that backend shows the FPS counter
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-setShowFPSCounter`
:param show: True for showing the FPS counter
:return: The results of the command
"""
return self.client.send("Overlay.setShowFPSCounter", {"show": show})
def setShowPaintRects(self, result: bool) -> Awaitable[Dict]:
"""
Requests that backend shows paint rectangles
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-setShowPaintRects`
:param result: True for showing paint rectangles
:return: The results of the command
"""
return self.client.send("Overlay.setShowPaintRects", {"result": result})
def setShowScrollBottleneckRects(self, show: bool) -> Awaitable[Dict]:
"""
Requests that backend shows scroll bottleneck rects
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-setShowScrollBottleneckRects`
:param show: True for showing scroll bottleneck rects
:return: The results of the command
"""
return self.client.send("Overlay.setShowScrollBottleneckRects", {"show": show})
def setShowHitTestBorders(self, show: bool) -> Awaitable[Dict]:
"""
Requests that backend shows hit-test borders on layers
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-setShowHitTestBorders`
:param show: True for showing hit-test borders
:return: The results of the command
"""
return self.client.send("Overlay.setShowHitTestBorders", {"show": show})
def setShowViewportSizeOnResize(self, show: bool) -> Awaitable[Dict]:
"""
Paints viewport size upon main frame resize.
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#method-setShowViewportSizeOnResize`
:param show: Whether to paint size or not.
:return: The results of the command
"""
return self.client.send("Overlay.setShowViewportSizeOnResize", {"show": show})
def inspectNodeRequested(
self, listener: Optional[Callable[[Dict[str, Any]], Any]] = None
) -> Any:
"""
Fired when the node should be inspected. This happens after call to `setInspectMode` or when
user manually inspects an element.
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#event-inspectNodeRequested`
:param listener: Optional listener function
:return: If a listener was supplied the return value is a callable that
will remove the supplied listener otherwise a future that resolves
with the value of the event
"""
event_name = "Overlay.inspectNodeRequested"
if listener is None:
future = self.client.loop.create_future()
def _listener(event: Optional[Dict] = None) -> None:
future.set_result(event)
self.client.once(event_name, _listener)
return future
self.client.on(event_name, listener)
return lambda: self.client.remove_listener(event_name, listener)
def nodeHighlightRequested(
self, listener: Optional[Callable[[Dict[str, Any]], Any]] = None
) -> Any:
"""
Fired when the node should be highlighted. This happens after call to `setInspectMode`.
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#event-nodeHighlightRequested`
:param listener: Optional listener function
:return: If a listener was supplied the return value is a callable that
will remove the supplied listener otherwise a future that resolves
with the value of the event
"""
event_name = "Overlay.nodeHighlightRequested"
if listener is None:
future = self.client.loop.create_future()
def _listener(event: Optional[Dict] = None) -> None:
future.set_result(event)
self.client.once(event_name, _listener)
return future
self.client.on(event_name, listener)
return lambda: self.client.remove_listener(event_name, listener)
def screenshotRequested(
self, listener: Optional[Callable[[Dict[str, Any]], Any]] = None
) -> Any:
"""
Fired when user asks to capture screenshot of some area on the page.
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#event-screenshotRequested`
:param listener: Optional listener function
:return: If a listener was supplied the return value is a callable that
will remove the supplied listener otherwise a future that resolves
with the value of the event
"""
event_name = "Overlay.screenshotRequested"
if listener is None:
future = self.client.loop.create_future()
def _listener(event: Optional[Dict] = None) -> None:
future.set_result(event)
self.client.once(event_name, _listener)
return future
self.client.on(event_name, listener)
return lambda: self.client.remove_listener(event_name, listener)
def inspectModeCanceled(
self, listener: Optional[Callable[[Any], Any]] = None
) -> Any:
"""
Fired when user cancels the inspect mode.
See `https://chromedevtools.github.io/devtools-protocol/tot/Overlay#event-inspectModeCanceled`
:param listener: Optional listener function
:return: If a listener was supplied the return value is a callable that
will remove the supplied listener otherwise a future that resolves
with the value of the event
"""
event_name = "Overlay.inspectModeCanceled"
if listener is None:
future = self.client.loop.create_future()
def _listener(event: Optional[Dict] = None) -> None:
future.set_result(event)
self.client.once(event_name, _listener)
return future
self.client.on(event_name, listener)
return lambda: self.client.remove_listener(event_name, listener)
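# A minimal usage sketch (assumes an already-connected cripy client; the highlight colour values
# are hypothetical): enable the domain, highlight a node, then clear the highlight.
async def _example_overlay_usage(client: Union["ConnectionType", "SessionType"]) -> None:
    overlay = Overlay(client)
    await overlay.enable()
    await overlay.highlightNode(
        {"contentColor": {"r": 0, "g": 128, "b": 255, "a": 0.4}}, nodeId=1
    )
    await overlay.hideHighlight()
    await overlay.disable()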
|
import pandas as pd
import matplotlib.pyplot as plt
from math import ceil
def event_count_plot(qa_data_path, event_name,**kwargs):
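    """Plot the daily event counts read from '<qa_data_path>_<event_name>.csv' (expects
    'event_date' and 'n_event' columns) and save the figure next to that CSV."""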
event_data_path = qa_data_path + '_' + event_name + '.csv'
qa_data_df=pd.read_csv(event_data_path)
plt.figure(figsize=(6, 4))
plt.plot('event_date', 'n_event', data=qa_data_df, marker='', color='black', linewidth=2)
plt.ylim(0, ceil(1.1 * qa_data_df['n_event'].dropna().max()))
plt.title('{} event count'.format(event_name))
plt.gca().figure.autofmt_xdate()
plt.xticks(list(filter(lambda x:x.endswith(("01")),qa_data_df['event_date'].tolist())))
plt.tight_layout()
save_to_path=event_data_path.replace('.csv', '_' + event_name + '_event_qa.png')
print('Saving metric qa plot to ' + save_to_path)
plt.savefig(save_to_path)
plt.close()
|
"""
Author: Rayla Kurosaki
File: phase4_print_to_workbook.py
Description: This file contains the functionality to print the student's
transcript on a Microsoft Excel Workbook in a pretty and easy to
read format.
"""
from openpyxl import styles
from openpyxl.styles import Alignment, Font
from openpyxl.utils import get_column_letter
from openpyxl.utils.cell import column_index_from_string
from openpyxl.utils.cell import coordinate_from_string
import copy
from os.path import exists as file_exists
import __utils__ as utils
CYAN = "00FFFF"
PINK = "FFDDF4"
def get_col_width(path):
"""
Compute the column widths of the soon-to-be-deleted output file.
:param path: Path to the soon-to-be-deleted output file.
:return: The column widths of the soon-to-be-deleted output file.
"""
if not file_exists(path):
return None
else:
n = 10
col_widths = []
workbook = utils.get_workbook(path)
worksheet = utils.get_worksheet(workbook, "Transcript")
for i in range(1, utils.get_max_cols(worksheet) + 1):
col_letter = get_column_letter(i)
col_widths.append(worksheet.column_dimensions[col_letter].width)
pass
return col_widths
pass
def init_new_workbook(col_widths):
"""
Initializes a new Microsoft Excel Workbook. If there is already an output
Excel file, it will use the column widths from the previous file and
apply the column widths to the new Excel file.
:param col_widths: The size of each column from the current Microsoft
Excel Workbook.
:return: A new Microsoft Excel Workbook.
"""
workbook = utils.create_workbook()
worksheet_name = "Transcript"
utils.create_new_worksheet(workbook, worksheet_name)
worksheet = utils.get_worksheet(workbook, worksheet_name)
if col_widths is not None:
for i, col_width in enumerate(col_widths):
col_letter = get_column_letter(i + 1)
worksheet.column_dimensions[col_letter].width = col_width
pass
pass
utils.delete_worksheet(workbook, "Sheet")
return workbook
def add_basic_info(workbook, student):
"""
Adds the student's basic information to the Excel file.
:param workbook: Excel Workbook to add data to.
:param student: The student to extract the data from.
"""
font = Font(bold=True, underline="single")
fill_cyan = styles.PatternFill(
start_color=CYAN, end_color=CYAN, fill_type="solid"
)
fill_pink = styles.PatternFill(
start_color=PINK, end_color=PINK, fill_type="solid"
)
center = Alignment(horizontal="center")
worksheet = utils.get_worksheet(workbook, "Transcript")
set_cols_space = ["A", "B", "I", "J"]
for col in set_cols_space:
cell_loc = f"{col}1"
utils.update_cell_value(worksheet, cell_loc, "llll")
cell = utils.get_cell(worksheet, cell_loc)
cell.font = Font(color="FFFFFF")
utils.update_cell_value(worksheet, "C3", "Basic Information")
utils.merge_cells(worksheet, "C3", "H3")
cell = utils.get_cell(worksheet, "C3")
cell.font = font
cell.fill = fill_cyan
cell.alignment = center
utils.update_cell_value(worksheet, "C4", "Name")
utils.merge_cells(worksheet, "C4", "D4")
utils.update_cell_value(worksheet, "E4", student.get_name())
utils.merge_cells(worksheet, "E4", "H4")
utils.update_cell_value(worksheet, "C5", "Cumulative GPA")
utils.merge_cells(worksheet, "C5", "D5")
utils.update_cell_value(worksheet, "E5", student.get_gpa())
utils.merge_cells(worksheet, "E5", "H5")
utils.apply_thick_border_style(worksheet, "C3:H5")
utils.update_cell_value(worksheet, "C6", "Major(s)/Minor(s)")
utils.merge_cells(worksheet, "C6", "D6")
cell = utils.get_cell(worksheet, "C6")
cell.font = font
cell.fill = fill_cyan
utils.update_cell_value(worksheet, "E6", "Degree")
utils.merge_cells(worksheet, "E6", "F6")
cell = utils.get_cell(worksheet, "E6")
cell.font = font
cell.fill = fill_cyan
utils.update_cell_value(worksheet, "G6", "GPA")
utils.merge_cells(worksheet, "G6", "H6")
cell = utils.get_cell(worksheet, "G6")
cell.font = font
cell.fill = fill_cyan
columns = ["C", "E", "G"]
data = []
for major in student.majors.values():
data.append([major.get_name(), major.get_degree(), major.get_gpa()])
pass
for minor in student.minors.values():
data.append([minor.get_name(), "Minor", minor.get_gpa()])
pass
for i, data_row in enumerate(data):
r = i + 7
for row, col in zip(data_row, columns):
cell_loc_1 = f"{col}{r}"
xy = coordinate_from_string(cell_loc_1)
col = get_column_letter(column_index_from_string(xy[0]) + 1)
cell_loc_2 = f"{col}{r}"
utils.update_cell_value(worksheet, cell_loc_1, row)
utils.merge_cells(worksheet, cell_loc_1, cell_loc_2)
pass
pass
utils.apply_thick_border_style(worksheet, f"C6:H{r}")
utils.merge_cells(worksheet, "C2", "H2")
cell = utils.get_cell(worksheet, "C2")
cell.fill = fill_pink
utils.merge_cells(worksheet, "B2", f"B{r + 1}")
cell = utils.get_cell(worksheet, "B2")
cell.fill = fill_pink
utils.merge_cells(worksheet, "I2", f"I{r + 1}")
cell = utils.get_cell(worksheet, "I2")
cell.fill = fill_pink
utils.merge_cells(worksheet, f"C{r + 1}", f"H{r + 1}")
cell = utils.get_cell(worksheet, f"C{r + 1}")
cell.fill = fill_pink
utils.apply_thick_border_style(worksheet, f"B2:I{r + 1}")
pass
def get_data(student):
"""
Gets the data needed to print on the Excel Workbook.
:param student: The student to extract the data from.
:return: Course and GPA data.
"""
course_data, gpa_data = {}, {}
term = ""
lst = []
attempted_term, earned_term, units_term, points_term = 0, 0, 0, 0
attempted_cum, earned_cum, units_cum, points_cum = 0, 0, 0, 0
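    # Walk the courses in order, accumulating per-term and cumulative totals; when the term
    # changes, flush the accumulated course rows and GPA summary for the previous term.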
for i, course in enumerate(student.get_courses()):
course_term = course.get_term()
if course_term != term:
if i == 0:
lst.append([
course.get_id(), course.get_name(), course.get_credit(),
course.get_earned_credit(), course.get_final_grade(),
course.get_points()
])
attempted_term += course.get_credit()
earned_term += course.get_earned_credit()
units_term += course.get_units()
points_term += course.get_points()
attempted_cum += course.get_credit()
earned_cum += course.get_earned_credit()
units_cum += course.get_units()
points_cum += course.get_points()
pass
else:
years, season = term.split(" ")
course_data[f"{season} Term: {years}"] = lst
gpa_data[f"{season} Term: {years}"] = [
["Term GPA", points_term / units_term, attempted_term,
earned_term, units_term, points_term],
["Cum GPA", points_cum / units_cum, attempted_cum,
earned_cum, units_cum, points_cum],
]
lst = [[course.get_id(), course.get_name(),
course.get_credit(), course.get_earned_credit(),
course.get_final_grade(), course.get_points()]]
attempted_term = course.get_credit()
earned_term = course.get_earned_credit()
units_term = course.get_units()
points_term = course.get_points()
attempted_cum += course.get_credit()
earned_cum += course.get_earned_credit()
units_cum += course.get_units()
points_cum += course.get_points()
pass
pass
term = course_term
else:
lst.append([
course.get_id(), course.get_name(), course.get_credit(),
course.get_earned_credit(), course.get_final_grade(),
course.get_points()
])
attempted_term += course.get_credit()
earned_term += course.get_earned_credit()
units_term += course.get_units()
points_term += course.get_points()
attempted_cum += course.get_credit()
earned_cum += course.get_earned_credit()
units_cum += course.get_units()
points_cum += course.get_points()
pass
pass
years, season = term.split(" ")
course_data[f"{season} Term: {years}"] = lst
gpa_data[f"{season} Term: {years}"] = [
["Term GPA", points_term / units_term, attempted_term, earned_term,
units_term, points_term],
["Cum GPA", points_cum / units_cum, attempted_cum, earned_cum,
units_cum, points_cum],
]
return course_data, gpa_data
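# Illustrative shape of the two dicts returned by get_data (all values made up):
#
#   course_data["Fall Term: 2019"] = [
#       ["CS 101", "Intro to CS", 3.0, 3.0, "A", 12.0],
#       ...
#   ]
#   gpa_data["Fall Term: 2019"] = [
#       ["Term GPA", 3.75, 15.0, 15.0, 15.0, 56.25],
#       ["Cum GPA", 3.60, 45.0, 45.0, 45.0, 162.0],
#   ]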
def add_transcript_data(workbook, student):
"""
Adds the student's course and GPA data to the Excel Workbook.
:param workbook: The Microsoft Excel Workbook to add data to.
:param student: The student to extract the data from.
"""
font = Font(bold=True, underline="single")
fill_cyan = styles.PatternFill(
start_color=CYAN, end_color=CYAN, fill_type="solid"
)
fill_pink = styles.PatternFill(
start_color=PINK, end_color=PINK, fill_type="solid"
)
center = Alignment(horizontal="center")
worksheet = utils.get_worksheet(workbook, "Transcript")
r = 9 + len(student.get_majors()) + len(student.get_minors())
course_data, gpa_data = get_data(student)
header = ["Course", "Description", "Attempted", "Earned", "Grade",
"Points"]
old_r = copy.deepcopy(r)
for term, data_set in course_data.items():
term_gpa, cum_gpa = gpa_data[term]
cell_loc = f"C{r}"
utils.update_cell_value(worksheet, cell_loc, term)
cell = utils.get_cell(worksheet, cell_loc)
cell.alignment = center
r += 1
for j in range(3, 8 + 1):
cell_loc = f"{get_column_letter(j)}{r}"
utils.update_cell_value(worksheet, cell_loc, header[j - 3])
cell = utils.get_cell(worksheet, cell_loc)
cell.font = font
cell.fill = fill_cyan
pass
r += 1
for course_data in data_set:
for j in range(3, 8 + 1):
cell_loc = f"{get_column_letter(j)}{r}"
val = course_data[j - 3]
utils.update_cell_value(worksheet, cell_loc, val)
if isinstance(val, float):
cell = utils.get_cell(worksheet, cell_loc)
cell.number_format = '0.000'
pass
pass
r += 1
pass
cell_loc = f"C{old_r + 1}:H{r - 1}"
utils.apply_thick_border_style(worksheet, cell_loc)
for j in range(3, 8 + 1):
cell_loc = f"{get_column_letter(j)}{r}"
utils.update_cell_value(worksheet, cell_loc, term_gpa[j - 3])
cell = utils.get_cell(worksheet, cell_loc)
cell.fill = fill_cyan
if (j - 3 == 1) or (j - 3 == 5):
cell.number_format = '0.000'
pass
pass
cell_loc = f"C{r}:H{r}"
utils.apply_thick_border_style(worksheet, cell_loc)
r += 1
for j in range(3, 8 + 1):
cell_loc = f"{get_column_letter(j)}{r}"
utils.update_cell_value(worksheet, cell_loc, cum_gpa[j - 3])
cell = utils.get_cell(worksheet, cell_loc)
cell.fill = fill_cyan
if (j - 3 == 1) or (j - 3 == 5):
cell.number_format = '0.000'
pass
pass
cell_loc = f"C{r}:H{r}"
utils.apply_thick_border_style(worksheet, cell_loc)
r += 1
utils.merge_cells(worksheet, f"B{old_r}", f"B{r}")
cell = utils.get_cell(worksheet, f"B{old_r}")
cell.fill = fill_pink
utils.merge_cells(worksheet, f"I{old_r}", f"I{r}")
cell = utils.get_cell(worksheet, f"I{old_r}")
cell.fill = fill_pink
utils.merge_cells(worksheet, f"C{old_r}", f"H{old_r}")
cell = utils.get_cell(worksheet, f"C{old_r}")
cell.fill = fill_pink
utils.merge_cells(worksheet, f"C{r}", f"H{r}")
cell = utils.get_cell(worksheet, f"C{r}")
cell.fill = fill_pink
cell_range = f"B{old_r}:I{r}"
utils.apply_thick_border_style(worksheet, cell_range)
r += 2
old_r = copy.deepcopy(r)
pass
pass
def phase4_main(student, filename):
"""
The driver function to print the student's transcript onto a Microsoft
Excel Workbook.
:param student: The student to manipulate.
:param filename: Name of the file.
"""
path = f"../output/{filename}.xlsx"
col_widths = get_col_width(path)
print(col_widths)
workbook = init_new_workbook(col_widths)
add_basic_info(workbook, student)
add_transcript_data(workbook, student)
utils.save_workbook(workbook, path)
pass
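# Illustrative driver call (the file name is made up): reads the column widths from
# ../output/jane_doe_transcript.xlsx, rebuilds the workbook with the basic info and
# transcript data, and saves it back to the same path.
#
#   phase4_main(student, "jane_doe_transcript")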
|
import fastapi
from app.schemas.health import HealthOutSchema
router = fastapi.APIRouter()
@router.get("/", response_model=HealthOutSchema)
def health():
return HealthOutSchema(status="OK")
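# Minimal usage sketch (assumptions: the application factory, not shown here, mounts
# this router under a "/health" prefix, and HealthOutSchema only defines a `status`
# field):
#
#   from fastapi import FastAPI
#   from fastapi.testclient import TestClient
#
#   app = FastAPI()
#   app.include_router(router, prefix="/health")
#   assert TestClient(app).get("/health/").json() == {"status": "OK"}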
|
import discord
import sys
import os
import io
import asyncio
import json
from discord.ext import commands
class Mod:
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.has_permissions(administrator = True)
async def msg(self, ctx, user: discord.Member, *, msg: str):
"""Message someone as me!"""
try:
await user.send(msg)
await ctx.message.delete()
await ctx.send("The message was sent")
except commands.MissingPermissions:
await ctx.send("rip. you dont have enough perms. xd")
except Exception:
await ctx.send(":x: Format: +msg (user tag) (messgae)")
@commands.command()
@commands.has_permissions(manage_messages = True)
async def purge(self, ctx, num: int):
"""Deletes messages. +purge [number]"""
try:
if num is None:
await ctx.send("+purge [number]")
else:
try:
float(num)
except ValueError:
return await ctx.send("The number is invalid. Make sure its a number! +purge [number]")
await ctx.channel.purge(limit=num+1)
msg = await ctx.send("Done. ( ͡° ͜ʖ ͡°) ", delete_after=4)
except discord.Forbidden:
await ctx.send("I don't have **Manage Messages** permission.")
except commands.errors.MissingPermissions:
await ctx.send("Cant delete messages without perms.")
@commands.command()
@commands.has_permissions(kick_members = True)
async def kick(self, ctx, user: discord.Member):
"""Kicks a member out of your server."""
try:
await user.kick()
await ctx.channel.send(f"The administrator is putting their boot on. They kick the boot into {user.mention}'s bottom. {user.mention} has been kicked. ")
except discord.Forbidden:
await ctx.send("00F! I need the **Kick Members** permission.")
except discord.ext.commands.MissingPermissions:
await ctx.send("Can't kick people without permissions.")
@commands.command()
@commands.has_permissions(ban_members = True)
async def ban(self, ctx, user: discord.Member):
"""Ban a member out of your server."""
try:
await user.ban()
await ctx.channel.send(f"The ban hammer has been swung at {user.mention}!")
except discord.Forbidden:
await ctx.send("00F! I need the **Ban Members or Manage Members** permission.")
except discord.ext.commands.MissingPermissions:
await ctx.send("Can't ban people without permissions.")
@commands.has_permissions(ban_members=True)
@commands.command(aliases=['hban'], pass_context=True)
async def hackban(self, ctx, user_id: int):
"""Ban a user outside of your server."""
author = ctx.message.author
guild = author.guild
user = guild.get_member(user_id)
if user is not None:
return await ctx.invoke(self.ban, user=user)
try:
await self.bot.http.ban(user_id, guild.id, 0)
await ctx.message.edit(content=self.bot.bot_prefix + 'Banned user: %s' % user_id)
except discord.NotFound:
await ctx.message.edit(content=self.bot.bot_prefix + 'Could not find user. '
'Invalid ID was given.')
except discord.errors.Forbidden:
await ctx.message.edit(content=self.bot.bot_prefix + 'OOF! I need the **Ban Members or Manage Members** permission.')
@commands.command()
@commands.has_permissions(kick_members=True)
async def mute(self, ctx, user: discord.Member = None):
"""Mute a member"""
if user is None:
return await ctx.send("Please tag that annoying member to mute them!")
try:
await ctx.channel.set_permissions(user, send_messages=False)
return await ctx.send(f"Lol {user.mention} just got muted. ")
except commands.errors.MissingPermissions:
return await ctx.send("You dont have enought permissions.")
except discord.Forbidden:
return await ctx.send("00F! I need the **Manage Channel** permission.")
@commands.command()
@commands.has_permissions(kick_members=True)
async def unmute(self, ctx, user: discord.Member = None):
"""Unmute a member"""
if user is None:
return await ctx.send("Please tag the user in order to unmute them")
try:
await ctx.channel.set_permissions(user, send_messages=True)
return await ctx.send(f"Times up, {user.mention}. You just got unmuted.")
except commands.errors.MissingPermissions:
return await ctx.send(" Cant unmute people with out perms")
except discord.Forbidden:
return await ctx.send("00F! I need the **Manage Channel** permission.")
def setup(bot):
bot.add_cog(Mod(bot))
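# Loading sketch for this legacy-style cog (the module path "cogs.mod" is
# illustrative; adjust it to wherever this file lives in the project):
#
#   bot.load_extension("cogs.mod")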
|
# Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import time
from importlib import import_module
from typing import Callable, List, Optional
import click
from rich.markdown import Markdown
from zenml.cli import utils as cli_utils
from zenml.cli.cli import cli
from zenml.console import console
from zenml.enums import StackComponentType
from zenml.io import fileio
from zenml.repository import Repository
from zenml.stack import StackComponent
def _component_display_name(
component_type: StackComponentType, plural: bool = False
) -> str:
"""Human-readable name for a stack component."""
name = component_type.plural if plural else component_type.value
return name.replace("_", " ")
def _get_stack_component(
component_type: StackComponentType,
component_name: Optional[str] = None,
) -> StackComponent:
"""Gets a stack component for a given type and name.
Args:
component_type: Type of the component to get.
component_name: Name of the component to get. If `None`, the
component of the active stack gets returned.
Returns:
A stack component of the given type.
Raises:
KeyError: If no stack component is registered for the given name.
"""
repo = Repository()
if component_name:
return repo.get_stack_component(component_type, name=component_name)
component = repo.active_stack.components[component_type]
cli_utils.declare(
f"No component name given, using `{component.name}` "
f"from active stack."
)
return component
def generate_stack_component_get_command(
component_type: StackComponentType,
) -> Callable[[], None]:
"""Generates a `get` command for the specific stack component type."""
def get_stack_component_command() -> None:
"""Prints the name of the active component."""
cli_utils.print_active_profile()
cli_utils.print_active_stack()
active_stack = Repository().active_stack
component = active_stack.components.get(component_type, None)
display_name = _component_display_name(component_type)
if component:
cli_utils.declare(f"Active {display_name}: '{component.name}'")
else:
cli_utils.warning(
f"No {display_name} set for active stack ('{active_stack.name}')."
)
return get_stack_component_command
def generate_stack_component_describe_command(
component_type: StackComponentType,
) -> Callable[[Optional[str]], None]:
"""Generates a `describe` command for the specific stack component type."""
@click.argument(
"name",
type=str,
required=False,
)
def describe_stack_component_command(name: Optional[str]) -> None:
"""Prints details about the active/specified component."""
cli_utils.print_active_profile()
cli_utils.print_active_stack()
singular_display_name = _component_display_name(component_type)
plural_display_name = _component_display_name(
component_type, plural=True
)
repo = Repository()
components = repo.get_stack_components(component_type)
if len(components) == 0:
cli_utils.warning(f"No {plural_display_name} registered.")
return
try:
component = _get_stack_component(
component_type, component_name=name
)
except KeyError:
if name:
cli_utils.warning(
f"No {singular_display_name} found for name '{name}'."
)
else:
cli_utils.warning(
f"No {singular_display_name} in active stack."
)
return
try:
active_component_name = repo.active_stack.components[
component_type
].name
is_active = active_component_name == component.name
except KeyError:
# there is no component of this type in the active stack
is_active = False
cli_utils.print_stack_component_configuration(
component, singular_display_name, is_active
)
return describe_stack_component_command
def generate_stack_component_list_command(
component_type: StackComponentType,
) -> Callable[[], None]:
"""Generates a `list` command for the specific stack component type."""
def list_stack_components_command() -> None:
"""Prints a table of stack components."""
cli_utils.print_active_profile()
cli_utils.print_active_stack()
repo = Repository()
components = repo.get_stack_components(component_type)
display_name = _component_display_name(component_type, plural=True)
if len(components) == 0:
cli_utils.warning(f"No {display_name} registered.")
return
try:
active_component_name = repo.active_stack.components[
component_type
].name
except KeyError:
active_component_name = None
cli_utils.print_stack_component_list(
components, active_component_name=active_component_name
)
return list_stack_components_command
def generate_stack_component_register_command(
component_type: StackComponentType,
) -> Callable[[str, str, List[str]], None]:
"""Generates a `register` command for the specific stack component type."""
display_name = _component_display_name(component_type)
@click.argument(
"name",
type=str,
required=True,
)
@click.option(
"--type",
"-t",
"flavor",
help=f"The type of the {display_name} to register.",
required=True,
type=str,
)
@click.argument("args", nargs=-1, type=click.UNPROCESSED)
def register_stack_component_command(
name: str, flavor: str, args: List[str]
) -> None:
"""Registers a stack component."""
cli_utils.print_active_profile()
try:
parsed_args = cli_utils.parse_unknown_options(args)
except AssertionError as e:
cli_utils.error(str(e))
return
from zenml.stack.stack_component_class_registry import (
StackComponentClassRegistry,
)
component_class = StackComponentClassRegistry.get_class(
component_type=component_type, component_flavor=flavor
)
component = component_class(name=name, **parsed_args)
Repository().register_stack_component(component)
cli_utils.declare(f"Successfully registered {display_name} `{name}`.")
return register_stack_component_command
def generate_stack_component_delete_command(
component_type: StackComponentType,
) -> Callable[[str], None]:
"""Generates a `delete` command for the specific stack component type."""
@click.argument("name", type=str)
def delete_stack_component_command(name: str) -> None:
"""Deletes a stack component."""
cli_utils.print_active_profile()
Repository().deregister_stack_component(
component_type=component_type,
name=name,
)
display_name = _component_display_name(component_type)
cli_utils.declare(f"Deleted {display_name}: {name}")
return delete_stack_component_command
def generate_stack_component_up_command(
component_type: StackComponentType,
) -> Callable[[Optional[str]], None]:
"""Generates a `up` command for the specific stack component type."""
@click.argument("name", type=str, required=False)
def up_stack_component_command(name: Optional[str] = None) -> None:
"""Deploys a stack component locally."""
cli_utils.print_active_profile()
cli_utils.print_active_stack()
component = _get_stack_component(component_type, component_name=name)
display_name = _component_display_name(component_type)
if component.is_running:
cli_utils.declare(
f"Local deployment is already running for {display_name} "
f"'{component.name}'."
)
return
if not component.is_provisioned:
cli_utils.declare(
f"Provisioning local resources for {display_name} "
f"'{component.name}'."
)
try:
component.provision()
except NotImplementedError:
cli_utils.error(
f"Provisioning local resources not implemented for "
f"{display_name} '{component.name}'."
)
if not component.is_running:
cli_utils.declare(
f"Resuming local resources for {display_name} "
f"'{component.name}'."
)
component.resume()
return up_stack_component_command
def generate_stack_component_down_command(
component_type: StackComponentType,
) -> Callable[[Optional[str], bool], None]:
"""Generates a `down` command for the specific stack component type."""
@click.argument("name", type=str, required=False)
@click.option(
"--force",
"-f",
is_flag=True,
help="Deprovisions local resources instead of suspending them.",
)
def down_stack_component_command(
name: Optional[str] = None, force: bool = False
) -> None:
"""Stops/Tears down the local deployment of a stack component."""
cli_utils.print_active_profile()
cli_utils.print_active_stack()
component = _get_stack_component(component_type, component_name=name)
display_name = _component_display_name(component_type)
if component.is_running and not force:
cli_utils.declare(
f"Suspending local resources for {display_name} "
f"'{component.name}'."
)
try:
component.suspend()
except NotImplementedError:
cli_utils.error(
f"Provisioning local resources not implemented for "
f"{display_name} '{component.name}'. If you want to "
f"deprovision all resources for this component, use the "
f"`--force/-f` flag."
)
elif component.is_provisioned and force:
cli_utils.declare(
f"Deprovisioning resources for {display_name} "
f"'{component.name}'."
)
component.deprovision()
else:
cli_utils.declare(
f"No provisioned resources found for {display_name} "
f"'{component.name}'."
)
return down_stack_component_command
def generate_stack_component_logs_command(
component_type: StackComponentType,
) -> Callable[[Optional[str], bool], None]:
"""Generates a `logs` command for the specific stack component type."""
@click.argument("name", type=str, required=False)
@click.option(
"--follow",
"-f",
is_flag=True,
help="Follow the log file instead of just displaying the current logs.",
)
def stack_component_logs_command(
name: Optional[str] = None, follow: bool = False
) -> None:
"""Displays stack component logs."""
cli_utils.print_active_profile()
cli_utils.print_active_stack()
component = _get_stack_component(component_type, component_name=name)
display_name = _component_display_name(component_type)
log_file = component.log_file
if not log_file or not fileio.exists(log_file):
cli_utils.warning(
f"Unable to find log file for {display_name} "
f"'{component.name}'."
)
return
if follow:
try:
with open(log_file, "r") as f:
# seek to the end of the file
f.seek(0, 2)
while True:
line = f.readline()
if not line:
time.sleep(0.1)
continue
line = line.rstrip("\n")
click.echo(line)
except KeyboardInterrupt:
cli_utils.declare(f"Stopped following {display_name} logs.")
else:
with open(log_file, "r") as f:
click.echo(f.read())
return stack_component_logs_command
def generate_stack_component_explain_command(
component_type: StackComponentType,
) -> Callable[[], None]:
"""Generates an `explain` command for the specific stack component type."""
def explain_stack_components_command() -> None:
"""Explains the concept of the stack component."""
component_module = import_module(f"zenml.{component_type.plural}")
if component_module.__doc__ is not None:
md = Markdown(component_module.__doc__)
console.print(md)
else:
console.print(
"The explain subcommand is yet not available for "
"this stack component. For more information, you can "
"visit our docs page: https://docs.zenml.io/ and "
"stay tuned for future releases."
)
return explain_stack_components_command
def register_single_stack_component_cli_commands(
component_type: StackComponentType, parent_group: click.Group
) -> None:
"""Registers all basic stack component CLI commands."""
command_name = component_type.value.replace("_", "-")
singular_display_name = _component_display_name(component_type)
plural_display_name = _component_display_name(component_type, plural=True)
@parent_group.group(
command_name, help=f"Commands to interact with {plural_display_name}."
)
def command_group() -> None:
"""Group commands for a single stack component type."""
# zenml stack-component get
get_command = generate_stack_component_get_command(component_type)
command_group.command(
"get", help=f"Get the name of the active {singular_display_name}."
)(get_command)
# zenml stack-component describe
describe_command = generate_stack_component_describe_command(component_type)
command_group.command(
"describe",
help=f"Show details about the (active) {singular_display_name}.",
)(describe_command)
# zenml stack-component list
list_command = generate_stack_component_list_command(component_type)
command_group.command(
"list", help=f"List all registered {plural_display_name}."
)(list_command)
# zenml stack-component register
register_command = generate_stack_component_register_command(component_type)
context_settings = {"ignore_unknown_options": True}
command_group.command(
"register",
context_settings=context_settings,
help=f"Register a new {singular_display_name}.",
)(register_command)
# zenml stack-component delete
delete_command = generate_stack_component_delete_command(component_type)
command_group.command(
"delete", help=f"Delete a registered {singular_display_name}."
)(delete_command)
# zenml stack-component up
up_command = generate_stack_component_up_command(component_type)
command_group.command(
"up",
help=f"Provisions or resumes local resources for the {singular_display_name} if possible.",
)(up_command)
# zenml stack-component down
down_command = generate_stack_component_down_command(component_type)
command_group.command(
"down",
help=f"Suspends resources of the local {singular_display_name} deployment.",
)(down_command)
# zenml stack-component logs
logs_command = generate_stack_component_logs_command(component_type)
command_group.command(
"logs", help=f"Display {singular_display_name} logs."
)(logs_command)
# zenml stack-component explain
explain_command = generate_stack_component_explain_command(component_type)
command_group.command(
"explain", help=f"Explaining the {plural_display_name}."
)(explain_command)
def register_all_stack_component_cli_commands() -> None:
"""Registers CLI commands for all stack components."""
for component_type in StackComponentType:
register_single_stack_component_cli_commands(
component_type, parent_group=cli
)
register_all_stack_component_cli_commands()
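# Example invocations of the generated commands (component names and extra flags are
# illustrative; the exact flavor options depend on the registered component classes):
#
#   zenml orchestrator get
#   zenml artifact-store register my_store --type local --path=/tmp/zenml_store
#   zenml orchestrator up
#
# Every member of StackComponentType gets the same get/describe/list/register/delete/
# up/down/logs/explain sub-commands under its own command group.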
|
try:
import _persistence_module
except ImportError as err:
class _persistence_module:
@staticmethod
def run_persistence_operation(persistence_op_type, protocol_buffer, save_slot_id, callback):
callback(save_slot_id, False)
return False
class PersistenceOpType:
kPersistenceOpInvalid = 0
kPersistenceOpLoad = 1
kPersistenceOpSave = 2
kPersistenceOpLoadZoneObjects = 3
kPersistenceOpSaveZoneObjects = 4
kPersistenceOpSaveGameplayGlobalData = 5
kPersistenceOpLoadGameplayGlobalData = 6
kPersistenceOpSaveHousehold = 1000
run_persistence_operation = _persistence_module.run_persistence_operation
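# Behaviour sketch of the fallback stub above (only taken when the native
# _persistence_module cannot be imported): the callback fires immediately with
# success=False and the call itself returns False.
#
#   run_persistence_operation(PersistenceOpType.kPersistenceOpSave,
#                             protocol_buffer=None, save_slot_id=0,
#                             callback=lambda slot_id, ok: print(slot_id, ok))
#   # -> prints "0 False" and returns False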
|
from tests.compat import unittest, mock
from tests.test_search_exact import TestSearchExactBase
from tests.test_substitutions_only import TestSubstitionsOnlyBase
from tests.test_levenshtein import TestFindNearMatchesLevenshteinBase
from fuzzysearch import find_near_matches, Match
from fuzzysearch.common import FuzzySearchBase
class MockSearchClassFailsUnlessDefined(FuzzySearchBase):
UNDEFINED = object()
def __init__(self):
self.return_value = self.UNDEFINED
self.call_count = 0
self.call_args = None
def search(self, *args, **kwargs):
self.call_count += 1
self.call_args = (args, kwargs)
if self.return_value is self.UNDEFINED:
raise Exception('Undefined mock function called!')
else:
return self.return_value
class TestFindNearMatches(unittest.TestCase):
def patch_concrete_search_classes(self):
self.mock_search_exact = MockSearchClassFailsUnlessDefined()
self.mock_find_near_matches_levenshtein = \
MockSearchClassFailsUnlessDefined()
self.mock_find_near_matches_substitutions = \
MockSearchClassFailsUnlessDefined()
self.mock_find_near_matches_generic = \
MockSearchClassFailsUnlessDefined()
patcher = mock.patch.multiple(
'fuzzysearch',
ExactSearch=self.mock_search_exact,
LevenshteinSearch=
self.mock_find_near_matches_levenshtein,
SubstitutionsOnlySearch=
self.mock_find_near_matches_substitutions,
GenericSearch=
self.mock_find_near_matches_generic,
)
self.addCleanup(patcher.stop)
patcher.start()
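# The tests below pin down which concrete search class find_near_matches dispatches
# to: ExactSearch when no edits are effectively allowed, LevenshteinSearch when
# max_l_dist is the binding constraint, SubstitutionsOnlySearch when insertions and
# deletions are disallowed, and GenericSearch otherwise.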
def test_no_limitations(self):
with self.assertRaises(Exception):
find_near_matches('a', 'a')
def test_unlimited_parameter(self):
with self.assertRaises(Exception):
find_near_matches('a', 'a', max_substitutions=1)
with self.assertRaises(Exception):
find_near_matches('a', 'a', max_insertions=1)
with self.assertRaises(Exception):
find_near_matches('a', 'a', max_deletions=1)
with self.assertRaises(Exception):
find_near_matches('a', 'a', max_substitutions=1, max_insertions=1)
with self.assertRaises(Exception):
find_near_matches('a', 'a', max_substitutions=1, max_deletions=1)
with self.assertRaises(Exception):
find_near_matches('a', 'a', max_insertions=1, max_deletions=1)
def test_all_zero(self):
self.patch_concrete_search_classes()
self.mock_search_exact.return_value = [Match(42, 43, 0, 'x')]
self.assertEqual(
find_near_matches('a', 'a', 0, 0, 0, 0),
[Match(42, 43, 0, 'x')],
)
self.assertEqual(self.mock_search_exact.call_count, 1)
def test_zero_max_l_dist(self):
self.patch_concrete_search_classes()
self.mock_search_exact.return_value = [Match(42, 43, 0, 'x')]
call_count = 0
for (max_subs, max_ins, max_dels) in [
(1, 0, 0),
(0, 1, 0),
(0, 0, 1),
(1, 1, 0),
(1, 0, 1),
(0, 1, 1),
(1, 1, 1),
]:
with self.subTest('max_l_dist=0, max_subs={0}, max_ins={1}, max_dels={2}'.format(
max_subs, max_ins, max_dels)):
self.assertEqual(
find_near_matches('a', 'a', max_subs, max_ins, max_dels, 0),
[Match(42, 43, 0, 'x')],
)
call_count += 1
self.assertEqual(self.mock_search_exact.call_count, call_count)
def test_all_zero_except_max_l_dist(self):
self.patch_concrete_search_classes()
self.mock_search_exact.return_value = [Match(42, 43, 0, 'x')]
self.assertEqual(
find_near_matches('a', 'a', 0, 0, 0, 1),
[Match(42, 43, 0, 'x')],
)
self.assertEqual(self.mock_search_exact.call_count, 1)
def test_all_none_except_max_l_dist(self):
self.patch_concrete_search_classes()
self.mock_find_near_matches_levenshtein.return_value = \
[Match(42, 43, 0, 'x')]
self.assertEqual(
find_near_matches('a', 'a', max_l_dist=1),
[Match(42, 43, 0, 'x')],
)
self.assertEqual(self.mock_find_near_matches_levenshtein.call_count, 1)
def test_levenshtein(self):
"""test cases where 0 < max_l_dist <= max(others)"""
# in these cases, find_near_matches should call
# find_near_matches_levenshtein
self.patch_concrete_search_classes()
self.mock_find_near_matches_levenshtein.return_value = \
[mock.sentinel.SENTINEL]
self.assertEqual(
find_near_matches('a', 'a', 1, 1, 1, 1),
[mock.sentinel.SENTINEL],
)
self.assertEqual(self.mock_find_near_matches_levenshtein.call_count, 1)
self.assertEqual(
find_near_matches('a', 'a', 2, 2, 2, 2),
[mock.sentinel.SENTINEL],
)
self.assertEqual(self.mock_find_near_matches_levenshtein.call_count, 2)
self.assertEqual(
find_near_matches('a', 'a', 5, 3, 7, 2),
[mock.sentinel.SENTINEL],
)
self.assertEqual(self.mock_find_near_matches_levenshtein.call_count, 3)
def test_only_substitutions(self):
self.patch_concrete_search_classes()
self.mock_find_near_matches_substitutions.return_value = \
[Match(42, 43, 0, 'x')]
self.assertEqual(
find_near_matches('a', 'a', 1, 0, 0),
[Match(42, 43, 0, 'x')],
)
self.assertEqual(
self.mock_find_near_matches_substitutions.call_count,
1,
)
self.assertEqual(
find_near_matches('a', 'a', 1, 0, 0, 1),
[Match(42, 43, 0, 'x')],
)
self.assertEqual(
self.mock_find_near_matches_substitutions.call_count,
2,
)
def test_generic(self):
self.patch_concrete_search_classes()
self.mock_find_near_matches_generic.return_value = \
[Match(42, 43, 0, 'x')]
self.assertEqual(
find_near_matches('a', 'a', 1, 1, 1),
[Match(42, 43, 0, 'x')],
)
self.assertEqual(
self.mock_find_near_matches_generic.call_count,
1,
)
self.assertEqual(
find_near_matches('a', 'a', 1, 1, 1, 2),
[Match(42, 43, 0, 'x')],
)
self.assertEqual(
self.mock_find_near_matches_generic.call_count,
2,
)
class TestFindNearMatchesAsLevenshtein(TestFindNearMatchesLevenshteinBase,
unittest.TestCase):
def search(self, subsequence, sequence, max_l_dist):
return find_near_matches(subsequence, sequence, max_l_dist=max_l_dist)
class TestFindNearMatchesAsSearchExact(TestSearchExactBase,
unittest.TestCase):
def search(self, subsequence, sequence, start_index=0, end_index=None):
if end_index is None:
end_index = len(sequence)
sequence = sequence[start_index:end_index]
return [
start_index + match.start
for match in find_near_matches(subsequence, sequence, max_l_dist=0)
]
@classmethod
def get_supported_sequence_types(cls):
from tests.test_search_exact import TestSearchExact
return TestSearchExact.get_supported_sequence_types()
class TestFindNearMatchesAsSubstitutionsOnly(TestSubstitionsOnlyBase,
unittest.TestCase):
def search(self, subsequence, sequence, max_subs):
return find_near_matches(subsequence, sequence,
max_insertions=0, max_deletions=0,
max_substitutions=max_subs)
def expectedOutcomes(self, search_results, expected_outcomes, *args, **kwargs):
return self.assertEqual(search_results, expected_outcomes, *args, **kwargs)
from tests.test_generic_search import TestGenericSearch
class TestFindNearMatchesAsGeneric(TestGenericSearch,
unittest.TestCase):
def search(self, pattern, sequence, max_subs, max_ins, max_dels,
max_l_dist=None):
return find_near_matches(pattern, sequence,
max_subs, max_ins, max_dels, max_l_dist)
del TestGenericSearch
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 6 11:34:06 2020
@author: satish
"""
import numpy as np
import os
import sys
import argparse
import random
import torch
import torch.nn as nn
from torch.autograd import Variable, gradcheck
class MSE_loss:
def __init__(self):
print("Initializing MSE loss")
def mse_weighted(self, x, y, mask):
'''
Arguments
---------
x : target label
y : prediction/input
mask : weighting mask; non-zero entries mark the peak region
we want to penalize the error more when nearing the peak
'''
self.target = x
self.pred = y
self.mask = mask
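# Positions where mask > 0 (the "peak" region) are up-weighted by the ratio of
# non-peak to peak elements, so the usually small number of peak cells contributes
# roughly as much to the squared error as the rest of the tensor.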
penalty_pos = torch.where(self.mask>0.0)
if(penalty_pos[0].shape[0] == 0): penalty_ratio = 1.0
else:
penalty_ratio = (self.target.numel()-penalty_pos[0].shape[0])/penalty_pos[0].shape[0]
mask[penalty_pos] = mask[penalty_pos]*penalty_ratio
#import pdb; pdb.set_trace()
sq_error = torch.sum(((self.pred - self.target) ** 2)*mask)
mean_loss = sq_error/(self.target.shape[1]*self.target.shape[2])
return mean_loss
if __name__ == '__main__':
MSE = MSE_loss()
#import pdb; pdb.set_trace()
x1 = Variable(torch.randn(1,10,3), requires_grad=True)
x2 = Variable(torch.randn(1,10,3), requires_grad=True)
# mse_weighted also requires a weighting mask; use an illustrative binary mask here
# marking a few "peak" positions so the up-weighting branch is exercised.
mask = (torch.rand(1, 10, 3) > 0.7).float()
print(x1.shape)
mse = MSE.mse_weighted(x1, x2, mask)
print(mse)
|
from django.contrib.auth.models import AbstractUser
from django.db import models
from multiselectfield import MultiSelectField
from django.conf import settings
class CustomUser(AbstractUser):
# user for chat id for current
gender = models.CharField(max_length=26,choices=[('Male', 'Male'), ('Female', 'Female')],default='Male')
phonenumber = models.IntegerField(null=True)
profilePicture= models.URLField(max_length=1000,default='https://avatar-management--avatars.us-west-2.prod.public.atl-paas.net/:/128')
interests = MultiSelectField(max_length=100,null=True, choices=[
('Reading', 'Reading'),
('Cycling', 'Cycling'),
('Hiking','Hiking'),
('Drawing', 'Drawing'),
('Photography', 'Photography'),
('Swimming','Swimming'),
('Sleeping','Sleeping'),
('Sports','Sports'),
('Gaming','Gaming')])
def __str__(self):
return str(self.id)
class Follow(models.Model):
to_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='to_user', on_delete=models.CASCADE)
from_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='from_user', on_delete=models.CASCADE)
timestamp = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.id)
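# Illustrative follower query (`some_user` is made up): the ids of everyone who
# follows a given user can be read off the Follow table directly, e.g.
#
#   follower_ids = Follow.objects.filter(to_user=some_user).values_list("from_user", flat=True)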
|
import json
import os, sys
sys.path.append('/opt/airflow/')
from dags.connectors.sf import _write_to_stage, sf
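# _opened_vaults below rebuilds a {vault urn -> cdp_id} mapping: first from rows
# already loaded into staging.manager, then from the raw extracts staged under
# @mcd.staging.vaults_extracts/<manager>, keeping only successful `open` calls from
# the extracts.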
def _opened_vaults(manager, **setup):
q = f"""SELECT outputs[1].value::string, outputs[0].value
FROM {setup['db']}.staging.manager
WHERE function = 'open'; """
opened_vaults_list = sf.execute(q).fetchall()
opened_vaults = dict()
for urn, cdp_id in opened_vaults_list:
opened_vaults[urn] = cdp_id
for (
load_id,
block,
timestamp,
breadcrumb,
tx_hash,
tx_index,
type,
value,
from_address,
to_address,
function,
arguments,
outputs,
error,
status,
gas_used,
) in sf.execute(f"""
select t.$1, t.$2, t.$3, t.$4, t.$5, t.$6, t.$7, t.$8, t.$9, t.$10, t.$11, t.$12, t.$13, t.$14, t.$15, t.$16
from @mcd.staging.vaults_extracts/{manager} ( FILE_FORMAT => mcd.staging.mcd_file_format ) t
order by t.$2;
""").fetchall():
status = int(status)
outputs = json.loads(outputs.replace("\'", "\""))
if status == 1 and function == 'open':
opened_vaults[outputs[1]['value']] = outputs[0]['value']
return opened_vaults
|
'''
QBI Batch Crop APP: Batch cropping of high resolution microscopy images
QBI Software Team
*******************************************************************************
Copyright (C) 2018 QBI Software, The University of Queensland
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
'''
import csv
import os
import re
import sys
import time
from os import mkdir
from os.path import join, isdir, exists
import matplotlib
# maintain this order of matplotlib
# TkAgg causes Runtime errors in Thread
matplotlib.use('Agg')
from autoanalysis.gui.ImageThumbnail import ImageThumbnail
from autoanalysis.gui.ImageViewer import ImageViewer
import wx
import wx.xrc
import wx.html2
import wx.dataview
from glob import iglob
from autoanalysis.controller import EVT_RESULT, Controller
from autoanalysis.utils import findResourceDir
from autoanalysis.gui.appgui import ConfigPanel, FilesPanel, WelcomePanel, ProcessPanel, dlgLogViewer
__version__ = '1.1.2'
DEBUG = 1
COLWIDTH=400 #DISPLAY COLUMNS
########################################################################
class HomePanel(WelcomePanel):
"""
This will be the first notebook tab
"""
# ----------------------------------------------------------------------
def __init__(self, parent):
super(HomePanel, self).__init__(parent)
img = wx.Bitmap(1, 1)
img.LoadFile(join(findResourceDir(), 'loadimage.bmp'), wx.BITMAP_TYPE_BMP)
self.m_richText1.BeginFontSize(14)
welcome = "Welcome to the Batch Slide Cropper App (v.%s)" % __version__
self.m_richText1.WriteText(welcome)
self.m_richText1.EndFontSize()
self.m_richText1.Newline()
# self.m_richText1.BeginLeftIndent(20)
self.m_richText1.BeginItalic()
self.m_richText1.WriteText("developed by QBI Software, The University of Queensland")
self.m_richText1.EndItalic()
# self.m_richText1.EndLeftIndent()
self.m_richText1.Newline()
self.m_richText1.WriteImage(img)
self.m_richText1.Newline()
self.m_richText1.WriteText(
r'''This is a multi-threaded application to crop serial sections into individual images.''')
self.m_richText1.Newline()
# self.m_richText1.BeginNumberedBullet(1, 0.2, 0.2, wx.TEXT_ATTR_BULLET_STYLE)
self.m_richText1.BeginBold()
self.m_richText1.WriteText("Configure")
self.m_richText1.EndBold()
self.m_richText1.Newline()
# self.m_richText1.BeginLeftIndent(20)
self.m_richText1.WriteText(
'All options can be specifically configured and multiple configurations saved and reloaded.')
self.m_richText1.Newline()
self.m_richText1.BeginBold()
self.m_richText1.WriteText("Select Files")
self.m_richText1.EndBold()
# self.m_richText1.BeginLeftIndent(20)
self.m_richText1.Newline()
self.m_richText1.WriteText(
"Select a top level directory containing the required data files and/or use the Drag'n'Drop for individual files. Only files checked in the file list will be included in processing. A preview of each file can be displayed by selecting the filename. If required, the file list can be saved and reloaded.")
self.m_richText1.Newline()
self.m_richText1.BeginBold()
self.m_richText1.WriteText("Run Processes")
self.m_richText1.EndBold()
# self.m_richText1.BeginLeftIndent(20)
self.m_richText1.Newline()
self.m_richText1.WriteText(
r"Select which processing modules is to run by viewing description. Each file is processed in the background as a multi-threaded process which will run in sequence as listed and once running their progress can be monitored. Any output files will be put in the output directory specified or in subfolders under a directory called 'cropped' in the input directory structure. A review panel is provided for output files. A log file is produced which can be viewed in a popup.")
# self.m_richText1.EndLeftIndent()
self.m_richText1.Newline()
self.m_richText1.BeginItalic()
self.m_richText1.AddParagraph(
r"Copyright (2018) https://github.com/QBI-Microscopy/BatchCrop")
self.m_richText1.EndItalic()
def loadController(self):
pass
########################################################################
class Config(ConfigPanel):
def __init__(self, parent):
super(Config, self).__init__(parent)
self.parent = parent
self.loadController()
def loadController(self):
self.controller = self.parent.controller
self.OnLoadData()
def OnLoadData(self):
self.controller.db.connect()
# load config values
rownum = 0
conf = self.controller.db.getConfigALL(self.controller.currentconfig)
if conf is not None:
for k in conf.keys():
self.m_grid1.SetCellValue(rownum, 0, k)
self.m_grid1.SetCellValue(rownum, 1, conf[k][0])
if conf[k][1] is not None:
self.m_grid1.SetCellValue(rownum, 2, conf[k][1])
rownum += 1
self.m_grid1.AutoSizeColumns()
self.m_grid1.AutoSize()
self.controller.db.closeconn()
def OnSaveConfig(self, event):
self.controller.db.connect()
configid = self.controller.currentconfig
configlist = []
data = self.m_grid1.GetTable()
for rownum in range(0, data.GetRowsCount()):
if not data.IsEmptyCell(rownum, 0):
configlist.append((self.m_grid1.GetCellValue(rownum, 0), self.m_grid1.GetCellValue(rownum, 1), configid,
self.m_grid1.GetCellValue(rownum, 2)))
# print('Saved config:', configlist)
# Save to DB
cnt = self.controller.db.addConfig(configid, configlist)
# reload other panels
for fp in self.parent.Children:
if isinstance(fp, wx.Panel) and self.__class__ != fp.__class__:
fp.loadController()
# notification
#msg = "Config saved: %s" % configid
msg = "Config updated successfully"
self.Parent.Info(msg)
self.controller.db.closeconn()
def OnAddRow(self, event):
self.m_grid1.AppendRows(1, True)
########################################################################
class MyFileDropTarget(wx.FileDropTarget):
def __init__(self, target):
super(MyFileDropTarget, self).__init__()
self.target = target
def OnDropFiles(self, x, y, filenames):
group = ''
fileList = [str(self.target.GetTextValue(i, 1)) for i in range(self.target.GetItemCount())]
for fname in list(set(filenames).difference(set(fileList))):
self.target.AppendItem([True, fname, "{:0.3f}".format(os.stat(fname).st_size / 10e8)])
# Update status bar
status = 'Total files loaded: %s' % self.target.Parent.m_dataViewListCtrl1.GetItemCount()
self.target.Parent.m_status.SetLabelText(status)
return len(filenames)
########################################################################
class FileSelectPanel(FilesPanel):
def __init__(self, parent):
super(FileSelectPanel, self).__init__(parent)
self.loadController()
self.filedrop = MyFileDropTarget(self.m_dataViewListCtrl1)
self.m_tcDragdrop.SetDropTarget(self.filedrop)
self.inputdir = None
self.preview_thumbnail = None
def OnFileClicked(self, event):
row = self.m_dataViewListCtrl1.ItemToRow(event.GetItem())
filepath = self.m_dataViewListCtrl1.GetTextValue(row, 1)
#print('File clicked: ', filepath)
if self.preview_thumbnail is not None:
self.preview_thumbnail.Destroy()
try:
W, H = self.panel_right.GetSize()
self.preview_thumbnail = ImageThumbnail(self.panel_right, filepath, max_size=(H, W))
self.panel_right.Sizer.Add(self.preview_thumbnail, wx.CENTER)
except Exception as e:
msg = "Could not open file {0} as an IMS image. Error is {1}".format(filepath, str(e))
self.Parent.Warn(msg)
#self.preview_thumbnail = None
# self.Layout()
def loadController(self):
self.controller = self.Parent.controller
def OnInputdir(self, e):
""" Open a file"""
dlg = wx.DirDialog(self, "Choose a directory containing input files")
if dlg.ShowModal() == wx.ID_OK:
self.inputdir = str(dlg.GetPath())
self.txtInputdir.SetValue(self.inputdir)
dlg.Destroy()
def OnOutputdir(self, e):
""" Open a file"""
dlg = wx.DirDialog(self, "Choose a directory for output files")
if dlg.ShowModal() == wx.ID_OK:
self.outputdir = str(dlg.GetPath())
self.txtOutputdir.SetValue(self.outputdir)
dlg.Destroy()
def OnSaveList(self, event):
"""
Save selected files to csv
:param event:
:return:
"""
num_files = self.m_dataViewListCtrl1.GetItemCount()
try:
openFileDialog = wx.FileDialog(self, "Save file list", "", "", "CSV files (*.csv)|*",
wx.FD_SAVE | wx.FD_CHANGE_DIR)
if openFileDialog.ShowModal() == wx.ID_OK:
savefile = str(openFileDialog.GetPath())
with open(savefile, 'w') as csvfile:
swriter = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i in range(0, num_files):
if self.m_dataViewListCtrl1.GetToggleValue(i, 0):
swriter.writerow(
[self.m_dataViewListCtrl1.GetValue(i, 1), self.m_dataViewListCtrl1.GetValue(i, 2)])
self.Parent.Info('SUCCESS: List saved')
except Exception as e:
self.Parent.Warn("ERROR: Save list:" + e.args[0])
def loadFileToPanel(self, filepath):
currentFileList = [str(self.m_dataViewListCtrl1.GetTextValue(i, 1)) for i in
range(self.m_dataViewListCtrl1.GetItemCount())]
if filepath not in currentFileList:
self.m_dataViewListCtrl1.AppendItem([True, filepath, "{:0.3f}".format(os.stat(filepath).st_size / 10e8)])
def OnLoadList(self, event):
"""
Load saved list
:param event:
:return:
"""
try:
openFileDialog = wx.FileDialog(self, "Open file list", "", "", "CSV files (*.csv)|*",
wx.FD_OPEN | wx.FD_FILE_MUST_EXIST | wx.FD_CHANGE_DIR)
if openFileDialog.ShowModal() == wx.ID_OK:
savefile = str(openFileDialog.GetPath())
with open(savefile, 'r') as csvfile:
sreader = csv.reader(csvfile, delimiter=',', quotechar='"')
self.m_dataViewListCtrl1.DeleteAllItems()
for row in sreader:
if len(row) > 0:
self.loadFileToPanel(row[0])
msg = "Total Files loaded: %d" % self.m_dataViewListCtrl1.GetItemCount()
self.m_status.SetLabelText(msg)
# Try to resize column
self.col_file.SetWidth(COLWIDTH)
except Exception as e:
#print(e.args[0])
self.Parent.Warn("Load list error:" + e.args[0])
def OnAutofind(self, event):
"""
Find all matching files in top level directory
:param event:
:return:
"""
self.btnAutoFind.Disable()
if self.inputdir is not None:
self.m_status.SetLabelText("Finding files ... please wait")
#imgtype = self.controller.db.getConfigByName(self.controller.currentconfig, 'IMAGE_TYPE')
imgtype = self.m_choiceType.GetStringSelection().lower()
if imgtype is None:
imgtype = '*.ims'
else:
imgtype = '*.' + imgtype
allfiles = [y for y in iglob(join(self.inputdir, '**', imgtype), recursive=True)]
searchtext = self.m_tcSearch.GetValue()
if (len(searchtext) > 0):
allfiles = [f for f in allfiles if re.search(searchtext, f, flags=re.IGNORECASE)]
#Exclude directories
filenames = [f for f in allfiles if not isdir(f)]
for fname in filenames:
self.loadFileToPanel(fname)
if DEBUG:
msg = 'FilePanel loaded: %s' % fname
print(msg)
# Try to resize column
self.col_file.SetWidth(COLWIDTH)
msg = "Total Files loaded: %d" % len(filenames)
self.m_status.SetLabelText(msg)
else:
self.Parent.Warn("Cannot autofind files when no directory is selected. Please select Top Level Directory.")
self.btnAutoFind.Enable(True)
def OnSelectall(self, event):
for i in range(0, self.m_dataViewListCtrl1.GetItemCount()):
self.m_dataViewListCtrl1.SetToggleValue(event.GetSelection(), i, 0)
if DEBUG:
print("Toggled selections to: ", event.GetSelection())
def OnClearlist(self, event):
if DEBUG:
print("Clear items in list")
self.m_dataViewListCtrl1.DeleteAllItems()
########################################################################
class ProcessRunPanel(ProcessPanel):
def __init__(self, parent):
super(ProcessRunPanel, self).__init__(parent)
#self.m_panelImageOrder = ImagePanel(self)
self.loadController()
self.loadCaptions()
# Bind progress update function
EVT_RESULT(self, self.progressfunc)
# Set timer handler
self.start = {}
def loadController(self):
self.controller = self.Parent.controller
def loadCaptions(self):
self.controller.db.connect()
processes = self.controller.db.getCaptions()
self.m_checkListProcess.Clear()
self.m_checkListProcess.AppendItems(processes)
self.controller.db.closeconn()
def OnShowDescription(self, event):
"""
Pull the clicked-on process description from the Database and display it in the description panel.
"""
self.controller.db.connect()
ref = self.controller.db.getRef(event.String)
desc = self.controller.db.getDescription(ref)
self.m_stTitle.SetLabelText(event.String)
self.m_stDescription.Clear()
self.m_stDescription.WriteText(desc)
self.Layout()
self.controller.db.closeconn()
def progressfunc(self, msg):
"""
Update progress bars in progress table. This will be sent from the panel running a process
(from the controller) with a msg as a RESULTEVENT.
:param msg: message passed in to the Process panel. Currently in the form
(count, row, process, outputPath, update)
"""
(count, row, process, outputPath, update) = msg.data
if count == 0:
self.m_dataViewListCtrlRunning.AppendItem([process, outputPath, count, "Starting"])
self.m_dataViewListColumnFilename.SetWidth(200)
self.m_dataViewListColumnOutput.SetWidth(200)
self.m_dataViewListCtrlRunning.Refresh()
self.start[process] = time.time()
elif count < 100:
self.m_dataViewListCtrlRunning.SetValue(count, row=row, col=2)
self.m_dataViewListCtrlRunning.SetValue("Running - " + str(update), row=row, col=3)
self.m_dataViewListCtrlRunning.Refresh()
self.m_stOutputlog.SetLabelText("Running: %s for %s ...please wait" % (process, outputPath))
elif count == 100:
status ='Done'
if process in self.start:
endtime = time.time() - self.start[process]
status = "%s (%d secs)" % ("Done", endtime)
self.m_dataViewListCtrlRunning.SetValue(count, row=row, col=2)
self.m_dataViewListCtrlRunning.SetValue(status, row=row, col=3)
self.m_btnRunProcess.Enable()
self.m_stOutputlog.SetLabelText("COMPLETED process %s " % process)
else:
self.m_dataViewListCtrlRunning.SetValue("ERROR in process - see log file : " + update, row=row, col=3)
self.m_btnRunProcess.Enable()
def OnShowResults(self, event):
"""
Event handler for when a user clicks on a completed row in the process panel. Loads the cropped
segment images from the output directory into the review list and launches the image viewer.
"""
# Get the file directory from the selected row.
row = self.m_dataViewListCtrlRunning.ItemToRow(event.GetItem())
process = self.m_dataViewListCtrlRunning.GetTextValue(row, 0)
status = self.m_dataViewListCtrlRunning.GetTextValue(row, 3)
if self.controller.db.getProcessFilesout(process) != 'NA' and status.startswith('Done'):
self.segmentGridPath = self.m_dataViewListCtrlRunning.GetTextValue(row, 1)
# Load filenames to Review Panel
imglist = [y for y in iglob(join(self.segmentGridPath, '*.tiff'), recursive=False)]
self.m_dataViewListCtrlReview.DeleteAllItems()
for fname in imglist:
self.m_dataViewListCtrlReview.AppendItem([False, fname, "{:0.3f}".format(os.stat(fname).st_size / 10e8)])
self.m_Col_reviewFilename.SetWidth(COLWIDTH)
# Launch Viewer in separate window
viewerapp = wx.App()
frame = ImageViewer(imglist)
viewerapp.MainLoop()
def OnDeleteImage(self, event):
"""
Delete image from disk if marked in list
:return:
"""
dial = wx.MessageDialog(None, 'Are you sure you want to delete these files?', 'Question',
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
ret = dial.ShowModal()
if ret == wx.ID_YES:
try:
filenames = []
for i in range(self.m_dataViewListCtrlReview.GetItemCount()):
fname = self.m_dataViewListCtrlReview.GetValue(i, 1)
if self.m_dataViewListCtrlReview.GetToggleValue(i, 0):
os.remove(fname)
msg = "PROCESSPANEL: Deleted file: %s" % fname
print(msg)
else:
filenames.append(fname)
# Refresh list
self.m_dataViewListCtrlReview.DeleteAllItems()
for fname in filenames:
self.m_dataViewListCtrlReview.AppendItem([False, fname,"{:0.3f}".format(os.stat(fname).st_size / 10e8)])
self.Refresh()
except PermissionError as e:
self.Parent.Warn('Windows Permission Error: file is still open so cannot delete: ' + str(e))
except Exception as e:
self.Parent.Warn('Error: cannot delete selected file: ' + str(e))
def getFilePanel(self):
"""
Get access to filepanel
:return:
"""
filepanel = None
for fp in self.Parent.Children:
if isinstance(fp, FileSelectPanel):
filepanel = fp
break
return filepanel
def getDefaultOutputdir(self):
"""
:return: the default output directory for the segmented images. If a problem occurred, return default.
"""
default = "cropped"
try:
self.controller.db.connect()
sdir = self.controller.db.getConfigByName(self.controller.currentconfig, 'CROPPED_IMAGE_FILES')
if len(sdir) > 0:
default = sdir
except Exception as e:
print("Error occured when getting the default output directory: {0}".format(str(e)))
finally:
self.controller.db.closeconn()
return default
def OnRunScripts(self, event):
"""
Run selected scripts sequentially - updating progress bars
:param event:
:return:
"""
# Clear processing window
self.m_dataViewListCtrlRunning.DeleteAllItems()
# Disable Run button
btn = event.GetEventObject()
btn.Disable()
# Get selected processes
selections = self.m_checkListProcess.GetCheckedStrings()
try:
if len(selections) <= 0:
raise ValueError("No Processes selected. Please check at least one process then click Run.")
# Get data from other panels
filepanel = self.getFilePanel()
filenames = []
num_files = filepanel.m_dataViewListCtrl1.GetItemCount()
outputdir = filepanel.txtOutputdir.GetValue()
# Set output directory: if blank will use subdir in inputdir
if len(outputdir) <= 0:
outputdir = join(filepanel.txtInputdir.GetValue(), self.getDefaultOutputdir())
if not exists(outputdir):
mkdir(outputdir)
self.outputdir = outputdir
if num_files > 0:
# Get selected files and sort into groups
for i in range(0, num_files):
if filepanel.m_dataViewListCtrl1.GetToggleValue(i, 0):
fname = filepanel.m_dataViewListCtrl1.GetValue(i, 1)
if not isdir(fname):
filenames.append(fname)
if len(filenames) <= 0:
raise ValueError("No files selected in Files Panel")
# For each process
prow = 0
for pcaption in selections:
p = self.controller.db.getRef(pcaption)
prow = self.controller.RunProcess(self, p, outputdir, filenames,prow)
prow += 1
else:
raise ValueError("No files selected - please go to Files Panel and add to list")
except Exception as e:
self.Parent.Warn(e.args[0])
# Enable Run button
self.m_btnRunProcess.Enable()
finally:
if self.controller.db is not None:
self.controller.db.closeconn()
def OnShowLog(self, event):
"""
Load logfile into viewer
:param event:
:return:
"""
dlg = dlgLogViewer(self)
logfile = self.controller.logfile
dlg.tcLog.LoadFile(logfile)
dlg.ShowModal()
dlg.Destroy()
def OnClearWindow(self, event):
self.m_dataViewListCtrlRunning.DeleteAllItems()
def OnStopProcessing( self, event ):
self.m_stOutputlog.SetLabelText('Called Stop processing ... will end after current image processed')
self.controller.shutdown()
while not self.controller._stopevent.isSet():
time.sleep(1)
self.m_stOutputlog.SetLabelText('Called Stop processing ... will end after current image processed')
self.m_stOutputlog.SetLabelText('Called Stop processing -complete')
########################################################################
class AppMain(wx.Listbook):
def __init__(self, parent):
"""Constructor"""
wx.Listbook.__init__(self, parent, wx.ID_ANY, style=wx.BK_DEFAULT)
self.controller = Controller()
self.InitUI()
self.Centre(wx.BOTH)
self.Show()
def InitUI(self):
# make an image list using the LBXX images
il = wx.ImageList(32, 32)
bmp = wx.ArtProvider.GetBitmap(wx.ART_GO_HOME, wx.ART_FRAME_ICON, (32, 32))
il.Add(bmp)
bmp = wx.ArtProvider.GetBitmap(wx.ART_TIP, wx.ART_FRAME_ICON, (32, 32))
il.Add(bmp)
bmp = wx.ArtProvider.GetBitmap(wx.ART_FOLDER, wx.ART_FRAME_ICON, (32, 32))
il.Add(bmp)
bmp = wx.ArtProvider.GetBitmap(wx.ART_GO_FORWARD, wx.ART_FRAME_ICON, (32, 32))
il.Add(bmp)
self.AssignImageList(il)
pages = [(HomePanel(self), "Welcome"),
(Config(self), "Configure"),
(FileSelectPanel(self), "Select Files"),
(ProcessRunPanel(self), "Run Processes")]
imID = 0
for page, label in pages:
self.AddPage(page, label, imageId=imID)
# self.AddPage(page, label)
imID += 1
if sys.platform == 'win32':
self.GetListView().SetColumnWidth(0, wx.LIST_AUTOSIZE)
def Warn(self, message, caption='Warning!'):
dlg = wx.MessageDialog(self, message, caption, wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
def Info(self, message, caption='Information'):
dlg = wx.MessageDialog(self, message, caption, wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
########################################################################
class AppFrame(wx.Frame):
"""
Frame that holds all other widgets
"""
# ----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
title = "Batch Crop Application [v %s]" % __version__
wx.Frame.__init__(self, None, wx.ID_ANY,
title,
size=(1000, 720)
)
self.Bind(wx.EVT_CLOSE, self.OnExit)
panel = wx.Panel(self)
notebook = AppMain(panel)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(notebook, 1, wx.ALL | wx.EXPAND, 5)
panel.SetSizer(sizer)
self.Layout()
self.Center(wx.BOTH)
self.Show()
def OnExit(self, e):
dial = wx.MessageDialog(None, 'Are you sure you want to quit?', 'Question',
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
ret = dial.ShowModal()
if ret == wx.ID_YES:
self.Destroy()
else:
e.Veto()
# ----------------------------------------------------------------------
if __name__ == "__main__":
app = wx.App()
frame = AppFrame()
app.MainLoop()
|
import os
import zipfile
path = 'horse-or-human'
local_zip = './data/horse-or-human.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('./data/horse-or-human')
zip_ref.close()
local_zip = './data/validation-horse-or-human.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('./data/validation-horse-or-human')
zip_ref.close()
# Directory with our training horse pictures
train_horse_dir = os.path.join('./data/horse-or-human/horses')
# Directory with our training human pictures
train_human_dir = os.path.join('./data/horse-or-human/humans')
# Directory with our validation horse pictures
validation_horse_dir = os.path.join('./data/validation-horse-or-human/validation-horses')
# Directory with our validation human pictures
validation_human_dir = os.path.join('./data/validation-horse-or-human/validation-humans')
import tensorflow as tf
model = tf.keras.models.Sequential([
# Note the input shape is the desired size of the image 150x150 with 3 bytes color
# This is the first convolution
tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150, 150, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
# The second convolution
tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# The third convolution
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# The fourth convolution
#tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
#tf.keras.layers.MaxPooling2D(2,2),
# The fifth convolution
#tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
#tf.keras.layers.MaxPooling2D(2,2),
# Flatten the results to feed into a DNN
tf.keras.layers.Flatten(),
# 512 neuron hidden layer
tf.keras.layers.Dense(512, activation='relu'),
# Only 1 output neuron. It will contain a value from 0-1 where 0 corresponds to one class ('horses') and 1 to the other ('humans')
tf.keras.layers.Dense(1, activation='sigmoid')
])
from tensorflow.keras.optimizers import RMSprop
model.compile(
optimizer=RMSprop(lr=0.01),
loss='binary_crossentropy',
metrics=['acc'],
)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1/255.0)
validation_datagen = ImageDataGenerator(rescale=1/255.0)
train_generator = train_datagen.flow_from_directory(
directory='./data/horse-or-human',
target_size=(150,150),
batch_size=128,
class_mode='binary',
)
validation_generator = validation_datagen.flow_from_directory(
directory='./data/validation-horse-or-human',
target_size=(150, 150),
batch_size=32,
class_mode='binary',
)
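# Note on the step counts below: steps_per_epoch * batch_size should roughly cover the
# training set (8 * 128 = 1024 images per epoch here) and validation_steps * batch_size
# (8 * 32 = 256) the validation set; adjust these if the directories hold a different
# number of images.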
model.fit_generator(
generator=train_generator,
steps_per_epoch=8,
epochs=15,
verbose=1,
validation_data=validation_generator,
validation_steps=8,
)
import numpy as np
import random
from tensorflow.keras.preprocessing.image import img_to_array, load_img
import matplotlib.pyplot as plt
train_horse_names = os.listdir('./data/horse-or-human/horses')
train_human_names = os.listdir('./data/horse-or-human/humans')
# Let's define a new Model that will take an image as input, and will output
# intermediate representations for all layers in the previous model after
# the first.
successive_outputs = [layer.output for layer in model.layers[1:]]
#visualization_model = Model(img_input, successive_outputs)
visualization_model = tf.keras.models.Model(inputs = model.input, outputs = successive_outputs)
# Let's prepare a random input image from the training set.
horse_img_files = [os.path.join(train_horse_dir, f) for f in train_horse_names]
human_img_files = [os.path.join(train_human_dir, f) for f in train_human_names]
img_path = random.choice(horse_img_files + human_img_files)
img = load_img(img_path, target_size=(150, 150))  # this is a PIL image, resized to match the model's 150x150 input
x = img_to_array(img)  # Numpy array with shape (150, 150, 3)
x = x.reshape((1,) + x.shape)  # Numpy array with shape (1, 150, 150, 3)
# Rescale by 1/255
x /= 255
# Let's run our image through our network, thus obtaining all
# intermediate representations for this image.
successive_feature_maps = visualization_model.predict(x)
# These are the names of the layers, so we can include them in our plots.
# Skip the first layer so the names line up with successive_outputs above.
layer_names = [layer.name for layer in model.layers[1:]]
# Now let's display our representations
for layer_name, feature_map in zip(layer_names, successive_feature_maps):
if len(feature_map.shape) == 4:
# Just do this for the conv / maxpool layers, not the fully-connected layers
n_features = feature_map.shape[-1] # number of features in feature map
# The feature map has shape (1, size, size, n_features)
size = feature_map.shape[1]
# We will tile our images in this matrix
display_grid = np.zeros((size, size * n_features))
for i in range(n_features):
# Postprocess the feature to make it visually palatable
x = feature_map[0, :, :, i]
x -= x.mean()
            x /= (x.std() + 1e-8)  # avoid division by zero when a feature map is all zeros
x *= 64
x += 128
x = np.clip(x, 0, 255).astype('uint8')
# We'll tile each filter into this big horizontal grid
display_grid[:, i * size : (i + 1) * size] = x
# Display the grid
scale = 20. / n_features
plt.figure(figsize=(scale * n_features, scale))
plt.title(layer_name)
plt.grid(False)
plt.imshow(display_grid, aspect='auto', cmap='viridis')
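# When this runs as a plain script (rather than in a notebook, where figures
# render inline), an explicit call is needed to display the feature-map grids;
# it is harmless in a notebook as well.
plt.show()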
|
"""Tests the appimage activity."""
from rever import vcsutils
from rever.logger import current_logger
from rever.main import env_main
from pathlib import Path
REVER_XSH = """
$ACTIVITIES = ['appimage']
"""
SETUP_FILE = """
import setuptools
setuptools.setup(
name="rever-activity-appimage-test",
version="42.1.1",
description="Rever appimage activity test",
url="https://github.com/regrp/rever",
python_requires='>=3.6',
install_requires=[],
package_data={'dir': ['*.py']},
packages=setuptools.find_packages(),
author="anki-code",
author_email="anki-code@example.com"
)
"""
APPIMAGE_ENTRYPOINT_FILE = """
#! /bin/bash
echo "Hello"
"""
APPIMAGE_PRE_REQUIREMENTS_FILE = ""
APPIMAGE_APPDATA_FILE = """
<?xml version="1.0" encoding="UTF-8"?>
<component type="desktop-application">
<id>xonsh</id>
<metadata_license>BSD</metadata_license>
<project_license>Python-2.0</project_license>
<name>Xonsh</name>
<summary>Xonsh on Python {{ python-fullversion }}</summary>
<description>
<p> Python {{ python-fullversion }} + Xonsh bundled in an AppImage.
</p>
</description>
<launchable type="desktop-id">xonsh.desktop</launchable>
<url type="homepage">http://xon.sh</url>
<provides>
<binary>python{{ python-version }}</binary>
</provides>
</component>
"""
APPIMAGE_DESKTOP_FILE = """
[Desktop Entry]
Type=Application
Name=xonsh
Exec=xonsh
Comment=Xonsh on Python {{ python-fullversion }}
Icon=python
Categories=System;
Terminal=true
"""
def test_appimage(gitrepo):
Path('appimage').mkdir(exist_ok=True)
files = [('rever.xsh', REVER_XSH), ('setup.py', SETUP_FILE),
('appimage/entrypoint.sh', APPIMAGE_ENTRYPOINT_FILE),
('appimage/pre-requirements.txt', APPIMAGE_PRE_REQUIREMENTS_FILE),
('appimage/xonsh.appdata.xml', APPIMAGE_APPDATA_FILE),
('appimage/xonsh.desktop', APPIMAGE_DESKTOP_FILE)]
for filename, body in files:
with open(filename, 'w') as f:
f.write(body)
vcsutils.track('.')
vcsutils.commit('Some versioned files')
env_main(['42.1.1'])
    assert Path('xonsh-x86_64.AppImage').exists()
|
#!/usr/bin/python3
def main():
print(LatticePath(20,20))
def LatticePath(m, n, memo={}):
    """Count the monotone (right/up) lattice paths from (0, 0) to (m, n).

    The shared mutable default dict acts as a memoization cache that persists
    across calls, so each (m, n) pair is computed only once.
    """
    if m == 0 or n == 0:
        return 1
    if (m, n) in memo:
        return memo[(m, n)]
    result = LatticePath(m - 1, n, memo) + LatticePath(m, n - 1, memo)
    memo[(m, n)] = result
    return result
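# A quick cross-check, added as an aside (the helper name below is new and the
# math.comb call assumes Python 3.8+): the number of monotone lattice paths on
# an m x n grid has the closed form C(m + n, n), so LatticePath(20, 20) should
# equal comb(40, 20) = 137846528820.
from math import comb

def lattice_path_closed_form(m, n):
    """Closed-form count of right/up lattice paths from (0, 0) to (m, n)."""
    return comb(m + n, n)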
if __name__ == "__main__": main()
|