#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by Jiawei Liu on 2018/06/18
import os
import logging
import random
import argparse
import numpy as np
import pandas as pd
import tensorflow as tf
import xgboost as xgb
import csv
from bert import optimization
from qwk import *
from util import *
Logger = logging.getLogger(__name__)
logging.basicConfig(format="%(asctime)s %(name)s %(levelname)s %(message)s", level=logging.INFO)
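# The wildcard import from util is expected to provide (among others) train_conf, sys_conf,
# dataset_score_range, read_asap_dataset, read_adv_sample, input_fn_from_tfrecord,
# serving_input_receiver_fn, create_rnn_cell and create_initializer. A rough, illustrative
# sketch of the configuration keys this script actually reads (the values below are made up;
# only the key names are taken from the code):
#
#     train_conf = {
#         "random_seed": 42, "prompt_id": 1, "train_set_prob": 0.8,
#         "train_batch_size": 8, "predict_batch_size": 8, "num_train_epochs": 10,
#         "learning_rate": 1e-4, "warmup_proportion": 0.1, "save_checkpoints_step": 100,
#         "do_train": True, "do_predict": True,
#     }
#     sys_conf = {"data_dir": "...", "bsp_output_dir": "...", "csp_output_dir": "...",
#                 "psp_output_dir": "...", "osp_output_dir": "..."}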
class BasicScorePredictor:
""" essay的semantic方面的评分,也称基础得分,后续的overall score是在其基础上进一步得到的
Attributes:
bert_emb_dim: 因为输入的句子是用bert进行encoded, 该属性记录了所用bert模型的输出维度
dropout_prob: dropout的概率
lstm_layers_num: lstm的层数
lstm_hidden_size: lstm的隐层单元的宽度
fnn_hidden_size: 输出接的fnn的每个隐层的宽度,
bidirectional: 标注是否使用双向模型
tdnn_step: 如果使用tdnn的话,tdnn跳跃的步长
"""
def __init__(self):
self.bert_emb_dim = 768
self.dropout_prob = 0.3
self.lstm_layers_num = 1
self.lstm_hidden_size = 1024
self.fnn_hidden_size = []
self.bidirectional = False
self.tdnn_step = 4
@staticmethod
def generate_asap_train_and_test_set(generate_type="shuffle_prompt"):
""" 生成asap的训练数据集合
Args:
generate_type: 以何种方式产生训练集和测试集,
shuffle_prompt: 表示从某个prompt下,选取一定的数据训练,
other_prompt: 表示利用所有别的prompt下的样本对该样本进行训练,一般不推荐此种方式,效果最差
shuffle_all: 表示shuffle所有的样本,然后抽样,为缺省方式
Returns:
articles_id: 文章的id
articles_set: 文章的所属的prompt的id集合
handmark_scores: 手工标注的分数
correspond_train_id_set: 分割的训练集id
correspond_test_id_set: 分割的测试集id
"""
articles_id, articles_set, set_ids, handmark_scores = read_asap_dataset()
np.random.seed(train_conf["random_seed"])
if generate_type == "shuffle_prompt":
# Within each set, 80% is used for training and 20% for testing
permutation_ids = np.random.permutation(set_ids[train_conf["prompt_id"]])
correspond_train_id_set = permutation_ids[
0:int(len(permutation_ids) * train_conf["train_set_prob"])]
print("train set", len(correspond_train_id_set))
correspond_test_id_set = permutation_ids[
int(len(permutation_ids) * train_conf["train_set_prob"]):]
print("test set", len(correspond_test_id_set))
elif generate_type == "other_prompt":
# For each set, train on the data from all other sets
correspond_test_id_set = set_ids[train_conf["prompt_id"]]
correspond_train_id_set = []
for i in range(1, 9):
if i == train_conf["prompt_id"]:
continue
else:
correspond_train_id_set.extend(set_ids[i])
elif generate_type == "shuffle_all":
# Shuffle the data of all sets together; 80% for training, the rest for testing
permutation_ids = np.random.permutation(articles_id)
correspond_test_id_set = permutation_ids[int(len(articles_id) * train_conf["train_set_prob"]):]
correspond_train_id_set = permutation_ids[:int(len(articles_id) * train_conf["train_set_prob"])]
else:
raise ValueError("generate_type must be choose in ('shuffle_prompt','other_prompt','shuffle_all')")
return articles_id, articles_set, handmark_scores, correspond_train_id_set, correspond_test_id_set
def build_graph(self, batch_doc_encodes,
batch_doc_sent_nums,
batch_article_set,
batch_domain1_score,
batch_size,
is_training):
""" 建立模型的图
Args:
batch_doc_encodes: 一个batch的文章的bert encoding的结果
batch_doc_sent_nums: 记录一个batch内每个doc的句数
batch_article_set: 记录一个batch内每个doc所属的类别,按prompt分
batch_domain1_score: 记录一个batch内每个doc的人工评分
batch_size: 将batch的大小放在图中,
is_training: 标致是否为train的状态
Returns: loss 和 logits
"""
def normalize_value(score, min_value, max_value):
result = tf.div(tf.subtract(score, min_value), tf.to_float(tf.subtract(max_value, min_value)))
return result
batch_index_o = tf.constant(0)
standard_batch_domain_score_o = tf.convert_to_tensor([])
def cond(batch_index, normalized_score):
return tf.less(batch_index, batch_size)
def body(batch_index, normalized_score):
min_value = tf.convert_to_tensor([-1, 2, 1, 0, 0, 0, 0, 0, 0, 0], tf.float32)[
batch_article_set[batch_index]]
max_value = tf.convert_to_tensor([-1, 12, 6, 3, 3, 4, 4, 30, 60, 9], tf.float32)[
batch_article_set[batch_index]]
temp_score = batch_domain1_score[batch_index]
temp_score = normalize_value(tf.to_float(temp_score), min_value, max_value)
normalized_score = tf.concat([normalized_score, [temp_score]], axis=0)
return tf.add(batch_index, 1), normalized_score
_, standard_batch_domain_score = tf.while_loop(cond,
body,
[batch_index_o, standard_batch_domain_score_o],
shape_invariants=[batch_index_o.get_shape(),
tf.TensorShape([None])])
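# Worked example of the normalization above: the per-prompt score ranges are indexed by
# prompt id (index 0 is a dummy), so an essay from prompt 1 with a domain1 score of 8
# becomes (8 - 2) / (12 - 2) = 0.6, putting every prompt's scores on a common 0-1 scale.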
if self.bidirectional:
fw_cell, bw_cell = create_rnn_cell(self.lstm_hidden_size,
self.dropout_prob,
self.lstm_layers_num,
self.bidirectional,
is_training)
(output_fw, output_bw), states = tf.nn.bidirectional_dynamic_rnn(cell_fw=fw_cell,
cell_bw=bw_cell,
inputs=batch_doc_encodes,
sequence_length=batch_doc_sent_nums,
dtype=tf.float32)
output = tf.concat([output_fw, output_bw], axis=2)
# padding for output is 0, hence can mean directly
mean_time_output = tf.reduce_mean(output, axis=1)
w = tf.get_variable(shape=[self.lstm_hidden_size * 2, 1],
initializer=create_initializer(),
name="weight",
dtype=tf.float32)
b = tf.get_variable(initializer=tf.zeros_initializer(),
shape=[1],
name="bias",
dtype=tf.float32)
logit = tf.squeeze(tf.sigmoid(tf.matmul(mean_time_output, w) + b))
loss = tf.losses.mean_squared_error(labels=standard_batch_domain_score, predictions=logit)
else:
fw_cell = create_rnn_cell(self.lstm_hidden_size,
self.dropout_prob,
self.lstm_layers_num,
self.bidirectional,
is_training)
output, states = tf.nn.dynamic_rnn(cell=fw_cell,
inputs=batch_doc_encodes,
sequence_length=batch_doc_sent_nums,
dtype=tf.float32)
# Optional TDNN on top of the LSTM outputs; kept for experiments, currently disabled
if False:
with tf.variable_scope("tdnn"):
tdnn_fw_cell = create_rnn_cell(self.lstm_hidden_size,
self.dropout_prob,
self.lstm_layers_num,
self.bidirectional,
is_training)
tdnn_input_slice = tf.range(0, tf.reduce_max(batch_doc_sent_nums), self.tdnn_step)
tdnn_input = tf.gather(output, tdnn_input_slice, axis=1)
batch_index_o = tf.constant(0)
actual_length_o = tf.convert_to_tensor([])
def cond(batch_index, actual_length):
return tf.less(batch_index, batch_size)
def body(batch_index, actual_length):
temp_slice = tf.range(0, batch_doc_sent_nums[batch_index], self.tdnn_step)
actual_length = tf.concat([actual_length, [tf.shape(temp_slice)[0]]], axis=0)
return tf.add(batch_index, 1), actual_length
_, actual_length = tf.while_loop(cond,
body,
[batch_index_o, actual_length_o],
shape_invariants=[batch_index_o.get_shape(),
tf.TensorShape([None])])
output, states = tf.nn.dynamic_rnn(cell=tdnn_fw_cell,
inputs=tdnn_input,
sequence_length=actual_length,
dtype=tf.float32)
# Because the cell has no num_proj, the hidden state is not projected and output equals states;
# output is zero-padded while states is filled with the last state. Both the mean of the hidden
# states and the last hidden state were tried here.
# mean_time_output = tf.reduce_mean(output, axis=1)
# last_time_hidden = states
# mean_time_output = tf.reduce_mean(batch_doc_encodes, axis=1)
mean_time_output = states[0].h
for hs in self.fnn_hidden_size:
if is_training:
mean_time_output = tf.nn.dropout(mean_time_output, keep_prob=1 - self.dropout_prob)
mean_time_output = tf.layers.dense(mean_time_output,
hs,
activation=tf.nn.relu,
kernel_initializer=create_initializer())
if self.fnn_hidden_size:
x_dim = self.fnn_hidden_size[-1]
else:
x_dim = self.lstm_hidden_size
w = tf.get_variable(shape=[x_dim, 1],
initializer=create_initializer(),
name="weight",
dtype=tf.float32)
b = tf.get_variable(initializer=tf.zeros_initializer(),
shape=[1],
name="bias",
dtype=tf.float32)
if is_training:
mean_time_output = tf.nn.dropout(mean_time_output, keep_prob=1 - self.dropout_prob)
logit = tf.squeeze(tf.sigmoid(tf.matmul(mean_time_output, w) + b))
loss = tf.losses.mean_squared_error(labels=standard_batch_domain_score, predictions=logit)
return loss, logit
def model_fn_builder(self, learning_rate, num_train_step, num_warmup_steps):
"""
Args:
learning_rate: 学习速率
num_train_step: 学习步数
num_warmup_steps: 预热的步数
Returns: 函数model_fn的函数句柄
"""
def model_fn(features, labels, mode, params):
batch_doc_encodes = tf.identity(features["doc_encodes"]) # shape = [batch_size, None, bert_dim]
batch_article_set = tf.identity(features["article_set"]) # shape = [batch_size]
batch_doc_sent_nums = tf.identity(features["doc_sent_num"]) # shape = [batch_size]
batch_domain1_score = tf.identity(features["domain1_score"]) # shape = [batch_size]
batch_doc_id = tf.identity(features["article_id"]) # shape = [batch_size]
batch_size = tf.shape(batch_doc_sent_nums)[0]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
loss, logit = self.build_graph(batch_doc_encodes,
batch_doc_sent_nums,
batch_article_set,
batch_domain1_score,
batch_size,
is_training)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(loss=loss,
init_lr=learning_rate,
num_train_steps=num_train_step,
num_warmup_steps=num_warmup_steps,
use_tpu=False)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op
)
elif mode == tf.estimator.ModeKeys.EVAL:
# predictions = logit
# accuracy = tf.metrics.accuracy(fnn_labels, predictions)
# eval_metrics = {
# "eval_accuracy": accuracy
# }
# tf.summary.scalar("eval_accuracy", accuracy)
# output_spec = tf.estimator.EstimatorSpec(
# mode=mode,
# loss=loss,
# eval_metric_ops=eval_metrics
# )
pass
else:
predictions = {
"batch_scores": logit,
"batch_doc_id": batch_doc_id,
"batch_article_set": batch_article_set
}
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
"predictions": tf.estimator.export.PredictOutput(predictions)
}
)
return output_spec
return model_fn
@staticmethod
def eval_metric(result, handmark_scores, articles_set, articles_id):
""" 评价函数
Args:
result: predictor 推理出的结果
handmark_scores: 手工标注的结果,会被约束到0-10的范围
articles_set: 文章所属的prompt_id的集合,list类似
articles_id: 文章自己的id.
Returns: qwk的指标
"""
predict_scores = {}
print("RESULT")
for item in result:
print("ITEM", item)
min_value = 1
max_value = 10
normalize_score = item["batch_scores"]
# print(normalize_score)
overall_score = round(normalize_score * (max_value - min_value) + min_value)
predict_scores[item["batch_doc_id"]] = overall_score
test_handmark_scores = []
test_predict_scores = []
for key, value in predict_scores.items():
article_set_id = articles_set[articles_id.index(key)]
min_value = dataset_score_range[article_set_id][0]
max_value = dataset_score_range[article_set_id][1]
hs = handmark_scores[key]
temp_hs = round(((hs - min_value) / (max_value - min_value)) * 10)
test_predict_scores.append(value)
test_handmark_scores.append(temp_hs)
# print("id: {}, predict: {}, handmark: {}".format(key, value, handmark_scores[key]))
test_handmark_scores = np.asarray(test_handmark_scores, dtype=np.int32)
test_predict_scores = np.asarray(test_predict_scores, dtype=np.int32)
print(test_predict_scores)
print(test_handmark_scores)
qwk = quadratic_weighted_kappa(test_predict_scores, test_handmark_scores)
print("##############qwk value is {}".format(qwk))
class CoherenceScore:
""" essay的coherence方面的评分,也称连贯性得分,后续的overall score是在其基础上进一步得到的
Attributes:
bert_emb_dim: 因为输入的句子是用bert进行encoded, 该属性记录了所用bert模型的输出维度
dropout_prob: dropout的概率
lstm_layers_num: lstm的层数
lstm_hidden_size: lstm的隐层单元的宽度
fnn_hidden_size: 输出接的fnn的每个隐层的宽度,
bidirectional: 标注是否使用双向模型
"""
def __init__(self):
self.bert_emb_dim = 768
self.dropout_prob = 0.5
self.lstm_hidden_size = 1024
self.lstm_layers_num = 1
self.fnn_hidden_size = []
self.bidirectional = False
@staticmethod
def generate_asap_train_and_test_set(generate_type="shuffle_prompt"):
""" 生成asap的训练数据集合
Args:
generate_type: 以何种方式产生训练集和测试集,
shuffle_prompt: 表示从某个prompt下,选取一定的数据训练,
other_prompt: 表示利用所有别的prompt下的样本对该样本进行训练,一般不推荐此种方式,效果最差
shuffle_all: 表示shuffle所有的样本,然后抽样,为缺省方式
Returns:
articles_id: 文章的id
articles_set: 文章的所属的prompt的id集合
handmark_scores: 手工标注的分数
correspond_train_id_set: 分割的训练集id
correspond_test_id_set: 分割的测试集id
"""
articles_id, articles_set, set_ids, handmark_scores = read_asap_dataset()
np.random.seed(train_conf["random_seed"])
if generate_type == "shuffle_prompt":
# Within each set, 80% is used for training and 20% for testing
permutation_ids = np.random.permutation(set_ids[train_conf["prompt_id"]])
correspond_train_id_set = permutation_ids[
0:int(len(permutation_ids) * train_conf["train_set_prob"])]
correspond_test_id_set = permutation_ids[
int(len(permutation_ids) * train_conf["train_set_prob"]):]
elif generate_type == "other_prompt":
# For each set, train on the data from all other sets
correspond_test_id_set = set_ids[train_conf["prompt_id"]]
correspond_train_id_set = []
for i in range(1, 9):
if i == train_conf["prompt_id"]:
continue
else:
correspond_train_id_set.extend(set_ids[i])
elif generate_type == "shuffle_all":
# Shuffle the data of all sets together; 80% for training, the rest for testing
permutation_ids = np.random.permutation(articles_id)
correspond_test_id_set = permutation_ids[int(len(articles_id) * train_conf["train_set_prob"]):]
correspond_train_id_set = permutation_ids[:int(len(articles_id) * train_conf["train_set_prob"])]
# print("DONE: ", len(permutation_ids), len(correspond_test_id_set), len(correspond_train_id_set))
else:
raise ValueError("generate_type must be choose in ('shuffle_prompt','other_prompt','shuffle_all')")
correspond_train_id_set = list(correspond_train_id_set)
correspond_test_id_set = list(correspond_test_id_set)
print("DONE ", len(correspond_train_id_set))
print("DONE ", len(correspond_test_id_set))
# print(item)
negative_train_id_permuted = [item + 100000 for item in correspond_train_id_set]
negative_test_id_permuted = [item + 100000 for item in correspond_test_id_set]
correspond_train_id_set.extend(negative_train_id_permuted)
correspond_test_id_set.extend(negative_test_id_permuted)
return articles_id, articles_set, handmark_scores, correspond_train_id_set, correspond_test_id_set
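# Note on the +100000 offset above: each offset id refers to the sentence-permuted
# (incoherent) copy of the corresponding essay, presumably stored in the tfrecord under
# that offset id; build_graph and eval_metric below use the 100000 threshold to assign
# those copies the minimum (zero after normalization) coherence label.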
def build_graph(self,
batch_doc_encodes,
batch_doc_sent_nums,
batch_article_set,
batch_domain1_score,
batch_doc_id,
batch_size,
is_training):
""" 建立模型的图
Args:
batch_doc_encodes: 一个batch的文章的bert encoding的结果
batch_doc_sent_nums: 记录一个batch内每个doc的句数
batch_article_set: 记录一个batch内每个doc所属的类别,按prompt分
batch_domain1_score: 记录一个batch内每个doc的人工评分
batch_doc_id: 记录一个batch内每个doc的id
batch_size: 将batch的大小放在图中,
is_training: 标致是否为train的状态
Returns: loss 和 logits
"""
def normalize_value(score, min_value, max_value):
result = tf.div(tf.subtract(score, min_value), tf.to_float(tf.subtract(max_value, min_value)))
return result
batch_index_o = tf.constant(0)
standard_batch_domain_score_o = tf.convert_to_tensor([])
def cond(batch_index, normalized_score):
return tf.less(batch_index, batch_size)
def body(batch_index, normalized_score):
min_value = tf.convert_to_tensor([-1, 2, 1, 0, 0, 0, 0, 0, 0, 0], tf.float32)[batch_article_set[batch_index]]
max_value = tf.convert_to_tensor([-1, 12, 6, 3, 3, 4, 4, 30, 60, 9], tf.float32)[
batch_article_set[batch_index]]
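# Permuted (incoherent) essays carry ids offset by +100000; for them the target falls back
# to min_value, i.e. a normalized coherence label of 0.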
temp_score = tf.cond(tf.greater(100000, batch_doc_id[batch_index]),
lambda: batch_domain1_score[batch_index],
lambda: min_value)
temp_score = normalize_value(tf.to_float(temp_score), min_value, max_value)
normalized_score = tf.concat([normalized_score, [temp_score]], axis=0)
return tf.add(batch_index, 1), normalized_score
_, standard_batch_domain_score = tf.while_loop(cond,
body,
[batch_index_o,
standard_batch_domain_score_o],
shape_invariants=[batch_index_o.get_shape(),
tf.TensorShape([None])])
fw_cell = create_rnn_cell(self.lstm_hidden_size,
self.dropout_prob,
self.lstm_layers_num,
self.bidirectional,
is_training)
output, states = tf.nn.dynamic_rnn(cell=fw_cell,
inputs=batch_doc_encodes,
sequence_length=batch_doc_sent_nums,
dtype=tf.float32)
last_state = states[0].h
for hs in self.fnn_hidden_size:
if is_training:
last_state = tf.nn.dropout(last_state, keep_prob=1 - self.dropout_prob)
last_state = tf.layers.dense(last_state,
hs,
activation=tf.nn.relu,
kernel_initializer=create_initializer())
if self.fnn_hidden_size:
x_dim = self.fnn_hidden_size[-1]
else:
x_dim = self.lstm_hidden_size
w = tf.get_variable(shape=[x_dim, 1],
initializer=create_initializer(),
name="weight",
dtype=tf.float32)
b = tf.get_variable(initializer=tf.zeros_initializer(),
shape=[1],
name="bias",
dtype=tf.float32)
if is_training:
last_state = tf.nn.dropout(last_state, keep_prob=1 - self.dropout_prob)
logit = tf.squeeze(tf.sigmoid(tf.matmul(last_state, w) + b))
loss = tf.losses.mean_squared_error(labels=standard_batch_domain_score, predictions=logit)
return loss, logit
def model_fn_builder(self, learning_rate, num_train_step, num_warmup_steps):
"""
Args:
learning_rate: 学习速率
num_train_step: 学习步数
num_warmup_steps: 预热的步数
Returns: 函数model_fn的函数句柄
"""
def model_fn(features, labels, mode, params):
batch_doc_encodes = tf.identity(features["doc_encodes"]) # shape = [batch_size, None, bert_dim]
batch_article_set = tf.cast(tf.identity(features["article_set"]), tf.int32) # shape = [batch_size]
batch_doc_sent_nums = tf.cast(tf.identity(features["doc_sent_num"]), tf.int32) # shape = [batch_size]
batch_domain1_score = tf.identity(features["domain1_score"]) # shape = [batch_size]
batch_doc_id = tf.cast(tf.identity(features["article_id"]), tf.int32) # shape = [batch_size]
batch_size = tf.shape(batch_doc_sent_nums)[0]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
loss, logit = self.build_graph(batch_doc_encodes,
batch_doc_sent_nums,
batch_article_set,
batch_domain1_score,
batch_doc_id,
batch_size,
is_training)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(loss=loss,
init_lr=learning_rate,
num_train_steps=num_train_step,
num_warmup_steps=num_warmup_steps,
use_tpu=False)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op
)
elif mode == tf.estimator.ModeKeys.EVAL:
# predictions = logit
# accuracy = tf.metrics.accuracy(fnn_labels, predictions)
# eval_metrics = {
# "eval_accuracy": accuracy
# }
# tf.summary.scalar("eval_accuracy", accuracy)
# output_spec = tf.estimator.EstimatorSpec(
# mode=mode,
# loss=loss,
# eval_metric_ops=eval_metrics
# )
pass
else:
predictions = {
"batch_scores": logit,
"batch_doc_id": batch_doc_id,
"batch_article_set": batch_article_set
}
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
"predictions": tf.estimator.export.PredictOutput(predictions)
}
)
return output_spec
return model_fn
@staticmethod
def eval_metric(result, handmark_scores, articles_set, articles_id):
"""
Args:
result:
handmark_scores:
articles_set:
articles_id:
Returns:
"""
predict_scores = {}
for item in result:
min_value = 1
max_value = 10
normalize_score = item["batch_scores"]
overall_score = round(normalize_score * (max_value - min_value) + min_value)
predict_scores[item["batch_doc_id"]] = overall_score
for key, value in predict_scores.items():
temp_key = key
if key > 100000:
temp_key -= 100000
article_set_id = articles_set[articles_id.index(temp_key)]
min_value = dataset_score_range[article_set_id][0]
max_value = dataset_score_range[article_set_id][1]
if key > 100000:
hs = min_value
else:
hs = handmark_scores[key]
temp_hs = round(((hs - min_value) / (max_value - min_value)) * 10)
print("id:{}, predict:{}, handmark:{}".format(key, value, temp_hs))
class PromptRelevantScore:
""" essay的coherence方面的评分,也称连贯性得分,后续的overall score是在其基础上进一步得到的
Attributes:
bert_emb_dim: 因为输入的句子是用bert进行encoded, 该属性记录了所用bert模型的输出维度
dropout_prob: dropout的概率
prompts_embedding: 用于训练的所有的题目的embedding, 也是使用bert进行encode
lstm_layers_num: lstm的层数
lstm_hidden_size: lstm的隐层单元的宽度
fnn_hidden_size: 输出接的fnn的每个隐层的宽度,
bidirectional: 标注是否使用双向模型
"""
def __init__(self):
self.bert_emb_dim = 768
self.dropout_prob = 0.5
self.lstm_hidden_size = 1024
self.lstm_layers_num = 1
self.fnn_hidden_size = []
self.bidirectional = False
def generate_asap_train_and_test_set(self, generate_type="shuffle_prompt"):
""" 生成asap的训练数据集合
Args:
generate_type: 以何种方式产生训练集和测试集,
shuffle_prompt: 表示从某个prompt下,选取一定的数据训练,
other_prompt: 表示利用所有别的prompt下的样本对该样本进行训练,一般不推荐此种方式,效果最差
shuffle_all: 表示shuffle所有的样本,然后抽样,为缺省方式
Returns:
articles_id: 文章的id
articles_set: 文章的所属的prompt的id集合
handmark_scores: 手工标注的分数
correspond_train_id_set: 分割的训练集id
correspond_test_id_set: 分割的测试集id
"""
articles_id, articles_set, set_ids, handmark_scores = read_asap_dataset()
np.random.seed(train_conf["random_seed"])
permutation_ids = np.random.permutation(set_ids[train_conf["prompt_id"]])
correspond_train_id_set = permutation_ids[
0:int(len(permutation_ids) * train_conf["train_set_prob"])]
correspond_test_id_set = permutation_ids[
int(len(permutation_ids) * train_conf["train_set_prob"]):]
correspond_train_id_set = list(correspond_train_id_set)
correspond_test_id_set = list(correspond_test_id_set)
other_ids = []
for i in range(1, 9):
if i == train_conf["prompt_id"]:
continue
else:
other_ids.extend(set_ids[i])
self.negative_samples = random.sample(other_ids, len(set_ids[train_conf["prompt_id"]]))
negative_samples_train = self.negative_samples[
0:int(len(permutation_ids) * train_conf["train_set_prob"])]
negative_samples_test = self.negative_samples[
int(len(permutation_ids) * train_conf["train_set_prob"]):]
correspond_train_id_set.extend(negative_samples_train)
correspond_test_id_set.extend(negative_samples_test)
return articles_id, articles_set, handmark_scores, correspond_train_id_set, correspond_test_id_set
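# The negative samples here are genuine essays drawn from the other prompts; build_graph
# below labels any essay whose prompt id differs from train_conf["prompt_id"] with the
# minimum score, so off-prompt essays get a prompt-relevance target of 0.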
def build_graph(self,
batch_doc_encodes,
batch_doc_sent_nums,
batch_article_set,
batch_domain1_score,
batch_size,
prompt_encodes,
is_training):
""" 建立模型的图
Args:
batch_doc_encodes: 一个batch的文章的bert encoding的结果
batch_doc_sent_nums: 记录一个batch内每个doc的句数
batch_article_set: 记录一个batch内每个doc所属的类别,按prompt分
batch_domain1_score: 记录一个batch内每个doc的人工评分
batch_size: 将batch的大小放在图中,
prompt_encodes: 题目的标签
is_training: 标致是否为train的状态
Returns: loss 和 logits
"""
def normalize_value(score, min_value, max_value):
result = tf.div(tf.subtract(score, min_value), tf.to_float(tf.subtract(max_value, min_value)))
return result
prompt_encodes = prompt_encodes[0]  # prompt_encodes is identical for every sample in a batch, so the 0th one is enough
p_shape = tf.shape(prompt_encodes)
prompt_encodes = tf.convert_to_tensor(prompt_encodes)
batch_index_o = tf.constant(0)
standard_batch_domain_score_o = tf.convert_to_tensor([])
batch_prompt_doc_encodes_o = tf.zeros([1, self.bert_emb_dim], dtype=tf.float32)
def cond(batch_index, normalized_score, batch_prompt_doc_encodes):
return tf.less(batch_index, batch_size)
def body(batch_index, normalized_score, batch_prompt_doc_encodes):
min_value = tf.convert_to_tensor([-1, 2, 1, 0, 0, 0, 0, 0, 0, 0], tf.float32)[batch_article_set[batch_index]]
max_value = tf.convert_to_tensor([-1, 12, 6, 3, 3, 4, 4, 30, 60, 9], tf.float32)[
batch_article_set[batch_index]]
temp_score = tf.cond(tf.equal(train_conf["prompt_id"], batch_article_set[batch_index]),
lambda: batch_domain1_score[batch_index],
lambda: min_value)
temp_score = normalize_value(tf.to_float(temp_score), min_value, max_value)
normalized_score = tf.concat([normalized_score, [temp_score]], axis=0)
temp_encodes = tf.concat([prompt_encodes, batch_doc_encodes[batch_index]], 0)
batch_prompt_doc_encodes = tf.concat([batch_prompt_doc_encodes, temp_encodes], 0)
return tf.add(batch_index, 1), normalized_score, batch_prompt_doc_encodes
_, standard_batch_domain_score, batch_prompt_doc_encodes = tf.while_loop(cond,
body,
[batch_index_o,
standard_batch_domain_score_o,
batch_prompt_doc_encodes_o],
shape_invariants=[
batch_index_o.get_shape(),
tf.TensorShape([None]),
tf.TensorShape(
[None, self.bert_emb_dim])])
batch_doc_sent_nums = tf.add(batch_doc_sent_nums, p_shape[0])
batch_prompt_doc_encodes = tf.reshape(batch_prompt_doc_encodes[1:], [batch_size, -1, self.bert_emb_dim])
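# At this point every sample is the prompt's sentence encodings followed by the essay's
# sentence encodings, and batch_doc_sent_nums has been extended by the prompt length, so the
# LSTM below reads "prompt + essay" as a single sequence.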
fw_cell = create_rnn_cell(self.lstm_hidden_size,
self.dropout_prob,
self.lstm_layers_num,
self.bidirectional,
is_training)
output, states = tf.nn.dynamic_rnn(cell=fw_cell,
inputs=batch_prompt_doc_encodes,
sequence_length=batch_doc_sent_nums,
dtype=tf.float32)
last_state = states[0].h
for hs in self.fnn_hidden_size:
if is_training:
last_state = tf.nn.dropout(last_state, keep_prob=1 - self.dropout_prob)
last_state = tf.layers.dense(last_state,
hs,
activation=tf.nn.relu,
kernel_initializer=create_initializer()
)
if self.fnn_hidden_size:
x_dim = self.fnn_hidden_size[-1]
else:
x_dim = self.lstm_hidden_size
w = tf.get_variable(shape=[x_dim, 1],
initializer=create_initializer(),
name="weight",
dtype=tf.float32)
b = tf.get_variable(initializer=tf.zeros_initializer(),
shape=[1],
name="bias",
dtype=tf.float32)
if is_training:
last_state = tf.nn.dropout(last_state, keep_prob=1 - self.dropout_prob)
logit = tf.squeeze(tf.sigmoid(tf.matmul(last_state, w) + b))
loss = tf.losses.mean_squared_error(labels=standard_batch_domain_score, predictions=logit)
return loss, logit
def model_fn_builder(self, learning_rate, num_train_step, num_warmup_steps):
"""
Args:
learning_rate: 学习速率
num_train_step: 学习步数
num_warmup_steps: 预热的步数
Returns: 函数model_fn的函数句柄
"""
def model_fn(features, labels, mode, params):
batch_doc_encodes = tf.identity(features["doc_encodes"]) # shape = [batch_size, None, bert_dim]
batch_article_set = tf.cast(tf.identity(features["article_set"]), tf.int32) # shape = [batch_size]
batch_doc_sent_nums = tf.cast(tf.identity(features["doc_sent_num"]), tf.int32) # shape = [batch_size]
batch_domain1_score = tf.identity(features["domain1_score"]) # shape = [batch_size]
batch_doc_id = tf.cast(tf.identity(features["article_id"]), tf.int32) # shape = [batch_size]
prompt_encodes = tf.identity(features["prompt_encodes"]) # shape = [batch_size, None, bert_dim]
batch_size = tf.shape(batch_doc_sent_nums)[0]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
loss, logit = self.build_graph(batch_doc_encodes,
batch_doc_sent_nums,
batch_article_set,
batch_domain1_score,
batch_size,
prompt_encodes,
is_training)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(loss=loss,
init_lr=learning_rate,
num_train_steps=num_train_step,
num_warmup_steps=num_warmup_steps,
use_tpu=False)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op
)
elif mode == tf.estimator.ModeKeys.EVAL:
# predictions = logit
# accuracy = tf.metrics.accuracy(fnn_labels, predictions)
# eval_metrics = {
# "eval_accuracy": accuracy
# }
# tf.summary.scalar("eval_accuracy", accuracy)
# output_spec = tf.estimator.EstimatorSpec(
# mode=mode,
# loss=loss,
# eval_metric_ops=eval_metrics
# )
pass
else:
predictions = {
"batch_scores": logit,
"batch_doc_id": batch_doc_id,
"batch_article_set": batch_article_set
}
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
"predictions": tf.estimator.export.PredictOutput(predictions)
}
)
return output_spec
return model_fn
def eval_metric(self, result, handmark_scores, articles_set, articles_id):
"""
Args:
result:
handmark_scores:
articles_set:
articles_id:
Returns:
"""
predict_scores = {}
for item in result:
min_value = 1
max_value = 10
normalize_score = item["batch_scores"]
overall_score = round(normalize_score * (max_value - min_value) + min_value)
predict_scores[item["batch_doc_id"]] = overall_score
for key, value in predict_scores.items():
article_set_id = articles_set[articles_id.index(key)]
min_value = dataset_score_range[article_set_id][0]
max_value = dataset_score_range[article_set_id][1]
# if key in self.negative_samples:
# hs = min_value
# else:
hs = handmark_scores[key]
temp_hs = round(((hs - min_value) / (max_value - min_value)) * 10)
print("id:{}, predict:{}, handmark:{}".format(key, value, temp_hs))
class scores:
def __init__(self,
bsp_estimator: tf.estimator.Estimator,
csp_estimator: tf.estimator.Estimator,
psp_estimator: tf.estimator.Estimator):
self.__bsp_estimator = bsp_estimator
self.__csp_estimator = csp_estimator
self.__psp_estimator = psp_estimator
def three_scores(self):
input_fn = input_fn_from_tfrecord(tfrecord_path=tfrecord_file_path,
batch_size=train_conf["predict_batch_size"],
is_training=False,
element_ids=articles_id)
bsp_result = self.__bsp_estimator.predict(input_fn)
psp_result = self.__psp_estimator.predict(input_fn)
csp_result = self.__csp_estimator.predict(input_fn)
basic_scores = {}
for item in bsp_result:
print("BS", item)
basic_scores[item["batch_doc_id"]] = item["batch_scores"]
promp_scores = {}
for item in psp_result:
promp_scores[item["batch_doc_id"]] = item["batch_scores"]
coher_scores = {}
for item in csp_result:
coher_scores[item["batch_doc_id"]] = item["batch_scores"]
return basic_scores, promp_scores, coher_scores
class OverallScorePredictor:
""" 融合deep semantic的特征和handcrafted的特征,所得到的最终的分数
Attributes:
__bsp_estimator: basic score 模型的estimator对象
__csp_estimator: coherence score 模型的estimator对象
__psp_estimator: prompt relevant score 模型的estimator对象
"""
def __init__(self,
bsp_estimator: tf.estimator.Estimator,
csp_estimator: tf.estimator.Estimator,
psp_estimator: tf.estimator.Estimator):
self.__bsp_estimator = bsp_estimator
self.__csp_estimator = csp_estimator
self.__psp_estimator = psp_estimator
def generate_adv_test(self):
articles_adv_id, set_adv_ids = read_adv_sample()
permutation_adv_ids = np.random.permutation(set_adv_ids[train_conf["prompt_id"]])
print(permutation_adv_ids)
correspond_adv_id_set = permutation_adv_ids
return articles_adv_id, set_adv_ids, correspond_adv_id_set
def generate_asap_train_and_test_set(self,
ps_generate_type="shuffle_prompt",
ns_generate_type="no_negative"):
""" 生成asap的训练数据集合
Args:
ns_generate_type: 以何种方式产生训练集和测试集的负样本,
no_negative: 表示没有负样本,此情况只适用于只考虑basic score 不考虑负样本的情况
prompt_irrelevant: 表示只产生与话题无关的负样本的id,
permuted: 表示只生产permuted essay 作为负样本,
both: 表示生产数量相等的prompt_irrelevant和permuted essay 作为负样本训练
ps_generate_type: 以何种方式产生训练集和测试集中的正样本,
shuffle_prompt: 表示从某个prompt下,选取一定的数据训练,
shuffle_all: 表示shuffle所有的样本,然后抽样,为缺省方式
Returns:
articles_id: 文章的id
articles_set: 文章的所属的prompt的id集合
handmark_scores: 手工标注的分数
correspond_train_id_set: 分割的训练集id
correspond_test_id_set: 分割的测试集id
"""
articles_id, articles_set, set_ids, handmark_scores = read_asap_dataset()
# print("handmark scores", handmark_scores)
# handmark_normalized_scores = {}
# for key, value in handmark_scores.items():
# article_set_id = articles_set[articles_id.index(key)]
# min_value = dataset_score_range[article_set_id][0]
# max_value = dataset_score_range[article_set_id][1]
# print(max_value, min_value)
# normalize_value = (value - min_value) / (max_value - min_value)
# handmark_normalized_scores[key] = normalize_value
# print(handmark_normalized_scores)
np.random.seed(train_conf["random_seed"])
self.__ns_from_other_prompt_train = []
self.__ns_from_other_prompt_test = []
self.__ns_from_permuted_train = []
self.__ns_from_permuted_test = []
if ps_generate_type == "shuffle_all":
# Shuffle the data of all sets together; 80% for training, the rest for testing
permutation_ids = np.random.permutation(articles_id)
correspond_test_id_set = permutation_ids[int(len(articles_id) * train_conf["train_set_prob"]):]
correspond_train_id_set = permutation_ids[:int(len(articles_id) * train_conf["train_set_prob"])]
elif ps_generate_type == "shuffle_prompt":
permutation_ids = np.random.permutation(set_ids[train_conf["prompt_id"]])
correspond_train_id_set = permutation_ids[
0:int(len(permutation_ids) * train_conf["train_set_prob"])]
correspond_test_id_set = permutation_ids[
int(len(permutation_ids) * train_conf["train_set_prob"]):]
else:
raise ValueError("generate_type must be choose in ('shuffle_prompt', 'shuffle_all')")
correspond_train_id_set = list(correspond_train_id_set)
correspond_test_id_set = list(correspond_test_id_set)
print("################")
print(correspond_test_id_set)
other_ids = []
for i in range(1, 9):
if i == train_conf["prompt_id"]:
continue
else:
other_ids.extend(set_ids[i])
self.__ns_from_permuted_train = [item + 100000 for item in correspond_train_id_set]
self.__ns_from_permuted_test = [item + 100000 for item in correspond_test_id_set]
# negative_samples_from_other_prompt = random.sample(other_ids, len(set_ids[train_conf["prompt_id"]]))
# self.__ns_from_other_prompt_train = negative_samples_from_other_prompt[
# 0:int(len(negative_samples_from_other_prompt) * train_conf["train_set_prob"])]
# self.__ns_from_other_prompt_test = negative_samples_from_other_prompt[
# int(len(negative_samples_from_other_prompt) * train_conf["train_set_prob"]):]
if ns_generate_type == "permuted":
# Add the permuted essays as negative samples to both the training and test splits
correspond_train_id_set.extend(self.__ns_from_permuted_train)
correspond_test_id_set.extend(self.__ns_from_permuted_test)
articles_id.extend(self.__ns_from_permuted_train)
articles_id.extend(self.__ns_from_permuted_test)
elif ns_generate_type == "prompt_irrelevant":
correspond_train_id_set.extend(self.__ns_from_other_prompt_train)
correspond_test_id_set.extend(self.__ns_from_other_prompt_test)
elif ns_generate_type == "both":
correspond_train_id_set.extend(self.__ns_from_permuted_train)
correspond_train_id_set.extend(self.__ns_from_other_prompt_train)
correspond_test_id_set.extend(self.__ns_from_permuted_test[0:int(len(self.__ns_from_permuted_test) / 2)])
correspond_test_id_set.extend(
self.__ns_from_other_prompt_test[0:int(len(self.__ns_from_other_prompt_test) / 2)])
articles_id.extend(self.__ns_from_permuted_train)
articles_id.extend(self.__ns_from_permuted_test)
elif ns_generate_type == "no_negative":
pass
else:
raise ValueError("generate_type must be choose in ('shuffle_prompt','other_prompt','shuffle_all')")
return articles_id, articles_set, handmark_scores, correspond_train_id_set, correspond_test_id_set
def train(self,
articles_id,
correspond_train_id_set,
correspond_test_id_set,
correspond_adv_id_set,
tfrecord_file_path,
xgboost_train_file_path,
xgboost_adv_file_path,
saved_model_dir):
print("XGBOOST TRAIN FILE PATH", xgboost_train_file_path)
input_fn = input_fn_from_tfrecord(tfrecord_path=tfrecord_file_path,
batch_size=train_conf["predict_batch_size"],
is_training=False,
element_ids=articles_id)
bsp_result = self.__bsp_estimator.predict(input_fn)
psp_result = self.__psp_estimator.predict(input_fn)
csp_result = self.__csp_estimator.predict(input_fn)
# print("BSP", len(bsp_result))
# normalized_scores
# counter=0
# for item2 in bsp_result:
# counter+=1
# print("counter", counter)
basic_scores = {}
for item in bsp_result:
print("BS", item)
basic_scores[item["batch_doc_id"]] = item["batch_scores"]
promp_scores = {}
for item in psp_result:
promp_scores[item["batch_doc_id"]] = item["batch_scores"]
coher_scores = {}
for item in csp_result:
coher_scores[item["batch_doc_id"]] = item["batch_scores"]
print(len(basic_scores), len(coher_scores), len(promp_scores))
features = np.load(xgboost_train_file_path, allow_pickle=True)["features"][()]
features_adv = np.load(xgboost_adv_file_path, allow_pickle=True)["features"][()]
print(len(features))
print("DONE XGBOOST_TRAIN_FILE_PATH LOADED")
articles_id, articles_set, set_ids, handmark_scores = read_asap_dataset()
handmark_normalized_scores = {}
for key, value in handmark_scores.items():
article_set_id = articles_set[articles_id.index(key)]
min_value = dataset_score_range[article_set_id][0]
max_value = dataset_score_range[article_set_id][1]
normalize_value = (value - min_value) / (max_value - min_value)
handmark_normalized_scores[key] = normalize_value
# print(value, normalize_value)
# print(handmark_normalized_scores)
train_features = []
train_handmark_normalized_scores = []
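# Each XGBoost training example is the handcrafted feature vector from the npz file with the
# three deep scores (basic, coherence, prompt relevance) appended; the regression target is
# the essay's normalized human score, or 0 for negative samples.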
for i in correspond_train_id_set:
# print("DONE HERE", i)
# if i in features.keys():
# print("OKAY1")
# if i - 100000 in features.keys():
# print("OKAY2")
# if i in basic_scores:
# print("OKAY3")
# if i in promp_scores:
# print("OKAY4")
# if i in coher_scores:
# print("OKAY5")
if (i in features.keys() or (
i - 100000) in features.keys()) or i in basic_scores and i in promp_scores and i in coher_scores:
# print("DONE1")
temp_i = i
if temp_i > 100000:
temp_i -= 100000
temp_i = str(temp_i)
# for ft in features[temp_i]:
# temp_features = ft
# print(temp_features)
temp_features = features[temp_i]
# print(type(temp_features))
# temp_features.tolist()
# basic_scores.tolist()
# print(basic_scores[i], type(basic_scores[i]), type(basic_scores))
temp_features = np.append(temp_features, basic_scores[i])
temp_features = np.append(temp_features, coher_scores[i])
temp_features = np.append(temp_features, promp_scores[i])
# temp_features.append(basic_scores[i])
# temp_features.append(coher_scores[i])
# temp_features.append(promp_scores[i])
# temp_features.append(1.0)
# temp_features.append(1.0)
# print("TF", temp_features.shape)
temp_features.tolist()
train_features.append(temp_features)
if i in self.__ns_from_other_prompt_train or i in self.__ns_from_permuted_train:
train_handmark_normalized_scores.append(0)
else:
# i = str(i)
train_handmark_normalized_scores.append(handmark_normalized_scores[i])
# print("train_features", train_features)
# print("test_handmark_normalized_scores", handmark_normalized_scores.keys())
print("HERE", len(correspond_test_id_set), correspond_test_id_set)
test_features = []
test_handmark_normalized_scores = []
for i in correspond_test_id_set:
# print("DONE HERE2", i)
# print(i)
if (i in features.keys() or (
i - 100000) in features.keys()) or i in basic_scores and i in promp_scores and i in coher_scores:
# print("DONE2")
temp_i = i
if temp_i > 100000:
temp_i = temp_i - 100000
print("bigger", temp_i)
temp_i = str(temp_i)
temp_features = features[temp_i]
temp_features = np.append(temp_features, basic_scores[i])
temp_features = np.append(temp_features, coher_scores[i])
temp_features = np.append(temp_features, promp_scores[i])
# temp_features.append(basic_scores[i])
# temp_features.append(coher_scores[i])
# temp_features.append(promp_scores[i])
# temp_features.append(1.0)
# temp_features.append(1.0)
test_features.append(temp_features)
if i in self.__ns_from_other_prompt_train or i in self.__ns_from_permuted_train:
test_handmark_normalized_scores.append(0)
else:
test_handmark_normalized_scores.append(handmark_normalized_scores[i])
# i = str(i)
# if i == handmark_normalized_scores.keys():
# print("here", /handmark_normalized_scores[i])
# test_handmark_normalized_scores.append(handmark_normalized_scores[i])
# else:
# continue
# i = str(i)
# test_handmark_normalized_scores.append(handmark_normalized_scores[i])
xgb_rg = xgb.XGBRegressor(n_estimators=5000, learning_rate=0.001, max_depth=6, gamma=0.05,
objective="reg:logistic")
train_features = np.array(train_features)
train_handmark_normalized_scores = np.array(train_handmark_normalized_scores)
print("train norm scores", len(train_handmark_normalized_scores), len(train_features))
test_features = np.array(test_features)
# print("test norm scores", test_handmark_normalized_scores)
# print("test norm scores", type(test_handmark_normalized_scores))
test_handmark_normalized_scores = np.array(test_handmark_normalized_scores)
print("test norm scores", len(test_handmark_normalized_scores), len(test_features))
# print("DONEEEE", train_features, type(train_handmark_normalized_scores))
# train_features = np.reshape(train_features,(train_features.size, 1))
print(train_features.shape)
print(train_handmark_normalized_scores.shape)
# test_features = np.reshape(test_features,(test_features.size, 1))
print(test_features.shape)
print(test_handmark_normalized_scores.shape)
# train_features = train_features.reshape(-1, 1)
# test_features = test_features.reshape(-1, 1)
xgb_rg.fit(train_features,
train_handmark_normalized_scores,
eval_set=[(test_features, test_handmark_normalized_scores)],
early_stopping_rounds=100,
verbose=True)
# xgb_rg.save_model(os.path.join(saved_model_dir, "osp3.xgboost"))
xgb_rg.load_model(os.path.join(saved_model_dir, "osp3.xgboost"))
print("MAX AND MIN", max_value, min_value)
print("ADV", features_adv)
test_features_adv = []
# test_handmark_normalized_scores = []
for i in correspond_adv_id_set:
# print("DONE HERE2", i)
# print(i)
if (i in features_adv.keys() or (
i - 100000) in features_adv.keys()) or i in basic_scores and i in promp_scores and i in coher_scores:
# print("DONE2")
temp_i = i
if temp_i > 100000:
temp_i = temp_i - 100000
print("bigger", temp_i)
temp_i = str(temp_i)
temp_features_adv = features_adv[temp_i]
temp_features_adv = np.append(temp_features_adv, basic_scores[i])
temp_features_adv = np.append(temp_features_adv, coher_scores[i])
temp_features_adv = np.append(temp_features_adv, promp_scores[i])
# temp_features.append(basic_scores[i])
# temp_features.append(coher_scores[i])
# temp_features.append(promp_scores[i])
# temp_features.append(1.0)
# temp_features.append(1.0)
test_features_adv.append(temp_features_adv)
pred_scores = xgb_rg.predict(test_features_adv)
print("pred_scores", pred_scores)
# print("train norm scores", train_handmark_normalized_scores)
# print("test norm scores", test_handmark_normalized_scores)
test_predict_scores = []
# test_handmark_scores = [round(item * (10)+2) for item in test_handmark_normalized_scores]
for i in range(len(correspond_test_id_set)):
min_value = 2
max_value = 12
overall_score = round(pred_scores[i] * (max_value - min_value) + min_value)
test_predict_scores.append(overall_score)
print("id:{}, basic:{}, coher:{}, prompt:{}, predict:{}".format(correspond_test_id_set[i],
basic_scores[
correspond_test_id_set[
i]] * 10,
coher_scores[
correspond_test_id_set[
i]] * 10,
promp_scores[
correspond_test_id_set[
i]] * 10,
overall_score))
# test_predict_scores.tolist()
print(type(test_predict_scores), test_predict_scores)
# opening the csv file in 'w+' mode
file = open('prompt1_adv_predict.csv', 'w+', newline='')
# writing the data into the file
with file:
write = csv.writer(file)
write.writerows(map(lambda x: [x], test_predict_scores))
print("save to csv")
# test_handmark_scores = np.asarray(test_handmark_scores, dtype=np.int32)
# test_predict_scores = np.asarray(test_predict_scores, dtype=np.int32)
# print(handmark_normalized_scores)
# print(len(handmark_normalized_scores))
# print(test_predict_scores)
# print(test_handmark_scores)
# qwk = quadratic_weighted_kappa(test_predict_scores, test_handmark_scores)
# print("##############qwk value is {}".format(qwk))
def eval_metric(self, result, handmark_scores, articles_set, articles_id):
pass
def train(estm, train_file_path, correspond_train_id_set, saved_model_dir):
train_set_length = len(correspond_train_id_set)
print("DONE 1 train_set_length", train_set_length)
num_train_steps = int((train_set_length * train_conf["num_train_epochs"]) / train_conf["train_batch_size"])
print("DONE 2 num_train_steps", num_train_steps)
input_fn = input_fn_from_tfrecord(tfrecord_path=train_file_path,
batch_size=train_conf["train_batch_size"],
is_training=True,
element_ids=correspond_train_id_set)
estm.train(input_fn=input_fn, steps=num_train_steps)
estm.export_saved_model(saved_model_dir, serving_input_receiver_fn())
def test(estm, test_file_path, correspond_test_id_set, handmark_scores, articles_set, articles_id, sp):
input_fn = input_fn_from_tfrecord(tfrecord_path=test_file_path,
batch_size=train_conf["predict_batch_size"],
is_training=False,
element_ids=correspond_test_id_set)
predict_result = estm.predict(input_fn)
print(predict_result)
sp.eval_metric(result=predict_result,
handmark_scores=handmark_scores,
articles_set=articles_set,
articles_id=articles_id)
def generate_tf_estimator(model_dir, sp, num_train_steps=-1, num_warmup_steps=-1):
run_config = tf.estimator.RunConfig(
model_dir=model_dir,
save_checkpoints_steps=train_conf["save_checkpoints_step"],
save_summary_steps=20
)
model_fn = sp.model_fn_builder(
learning_rate=train_conf["learning_rate"],
num_train_step=num_train_steps,
num_warmup_steps=num_warmup_steps
)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config
)
return estimator
def main():
parser = argparse.ArgumentParser(description="Training of the four scores and evaluation on the public dataset")
parser.add_argument("-model", help="which model to train (bsp, csp, psp or osp)")
args = parser.parse_args()
if args.model == "bsp":
model_dir = sys_conf["bsp_output_dir"]
sp = BasicScorePredictor()
elif args.model == "csp":
model_dir = sys_conf["csp_output_dir"]
sp = CoherenceScore()
elif args.model == "psp":
model_dir = sys_conf["psp_output_dir"]
sp = PromptRelevantScore()
elif args.model == "osp":
bsp = BasicScorePredictor()
bsp_model_dir = sys_conf["bsp_output_dir"]
bsp_estimator = generate_tf_estimator(bsp_model_dir, bsp)
csp = CoherenceScore()
csp_model_dir = sys_conf["csp_output_dir"]
csp_estimator = generate_tf_estimator(csp_model_dir, csp)
psp = PromptRelevantScore()
psp_model_dir = sys_conf["psp_output_dir"]
psp_estimator = generate_tf_estimator(psp_model_dir, psp)
osp = OverallScorePredictor(bsp_estimator, csp_estimator, psp_estimator)
else:
raise ValueError("model need to be chosen from bsp, csp, psp and osp")
if args.model == "osp":
model_dir = sys_conf["osp_output_dir"]
tfrecord_file_path = os.path.join(sys_conf["data_dir"], "asap_dataset_prompt.tfrecord")
xgboost_train_file_path = os.path.join(sys_conf["data_dir"], "asap_xgboost_prompt.npz")
xgboost_adv_file_path = os.path.join(sys_conf["data_dir"], "asap_xgboost_adv.npz")
if not (os.path.exists(tfrecord_file_path) and os.path.exists(xgboost_train_file_path)):
raise ValueError("tfrecord file path or xgboost train file path is invalid.")
articles_id, articles_set, handmark_scores, correspond_train_id_set, correspond_test_id_set = \
osp.generate_asap_train_and_test_set()
articles_id_adv, set_adv_ids, correspond_adv_id_set = osp.generate_adv_test()
# articles_id = np.array(art_id)
print(type(articles_id), len(articles_id))
print(type(articles_set))
print(type(handmark_scores))
print(type(correspond_train_id_set), len(correspond_train_id_set))
print(type(correspond_test_id_set), len(correspond_test_id_set))
osp.train(articles_id, correspond_train_id_set, correspond_test_id_set, correspond_adv_id_set, tfrecord_file_path, xgboost_train_file_path, xgboost_adv_file_path, model_dir)
else:
# saved_model_dir = model_dir
saved_model_dir = os.path.join(model_dir, "SavedModel")
articles_id, articles_set, handmark_scores, correspond_train_id_set, correspond_test_id_set = sp.generate_asap_train_and_test_set()
train_set_length = len(correspond_train_id_set)
num_train_steps = int((train_set_length * train_conf["num_train_epochs"]) / train_conf["train_batch_size"])
num_warmup_steps = int(num_train_steps * train_conf["warmup_proportion"])
estimator = generate_tf_estimator(model_dir, sp, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps)
if train_conf["do_train"]:
train_file_path = os.path.join(sys_conf["data_dir"], "asap_dataset_prompt.tfrecord")
if not os.path.exists(train_file_path):
raise ValueError("train_file_path is invalid.")
print("DONE 1 estimator", estimator)
print("DONE 2 train_file_path", train_file_path)
print("DONE 3 correspond_train_id_set", correspond_train_id_set)
print("saved_model_dir", saved_model_dir)
train(estimator, train_file_path, correspond_train_id_set, saved_model_dir)
if train_conf["do_predict"]:
test_file_path = os.path.join(sys_conf["data_dir"], "asap_dataset_prompt.tfrecord")
if not os.path.exists(test_file_path):
raise ValueError("test_file_path is invalid.")
test(estimator, test_file_path, correspond_test_id_set, handmark_scores, articles_set, articles_id, sp)
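# Example invocations (assuming the tfrecord/npz inputs and the config in util.py are in
# place): first train the three sub-scorers, then fuse them with XGBoost. The file name below
# is a placeholder; the -model choices come from the argparse setup above.
#
#     python this_script.py -model bsp
#     python this_script.py -model csp
#     python this_script.py -model psp
#     python this_script.py -model osp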
if __name__ == "__main__":
main()
|
from alpine import APIClient
from alpine.exception import *
from alpine.workspace import *
from .alpineunittest import AlpineTestCase
class TestWorkspace(AlpineTestCase):
def setUp(self):
super(TestWorkspace, self).setUp()
# Creating Alpine Client in setUp Function for tests
global alpine_client
global login_info
alpine_client = APIClient(self.host, self.port)
login_info = alpine_client.login(self.username, self.password)
def test_create_new_workspace(self):
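# Each test first deletes any leftover workspace of the same name (the try/except
# WorkspaceNotFoundException pattern below), so the suite can be re-run against a server
# that still holds state from an earlier run.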
try:
workspace_id = alpine_client.workspace.get_id("test_workspace1")
alpine_client.workspace.delete(workspace_id)
except WorkspaceNotFoundException:
pass
workspace_info = alpine_client.workspace.create(workspace_name="test_workspace1", public=True, summary="Summary")
self.assertEqual(workspace_info['name'],"test_workspace1")
self.assertEqual(workspace_info['public'], True)
self.assertEqual(workspace_info['summary'], "Summary")
def test_get_workspace_details(self):
try:
workspace_id = alpine_client.workspace.get_id("test_workspace2")
alpine_client.workspace.delete(workspace_id)
except WorkspaceNotFoundException:
pass
workspace_info_created = alpine_client.workspace.create(workspace_name="test_workspace2")
workspace_info = alpine_client.workspace.get(workspace_info_created['id'])
self.assertEqual(workspace_info_created, workspace_info)
def test_get_workspace_id(self):
try:
workspace_id = alpine_client.workspace.get_id("test_workspace3")
alpine_client.workspace.delete(workspace_id)
except WorkspaceNotFoundException:
pass
workspace_info_created = alpine_client.workspace.create(workspace_name="test_workspace3")
workspace_id = alpine_client.workspace.get_id(workspace_name="test_workspace3")
self.assertEqual(workspace_id, workspace_info_created['id'])
def test_get_member_list_for_workspace(self):
try:
workspace_id = alpine_client.workspace.get_id("test_workspace4")
alpine_client.workspace.delete(workspace_id)
except WorkspaceNotFoundException:
pass
workspace_info_created = alpine_client.workspace.create(workspace_name="test_workspace4")
member_list = alpine_client.workspace.member.get_list(workspace_info_created['id'])
self.assertEqual(workspace_info_created["members_count"], member_list.__len__())
fail = True
for member in member_list:
if member['username'] == self.username:
fail = False
pass
if fail:
self.fail("failed to find owner {0} in member_list {1}".format(self.username, member_list))
def test_get_workspaces_list(self):
user_id = alpine_client.user.get_id(self.username)
workspace_list1 = alpine_client.workspace.get_list(active=True, user_id=user_id, per_page=10)
workspace_list2 = alpine_client.workspace.get_list(active=True, user_id=user_id, per_page=100)
self.assertEqual(workspace_list1, workspace_list2)
workspace_list_all = alpine_client.workspace.get_list(active=True, per_page=10)
workspace_number = 0
for ws in workspace_list_all:
member_list = alpine_client.workspace.member.get_list(ws['id'])
contain_member = False
for member in member_list:
if member['username'] == self.username:
contain_member = True
print(ws['name'])
break
if contain_member:
workspace_number = workspace_number + 1
self.assertEqual(workspace_number, len(workspace_list1))
def test_update_workspace_details(self):
test_workspace_name = "test_workspace0"
test_workspace_summary1 = "Summary 1"
test_workspace_summary2 = "Summary 2"
test_workspace_is_public1 = False
test_workspace_is_public2 = True
test_workspace_stage2 = 2
try:
workspace_id = alpine_client.workspace.get_id(test_workspace_name)
alpine_client.workspace.delete(workspace_id)
except WorkspaceNotFoundException:
pass
workspace_info = alpine_client.workspace.create(workspace_name=test_workspace_name, public=test_workspace_is_public1,
summary=test_workspace_summary1)
workspace_info = alpine_client.workspace.update(workspace_info['id'], test_workspace_is_public2,
is_active=True, summary=test_workspace_summary2,
stage=test_workspace_stage2)
self.assertEqual(workspace_info['summary'], test_workspace_summary2)
self.assertEqual(workspace_info['public'], test_workspace_is_public2)
self.assertEqual(workspace_info['workspace_stage']['id'], test_workspace_stage2)
def test_update_workspace_membership(self):
test_workspace_name = "test_workspace0"
new_role = "Business Analyst"
try:
workspace_id = alpine_client.workspace.get_id(test_workspace_name)
alpine_client.workspace.delete(workspace_id)
except WorkspaceNotFoundException:
pass
workspace_info = alpine_client.workspace.create(workspace_name=test_workspace_name, public=True,
summary="Summary")
user_id = alpine_client.user.get_id(self.username)
member_list = alpine_client.workspace.member.update_role(workspace_info['id'], user_id, new_role)
fail = True
for member in member_list:
if member['username'] == self.username:
if member['role'] == new_role:
fail = False
break
if fail:
self.fail("User {0} Role is not update to {1}".format(self.username, member_list))
alpine_client.workspace.delete(workspace_info['id'])
def test_add_workspace_member(self):
test_workspace_name = "test_workspace0"
new_role = alpine_client.workspace.memberRole.BusinessOwner
try:
user_id = alpine_client.user.get_id("test_user1")
alpine_client.user.delete(user_id)
except UserNotFoundException:
pass
user_info = alpine_client.user.create("test_user1", "password", "firstName", "lastName", "testuser1@alpine.test",
"QA", "Developement")
try:
workspace_id = alpine_client.workspace.get_id(test_workspace_name)
alpine_client.workspace.delete(workspace_id)
except WorkspaceNotFoundException:
pass
workspace_info = alpine_client.workspace.create(workspace_name=test_workspace_name, public=True,
summary="Summary")
member_list = alpine_client.workspace.member.add(workspace_info['id'], user_info['id'], alpine_client.workspace.memberRole.BusinessOwner)
fail = True
for member in member_list:
if member['username'] == "test_user1":
if member['role'] == new_role:
fail = False
break
if fail:
self.fail("User {0} Role is not update to {1}".format(self.username, member_list))
alpine_client.workspace.delete(workspace_info['id'])
def test_update_workspace_stage(self):
test_workspace_name = "test_workspace1"
# stages = ["Define", "Transform", "Model", "Deploy", "Act"]
stages = alpine_client.workspace.stage
try:
workspace_id = alpine_client.workspace.get_id(test_workspace_name)
alpine_client.workspace.delete(workspace_id)
except WorkspaceNotFoundException:
pass
workspace_info = alpine_client.workspace.create(workspace_name=test_workspace_name, public=True,
summary="Summary")
for stage in stages:
workspace_info = alpine_client.workspace.update(workspace_info['id'], stage=stage)
self.assertEqual(workspace_info['workspace_stage']['id'], stage)
def test_update_workspace_name(self):
test_workspace_name = "test_workspace1"
test_workspace_name_new = "test_workspace1_new"
try:
workspace_id = alpine_client.workspace.get_id(test_workspace_name)
alpine_client.workspace.delete(workspace_id)
except WorkspaceNotFoundException:
pass
try:
workspace_id = alpine_client.workspace.get_id(test_workspace_name_new)
alpine_client.workspace.delete(workspace_id)
except WorkspaceNotFoundException:
pass
workspace_info = alpine_client.workspace.create(workspace_name=test_workspace_name, public=True,
summary="Summary")
workspace_info = alpine_client.workspace.update(workspace_info['id'], name=test_workspace_name_new)
self.assertEqual(workspace_info['name'], test_workspace_name_new)
alpine_client.workspace.delete(workspace_info['id'])
def test_update_workspace_owner(self):
test_workspace_name = "test_workspace0"
new_user = "new_user1"
try:
user_id = alpine_client.user.get_id(new_user)
alpine_client.user.delete(user_id)
except UserNotFoundException:
pass
new_user_info = alpine_client.user.create(new_user, "password", "firstName", "lastName", "testuser1@alpine.test",
"QA", "Developement")
try:
workspace_id = alpine_client.workspace.get_id(test_workspace_name)
alpine_client.workspace.delete(workspace_id)
except WorkspaceNotFoundException:
pass
workspace_info = alpine_client.workspace.create(workspace_name=test_workspace_name, public=True,
summary="Summary")
alpine_client.workspace.member.add(workspace_info['id'], new_user_info['id'], "Data Engineer")
workspace_info = alpine_client.workspace.update(workspace_info['id'], owner_id=new_user_info['id'])
self.assertEqual(workspace_info['owner'], new_user_info)
alpine_client.workspace.delete(workspace_info['id'])
alpine_client.user.delete(new_user_info['id'])
def test_update_workspace_owner_not_a_member(self):
test_workspace_name = "test_workspace0"
new_user = "new_user1"
try:
user_id = alpine_client.user.get_id(new_user)
alpine_client.user.delete(user_id)
except UserNotFoundException:
pass
new_user_info = alpine_client.user.create(new_user, "password", "firstName", "lastName", "testuser1@alpine.test",
"QA", "Developement")
try:
workspace_id = alpine_client.workspace.get_id(test_workspace_name)
alpine_client.workspace.delete(workspace_id)
except WorkspaceNotFoundException:
pass
workspace_info = alpine_client.workspace.create(workspace_name=test_workspace_name, public=True,
summary="Summary")
try:
workspace_info = alpine_client.workspace.update(workspace_info['id'], owner_id=new_user_info['id'])
except WorkspaceMemberNotFoundException:
pass
alpine_client.workspace.delete(workspace_info['id'])
alpine_client.user.delete(new_user_info['id'])
def test_update_workspace_privacy(self):
test_workspace_name = "test_workspace1"
try:
workspace_id = alpine_client.workspace.get_id(test_workspace_name)
alpine_client.workspace.delete(workspace_id)
except WorkspaceNotFoundException:
pass
workspace_info = alpine_client.workspace.create(workspace_name=test_workspace_name, public=True,
summary="Summary")
self.assertEqual(workspace_info['public'], True)
for public in [False, True]:
workspace_info = alpine_client.workspace.update(workspace_info['id'], is_public=public)
self.assertEqual(workspace_info['public'], public)
def test_update_workspace_status(self):
test_workspace_name = "test_workspace1"
try:
workspace_id = alpine_client.workspace.get_id(test_workspace_name)
alpine_client.workspace.delete(workspace_id)
except WorkspaceNotFoundException:
pass
workspace_info = alpine_client.workspace.create(workspace_name=test_workspace_name, public=True,
summary="Summary")
self.assertEqual(workspace_info['archived'], False)
for is_active in [True, False]:
workspace_info = alpine_client.workspace.update(workspace_info['id'], is_active = is_active)
self.assertEqual(workspace_info['archived'], not is_active)
def test_delete_workspace(self):
test_workspace_name = "test_workspace0"
try:
workspace_id = alpine_client.workspace.get_id(test_workspace_name)
alpine_client.workspace.delete(workspace_id)
except WorkspaceNotFoundException:
pass
workspace_info = alpine_client.workspace.create(workspace_name=test_workspace_name, public=True,
summary="Summary")
alpine_client.workspace.delete(workspace_info['id'])
        # Verify that the workspace is successfully deleted
try:
alpine_client.workspace.get(workspace_info['id'])
except WorkspaceNotFoundException:
pass
else:
self.fail("Failed to Delete the alpine_client.workspace {0}".format(test_workspace_name))
alpine_client.workspace.delete(workspace_info['id'])
|
# -*- coding: utf-8 -*-
from .zappa_async import task_sns
import logging
logger = logging.getLogger(__name__)
@task_sns
def process(message):
pass
|
# encoding=utf-8
import logging
import pytest
import requests
from brunns.matchers.data import json_matching
from brunns.matchers.html import has_title
from brunns.matchers.object import between
from brunns.matchers.response import is_response
from contexttimer import Timer
from hamcrest import assert_that, contains_string, has_entries, has_entry
from mbtest.imposters import Imposter, Predicate, Proxy, Stub
from mbtest.imposters.responses import PredicateGenerator
from mbtest.matchers import had_request
from tests.utils.network import internet_connection
logger = logging.getLogger(__name__)
INTERNET_CONNECTED = internet_connection()
@pytest.mark.skipif(not INTERNET_CONNECTED, reason="No internet connection.")
def test_proxy(mock_server):
imposter = Imposter(Stub(responses=Proxy(to="http://example.com")))
with mock_server(imposter):
response = requests.get(imposter.url)
assert_that(
response, is_response().with_status_code(200).and_body(has_title("Example Domain"))
)
assert_that(imposter, had_request().with_path("/").and_method("GET"))
@pytest.mark.skipif(not INTERNET_CONNECTED, reason="No internet connection.")
def test_proxy_playback(mock_server):
proxy_imposter = Imposter(Stub(responses=Proxy(to="https://httpbin.org", mode=Proxy.Mode.ONCE)))
with mock_server(proxy_imposter):
response = requests.get(proxy_imposter.url / "status/418")
assert_that(
response, is_response().with_status_code(418).and_body(contains_string("teapot"))
)
response = requests.get(proxy_imposter.url / "status/200")
assert_that(
response, is_response().with_status_code(418).and_body(contains_string("teapot"))
)
recorded_stubs = proxy_imposter.playback()
playback_impostor = Imposter(recorded_stubs)
with mock_server(playback_impostor):
response = requests.get(playback_impostor.url)
assert_that(
response, is_response().with_status_code(418).and_body(contains_string("teapot"))
)
@pytest.mark.skipif(not INTERNET_CONNECTED, reason="No internet connection.")
def test_proxy_uses_path_predicate_generator(mock_server):
proxy_imposter = Imposter(
Stub(
responses=Proxy(
to="https://httpbin.org",
mode=Proxy.Mode.ONCE,
predicate_generators=[PredicateGenerator(path=True)],
)
)
)
with mock_server(proxy_imposter):
response = requests.get(proxy_imposter.url / "status/418")
assert_that(
response, is_response().with_status_code(418).and_body(contains_string("teapot"))
)
response = requests.get(proxy_imposter.url / "status/200")
assert_that(response, is_response().with_status_code(200))
recorded_stubs = proxy_imposter.playback()
playback_impostor = Imposter(recorded_stubs)
with mock_server(playback_impostor):
response = requests.get(playback_impostor.url / "status/418")
assert_that(
response, is_response().with_status_code(418).and_body(contains_string("teapot"))
)
response = requests.get(playback_impostor.url / "status/200")
assert_that(response, is_response().with_status_code(200))
@pytest.mark.skipif(not INTERNET_CONNECTED, reason="No internet connection.")
def test_proxy_uses_query_predicate_generator(mock_server):
proxy_imposter = Imposter(
Stub(
responses=Proxy(
to="https://httpbin.org",
mode=Proxy.Mode.ONCE,
predicate_generators=[PredicateGenerator(query=True)],
)
)
)
with mock_server(proxy_imposter):
response = requests.get(proxy_imposter.url / "get", params={"foo": "bar"})
assert_that(
response,
is_response().with_body(json_matching(has_entries(args=has_entries(foo="bar")))),
)
response = requests.get(proxy_imposter.url / "get", params={"foo": "baz"})
assert_that(
response,
is_response().with_body(json_matching(has_entries(args=has_entries(foo="baz")))),
)
recorded_stubs = proxy_imposter.playback()
playback_impostor = Imposter(recorded_stubs)
with mock_server(playback_impostor):
response = requests.get(playback_impostor.url / "get", params={"foo": "bar"})
assert_that(
response,
is_response().with_body(json_matching(has_entries(args=has_entries(foo="bar")))),
)
response = requests.get(playback_impostor.url / "get", params={"foo": "baz"})
assert_that(
response,
is_response().with_body(json_matching(has_entries(args=has_entries(foo="baz")))),
)
@pytest.mark.skipif(not INTERNET_CONNECTED, reason="No internet connection.")
def test_proxy_uses_query_predicate_generator_with_key(mock_server):
proxy_imposter = Imposter(
Stub(
responses=Proxy(
to="https://httpbin.org",
mode=Proxy.Mode.ONCE,
predicate_generators=[PredicateGenerator(query={"foo": "whatever"})],
)
)
)
with mock_server(proxy_imposter):
response = requests.get(proxy_imposter.url / "get", params={"foo": "bar", "quxx": "buzz"})
assert_that(
response,
is_response().with_body(
json_matching(has_entries(args=has_entries(foo="bar", quxx="buzz")))
),
)
response = requests.get(proxy_imposter.url / "get", params={"foo": "baz", "quxx": "buxx"})
assert_that(
response,
is_response().with_body(json_matching(has_entries(args=has_entries(foo="baz")))),
)
recorded_stubs = proxy_imposter.playback()
playback_impostor = Imposter(recorded_stubs)
with mock_server(playback_impostor):
response = requests.get(
playback_impostor.url / "get", params={"foo": "bar", "quxx": "whatever"}
)
assert_that(
response,
is_response().with_body(
json_matching(has_entries(args=has_entries(foo="bar", quxx="buzz")))
),
)
response = requests.get(
playback_impostor.url / "get", params={"foo": "baz", "quxx": "anything"}
)
assert_that(
response,
is_response().with_body(
json_matching(has_entries(args=has_entries(foo="baz", quxx="buxx")))
),
)
@pytest.mark.skipif(not INTERNET_CONNECTED, reason="No internet connection.")
def test_proxy_without_stub(mock_server):
imposter = Imposter(Proxy(to="http://example.com"))
with mock_server(imposter):
response = requests.get(imposter.url)
assert_that(
response, is_response().with_status_code(200).and_body(has_title("Example Domain"))
)
def test_proxy_delay(mock_server):
target_imposter = Imposter(Stub(Predicate(path="/test")))
with mock_server(target_imposter) as server:
proxy_imposter = Imposter(Stub(responses=Proxy(to=target_imposter.url, wait=100)))
server.add_imposters(proxy_imposter)
with Timer() as timer:
requests.get(proxy_imposter.url / "test")
assert_that(timer.elapsed, between(0.1, 0.5))
def test_inject_headers(mock_server):
target_imposter = Imposter(Stub(Predicate(path="/test")))
with mock_server(target_imposter) as server:
proxy_imposter = Imposter(
Stub(
responses=Proxy(
to=target_imposter.url,
inject_headers={"X-Clacks-Overhead": "GNU Terry Pratchett"},
)
)
)
server.add_imposters(proxy_imposter)
requests.get(proxy_imposter.url / "test")
assert_that(
target_imposter,
had_request()
.with_path("/test")
.and_headers(has_entry("X-Clacks-Overhead", "GNU Terry Pratchett")),
)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 6 15:27:04 2016
@author: alex
"""
from AlexRobotics.dynamic import Hybrid_Manipulator as HM
from AlexRobotics.planning import RandomTree as RPRT
from AlexRobotics.control import RminComputedTorque as RminCTC
import numpy as np
import matplotlib.pyplot as plt
R = HM.HybridTwoLinkManipulator()
R.ubar = np.array([0,0,3])
x_start = np.array([3,0,0,0])
x_goal = np.array([0,0,0,0])
RRT = RPRT.RRT( R , x_start )
T = 5
#
RRT.U = np.array([[T,0,0],[0,0,0],[-T,0,0],[0,T,0],[0,-T,0],[T,T,0],[-T,-T,0],[-T,T,0],[T,-T,0],
[T,0,1],[0,0,1],[-T,0,1],[0,T,1],[0,-T,1],[T,T,1],[-T,-T,1],[-T,T,1],[T,-T,1],
[T,0,2],[0,0,2],[-T,0,2],[0,T,2],[0,-T,2],[T,T,2],[-T,-T,2],[-T,T,2],[T,-T,2],
[T,0,3],[0,0,3],[-T,0,3],[0,T,3],[0,-T,3],[T,T,3],[-T,-T,3],[-T,T,3],[T,-T,3]])
#RRT.U = np.array([[T,0,1],[0,0,1],[-T,0,1],[0,T,1],[0,-T,1],[T,T,1],[-T,-T,1],[-T,T,1],[T,-T,1]])
#RRT.U = np.array([[T,0,2],[0,0,2],[-T,0,2],[0,T,2],[0,-T,2],[T,T,2],[-T,-T,2],[-T,T,2],[T,-T,2]])
#RRT.U = np.array([[T,0,3],[0,0,3],[-T,0,3],[0,T,3],[0,-T,3],[T,T,3],[-T,-T,3],[-T,T,3],[T,-T,3]])
#RRT.U = np.array([[T,0,0],[0,0,0],[-T,0,0],[0,T,0],[0,-T,0],[T,T,0],[-T,-T,0],[-T,T,0],[T,-T,0]])
#RRT.U = np.array([[0,T,0],[0,0,0],[0,-T,0]])
RRT.dt = 0.2
RRT.goal_radius = 1.0
RRT.max_nodes = 12000
RRT.max_solution_time = 12
#RRT.compute_steps(1000,True)
RRT.find_path_to_goal( x_goal )
# Assign controller
CTC_controller = RminCTC.RminSlidingModeController( R )
CTC_controller.load_trajectory( RRT.solution )
R.ctl = CTC_controller.ctl
CTC_controller.lam = 1.0
CTC_controller.D = 10.0
CTC_controller.traj_ref_pts = 'closest'
#CTC_controller.traj_ref_pts = 'interpol'
CTC_controller.hysteresis = True
CTC_controller.min_delay = 0.5
# Plot
tf = RRT.time_to_goal + 5
n = int( np.round( tf / 0.05 ) ) + 1
R.plotAnimation( x_start , tf , n , solver = 'euler' )
R.Sim.plot_CL('x')
R.Sim.plot_CL('u')
#R.phase_plane_trajectory([0,0,3],x_start,tf,True,False,False,True)
RRT.plot_2D_Tree()
# Hold figures alive
plt.show()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'object_recognition_mainwindow_ui.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 400)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(800, 400))
MainWindow.setMaximumSize(QtCore.QSize(16777215, 16777215))
MainWindow.setDocumentMode(False)
MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular)
MainWindow.setDockOptions(QtWidgets.QMainWindow.AllowTabbedDocks|QtWidgets.QMainWindow.AnimatedDocks)
self.centralwidget = QtWidgets.QWidget(MainWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_5.setSpacing(6)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.verticalLayout_7 = QtWidgets.QVBoxLayout()
self.verticalLayout_7.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_7.setContentsMargins(10, 10, 0, 5)
self.verticalLayout_7.setSpacing(0)
self.verticalLayout_7.setObjectName("verticalLayout_7")
spacerItem = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum)
self.verticalLayout_7.addItem(spacerItem)
self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setMinimumSize(QtCore.QSize(240, 0))
self.groupBox.setMaximumSize(QtCore.QSize(240, 200))
self.groupBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.groupBox.setAutoFillBackground(False)
self.groupBox.setObjectName("groupBox")
self.gridLayout_3 = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout_3.setContentsMargins(12, 12, 12, 12)
self.gridLayout_3.setHorizontalSpacing(0)
self.gridLayout_3.setVerticalSpacing(4)
self.gridLayout_3.setObjectName("gridLayout_3")
self.label_3 = QtWidgets.QLabel(self.groupBox)
self.label_3.setMaximumSize(QtCore.QSize(50, 25))
self.label_3.setObjectName("label_3")
self.gridLayout_3.addWidget(self.label_3, 1, 0, 1, 1)
self.min_pts_doubleSpinBox = QtWidgets.QDoubleSpinBox(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.min_pts_doubleSpinBox.sizePolicy().hasHeightForWidth())
self.min_pts_doubleSpinBox.setSizePolicy(sizePolicy)
self.min_pts_doubleSpinBox.setMaximumSize(QtCore.QSize(80, 25))
self.min_pts_doubleSpinBox.setObjectName("min_pts_doubleSpinBox")
self.gridLayout_3.addWidget(self.min_pts_doubleSpinBox, 1, 1, 1, 1)
self.label_2 = QtWidgets.QLabel(self.groupBox)
self.label_2.setMaximumSize(QtCore.QSize(50, 25))
self.label_2.setObjectName("label_2")
self.gridLayout_3.addWidget(self.label_2, 5, 0, 1, 1)
self.radius_doubleSpinBox = QtWidgets.QDoubleSpinBox(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.radius_doubleSpinBox.sizePolicy().hasHeightForWidth())
self.radius_doubleSpinBox.setSizePolicy(sizePolicy)
self.radius_doubleSpinBox.setMaximumSize(QtCore.QSize(80, 25))
self.radius_doubleSpinBox.setMaximum(111111111.0)
self.radius_doubleSpinBox.setObjectName("radius_doubleSpinBox")
self.gridLayout_3.addWidget(self.radius_doubleSpinBox, 0, 1, 1, 1)
self.min_pts_doubleSpinBox2 = QtWidgets.QDoubleSpinBox(self.groupBox)
self.min_pts_doubleSpinBox2.setEnabled(False)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.min_pts_doubleSpinBox2.sizePolicy().hasHeightForWidth())
self.min_pts_doubleSpinBox2.setSizePolicy(sizePolicy)
self.min_pts_doubleSpinBox2.setMaximumSize(QtCore.QSize(80, 25))
self.min_pts_doubleSpinBox2.setObjectName("min_pts_doubleSpinBox2")
self.gridLayout_3.addWidget(self.min_pts_doubleSpinBox2, 1, 2, 1, 1)
self.radiusAllPictureCheckBox = QtWidgets.QCheckBox(self.groupBox)
self.radiusAllPictureCheckBox.setObjectName("radiusAllPictureCheckBox")
self.gridLayout_3.addWidget(self.radiusAllPictureCheckBox, 0, 2, 1, 1)
self.label = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setMaximumSize(QtCore.QSize(50, 25))
self.label.setObjectName("label")
self.gridLayout_3.addWidget(self.label, 0, 0, 1, 1)
self.eps_horizontalSlider2 = QtWidgets.QSlider(self.groupBox)
self.eps_horizontalSlider2.setEnabled(False)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.eps_horizontalSlider2.sizePolicy().hasHeightForWidth())
self.eps_horizontalSlider2.setSizePolicy(sizePolicy)
self.eps_horizontalSlider2.setMinimumSize(QtCore.QSize(100, 0))
self.eps_horizontalSlider2.setMaximumSize(QtCore.QSize(160, 25))
self.eps_horizontalSlider2.setMaximum(1000)
self.eps_horizontalSlider2.setProperty("value", 100)
self.eps_horizontalSlider2.setOrientation(QtCore.Qt.Horizontal)
self.eps_horizontalSlider2.setObjectName("eps_horizontalSlider2")
self.gridLayout_3.addWidget(self.eps_horizontalSlider2, 7, 1, 1, 2)
self.eps_doubleSpinBox2 = QtWidgets.QDoubleSpinBox(self.groupBox)
self.eps_doubleSpinBox2.setEnabled(False)
self.eps_doubleSpinBox2.setMaximumSize(QtCore.QSize(60, 25))
self.eps_doubleSpinBox2.setSingleStep(0.01)
self.eps_doubleSpinBox2.setObjectName("eps_doubleSpinBox2")
self.gridLayout_3.addWidget(self.eps_doubleSpinBox2, 7, 0, 1, 1)
self.eps_doubleSpinBox = QtWidgets.QDoubleSpinBox(self.groupBox)
self.eps_doubleSpinBox.setMaximumSize(QtCore.QSize(60, 25))
self.eps_doubleSpinBox.setMaximum(10.0)
self.eps_doubleSpinBox.setSingleStep(0.01)
self.eps_doubleSpinBox.setProperty("value", 1.0)
self.eps_doubleSpinBox.setObjectName("eps_doubleSpinBox")
self.gridLayout_3.addWidget(self.eps_doubleSpinBox, 6, 0, 1, 1)
self.eps_horizontalSlider = QtWidgets.QSlider(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.eps_horizontalSlider.sizePolicy().hasHeightForWidth())
self.eps_horizontalSlider.setSizePolicy(sizePolicy)
self.eps_horizontalSlider.setMinimumSize(QtCore.QSize(160, 0))
self.eps_horizontalSlider.setMaximumSize(QtCore.QSize(160, 25))
self.eps_horizontalSlider.setMaximum(500)
self.eps_horizontalSlider.setProperty("value", 100)
self.eps_horizontalSlider.setOrientation(QtCore.Qt.Horizontal)
self.eps_horizontalSlider.setObjectName("eps_horizontalSlider")
self.gridLayout_3.addWidget(self.eps_horizontalSlider, 6, 1, 1, 2)
self.verticalLayout_7.addWidget(self.groupBox)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum)
self.verticalLayout_7.addItem(spacerItem1)
self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_3.sizePolicy().hasHeightForWidth())
self.groupBox_3.setSizePolicy(sizePolicy)
self.groupBox_3.setMinimumSize(QtCore.QSize(230, 0))
self.groupBox_3.setMaximumSize(QtCore.QSize(200, 100))
self.groupBox_3.setObjectName("groupBox_3")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.groupBox_3)
self.horizontalLayout_3.setSpacing(0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.verticalLayout_8 = QtWidgets.QVBoxLayout()
self.verticalLayout_8.setSpacing(0)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.points_label = QtWidgets.QLabel(self.groupBox_3)
self.points_label.setMaximumSize(QtCore.QSize(62, 20))
self.points_label.setObjectName("points_label")
self.verticalLayout_8.addWidget(self.points_label)
self.clusters_label = QtWidgets.QLabel(self.groupBox_3)
self.clusters_label.setMaximumSize(QtCore.QSize(62, 20))
self.clusters_label.setObjectName("clusters_label")
self.verticalLayout_8.addWidget(self.clusters_label)
self.clusters_label_2 = QtWidgets.QLabel(self.groupBox_3)
self.clusters_label_2.setMaximumSize(QtCore.QSize(16777215, 20))
self.clusters_label_2.setObjectName("clusters_label_2")
self.verticalLayout_8.addWidget(self.clusters_label_2)
self.horizontalLayout_3.addLayout(self.verticalLayout_8)
self.verticalLayout_9 = QtWidgets.QVBoxLayout()
self.verticalLayout_9.setSpacing(0)
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.pointsLineEdit = QtWidgets.QLineEdit(self.groupBox_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pointsLineEdit.sizePolicy().hasHeightForWidth())
self.pointsLineEdit.setSizePolicy(sizePolicy)
self.pointsLineEdit.setMaximumSize(QtCore.QSize(40, 20))
self.pointsLineEdit.setObjectName("pointsLineEdit")
self.verticalLayout_9.addWidget(self.pointsLineEdit)
self.clustersLineEdit = QtWidgets.QLineEdit(self.groupBox_3)
self.clustersLineEdit.setMaximumSize(QtCore.QSize(40, 20))
self.clustersLineEdit.setObjectName("clustersLineEdit")
self.verticalLayout_9.addWidget(self.clustersLineEdit)
self.outliersLineEdit = QtWidgets.QLineEdit(self.groupBox_3)
self.outliersLineEdit.setMaximumSize(QtCore.QSize(40, 20))
self.outliersLineEdit.setObjectName("outliersLineEdit")
self.verticalLayout_9.addWidget(self.outliersLineEdit)
self.horizontalLayout_3.addLayout(self.verticalLayout_9)
self.verticalLayout_10 = QtWidgets.QVBoxLayout()
self.verticalLayout_10.setSpacing(0)
self.verticalLayout_10.setObjectName("verticalLayout_10")
self.clustersLineEdit2 = QtWidgets.QLineEdit(self.groupBox_3)
self.clustersLineEdit2.setMaximumSize(QtCore.QSize(40, 20))
self.clustersLineEdit2.setObjectName("clustersLineEdit2")
self.verticalLayout_10.addWidget(self.clustersLineEdit2)
self.pointsLineEdit2 = QtWidgets.QLineEdit(self.groupBox_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pointsLineEdit2.sizePolicy().hasHeightForWidth())
self.pointsLineEdit2.setSizePolicy(sizePolicy)
self.pointsLineEdit2.setMaximumSize(QtCore.QSize(40, 20))
self.pointsLineEdit2.setObjectName("pointsLineEdit2")
self.verticalLayout_10.addWidget(self.pointsLineEdit2)
self.outliersLineEdit2 = QtWidgets.QLineEdit(self.groupBox_3)
self.outliersLineEdit2.setMaximumSize(QtCore.QSize(40, 20))
self.outliersLineEdit2.setObjectName("outliersLineEdit2")
self.verticalLayout_10.addWidget(self.outliersLineEdit2)
self.horizontalLayout_3.addLayout(self.verticalLayout_10)
self.verticalLayout_7.addWidget(self.groupBox_3)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setContentsMargins(-1, 0, -1, 0)
self.horizontalLayout_6.setSpacing(0)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.preProcessPushButton = QtWidgets.QPushButton(self.centralwidget)
self.preProcessPushButton.setMaximumSize(QtCore.QSize(125, 16777215))
self.preProcessPushButton.setObjectName("preProcessPushButton")
self.horizontalLayout_6.addWidget(self.preProcessPushButton)
self.showClustersPushButton = QtWidgets.QPushButton(self.centralwidget)
self.showClustersPushButton.setMaximumSize(QtCore.QSize(125, 16777215))
self.showClustersPushButton.setObjectName("showClustersPushButton")
self.horizontalLayout_6.addWidget(self.showClustersPushButton)
self.verticalLayout_7.addLayout(self.horizontalLayout_6)
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_7.addItem(spacerItem2)
self.horizontalLayout_5.addLayout(self.verticalLayout_7)
self.verticalLayout_11 = QtWidgets.QVBoxLayout()
self.verticalLayout_11.setContentsMargins(-1, 10, 10, 5)
self.verticalLayout_11.setSpacing(0)
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.scrollArea = QtWidgets.QScrollArea(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.scrollArea.sizePolicy().hasHeightForWidth())
self.scrollArea.setSizePolicy(sizePolicy)
self.scrollArea.setMinimumSize(QtCore.QSize(500, 320))
self.scrollArea.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 530, 318))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_4.setSpacing(0)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout_11.addWidget(self.scrollArea)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.labelOutliersPushButton = QtWidgets.QPushButton(self.centralwidget)
self.labelOutliersPushButton.setMaximumSize(QtCore.QSize(250, 16777215))
self.labelOutliersPushButton.setObjectName("labelOutliersPushButton")
self.horizontalLayout_2.addWidget(self.labelOutliersPushButton)
self.clustersNumberToShowSpinBox = QtWidgets.QSpinBox(self.centralwidget)
self.clustersNumberToShowSpinBox.setMinimumSize(QtCore.QSize(50, 0))
self.clustersNumberToShowSpinBox.setMaximumSize(QtCore.QSize(50, 16777215))
self.clustersNumberToShowSpinBox.setMinimum(1)
self.clustersNumberToShowSpinBox.setProperty("value", 5)
self.clustersNumberToShowSpinBox.setObjectName("clustersNumberToShowSpinBox")
self.horizontalLayout_2.addWidget(self.clustersNumberToShowSpinBox)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem3)
self.mergeClustersPushButton = QtWidgets.QPushButton(self.centralwidget)
self.mergeClustersPushButton.setMaximumSize(QtCore.QSize(250, 16777215))
self.mergeClustersPushButton.setObjectName("mergeClustersPushButton")
self.horizontalLayout_2.addWidget(self.mergeClustersPushButton)
self.verticalLayout_11.addLayout(self.horizontalLayout_2)
spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_11.addItem(spacerItem4)
self.horizontalLayout_5.addLayout(self.verticalLayout_11)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 22))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.groupBox.setTitle(_translate("MainWindow", "Clustering parameters"))
self.label_3.setText(_translate("MainWindow", "Min_pts"))
self.label_2.setText(_translate("MainWindow", "Eps"))
self.radiusAllPictureCheckBox.setText(_translate("MainWindow", "All picture"))
self.label.setText(_translate("MainWindow", "Radius"))
self.groupBox_3.setTitle(_translate("MainWindow", "Clustering info: Original | High contrast"))
self.points_label.setText(_translate("MainWindow", "Points #"))
self.clusters_label.setText(_translate("MainWindow", "Clusters #"))
self.clusters_label_2.setText(_translate("MainWindow", "Outliers #"))
self.preProcessPushButton.setText(_translate("MainWindow", "Pre-process"))
self.showClustersPushButton.setText(_translate("MainWindow", "Show clusters"))
self.labelOutliersPushButton.setText(_translate("MainWindow", "Label Outliers"))
self.mergeClustersPushButton.setText(_translate("MainWindow", "Merge clusters"))
|
# -*- coding: utf-8 -*-
"""
app
~~~~~
This module initializes flask app instance.
Registers blueprints.
"""
from flask import Flask
from config import config
from lib import FlaskSqlSession
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from app.models import Base
from lib.flask_sql_session import dbsession
def create_app(flask_config='development'):
"""Creates and returns app instance."""
app = Flask(__name__)
app.config.from_object(config[flask_config])
print(app.config['ENV'])
    # Blueprint registration
from app.api import api_blueprint
app.register_blueprint(api_blueprint, url_prefix='/api')
    # Create a session factory from the engine and attach it
    # to the app instance as an attribute
engine = create_engine(app.config['DATABASE_URL'])
session_factory = sessionmaker(engine, autocommit=False, autoflush=False)
# proxy dbsession using flask_sql_session
FlaskSqlSession(session_factory, app)
return app
__all__ = ['create_app', 'Base', 'dbsession']
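# --- Usage sketch (illustrative; the config name and host/port below are assumptions) ---
# Normally a separate run script or WSGI entry point would drive create_app();
# this guarded block only runs when the module is executed directly.
if __name__ == '__main__':
    dev_app = create_app('development')
    dev_app.run(host='127.0.0.1', port=5000)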
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.views.generic import DetailView, CreateView, UpdateView
from django.views import generic as views
from petstagram.main.models import PetPhoto
class PetPhotoDetailsView(LoginRequiredMixin, DetailView):
model = PetPhoto
template_name = 'main/photo_details.html'
context_object_name = 'pet_photo'
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
viewed_pet_photos = request.session.get('last_viewed_pet_photo_ids', [])
viewed_pet_photos.insert(0, self.kwargs['pk'])
request.session['last_viewed_pet_photo_ids'] = viewed_pet_photos[:4]
return response
def get_queryset(self):
return super().get_queryset().prefetch_related('tagged_pets')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['is_owner'] = self.object.user == self.request.user
return context
class CreatePetPhotoView(LoginRequiredMixin, CreateView):
model = PetPhoto
template_name = 'main/photo_create.html'
fields = ('photo', 'description', 'tagged_pets')
success_url = reverse_lazy('dashboard')
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
class EditPetPhotoView(UpdateView):
model = PetPhoto
template_name = 'main/photo_edit.html'
fields = ('description',)
def get_success_url(self):
return reverse_lazy('pet photo details', kwargs={'pk': self.object.id})
def like_pet_photo(request, pk):
pet_photo = PetPhoto.objects.get(pk=pk)
pet_photo.likes += 1
pet_photo.save()
return redirect('pet photo details', pk)
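# --- Illustrative URL configuration (an assumption, not taken from the original project) ---
# The views above rely on the URL names 'dashboard' and 'pet photo details';
# a urls.py wiring them together might look roughly like this:
#
#     from django.urls import path
#     from petstagram.main.views import (
#         PetPhotoDetailsView, CreatePetPhotoView, EditPetPhotoView, like_pet_photo,
#     )
#
#     urlpatterns = [
#         path('photo/details/<int:pk>/', PetPhotoDetailsView.as_view(), name='pet photo details'),
#         path('photo/add/', CreatePetPhotoView.as_view(), name='create pet photo'),
#         path('photo/edit/<int:pk>/', EditPetPhotoView.as_view(), name='edit pet photo'),
#         path('photo/like/<int:pk>/', like_pet_photo, name='like pet photo'),
#     ]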
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from django.contrib import auth
from django.contrib.auth import get_user_model
from app.models import User
class LoginUserForm(forms.Form):
    username = forms.CharField(label=u'Username', error_messages={'required': u'Username is required'},
                               widget=forms.TextInput(
                                   attrs={'class': 'form-control', }
                               ))
    password = forms.CharField(label=u'Password', error_messages={'required': u'Password is required'},
                               widget=forms.PasswordInput(
                                   attrs={'class': 'form-control', }
                               ))
def __init__(self, request=None, *args, **kwargs):
self.request = request
self.user_cache = None
super(LoginUserForm, self).__init__(*args, **kwargs)
def clean_password(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = auth.authenticate(username=username, password=password)
            if self.user_cache is None:
                raise forms.ValidationError(u'The username and password do not match')
            elif not self.user_cache.is_active:
                raise forms.ValidationError(u'This account has been disabled')
return self.cleaned_data
def get_user(self):
return self.user_cache
class ChangePasswordForm(forms.Form):
    old_password = forms.CharField(label=u'Old password', error_messages={'required': 'Please enter the old password'},
                                   widget=forms.PasswordInput(attrs={'class': 'form-control'}))
    new_password1 = forms.CharField(label=u'New password', error_messages={'required': 'Please enter a new password'},
                                    widget=forms.PasswordInput(attrs={'class': 'form-control'}))
    new_password2 = forms.CharField(label=u'Repeat new password', error_messages={'required': 'Please re-enter the new password'},
                                    widget=forms.PasswordInput(attrs={'class': 'form-control'}))
def __init__(self, user, *args, **kwargs):
self.user = user
super(ChangePasswordForm, self).__init__(*args, **kwargs)
def clean_old_password(self):
old_password = self.cleaned_data["old_password"]
if not self.user.check_password(old_password):
            raise forms.ValidationError(u'The old password is incorrect')
return old_password
def clean_new_password2(self):
password1 = self.cleaned_data.get('new_password1')
password2 = self.cleaned_data.get('new_password2')
        if password1 and len(password1) < 6:
            raise forms.ValidationError(u'The password must be at least 6 characters long')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(u'The two password entries do not match')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['new_password1'])
if commit:
self.user.save()
return self.user
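# --- Usage sketch (illustrative only; the view and template names are assumptions) ---
# A typical view driving LoginUserForm might look roughly like this:
#
#     from django.contrib import auth
#     from django.shortcuts import render, redirect
#
#     def login_view(request):
#         form = LoginUserForm(request, data=request.POST or None)
#         if request.method == 'POST' and form.is_valid():
#             auth.login(request, form.get_user())
#             return redirect('index')
#         return render(request, 'login.html', {'form': form})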
|
from boto.sqs.connection import SQSConnection
from boto.sqs import SQSRegionInfo
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import PermissionDenied
from django.shortcuts import render_to_response
from django.template import RequestContext
from datetime import datetime, timedelta
def superuser_only(view_func):
"""
Limit a view to superuser only.
"""
def _inner(request, *args, **kwargs):
if not request.user.is_superuser:
raise PermissionDenied
return view_func(request, *args, **kwargs)
return _inner
def parse_attributes(items):
for q, attr in items.iteritems():
q.name = q._name
q.created = datetime.fromtimestamp(int(attr['CreatedTimestamp']))
q.last_modified = datetime.fromtimestamp(int(attr['LastModifiedTimestamp']))
q.visibility_timeout = timedelta(seconds=int(attr['VisibilityTimeout']))
q.message_retention = timedelta(seconds=int(attr['MessageRetentionPeriod']))
q.messages_not_visible = attr['ApproximateNumberOfMessagesNotVisible']
@superuser_only
def dashboard(request):
"""
Graph SQS send statistics over time.
"""
cache_key = 'vhash:django_sqs_stats'
cached_view = cache.get(cache_key)
if cached_view:
return cached_view
region_name = getattr(settings, 'SQS_REGION', 'us-east-1')
endpoint_name = getattr(settings, 'SQS_ENDPOINT', 'queue.amazonaws.com')
sqs_conn = SQSConnection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY,)
if region_name and endpoint_name:
region = SQSRegionInfo(sqs_conn, region_name, endpoint_name)
sqs_conn = SQSConnection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY, region=region)
queues = sqs_conn.get_all_queues()
qas = {}
for queue in queues:
qas[queue] = sqs_conn.get_queue_attributes(queue)
parse_attributes(qas)
extra_context = {
'title': 'SQS Statistics',
'queues': queues,
'access_key': sqs_conn.gs_access_key_id,
}
response = render_to_response(
'django_sqs/queue_stats.html',
extra_context,
context_instance=RequestContext(request))
cache.set(cache_key, response, 60 * 1) # Cache for 1 minute
return response
|
#!/usr/bin/env python
"""
Usage:
splitfiles.py [options] FOLD_SPEC_PREFIX STORIES_DIR OUT_FOLDER
Options:
-h --help Show this screen.
--debug Enable debug routines. [default: False]
"""
import os
import nltk
from docopt import docopt
from typing import Iterable, List, Optional, Tuple, Set, Dict
import hashlib
SENTENCE_START = '<s>'
SENTENCE_END = '</s>'
def load_split(prefix: str)-> Dict[str, Set[str]]:
file_folds = {
'train': set(),
'val': set(),
'test': set()
}
for fold in file_folds:
with open(prefix + fold + '.txt') as f:
for line in f:
line = line.strip()
if len(line)==0:
continue
h = hashlib.sha1()
h.update(line.encode())
file_folds[fold].add(h.hexdigest())
return file_folds
dm_single_close_quote = u'\u2019' # unicode
dm_double_close_quote = u'\u201d'
END_TOKENS = ['.', '!', '?', '...', "'", "`", '"', dm_single_close_quote, dm_double_close_quote, ")"] # acceptable ways to end a sentence
def fix_missing_period(line: str)-> str:
if "@highlight" in line: return line
if line=="": return line
if line[-1] in END_TOKENS: return line
return line + " ."
def read_text_file(text_file: str)-> List[str]:
lines = []
with open(text_file, 'r') as f:
for line in f:
if len(line.strip()) > 0:
lines.append(line.strip())
return lines
def get_art_abs(story_file)-> Tuple[str, str]:
lines = read_text_file(story_file)
# Put periods on the ends of lines that are missing them (this is a problem in the dataset because many image captions don't end in periods; consequently they end up in the body of the article as run-on sentences)
lines = [fix_missing_period(line) for line in lines]
# Separate out article and abstract sentences
article_lines = []
highlights = []
next_is_highlight = False
for idx,line in enumerate(lines):
if line == '':
continue # empty line
elif line.startswith("@highlight"):
next_is_highlight = True
elif next_is_highlight:
highlights.append(line)
else:
article_lines.append(line)
# Make article into a single string
article = ' '.join(article_lines)
# Make abstract into a single string, putting <s> and </s> tags around the sentences
abstract = ' '.join([' '.join(nltk.word_tokenize(sent)) for sent in highlights])
return article, abstract
if __name__ == '__main__':
args = docopt(__doc__)
split_spec = load_split(args['FOLD_SPEC_PREFIX'])
for fold, story_ids in split_spec.items():
fold_out_articles_path = os.path.join(args['OUT_FOLDER'], fold, 'articles')
fold_out_summaries_path = os.path.join(args['OUT_FOLDER'], fold, 'summaries')
os.makedirs(fold_out_articles_path, exist_ok=True)
os.makedirs(fold_out_summaries_path, exist_ok=True)
filelist_path = os.path.join(args['OUT_FOLDER'], fold+'_filelist.txt')
story_files = []
for story_id in story_ids:
story_filepath = os.path.join(args['STORIES_DIR'], story_id + '.story')
assert os.path.exists(story_filepath)
article, abstract = get_art_abs(story_filepath)
article_path = os.path.join(fold_out_articles_path, story_id + '.article')
with open(article_path, 'w') as f:
f.write(article+'\n')
story_files.append(article_path)
abstract_path = os.path.join(fold_out_summaries_path, story_id + '.abstr')
with open(abstract_path, 'w') as f:
f.write(abstract+'\n')
with open(filelist_path, 'w') as f:
f.write('\n'.join(story_files))
|
# -*- coding: utf-8 -*-
list1 = ["a", "b" , "c"]
list2 = [1, 2, 3]
list1.extend(list2)
print(list1)
|
# -*- coding: utf-8 -*-
import json
from sample import constants
from sample.utils.test import CustomAsyncHTTPTestCase
class TestHelloHandler(CustomAsyncHTTPTestCase):
def test_invalid_json(self):
response = self.fetch(
'/hello',
method='POST',
body='{[]: []}'
)
self.assertEqual(400, response.code)
self.assertEqual('invalid JSON', json.loads(response.body.decode()))
def test_hello_world(self):
world = 'world'
response = self.fetch(
'/hello',
method='POST',
body=json.dumps(world)
)
self.assertEqual(200, response.code)
self.assertEqual('{}, {}'.format(constants.HELLO, world), json.loads(response.body.decode()))
|
from nvdb_segment import *
from geometry_basics import calc_way_length
import logging
_log = logging.getLogger("tags")
def merge_tags(seg, src, data_src_name):
way = seg.way
dst = seg.tags
src_date = src.get("FRAN_DATUM", 0)
fixmes = []
for k, v in src.items():
if k in NVDB_GEOMETRY_TAGS or k == "FRAN_DATUM":
# ignore special tags
continue
if not k in dst:
# new value
dst[k] = v
seg.tag_src[k] = (data_src_name, src_date)
continue
ov = dst[k]
if isinstance(ov, list):
match = False
for item in ov:
if item == v:
match = True
break
if match:
continue
elif ov == v:
if seg.tag_src[k][1] < src_date:
seg.tag_src[k] = (data_src_name, src_date)
continue
if k == "fixme":
append_tag_value(dst, k, v)
continue
resolved = False
fixme = False
solution = "resolve solution not specified"
# go through tags that can become lists first
if not resolved:
if data_src_name == "NVDB_DKVagnummer":
if k == "NVDB_vagnummer":
append_tag_value(dst, k, v)
solution = "list"
resolved = True
elif data_src_name in [ "NVDB_DKGatunamn", "NVDB_DKOvrigt_vagnamn", "NVDB_DKKorsning" ]:
if k == "name":
if v is None:
# unusual, but 'None' have been observed
solution = "ignoring 'None'"
else:
# this is normal for roundabouts (names of streets passing through)
append_tag_value(dst, "alt_name", v)
solution = "list"
resolved = True
if not resolved and isinstance(dst[k], list):
fixme = True
resolved = True
fixmes.append("Could not resolve key %s, alt value %s" % (k, v))
if not resolved:
# resolve by date
if seg.tag_src[k][1] != src_date:
if seg.tag_src[k][1] < src_date:
dst[k] = v
seg.tag_src[k] = (data_src_name, src_date)
solution = "date"
resolved = True
if not resolved:
if data_src_name == "NVDB_DKGatutyp":
if k == "NVDB_gatutyp":
# if same date, just keep current, not too important and Gatutyp is a bit messy data
solution = "not deemed important, current value kept"
resolved = True
elif data_src_name == "NVDB_DKHastighetsgrans":
if k in ("maxspeed", "maxspeed:forward", "maxspeed:backward"):
if ov > v:
dst[k] = v # keep smaller value
solution = "kept smaller value"
resolved = True
elif data_src_name in [ "NVDB_DKInskrTranspFarligtGods", "NVDB_DKRekomVagFarligtGods" ]:
# We've seen overlaps of these two layers (Malmö dataset), but as DKRekomVagFarligtGods is more
# specific we trust that layer more
if k == "hazmat":
if ov != "designated" and v == "designated":
dst[k] = v
solution = "kept designated"
resolved = True
elif data_src_name == "NVDB_DKVagbredd":
if k == "width":
if ov > v:
dst[k] = v
solution = "kept smaller value"
resolved = True
elif data_src_name == "NVDB_DKGagata":
if k == "NVDB_gagata_side":
if (ov == "left" and v == "right") or (ov == "right" and v == "left"):
dst[k] = "both"
solution = "merged to 'both'"
resolved = True
elif data_src_name == "NVDB_DKGCM_vagtyp":
if k == "GCMTYP":
gcm_resolve = [
                        (11, 17, 17), # gångbana (footway) 11 => trappa (steps) 17
                        (1, 3, 3),    # cykelbana (cycleway) 1 => cykelpassage (cycle crossing) 3
                        (1, 4, 4),    # cykelbana (cycleway) 1 => övergångsställe (pedestrian crossing) 4
                        (12, 5, 5),   # trottoar (sidewalk) 12 => gatupassage utan märkning (unmarked crossing) 5
                        (12, 4, 4),   # trottoar (sidewalk) 12 => övergångsställe (pedestrian crossing) 4
]
for gcm in gcm_resolve:
                        if (ov == gcm[0] and v == gcm[1]) or (ov == gcm[1] and v == gcm[0]):
dst[k] = gcm[2]
solution = "used GCM resolve table"
resolved = True
break
elif data_src_name == "NVDB_DKFarthinder":
if k == "traffic_calming":
if ov == "yes" and v != "yes":
dst[k] = v
solution = "used more specific"
resolved = True
elif ov == "choker" and v != "choker" and v != "yes":
dst[k] = v
solution = "prefer other value over choker"
resolved = True
elif data_src_name == "VIS_DKP_ficka":
if k == "layby":
if (ov == "left" and v == "right") or (ov == "right" and v == "left"):
dst[k] = "both"
solution = "merged to 'both'"
resolved = True
if not resolved and isinstance(way, list):
dist, _ = calc_way_length(way)
if dist < 1.0:
# short segment, we don't care
solution = "short segment (%g), keeping old value" % dist
resolved = True
if not resolved:
fixme = True
resolved = True
fixmes.append("Could not resolve key %s, alt value %s" % (k, v))
if fixme:
res_str = "Warning: not resolved, added fixme tag"
else:
res_str = "Resolved, using %s (%s)" % (dst[k], solution)
if fixme or solution != "list":
_log.warning("Conflicting value for key '%s' ('%s' and '%s', RLID %s). %s" % (k, v, ov, seg.rlid, res_str))
if dst[k] == v and seg.tag_src[k][1] < src_date:
seg.tag_src[k] = (data_src_name, src_date)
for v in fixmes:
append_fixme_value(dst, v)
def append_fixme_value(tags, fixme_value):
if "fixme" not in tags:
fixme_value = "NVDB import: " + fixme_value
append_tag_value(tags, "fixme", fixme_value)
def append_tag_value(tags, k, v):
if k not in tags:
tags[k] = v
return
current = tags[k]
if isinstance(current, list):
if not v in current:
current.append(v)
elif current != v:
tags[k] = [ current, v ]
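# --- Behaviour sketch for append_tag_value (illustrative values, not real NVDB data) ---
#     tags = {"name": "Storgatan"}
#     append_tag_value(tags, "name", "Storgatan")   # unchanged: {"name": "Storgatan"}
#     append_tag_value(tags, "name", "Kungsgatan")  # -> {"name": ["Storgatan", "Kungsgatan"]}
#     append_tag_value(tags, "name", "Kungsgatan")  # duplicate value, list stays the same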
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A monkey patch to fix auto complete failed when inputs contains unicode words in zsh
See https://github.com/kislyuk/argcomplete/issues/228 for the discussions
"""
import argcomplete
from argcomplete import *
def hacked_call(self, argument_parser, always_complete_options=True, exit_method=os._exit, output_stream=None,
exclude=None, validator=None, print_suppressed=False, append_space=None,
default_completer=FilesCompleter()):
self.__init__(argument_parser, always_complete_options=always_complete_options, exclude=exclude,
validator=validator, print_suppressed=print_suppressed, append_space=append_space,
default_completer=default_completer)
if "_ARGCOMPLETE" not in os.environ:
# not an argument completion invocation
return
global debug_stream
try:
debug_stream = os.fdopen(9, "w")
except:
debug_stream = sys.stderr
if output_stream is None:
try:
output_stream = os.fdopen(8, "wb")
except:
debug("Unable to open fd 8 for writing, quitting")
exit_method(1)
# print("", stream=debug_stream)
# for v in "COMP_CWORD COMP_LINE COMP_POINT COMP_TYPE COMP_KEY _ARGCOMPLETE_COMP_WORDBREAKS COMP_WORDS".split():
# print(v, os.environ[v], stream=debug_stream)
ifs = os.environ.get("_ARGCOMPLETE_IFS", "\013")
if len(ifs) != 1:
debug("Invalid value for IFS, quitting [{v}]".format(v=ifs))
exit_method(1)
comp_line = os.environ["COMP_LINE"]
comp_point = int(os.environ["COMP_POINT"])
# Adjust comp_point for wide chars
if USING_PYTHON2:
comp_point = len(ensure_str(comp_line[:comp_point]))
else:
comp_point = len(ensure_str(ensure_bytes(comp_line)[:comp_point]))
comp_line = ensure_str(comp_line)
cword_prequote, cword_prefix, cword_suffix, comp_words, last_wordbreak_pos = split_line(comp_line, comp_point)
# _ARGCOMPLETE is set by the shell script to tell us where comp_words
# should start, based on what we're completing.
# 1: <script> [args]
# 2: python <script> [args]
# 3: python -m <module> [args]
start = int(os.environ["_ARGCOMPLETE"]) - 1
comp_words = comp_words[start:]
# debug(
# "\nLINE: '{l}'\nPREQUOTE: '{pq}'\nPREFIX: '{p}'".format(l=comp_line, pq=cword_prequote, p=cword_prefix).encode(
# 'utf-8'),
# "\nSUFFIX: '{s}'".format(s=cword_suffix).encode('utf-8'),
# "\nWORDS:", comp_words)
completions = self._get_completions(comp_words, cword_prefix, cword_prequote, last_wordbreak_pos)
debug("\nReturning completions:", completions)
output_stream.write(ifs.join(completions).encode(sys_encoding))
output_stream.flush()
debug_stream.flush()
exit_method(0)
argcomplete.CompletionFinder.__call__ = hacked_call
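# --- Usage sketch (the module name 'argcomplete_patch' is an assumption) ---
# Importing this module before calling argcomplete.autocomplete() is enough to
# apply the fix, because the patch replaces CompletionFinder.__call__ at import time:
#
#     import argparse
#     import argcomplete
#     import argcomplete_patch  # noqa: F401  -- applies the monkey patch
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument('name')
#     argcomplete.autocomplete(parser)
#     args = parser.parse_args()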
|
from unittest import TestCase, TestSuite, TextTestRunner
from cryptoMath.finiteField import FieldElement
class FieldElementTest(TestCase):
def test_ne(self):
a = FieldElement(2, 31)
b = FieldElement(2, 31)
c = FieldElement(15, 31)
self.assertEqual(a, b)
self.assertTrue(a != c)
self.assertFalse(a != b)
def test_add(self):
a = FieldElement(2, 31)
b = FieldElement(15, 31)
self.assertEqual(a + b, FieldElement(17, 31))
a = FieldElement(17, 31)
b = FieldElement(21, 31)
self.assertEqual(a + b, FieldElement(7, 31))
def test_sub(self):
a = FieldElement(29, 31)
b = FieldElement(4, 31)
self.assertEqual(a - b, FieldElement(25, 31))
a = FieldElement(15, 31)
b = FieldElement(30, 31)
self.assertEqual(a - b, FieldElement(16, 31))
def test_mul(self):
a = FieldElement(24, 31)
b = FieldElement(19, 31)
self.assertEqual(a * b, FieldElement(22, 31))
def test_pow(self):
a = FieldElement(17, 31)
self.assertEqual(a**3, FieldElement(15, 31))
a = FieldElement(5, 31)
b = FieldElement(18, 31)
self.assertEqual(a**5 * b, FieldElement(16, 31))
def test_div(self):
a = FieldElement(3, 31)
b = FieldElement(24, 31)
self.assertEqual(a / b, FieldElement(4, 31))
a = FieldElement(17, 31)
self.assertEqual(a**-3, FieldElement(29, 31))
a = FieldElement(4, 31)
b = FieldElement(11, 31)
self.assertEqual(a**-4 * b, FieldElement(13, 31))
def run(test):
suite = TestSuite()
suite.addTest(test)
TextTestRunner().run(suite)
if __name__ == "__main__":
run(FieldElementTest('test_ne'))
run(FieldElementTest('test_add'))
run(FieldElementTest('test_sub'))
run(FieldElementTest('test_mul'))
run(FieldElementTest('test_pow'))
run(FieldElementTest('test_div'))
|
import pytest
from aoc_cqkh42.year_2019 import day_01
@pytest.mark.parametrize(
'data, answer', [('12', 2), ('14', 2), ('1969', 654), ('100756', 33583)]
)
def test__fuel_needed(data, answer):
assert day_01._fuel_needed(data) == answer
@pytest.mark.parametrize(
'data, answer', [('14', 2), ('1969', 966), ('100756', 50346)]
)
def test__total_fuel_needed(data, answer):
assert day_01._total_fuel_needed(data) == answer
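# --- Reference sketch (assumption): day_01 itself is not part of this file. ---
# These helpers are NOT the project's implementation; they are the standard
# AoC 2019 day 1 formulas, consistent with the parametrized cases above.
def _reference_fuel_needed(mass):
    # fuel = floor(mass / 3) - 2
    return int(mass) // 3 - 2

def _reference_total_fuel_needed(mass):
    # keep refuelling the fuel itself until the extra requirement drops to zero
    total, fuel = 0, _reference_fuel_needed(mass)
    while fuel > 0:
        total += fuel
        fuel = _reference_fuel_needed(fuel)
    return total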
|
from collections import defaultdict
from urllib.parse import quote_plus
from sys import platform
from time import sleep
import numpy as np
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from tspy import TSP
from tspy.solvers import TwoOpt_solver
addresses = """
DTU bygning 101, lyngby
Bagsværd Station
Lautrupvang 15, 2750 Ballerup
Nørreport Station
Rævehøjvej 36, 2800 Kongens Lyngby
"""
# ==== DONT TOUCH BELOW ==== #
addresses = [adr for adr in addresses.split("\n") if adr.strip() != ""]
print("Tour:")
for i, adr in enumerate(addresses):
print(f' {i+1}) {adr}')
start_idx = int(input("\nWhich address should the tour start at? ")) - 1 # because humans
assert 0 <= start_idx < len(addresses), 'Oops, too high or low number?'
URL = "https://www.google.dk/maps/dir/{a}/{b}/"
ext = {'windows': 'exe'}.get(platform, platform)
driver = webdriver.Chrome(executable_path=f'./chromedriver.{ext}')
def time_str(string):
""" Super error prone function, deal with it """
# input is eg: "40 min" or "1 t 34 min"
if len(string) > 10:
        print(f'Error parsing time? Value: {string}')
return 60
return eval(string.replace('t', '*60 +').replace('min', '')) # TODO: dont do this
def get_time_between(a, b):
driver.get(URL.format(a=quote_plus(a, safe=","), b=quote_plus(b, safe=",")))
while True:
try:
sleep(1)
offentlig_transport = driver.find_element_by_xpath("//div[@aria-label='Offentlig transport']")
offentlig_transport.click()
break
except WebDriverException: pass
while True:
try:
sleep(1)
trip = driver.find_element_by_class_name("section-directions-trip-description")
return time_str(trip.text.split("\n")[0])
except WebDriverException: pass
graph = defaultdict(dict)
N = len(addresses)
for i, adr in enumerate(addresses):
for j, adr2 in enumerate(addresses):
print(f"\rCreating graph... ({i*N + j+1} of {N*N})", end='')
if i == j:
continue
if adr in graph[adr2] or adr2 in graph[adr]:
continue
assert (adr2 not in graph[adr]) and (adr not in graph[adr2])
tid = get_time_between(adr, adr2)
graph[adr][adr2] = tid
graph[adr2][adr] = tid # <-- this might not be true, but whatever...
driver.get(f'https://xn--sb-lka.org/?tdr={len(addresses)}')
print("\n... Graph created.\n\nFinding fastest route... (you can close the browser)")
# Insert fake node:
start_node = addresses[start_idx]
graph['fake'][start_node] = 1
for node in graph.keys():
if node in ['fake', start_node]:
continue
graph[node]['fake'] = 1
# Create distance matrix:
M = np.ndarray(shape=(N,N), dtype=float)
for i, adr in enumerate(addresses):
for j, adr2 in enumerate(addresses):
if i == j:
M[i,j] = np.inf
else:
M[i,j] = graph[adr][adr2]
tsp = TSP()
tsp.read_mat(M)
print("Solving using 2-opt heuristic for TSP:")
two_opt = TwoOpt_solver(initial_tour='NN', iter_num=100)
two_opt_tour = tsp.get_approx_solution(two_opt)
for idx in range(len(two_opt_tour)-1):
a = addresses[two_opt_tour[idx]]
b = addresses[two_opt_tour[idx+1]]
print(f'From: {a}')
print(f'To..: {b}')
print('Time: {} min'.format(graph[a][b]))
print('')
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Calls a function defined in rpc_client.py, which will forward the request
to the test service.
This command assumes the updater test service is already installed on the
system.
Example usages:
vpython service_proxy.py --function=RunAsSystem \
--args='{"command": "notepad.exe"}'
vpython service_proxy.py --function=AnswerUpcomingUACPrompt \
--args='{"actions": "A", "wait_child": false, "source": "demo"}'
"""
import argparse
import logging
import json
import sys
import rpc_client
def ParseCommandLine():
"""Parse the command line arguments."""
cmd_parser = argparse.ArgumentParser(
description='Updater test service client')
cmd_parser.add_argument(
'--function',
dest='function',
type=str,
help='Name of the function to call, defined in rpc_client.py')
cmd_parser.add_argument(
'--args',
dest='args',
type=json.loads,
help='Arguments to the function, in json format.')
return cmd_parser.parse_args()
def main():
    flags = ParseCommandLine()
if not flags.function:
logging.error('Must specify a function to call.')
sys.exit(-1)
    if not hasattr(rpc_client, flags.function):
        logging.error('Function %s is not defined in module rpc_client.',
                      flags.function)
        sys.exit(-1)
function = getattr(rpc_client, flags.function)
result = function(**flags.args)
logging.error('Function [%s] returned: %s', flags.function, result)
sys.exit(0)
if __name__ == '__main__':
main()
|
from queue import Queue
from typing import Callable, Optional
from blessed import Terminal
from blessed.keyboard import Keystroke
from src.commands import ChangeSection, EndGame
from src.sections.base import GameSection
class Debug(GameSection):
"""A game section for debugging purposes"""
def __init__(self, in_queue: Queue):
super().__init__(in_queue)
self.start_data = None
def handle_start(self, start_data: object) -> bool:
"""Inherit"""
self.start_data = start_data
return True
def run_processing(self, inp: Optional[Keystroke]) -> bool:
"""Inherit"""
if inp is not None:
self.stop()
return False
def run_rendering(self, terminal: Terminal, echo: Callable[[str], None]) -> None:
"""Inherit"""
echo(terminal.clear)
echo(terminal.move_xy(0, 0))
echo(self.start_data)
if isinstance(self.start_data, ChangeSection):
echo('\n')
echo(self.start_data.data)
echo('\n\n')
echo('Press any key to end the game...')
def handle_stop(self) -> object:
"""Inherit"""
return EndGame()
|
"""Module door keys."""
__author__ = 'Joan A. Pinol (japinol)'
from codemaster.config.constants import BM_DOOR_KEYS_FOLDER
from codemaster.models.actors.actor_types import ActorCategoryType, ActorType
from codemaster.models.actors.actors import ActorItem
from codemaster.models.stats import Stats
from codemaster.utils.colors import ColorName
class DoorKey(ActorItem):
"""Represents a door key.
It is not intended to be instantiated.
"""
def __init__(self, x, y, game, door, name=None):
self.door = door
self.file_folder = BM_DOOR_KEYS_FOLDER
self.file_name_key = 'im_door_keys'
self.images_sprite_no = 1
self.category_type = ActorCategoryType.DOOR_KEY
self.stats = Stats()
self.stats.health = self.stats.health_total = 1
self.stats.power = self.stats.power_total = 0
self.stats.strength = self.stats.strength_total = 1
super().__init__(x, y, game, name=name)
def update_when_hit(self):
"""Cannot be hit."""
pass
def use_key_in_door(self, door):
if door is self.door:
if self.color == self.door.color:
self.player.sound_effects and self.player.door_unlock_sound.play()
self.door.is_locked = False
class DoorKeyGreen(DoorKey):
"""Represents a green door key."""
def __init__(self, x, y, game, door, name=None):
self.file_mid_prefix = '01'
self.type = ActorType.DOOR_KEY_GREEN
self.color = ColorName.GREEN.name
self.key_type = 'G'
super().__init__(x, y, game, door, name=name)
class DoorKeyBlue(DoorKey):
"""Represents a blue door key."""
def __init__(self, x, y, game, door, name=None):
self.file_mid_prefix = '02'
self.type = ActorType.DOOR_KEY_BLUE
self.color = ColorName.BLUE.name
self.key_type = 'B'
super().__init__(x, y, game, door, name=name)
class DoorKeyAqua(DoorKey):
"""Represents an aqua door key."""
def __init__(self, x, y, game, door, name=None):
self.file_mid_prefix = '05'
self.type = ActorType.DOOR_KEY_AQUA
self.color = ColorName.AQUA.name
self.key_type = 'A'
super().__init__(x, y, game, door, name=name)
class DoorKeyYellow(DoorKey):
"""Represents a yellow door key."""
def __init__(self, x, y, game, door, name=None):
self.file_mid_prefix = '03'
self.type = ActorType.DOOR_KEY_YELLOW
self.color = ColorName.YELLOW.name
self.key_type = 'Y'
super().__init__(x, y, game, door, name=name)
class DoorKeyRed(DoorKey):
"""Represents a red door key."""
def __init__(self, x, y, game, door, name=None):
self.file_mid_prefix = '04'
self.type = ActorType.DOOR_KEY_RED
self.color = ColorName.RED.name
self.key_type = 'R'
super().__init__(x, y, game, door, name=name)
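# --- Usage sketch (illustrative; `game` and `green_door` come from the host game) ---
#     key = DoorKeyGreen(x=420, y=300, game=game, door=green_door)
#     ...
#     key.use_key_in_door(green_door)  # unlocks the door when key and door colours match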
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
# Author : Marcin Przepiorowski
# Date : August 2021
import logging
import sys
from dxm.lib.DxJDBC.DxJDBC import DxJDBC
from dxm.lib.DxEngine.DxMaskingEngine import DxMaskingEngine
from dxm.lib.DxTools.DxTools import get_objref_by_val_and_attribute
from dxm.lib.DxTools.DxTools import paginator
from dxm.lib.DxLogging import print_error
from dxm.lib.masking_api.api.jdbc_driver_api import JdbcDriverApi
from dxm.lib.masking_api.rest import ApiException
class DxJDBCList(object):
__engine = None
__driverList = {}
__logger = None
@classmethod
def __init__(self):
self.__engine = DxMaskingEngine
self.__logger = logging.getLogger()
self.__logger.debug("creating DxJDBCList object")
if not self.__driverList:
self.LoadDrivers()
@classmethod
def LoadDrivers(self):
"""
Load list of drivers
Return None if OK
"""
self.__api = JdbcDriverApi
self.__apiexc = ApiException
try:
api_instance = self.__api(self.__engine.api_client)
drivers = paginator(
api_instance,
"get_all_jdbc_drivers")
if drivers.response_list:
for c in drivers.response_list:
driver = DxJDBC(self.__engine)
driver.from_driver(c)
self.__driverList[c.jdbc_driver_id] = driver
else:
print_error("No JDBC drivers found")
self.__logger.error("No JDBC drivers found")
except self.__apiexc as e:
print_error("Can't load JDBC drivers %s" % e.body)
return None
@classmethod
def get_by_ref(self, reference):
"""
        Return a driver object by reference
"""
try:
self.__logger.debug("reference %s" % reference)
return self.__driverList[reference]
except KeyError as e:
self.__logger.debug("can't find driver object"
" for reference %s" % reference)
self.__logger.debug(e)
sys.exit(1)
@classmethod
def get_allref(self):
"""
return a list of all references
"""
return self.__driverList.keys()
@classmethod
def get_driver_id_by_name(self, name):
reflist = self.get_driver_id_by_name_worker(name)
# convert list to single value
# as there will be only one element in list
if reflist:
return reflist[0]
else:
return None
@classmethod
def get_all_driver_id_by_name(self, name):
reflist = self.get_driver_id_by_name_worker(name)
return reflist
@classmethod
def get_driver_id_by_name_worker(self, name, check_uniqueness=1):
"""
        :param1 name: name of the driver
        :param2 check_uniqueness: set to None to skip the uniqueness check
        return list of driver references
"""
reflist = get_objref_by_val_and_attribute(name, self, 'driver_name')
if len(reflist) == 0:
self.__logger.error('Driver %s not found' % name)
print_error('Driver %s not found' % name)
return None
if check_uniqueness:
if len(reflist) > 1:
                self.__logger.error('Driver %s is not unique' % name)
                print_error('Driver %s is not unique' % name)
return None
return reflist
@classmethod
def add(self, driver):
"""
        Add a driver to the list and the Engine
        :param driver: driver object to add to the Engine and the list
return None if OK
"""
if (driver.add() == 0):
self.__logger.debug("Adding driver %s to list" % driver)
self.__driverList[driver.jdbc_driver_id] = driver
return None
else:
return 1
@classmethod
def delete(self, driver_ref):
"""
        Delete a driver from the Engine and the list
        :param driver_ref: driver reference to delete from the Engine and the list
return None if OK
"""
driver = self.get_by_ref(driver_ref)
if driver is not None:
if driver.delete() is None:
return None
else:
return 1
else:
print_error("Driver type with id %s not found" % driver_ref)
return 1
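# A minimal usage sketch (assumes a masking engine session has already been
# initialised via DxMaskingEngine; the driver name is hypothetical):
#   drivers = DxJDBCList()
#   ref = drivers.get_driver_id_by_name('my_custom_driver')
#   if ref is not None:
#       driver = drivers.get_by_ref(ref)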
|
import PartFunc as pfunc
import DaemonDownload as daemonDnl
import Function as func
import Search as src
import FileStruct as fs
import Package as pack
import SocketFunc as sfunc
import threading
from threading import *
import TextFunc as tfunc
import Constant as const
import os
import time
###### DOWNLOAD FILE
class DaemonMasterOfDownloads(Thread):
def __init__(self, host, t_host, selectFile, sessionID, listPartOwned, waitingDownload):
Thread.__init__(self)
self.host = host
self.t_host = t_host
self.selectFile = selectFile
self.sessionID = sessionID
self.listPartOwned = listPartOwned
self.waitingDownload = waitingDownload
def run(self):
while start_download(self.host, self.t_host, self.selectFile, self.sessionID, self.listPartOwned, self.waitingDownload):
time.sleep(const.TIME_TO_UPDATE)
# Download function
# >> PEER
def start_download(host, t_host, selectFile, sessionID, listPartOwned, waitingDownload):
md5 = selectFile[1]
fileName = selectFile[2]
lenFile = selectFile[3]
lenPart = selectFile[4]
ricevutoByte = request_memory_of_hitpeer(t_host, sessionID, md5)
if str(ricevutoByte[0:4], "ascii") == pack.CODE_ANSWER_FIND_PART:
nHitPeer = int(ricevutoByte[4:7])
if nHitPeer != 0:
listPart = fs.find_part_from_hitpeer(host, int(ricevutoByte[4:7]), ricevutoByte[7:], listPartOwned, md5, lenFile, lenPart)
if [-1, []] in listPart:
tfunc.error("Errore nei peer inviati del Tracker. Download bloccato.")
del waitingDownload[:]
return False
else:
for part in listPart:
daemonThreadD = daemonDnl.DaemonDownload(host, t_host, sessionID, fileName, md5, part[0], part[1], listPartOwned, lenFile, lenPart)
daemonThreadD.setName("DAEMON DOWNLOAD PART " + str(part[0]) + " di " + str(fileName, "ascii"))
daemonThreadD.setDaemon(True)
daemonThreadD.start()
                # Check whether the file has finished downloading
if check_ended_download(fileName, md5, listPartOwned):
save_and_open_file(fileName)
del waitingDownload[:]
return False
else:
tfunc.error("Non ci sono hitpeer disponibili da cui scaricare il file.")
return True
# >> PEER
def request_memory_of_hitpeer(t_host, sessionID, md5):
s = sfunc.create_socket_client(func.roll_the_dice(t_host[0]), t_host[1]);
if s is None:
        # tfunc.error("Tracker not active.")
return bytes(const.ERROR_PKT, "ascii")
else:
pk = pack.request_hitpeer(sessionID, md5)
s.sendall(pk)
return s.recv(4 * const.LENGTH_PACK)
# >> PEER
def update_own_memory(md5, partN, listPartOwned, value):
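    # listPartOwned[md5][0] holds one character per file part; rebuild the
    # string with the character at index partN replaced by the given value
    # (e.g. marking that part as downloaded).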
listToUpdate = list(listPartOwned[md5][0])
listToUpdate[partN] = value
listPartOwned[md5][0] = "".join(listToUpdate)
#pfunc.part_compl(listPartOwned[md5][0])
# >> PEER
def save_and_open_file(fileN):
fileName = str(fileN, "ascii").strip()
try:
retcode = os.system("open " + const.FILE_COND + fileName)
except:
try:
os.system("start " + const.FILE_COND + fileName)
except:
print("Apertura non riuscita")
# >> PEER
def create_part(ricevutoByte, fileN, partN, lenFile, lenPart):
notExists = False
startPos = int(lenPart) * (partN)
fileName = str(fileN, "ascii").strip()
if os.path.exists(const.FILE_COND + fileName):
fileDnl = open((const.FILE_COND + fileName), 'r+b')
else:
notExists = True
fileDnl = open((const.FILE_COND + fileName),'w+b')
fileDnl.write(b'\x00' * startPos)
fileDnl.seek(startPos, 0)
fileDnl.write(ricevutoByte)
if notExists:
fileDnl.write(b'\x00' * (int(lenFile) - int(startPos) - int(lenPart)))
fileDnl.close()
# >> PEER
def check_ended_download(fileName, md5, listPartOwned):
if len(listPartOwned[md5][0]) == fs.count_one_in_part(listPartOwned[md5][0]):
tfunc.success("Download del file completato.")
return True
else:
return False
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs a report similar to the "Orders report" on the Ad Manager website.
Includes additional attributes and can filter to include just one order.
"""
from datetime import datetime
from datetime import timedelta
import tempfile
# Import appropriate modules from the client library.
from googleads import ad_manager
from googleads import errors
ORDER_ID = 'INSERT_ORDER_ID_HERE'
def main(client, order_id):
# Create statement object to filter for an order.
statement = (ad_manager.StatementBuilder(version='v201902')
.Where('ORDER_ID = :id')
.WithBindVariable('id', long(order_id))
.Limit(None) # No limit or offset for reports
.Offset(None))
# Set the start and end dates of the report to run (past 8 days).
end_date = datetime.now().date()
start_date = end_date - timedelta(days=8)
# Create report job.
report_job = {
'reportQuery': {
'dimensions': ['ORDER_ID', 'ORDER_NAME'],
'dimensionAttributes': ['ORDER_TRAFFICKER', 'ORDER_START_DATE_TIME',
'ORDER_END_DATE_TIME'],
'statement': statement.ToStatement(),
'columns': ['AD_SERVER_IMPRESSIONS', 'AD_SERVER_CLICKS',
'AD_SERVER_CTR', 'AD_SERVER_CPM_AND_CPC_REVENUE',
'AD_SERVER_WITHOUT_CPD_AVERAGE_ECPM'],
'dateRangeType': 'CUSTOM_DATE',
'startDate': start_date,
'endDate': end_date
}
}
# Initialize a DataDownloader.
report_downloader = client.GetDataDownloader(version='v201902')
try:
# Run the report and wait for it to finish.
report_job_id = report_downloader.WaitForReport(report_job)
except errors.AdManagerReportError, e:
print 'Failed to generate report. Error was: %s' % e
# Change to your preferred export format.
export_format = 'CSV_DUMP'
report_file = tempfile.NamedTemporaryFile(suffix='.csv.gz', delete=False)
# Download report data.
report_downloader.DownloadReportToFile(
report_job_id, export_format, report_file)
report_file.close()
# Display results.
print 'Report job with id "%s" downloaded to:\n%s' % (
report_job_id, report_file.name)
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, ORDER_ID)
|
"""Tools for loading student-submitted, maybe-invalid code from source files."""
import importlib.util
import os
from os.path import join as pathjoin
from types import ModuleType
from typing import Any, TypeVar
from dill import Unpickler # type: ignore
from .core import Problem
Output = TypeVar("Output")
class InvalidSubmissionError(BaseException):
"""Something about the submission was invalid."""
class TooManyMatchingSymbols(InvalidSubmissionError):
"""Too many maching symbols were found."""
class NoMatchingSymbol(InvalidSubmissionError, AttributeError):
"""An expected symbol was not found."""
class SubmissionSyntaxError(InvalidSubmissionError, SyntaxError):
"""The submission held an invalid syntax."""
def _load_source_from_path(path: str, name: str = "module") -> Any:
"""Load the python source file found at path, absolute or relative, as a module.
There's a lot of weird stuff going on in this method with type signatures and
poorly-documented code that python uses internally for their `import` statement. I
got this implementation from https://stackoverflow.com/a/67692 and made only small
modifications to it, but I'm not 100% sure I can explain how it works.
"""
spec = importlib.util.spec_from_file_location(name, path)
if spec is None:
# Based on inspection of the source, I'm not certain how this can happen, but my
# type checker insists it can. This seems like the most reasonable error to
# raise.
raise FileNotFoundError
mod = importlib.util.module_from_spec(spec)
try:
spec.loader.exec_module(mod) # type: ignore
except (SyntaxError, NameError) as err:
raise SubmissionSyntaxError from err # group all parse errors
return mod
def _load_attr_from_module(attr: str, module: ModuleType) -> Any:
"""Get a specific symbol from a module."""
try:
return module.__getattribute__(attr)
except AttributeError as err:
raise NoMatchingSymbol from err
def load_symbol_from_path(path: str, symbol: str) -> Any:
"""Load a specific symbol from a source file found at path, absolute or relative."""
mod = _load_source_from_path(path)
return _load_attr_from_module(symbol, mod)
def load_symbol_from_dir(path: str, symbol: str) -> Any:
"""Load a specific symbol from any of the source files in a directory."""
matching_symbols = []
for file in os.listdir(path):
try:
file_path = pathjoin(path, file)
matching_symbols.append(load_symbol_from_path(file_path, symbol))
except (FileNotFoundError, AttributeError, SyntaxError):
continue
if len(matching_symbols) > 1:
raise TooManyMatchingSymbols
if len(matching_symbols) == 0:
raise NoMatchingSymbol
return matching_symbols[0]
class _ProblemUnpickler(Unpickler): # type: ignore
"""A custom unpickler which will always get the `Problem` class from `aga`.
This is a hack-ish thing which is required because dill expects us to unpickle an
    object in the same module it was pickled in, so it can then find the object's type
    and use that for instantiation. We want to be able to unpickle the object in any
    module, and we know that we always have a Problem pickled at `problem.pckl`, so we can
    just assert that its class should be Problem. This is _highly_ unsafe if we are
unsure of the safety of `problem.pckl`, but pickle/dill is not remotely safe anyway
with untrusted data.
This specific solution will break if dill, for some reason, wants to pickle some
*other* class named "Problem". In that case, I think the best solution will be to
look into a custom pickler which changes the module name on that end.
"""
def find_class(self, module: str, name: str) -> Any:
if name == "Problem":
return Problem
return super().find_class(module, name)
def load_problem(root: str, fname: str = "problem.pckl") -> Problem[Output]:
"""Load a problem from the gradescope environment."""
with open(pathjoin(root, fname), "rb") as problem_pickled:
out: Problem[Output] = _ProblemUnpickler(problem_pickled).load()
return out
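# A minimal usage sketch (paths and symbol names are hypothetical):
#   solution = load_symbol_from_path("submission/solution.py", "fibonacci")
#   problem = load_problem("/autograder/source")  # unpickles problem.pckl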
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-21 02:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('questions', '0003_question_values'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='vs',
),
]
|
import shutil
import random
from ngslite import read_genbank, write_genbank
from locus_hunter.template import Settings
from locus_hunter.add_color import ColorDictGenerator, AddColor
from .tools import TestCase, setup_dir, remove_genbank_date_str
class TestColorDictGenerator(TestCase):
def test_set_keys(self):
generator = ColorDictGenerator()
generator.set_keys(non_unique_keys=[1, 2, 2, 2, 3, 5, 5, 5, 5, 1])
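        # set_keys is expected to deduplicate the keys and order them by
        # descending frequency: 5 appears 4 times, 2 three times, 1 twice, 3 once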
self.assertListEqual([5, 2, 1, 3], generator.keys)
def test_main(self):
random.seed(1)
color_dict = ColorDictGenerator().main(
keys=[1, 2, 2, 2, 3, 5, 5, 5, 5, 1])
expected = {5: '#1F77B4', 2: '#FF7F0E', 1: '#2CA02C', 3: '#D62728'}
self.assertDictEqual(expected, color_dict)
class TestAddColor(TestCase):
def setUp(self):
self.indir, self.workdir, self.outdir = setup_dir(__file__)
self.settings = Settings(
workdir=self.workdir,
outdir=self.outdir,
threads=4,
debug=True)
def tearDown(self):
shutil.rmtree(self.workdir)
shutil.rmtree(self.outdir)
def test_main(self):
random.seed(1)
sorted_loci = read_genbank(file=f'{self.indir}/sorted_loci.gbk')
colored_loci = AddColor(settings=self.settings).main(
loci=sorted_loci)
write_genbank(
data=colored_loci,
file=f'{self.outdir}/colored_loci.gbk',
use_locus_text=False)
remove_genbank_date_str(f'{self.outdir}/colored_loci.gbk')
self.assertFileEqual(
file1=f'{self.indir}/colored_loci.gbk',
file2=f'{self.outdir}/colored_loci.gbk')
|
from ..constraint import AbstractConstraint
import sublime
class IsInSvnRepoConstraint(AbstractConstraint):
"""Check whether this file is in a SVN repo."""
def test(self, view: sublime.View) -> bool:
view_info = self.get_view_info(view)
# early return so that we may save some IO operations
if not view_info["file_name"]:
return False
return self.has_sibling(view_info["file_path"], ".svn/")
|
import pytest
from telegrambot.bot import (
get_user_first_name,
get_chat_id,
prepare_welcome_text)
@pytest.fixture
def update_with_expectedkey():
"""
function to return an object that simulates json update
posted to the bot's fall back webhook url
    this data has the expected key and a chat_id of type int
"""
update = {
'update_id': 418763186,
'message': {
'message_id': 59,
'date': 1611506709,
'new_chat_member': {
'id': 1344418577,
'is_bot': False,
'first_name': 'Nyior',
'last_name': 'Clement'
},
'chat': {
'id':1
}
}
}
return update
@pytest.fixture
def update_without_expectedkey():
"""
function to return an object that simulates json
update posted to the bot's fall back webhook url
but without the expected 'new_chat_member' key
    and with a chat_id that is not an int
"""
update = {
'update_id': 418763186,
'message': {
'message_id': 59,
'date': 1611506709,
'chat': {
'id':"nyior"
}
}
}
return update
def test_get_user_first_name(update_with_expectedkey):
"""
Method to test the get_user_first_name function
"""
first_name = get_user_first_name(update_with_expectedkey)
assert first_name.lower() == "nyior"
def test_get_chat_id(update_with_expectedkey):
"""
Method to test the get_chat_id function
"""
assert get_chat_id(update_with_expectedkey) == 1
def test_raises_exception_on_not_int(update_without_expectedkey):
with pytest.raises(TypeError):
get_chat_id(update_without_expectedkey)
def test_prepare_welcome_text(update_with_expectedkey):
"""
function that tests the prepare_welcome_text method
"""
data = prepare_welcome_text(update_with_expectedkey)
assert (
"chat_id" in data.keys()
and "text" in data.keys()
and "parse_mode" in data.keys())
|
"""
This example redirects flows to another server, deriving the target scheme, host, port and path from the incoming request path.
"""
from mitmproxy import http, ctx
def request(flow: http.HTTPFlow) -> None:
# pretty_host takes the "Host" header of the request into account,
# which is useful in transparent mode where we usually only have the IP
# otherwise.
# if flow.request.pretty_host == "10.5.10.169":
if flow.request.pretty_host == "proxy":
request = Request(flow.request.path)
ctx.log.info(request.toString())
flow.request.host = request.host
flow.request.scheme = request.scheme
flow.request.port = request.port
flow.request.path = request.path
if flow.request.path == '/PFBA_AhorrosyCtaCte40/WRBA_AhorrosyCtaCte_validarSeguros':
flow.request.headers["content-type"] = "application/x-www-form-urlencoded"
if flow.request.path == '/PFBA_AhorrosyCtaCte41/WRBA_AhorrosyCtaCte_consultarCuentasCDT':
flow.request.headers["content-type"] = "text/plain"
# if flow.request.path == '/https://192.168.135.28:442/PFBA_Crm31/sca/WSBA_Crm_consultarCondicionesCliente':
# if flow.request.path == '/mock':
# # flow.request.host = "rb-dev-alb-ecs-ext-525169194.us-east-2.elb.amazonaws.com"
# flow.request.host = "localhost"
# flow.request.port = 80
# flow.request.path = '/castlemock/mock/soap/project/7cGqrI/CustomerConditionsInquirySvcPort'
class Request:
def __init__(self, path):
initialPath = path.split('/', 4)
self.scheme = initialPath[1].replace(':', '')
if (self.scheme == 'http'):
self.port = "80"
else:
self.port = "443"
self.host = initialPath[3]
splitHost = initialPath[3].split(':')
if (len(splitHost) > 1):
self.host = splitHost[0]
self.port = splitHost[1]
self.path = '/' + initialPath[4]
def toString(self):
return "Scheme:" + self.scheme +'\n'\
+ "Host:" + self.host +'\n'\
+ "Port:" + self.port +'\n'\
+ "Path:" + self.path +'\n'
# Examples
# print(Request(
# '/https://192.168.135.28:442/PFBA_Crm31/sca/WSBA_Crm_consultarCondicionesCliente').toString())
# print(Request(
# '/https://192.168.135.28/PFBA_Crm31/sca/WSBA_Crm_consultarCondicionesCliente').toString())
# print(Request(
# '/http://192.168.135.28/PFBA_Crm31/sca/WSBA_Crm_consultarCondicionesCliente').toString())
|
"""Module contains email handling."""
import email.message
import smtplib
class EmailError(Exception):
"""Error during email handling occurred."""
pass
def send_email(conf, url):
"""Send the email reminder."""
try:
with smtplib.SMTP(url) as s:
msg = email.message.EmailMessage()
msg['From'] = conf.from_address
msg['To'] = conf.to_addresses
msg['Subject'] = conf.subject
msg.set_content('{}\n\nLast reminder sent {}.'.format(conf.message, conf.last_fed))
s.send_message(msg)
except Exception as e:
raise EmailError("Error sending email.") from e
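# A minimal usage sketch (the conf object only needs the attributes used above;
# the values here are hypothetical):
#   from types import SimpleNamespace
#   conf = SimpleNamespace(from_address="bot@example.com", to_addresses="me@example.com",
#                          subject="Reminder", message="Feed the cat", last_fed="2021-01-01")
#   send_email(conf, "localhost")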
|
import itertools
import os
import math
import random
PROCESS_PER_SCRIPT = 1
def template_file(texts):
text = "".join(texts)
out = f"""#!/bin/bash
#$ -cwd
#$ -e ./logs/
#$ -o ./logs/
#$ -l vf=4G
source /data/nlp/lunar_pilot_env/bin/activate
echo 'Starting job'
mkdir -p data
mkdir -p output
mkdir -p ./logs/
mkdir -p ./output/
{text}
wait
"""
return out
def template_exp_option(
experiment_id,
mode,
seed,
perceptual_dimensions,
vocab_size,
n_distractors,
n_epoch,
max_len,
train_samples,
validation_samples,
test_samples,
sender_lr,
receiver_lr,
sender_entropy_coeff,
receiver_entropy_coeff,
batch_size,
):
out = f"""python -m egg.zoo.objects_game.train \
--experiment_id {experiment_id} \
--perceptual_dimensions {perceptual_dimensions} \
--vocab_size {vocab_size} \
--n_distractors {n_distractors} \
--n_epoch {n_epoch} \
--max_len {max_len} \
--sender_lr {sender_lr} \
--receiver_lr {receiver_lr} \
--batch_size {batch_size} \
--sender_entropy_coeff {sender_entropy_coeff} \
--receiver_entropy_coeff {receiver_entropy_coeff} \
--random_seed {seed} \
--data_seed {seed} \
--train_samples {train_samples} \
--validation_samples {validation_samples} \
--test_samples {test_samples} \
--evaluate \
--dump_msg_folder '../messages' \
--shuffle_train_data \
--mode {mode} &
"""
return out
def main():
experiment_name = "rf-50-switch"
options = {
"perceptual_dimensions": ["[10,10]"],
"vocab_size": [20],
"n_distractors": [1],
"n_epoch": [5000],
"max_len": [2],
"train_samples": [80],
"validation_samples": [10],
"test_samples": [10],
"sender_lr": [0.01, 0.00001,0.001, 0.0001, 0.0005], # [0.001, 0.0001, 0.0005],
"receiver_lr": [0.01, 0.00001,0.001, 0.0001, 0.0005],
"sender_entropy_coeff": [0, 0.001, 0.0001,],
"receiver_entropy_coeff": [0, 0.001, 0.0001,],
"batch_size": [50],
}
options = list(itertools.product(*options.values()))
samples = random.sample(options, 100)
seeds = [0]
modes = ["rf", "rf-deterministic",]
exp_templates = []
for mode in modes:
for seed in seeds:
for option in samples:
option_str = "-".join([str(p) for p in option])
experiment_id = f"{experiment_name}-{mode}-{seed}-{option_str}"
template = template_exp_option(experiment_id, mode, seed, *option)
exp_templates.append(template)
exp_files = []
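    # Split the generated experiment commands into job scripts,
    # PROCESS_PER_SCRIPT commands per script file.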
for start_idx in range(0, len(exp_templates), PROCESS_PER_SCRIPT):
batch = exp_templates[start_idx : start_idx + PROCESS_PER_SCRIPT]
file = template_file(batch)
exp_files.append(file)
if not os.path.exists("./jobs"):
os.makedirs("./jobs")
for i, file in enumerate(exp_files):
with open(f"./jobs/{experiment_name}_exp_{i}.sh", "w") as f:
f.write(file)
all_exp_file = "\n".join(
[f"qsub ./jobs/{experiment_name}_exp_{i}.sh".format(i) for i in range(len(exp_files))]
)
with open(f"./jobs/{experiment_name}_all_exp.sh", "w") as f:
f.write(all_exp_file)
if __name__ == "__main__":
main()
|
import pymongo
import os
jsonfolder = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../data")
def getMongoDb():
client = pymongo.MongoClient('localhost', 27017)
return client.md
|
READY = 'READY'
# Channels
CHANNEL_CREATE = 'CHANNEL_CREATE'
CHANNEL_UPDATE = 'CHANNEL_UPDATE'
CHANNEL_DELETE = 'CHANNEL_DELETE'
CHANNEL_PINS_UPDATE = 'CHANNEL_PINS_UPDATE'
# Guild
GUILD_CREATE = 'GUILD_CREATE'
GUILD_UPDATE = 'GUILD_UPDATE'
GUILD_DELETE = 'GUILD_DELETE'
GUILD_BAN_ADD = 'GUILD_BAN_ADD'
GUILD_BAN_REMOVE = 'GUILD_BAN_REMOVE'
GUILD_EMOJIS_UPDATE = 'GUILD_EMOJIS_UPDATE'
GUILD_INTEGRATIONS_UPDATE = 'GUILD_INTEGRATIONS_UPDATE'
GUILD_MEMBER_ADD = 'GUILD_MEMBER_ADD'
GUILD_MEMBER_REMOVE = 'GUILD_MEMBER_REMOVE'
GUILD_MEMBER_UPDATE = 'GUILD_MEMBER_UPDATE'
GUILD_ROLE_CREATE = 'GUILD_ROLE_CREATE'
GUILD_ROLE_UPDATE = 'GUILD_ROLE_UPDATE'
GUILD_ROLE_DELETE = 'GUILD_ROLE_DELETE'
# Messages
MESSAGE_CREATE = 'MESSAGE_CREATE'
MESSAGE_UPDATE = 'MESSAGE_UPDATE'
MESSAGE_DELETE = 'MESSAGE_DELETE'
MESSAGE_DELETE_BULK = 'MESSAGE_DELETE_BULK'
MESSAGE_REACTION_ADD = 'MESSAGE_REACTION_ADD'
MESSAGE_REACTION_REMOVE = 'MESSAGE_REACTION_REMOVE'
MESSAGE_REACTION_REMOVE_ALL = 'MESSAGE_REACTION_REMOVE_ALL'
# Presence
PRESENCE_UPDATE = 'PRESENCE_UPDATE'
TYPING_START = 'TYPING_START'
USER_UPDATE = 'USER_UPDATE'
# Voice
VOICE_STATE_UPDATE = 'VOICE_STATE_UPDATE'
VOICE_SERVER_UPDATE = 'VOICE_SERVER_UPDATE'
# Webhooks
WEBHOOKS_UPDATE = 'WEBHOOKS_UPDATE'
__all__ = ['READY',
'CHANNEL_CREATE', 'CHANNEL_UPDATE', 'CHANNEL_DELETE', 'CHANNEL_PINS_UPDATE',
'GUILD_CREATE', 'GUILD_UPDATE', 'GUILD_DELETE', 'GUILD_BAN_ADD', 'GUILD_BAN_REMOVE',
'GUILD_EMOJIS_UPDATE', 'GUILD_INTEGRATIONS_UPDATE',
'GUILD_MEMBER_ADD', 'GUILD_MEMBER_REMOVE', 'GUILD_MEMBER_UPDATE',
'GUILD_ROLE_CREATE', 'GUILD_ROLE_UPDATE', 'GUILD_ROLE_DELETE',
'MESSAGE_CREATE', 'MESSAGE_UPDATE', 'MESSAGE_DELETE', 'MESSAGE_DELETE_BULK',
'MESSAGE_REACTION_ADD', 'MESSAGE_REACTION_REMOVE', 'MESSAGE_REACTION_REMOVE_ALL',
'PRESENCE_UPDATE', 'TYPING_START', 'USER_UPDATE',
'VOICE_STATE_UPDATE', 'VOICE_SERVER_UPDATE',
'WEBHOOKS_UPDATE'
]
|
import sys, os
import numpy as np
import matplotlib.pyplot as plt
from sympy import Symbol
from sympy.solvers import solve
from scipy.optimize import linprog
x1 = np.linspace(-1, 20, 100)
f1 = lambda x1: 12.0 - 6.0 * x1
f2 = lambda x1: 7.0 - 1.5 * x1
x = Symbol("x")
y1, = solve(f1(x) - f2(x))
y2, = solve(f2(x))
figure = plt.figure()
plt.plot([0] * len(x1), x1, "--k", label="$x_1 = 0$")
plt.plot(x1, [0] * len(x1), "--k", label="$x_2 = 0$")
plt.plot(x1, f1(x1), "--r", label="$30 \\cdot x_1 + 5 \\cdot x_2 = 60$")
plt.plot(x1, f2(x1), "--b", label="$15 \\cdot x_1 + 10 \\cdot x_2 = 70$")
for cost in sorted([0 + f1(0), y1 + f1(y1), y2 + f2(y2)], reverse=True):
cost_f = lambda x1: cost - x1
plt.plot(x1, cost_f(x1), "-c", label="$x_1 + x_2 = {:05.2f}$".format(round(cost, 2)))
plt.plot([0, y2, y1], [f1(0), f2(y2), f1(y1)], "xk", markersize=10)
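# linprog minimizes c @ x subject to A_ub @ x <= b_ub, so the ">=" constraints
# (30*x1 + 5*x2 >= 60, 15*x1 + 10*x2 >= 70, x1 >= 0, x2 >= 0) are encoded below
# with negated coefficients.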
A = np.array([[-30, -5], [-15, -10], [0, -1], [-1, 0]])
B = np.array([-60, -70, 0, 0])
C = np.array([1, 1])
result = linprog(C, A_ub=A, b_ub=B, bounds=(0, None))
solution = (result.x[0], result.x[1])  # optimal vertex returned by linprog
plt.plot(solution[0], solution[1], ".c", markersize=10)
plt.ylabel("$x_2$", color="#1C2833")
plt.xlabel("$x_1$", color="#1C2833")
plt.legend(loc="upper right")
plt.grid()
plt.xlim([-1, 8])
plt.ylim([-1, 20])
plt.show()
figure.savefig(os.path.splitext(sys.argv[0])[0] + "_figure.eps", format="eps")
|
class Solution:
def reverse(self, x: int) -> int:
        if x > 0:
            rev = int(str(x)[::-1])
        elif x < 0:
            rev = -1 * int(str(x * -1)[::-1])
        else:
            rev = 0
        upper_bound = 2**31 - 1
        lower_bound = -2**31
        # 2**31 - 1 itself is a valid 32-bit value, so compare against the bounds directly
        if rev < lower_bound or rev > upper_bound:
            return 0
return rev
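# Worked examples:
#   Solution().reverse(123)        -> 321
#   Solution().reverse(-120)       -> -21
#   Solution().reverse(1534236469) -> 0   (9646324351 overflows the 32-bit range)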
|
#! /usr/bin/env python
import os, sys, glob
from os.path import join, isdir, basename
from optparse import OptionParser
if __name__=='__main__':
if len(sys.argv) < 3:
print 'usage: %s <patterns separated by file seps> <dir paths separated by file seps>' \
% basename(sys.argv[0])
sys.exit(1)
patterns = sys.argv[1].split(os.pathsep)
dirs = sys.argv[2].split(os.pathsep)
if not patterns:
print 'No patterns specified'
sys.exit(1)
if not dirs:
print 'No directories specified'
sys.exit(1)
for dirpath in dirs:
if not os.path.isdir(dirpath):
print '%s is not an existing directory' % dirpath
sys.exit(1)
matchingFiles = []
for dirpath in dirs:
for root, dirs, files in os.walk(dirpath):
for pat in patterns:
dirpattern = join(root, pat)
matchingFileInDir = glob.glob(dirpattern)
matchingFiles.extend(matchingFileInDir)
for f in matchingFiles: print f,
|
import hashlib
from io import StringIO
from functools import lru_cache
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from unidecode import unidecode
from .Cache import cachedRequest
from .Exceptions import UserException
def getPdfTxt(fname):
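    # Returns the text of the PDF at fname as a list of per-page strings
    # (pages are separated by form feeds in _getPdf); results are cached
    # under the file's md5 checksum.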
return _getPdf(fname, cache_key=md5sum(fname))
@cachedRequest("PDF")
def _getPdf(fname, cache_key=None):
rsrcmgr = PDFResourceManager()
retstr = StringIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
fp = open(fname, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password = ""
maxpages = 0
caching = True
pagenos=set()
for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password,caching=caching, check_extractable=True):
interpreter.process_page(page)
print('\f', file=retstr)
text = retstr.getvalue()
fp.close()
device.close()
retstr.close()
return [unidecode(x) for x in text.split("\f") if x.strip()]
@lru_cache(maxsize=32)
def md5sum(fname):
try:
return hashlib.md5(open(fname,"rb").read()).hexdigest()
except FileNotFoundError:
raise UserException("File {} not found".format(fname))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import dataset_utils as du
from skmultilearn.adapt import MLkNN
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import os
import sys
MAX_NB_WORDS = 20000
def tokenize_data(X):
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(X)
return tokenizer
def get_cado_predictions():
data_path = '../../datasets/cado/train.csv'
test_path = '../../datasets/cado/test.csv'
data = du.load_data(data_path)
test = du.load_data(test_path)
text_index = 6
label_start_index = 7
X = [d[text_index] for d in data]
labels = [d[label_start_index:label_start_index+12] for d in data ]
X_test = [d[text_index] for d in test]
labels_test = [d[label_start_index:label_start_index+12] for d in test]
Y = np.array(labels, dtype='int')
y_test = np.array(labels_test, dtype='int')
#Y = np.array(binary_labels, dtype='int')
test_index = len(X)
X = X + X_test
Y = np.vstack([Y , y_test])
tokenizer = tokenize_data(X)
word_index = tokenizer.word_index
sequences = tokenizer.texts_to_sequences(X)
X = pad_sequences(sequences, maxlen=700,
padding="post", truncating="post", value=0)
num_words = min(MAX_NB_WORDS, len(word_index)+1)
embedding_matrix = np.zeros((num_words, 1))
for word, i in word_index.items():
if i >= MAX_NB_WORDS:
continue
embedding_matrix[i] = 1
X_train = X[0:test_index , :]
Y_train = Y[0:test_index , :]
x_test = X[test_index:len(X), :]
y_test = Y[test_index:len(Y), :]
classifier = MLkNN()
classifier.fit(X_train,Y_train)
predictions = classifier.predict(x_test)
scores = classifier.predict_proba(x_test)
y_pred= predictions.toarray()
y_score= scores.toarray()
return y_pred, y_score
if __name__ == "__main__":
p, pr = get_cado_predictions()
|
"""
THIS IS A MORE FEATUREFUL CUSTOM ALGORITHM to provide a skeleton to develop your
own custom algorithms. The algorithm itself, although viable, is not
recommended for production or general use; it is simply a toy algorithm here to
demonstrate the structure of a more complex custom algorithm that has
``algorithm_parameters`` passed and can also log if enabled.
It is documented via comments #
"""
# REQUIRED Skyline imports. All custom algorithms MUST have the following two
# imports. These are required for exception handling and to record algorithm
# errors regardless of debug_logging setting for the custom_algorithm
import traceback
from custom_algorithms import record_algorithm_error
import logging
# Import ALL modules that the custom algorithm requires. Remember that if a
# requirement is not one that is provided by the Skyline requirements.txt you
# must ensure it is installed in the Skyline virtualenv
import numpy as np
# To test max_execution_time import sleep
# from time import sleep
# Define your simple algorithm.
# The name of the function MUST be the same as the name declared in
# settings.CUSTOM_ALGORITHMS.
# It MUST have 3 parameters:
# current_skyline_app, timeseries, algorithm_parameters
# See https://earthgecko-skyline.readthedocs.io/en/latest/algorithms/custom-algorithms.html
# for a full explanation about each.
# ALWAYS WRAP YOUR ALGORITHM IN try and except
def last_same_hours(current_skyline_app, parent_pid, timeseries, algorithm_parameters):
"""
The last_same_hours algorithm determines the data points for the same hour
and minute as the current timestamp from the last x days and calculates the
mean of those values and determines whether the current data point is within
3 standard deviations of the mean.
:param current_skyline_app: the Skyline app executing the algorithm. This
will be passed to the algorithm by Skyline. This is **required** for
error handling and logging. You do not have to worry about handling the
argument in the scope of the custom algorithm itself, but the algorithm
        must accept it as the first argument.
:param parent_pid: the parent pid which is executing the algorithm, this is
**required** for error handling and logging. You do not have to worry
about handling this argument in the scope of algorithm, but the
algorithm must accept it as the second argument.
:param timeseries: the time series as a list e.g. ``[[1578916800.0, 29.0],
[1578920400.0, 55.0], ... [1580353200.0, 55.0]]``
:param algorithm_parameters: a dictionary of any parameters and their
arguments you wish to pass to the algorithm.
:type current_skyline_app: str
:type parent_pid: int
:type timeseries: list
:type algorithm_parameters: dict
    :return: True, False or None
:rtype: boolean
"""
# You MUST define the algorithm_name
algorithm_name = 'last_same_hours'
# Define the default state of None and None, anomalous does not default to
# False as that is not correct, False is only correct if the algorithm
# determines the data point is not anomalous. The same is true for the
# anomalyScore.
anomalous = None
anomalyScore = None
current_logger = None
# If you wanted to log, you can but this should only be done during
# testing and development
def get_log(current_skyline_app):
current_skyline_app_logger = current_skyline_app + 'Log'
current_logger = logging.getLogger(current_skyline_app_logger)
return current_logger
    # Use the algorithm_parameters to determine whether debug_logging is enabled
debug_logging = None
try:
debug_logging = algorithm_parameters['debug_logging']
except:
debug_logging = False
if debug_logging:
try:
current_logger = get_log(current_skyline_app)
current_logger.debug('debug :: %s :: debug_logging enabled with algorithm_parameters - %s' % (
algorithm_name, str(algorithm_parameters)))
except:
            # This except pattern MUST be used in ALL custom algorithms to
            # facilitate the traceback from any errors. We want the algorithm to
            # run super fast and without spamming the log with lots of errors.
# But we do not want the function returning and not reporting
# anything to the log, so the pythonic except is used to "sample" any
# algorithm errors to a tmp file and report once per run rather than
# spewing tons of errors into the log e.g. analyzer.log
record_algorithm_error(current_skyline_app, parent_pid, algorithm_name, traceback.format_exc())
# Return None and None as the algorithm could not determine True or False
return (None, None)
# Use the algorithm_parameters to determine the sample_period
try:
sample_period = algorithm_parameters['sample_period']
if debug_logging:
current_logger.debug('debug :: %s :: sample_period - %s' % (
algorithm_name, str(sample_period)))
except:
        # This except pattern MUST be used in ALL custom algorithms to
        # facilitate the traceback from any errors. We want the algorithm to
        # run super fast and without spamming the log with lots of errors.
# But we do not want the function returning and not reporting
# anything to the log, so the pythonic except is used to "sample" any
# algorithm errors to a tmp file and report once per run rather than
# spewing tons of errors into the log e.g. analyzer.log
record_algorithm_error(current_skyline_app, parent_pid, algorithm_name, traceback.format_exc())
# Return None and None as the algorithm could not determine True or False
return (None, None)
# To test max_execution_time enable a sleep
# sleep(1)
# ALWAYS WRAP YOUR ALGORITHM IN try and the BELOW except
try:
sorted_timeseries = sorted(timeseries, key=lambda x: x[0])
if debug_logging:
current_logger.debug('debug :: %s :: sorted_timeseries of length - %s' % (
algorithm_name, str(len(sorted_timeseries))))
# In compute terms, think lite, remember there could be multiple
# processes running the algorithm, try keeping its footprint as small as
# possible
try:
del timeseries
except:
pass
# Test error handling
# make_an_error = 2 * UNDEFINED_VARIABLE
# Think about testing the data to ensure it meets any requirements
try:
start_timestamp = int(sorted_timeseries[0][0])
end_timestamp = int(sorted_timeseries[-1][0])
# If the time series does not have 3 days of data it does not have
# sufficient data to sample.
if (end_timestamp - start_timestamp) < 259200:
return (anomalous, anomalyScore)
except:
sorted_timeseries = []
if not sorted_timeseries:
return (anomalous, anomalyScore)
reversed_timeseries = sorted_timeseries[::-1]
try:
del sorted_timeseries
except:
pass
datapoint = reversed_timeseries[0][1]
for timestamp, value in reversed_timeseries:
if int(timestamp) < end_timestamp:
break
oldest_timestamp_in_window = int(timestamp) - (int(sample_period))
if int(timestamp) < oldest_timestamp_in_window:
continue
same_hour_data_points = []
last_same_hour = int(timestamp) - 86400
for sh_ts, sh_val in reversed_timeseries:
if int(sh_ts) < oldest_timestamp_in_window:
break
if int(sh_ts) > last_same_hour:
continue
if int(sh_ts) < last_same_hour:
continue
if int(sh_ts) == last_same_hour:
same_hour_data_points.append(sh_val)
last_same_hour = int(sh_ts) - 86400
continue
if len(same_hour_data_points) > 1:
mean_of_previous_hours = np.mean(same_hour_data_points)
stdDev = np.std(same_hour_data_points)
upper = (mean_of_previous_hours) + (3 * stdDev)
lower = (mean_of_previous_hours) - (3 * stdDev)
if debug_logging:
current_logger.debug('debug :: %s :: data point - %s, mean - %s, upper - %s, lower - %s, same_hour_data_points - %s' % (
algorithm_name, str(datapoint), str(mean_of_previous_hours),
str(upper), str(lower), str(same_hour_data_points)))
if value > upper:
anomalous = True
anomalyScore = 1.0
if value < lower:
anomalous = True
anomalyScore = 1.0
if not anomalous:
anomalous = False
anomalyScore = 0.0
if debug_logging:
current_logger.debug('debug :: %s :: anomalous - %s, anomalyScore - %s' % (
algorithm_name, str(anomalous), str(anomalyScore)))
try:
del reversed_timeseries
except:
pass
return (anomalous, anomalyScore)
except StopIteration:
        # This except pattern MUST be used in ALL custom algorithms to
        # facilitate the traceback from any errors. We want the algorithm to
        # run super fast and without spamming the log with lots of errors.
# But we do not want the function returning and not reporting
# anything to the log, so the pythonic except is used to "sample" any
# algorithm errors to a tmp file and report once per run rather than
# spewing tons of errors into the log e.g. analyzer.log
return (None, None)
except:
record_algorithm_error(current_skyline_app, parent_pid, algorithm_name, traceback.format_exc())
# Return None and None as the algorithm could not determine True or False
return (None, None)
return (anomalous, anomalyScore)
|
# Ctrl+K+C to comment a line, Ctrl+K+U to uncomment a line
# input function always returns strings
# To get current date and time we need to use the datetime library
from datetime import datetime, timedelta
# The now function returns a datetime object
Today = datetime.now()
print("Today is " + str(Today))
one_day = timedelta(days=1)
yesterday = Today - one_day
print("Yesterday was " + str(yesterday))
current_date = datetime.now()
print("Today is " + str(current_date))
one_week = timedelta(weeks=1)
last_week = Today - one_week
print("Last week was " + str(last_week))
print("Day " + str(Today.day))
print("Month " + str(Today.month))
print("Year " + str(Today.year))
print("Hour " + str(Today.hour))
print("Minute " + str(Today.minute))
print("Second " + str(Today.second))
UserBirthday = input("When is your birthday? (dd/mm/yyyy) ")
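# strptime parses the string according to the format codes, e.g. "31/01/2000" -> datetime(2000, 1, 31, 0, 0)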
birthday_date = datetime.strptime(UserBirthday, '%d/%m/%Y')
print("Birthday " = str(birthday_date))
|
#!/usr/bin/python
"""
*Very* simple tool to pretty-print JSON data.
Its primary use is to pipe in the output of ``curl``. For example::
curl -H "Accept: application/json" -X GET http://url/ | jsonf
or, with headers::
curl -i -H "Accept: application/json" -X GET http://url/ | jsonf
Handling headers is *very* curl specific and not tested with any other source.
Given that curl's output looks like a standard HTTP response, it should work
with other tools too. YMMV.
"""
from sys import stdin, stderr
import json
try:
from pygments import highlight
    from pygments.lexers import JsonLexer
from pygments.formatters import TerminalFormatter
PYGMENTS_AVAILABLE = True
except ImportError:
PYGMENTS_AVAILABLE = False
def format_json(data):
return json.dumps(json.loads(data), sort_keys=True, indent=4)
def main():
headers = []
content = []
active_list = headers
headers_done = False
for line in stdin:
if not headers_done and not line.strip():
active_list = content
continue
active_list.append(line)
if not content:
# no headers found, so the content had been loaded in the wrong list.
# Let's exchange them.
content, headers = headers, content
print ''.join(headers)
output = format_json(''.join(content))
if PYGMENTS_AVAILABLE:
        print highlight(output, JsonLexer(), TerminalFormatter())
else:
print output
print >>stderr, ("NOTE: If you have the python package "
"`pygments` available for import, you'll get nice "
"syntax highlighting ^_^")
if __name__ == '__main__':
main()
|
from scheme.exceptions import *
from scheme.field import *
__all__ = ('Boolean',)
class Boolean(Field):
"""A field for boolean values."""
basetype = 'boolean'
equivalent = bool
errors = [
FieldError('invalid', 'invalid value', '%(field)s must be a boolean value'),
]
def _validate_value(self, value, ancestry):
if not isinstance(value, bool):
raise InvalidTypeError(identity=ancestry, field=self, value=value).construct('invalid')
|
"""
This example demonstrates SQL Schema generation for each DB type supported.
"""
from tortoise import fields
from tortoise.fields import SET_NULL
from tortoise.models import Model
class Tournament(Model):
tid = fields.SmallIntField(pk=True)
name = fields.CharField(max_length=100, description="Tournament name", index=True)
created = fields.DatetimeField(auto_now_add=True, description="Created */'`/* datetime")
class Meta:
table_description = "What Tournaments */'`/* we have"
class Event(Model):
id = fields.BigIntField(pk=True, description="Event ID")
name = fields.TextField()
tournament: fields.ForeignKeyRelation[Tournament] = fields.ForeignKeyField(
"models.Tournament", related_name="events", description="FK to tournament"
)
participants: fields.ManyToManyRelation["Team"] = fields.ManyToManyField(
"models.Team",
related_name="events",
through="teamevents",
description="How participants relate",
on_delete=SET_NULL,
)
modified = fields.DatetimeField(auto_now=True)
prize = fields.DecimalField(max_digits=10, decimal_places=2, null=True)
token = fields.CharField(max_length=100, description="Unique token", unique=True)
key = fields.CharField(max_length=100)
class Meta:
table_description = "This table contains a list of all the events"
unique_together = [("name", "prize"), ["tournament", "key"]]
class TeamEvent(Model):
team: fields.ForeignKeyRelation["Team"] = fields.ForeignKeyField(
"models.Team", related_name="teams"
)
event: fields.ForeignKeyRelation[Event] = fields.ForeignKeyField(
"models.Event", related_name="events"
)
score = fields.IntField()
class Meta:
table = "teamevents"
table_description = "How participants relate"
unique_together = ("team", "event")
class Team(Model):
name = fields.CharField(max_length=50, pk=True, description="The TEAM name (and PK)")
key = fields.IntField()
manager: fields.ForeignKeyRelation["Team"] = fields.ForeignKeyField(
"models.Team", related_name="team_members", null=True
)
talks_to: fields.ManyToManyRelation["Team"] = fields.ManyToManyField(
"models.Team", related_name="gets_talked_to"
)
class Meta:
table_description = "The TEAMS!"
indexes = [("manager", "key"), ["manager_id", "name"]]
|
from arrays.max_consecutive_ones import max_consecutive_ones
def test_max_consecutive_ones():
assert max_consecutive_ones([1, 1]) == 2
assert max_consecutive_ones([1, 1, 0, 1, 1, 1]) == 3
assert max_consecutive_ones([1, 1, 0, 0, 1, 1]) == 2
assert max_consecutive_ones([0, 1, 1, 1, 1, 0, 1, 1, 1, 0]) == 4
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# OpenStack Monitoring
# Copyright (C) 2015 Tobias Urdin
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from distutils.core import setup
from distutils.core import Command
from unittest import TextTestRunner, TestLoader
from subprocess import call
class TestCommand(Command):
description = "run test"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
status = self._run_tests()
sys.exit(status)
def _run_tests(self):
print "hello world"
class Pep8Command(Command):
description = "run pep8"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
status = self._run_tests()
sys.exit(status)
def _run_tests(self):
try:
import pep8
pep8
except ImportError:
print('Missing "pep8" library. You can install it using pip:'
'pip install pep8')
sys.exit(1)
cwd = os.getcwd()
retcode = call(('pep8 %s/openstack_monitoring/ %s/test/' %
(cwd, cwd)).split(' '))
sys.exit(retcode)
class CoverageCommand(Command):
description = "run coverage"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
import coverage
except ImportError:
print('Missing "coverage" library. You can install it using pip:'
'pip install coverage')
sys.exit(1)
cover = coverage.coverage(config_file='.coveragerc')
cover.start()
tc = TestCommand(self.distribution)
tc._run_tests()
cover.stop()
cover.save()
cover.html_report()
setup(name='openstack-monitoring',
version='1.0',
description='OpenStack Monitoring checks for Nagios and its forks.',
author='Tobias Urdin',
author_email='tobias.urdin@gmail.com',
license='Apache License 2.0',
packages=['openstack_monitoring'],
package_dir={
'openstack_monitoring': 'openstack_monitoring',
},
url='https://github.com/tobias-urdin/openstack-monitoring',
cmdclass={
'test': TestCommand,
'pep8': Pep8Command,
'coverage': CoverageCommand
},
)
|
# -*- coding: utf-8 -*-
import os
from dotenv import load_dotenv
class Config:
def __init__(self, dotenv_path=".env"):
load_dotenv(dotenv_path)
def get(self, key):
value = os.environ.get(key)
if value is None:
            raise Exception(f"Environment variable '{key}' is not set")
return value
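# A minimal usage sketch (the variable name is hypothetical):
#   config = Config()                     # loads .env from the working directory
#   db_url = config.get("DATABASE_URL")   # raises if the variable is not set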
|
# Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import json
import math
import os
import unicodedata
from typing import Dict, List, Optional, TYPE_CHECKING
from PyQt5.QtCore import QObject, pyqtSignal, pyqtProperty, pyqtSlot
from UM.Logger import Logger
from UM.Qt.Duration import Duration
from UM.Scene.SceneNode import SceneNode
from UM.i18n import i18nCatalog
from UM.MimeTypeDatabase import MimeTypeDatabase, MimeTypeNotFoundError
if TYPE_CHECKING:
from cura.CuraApplication import CuraApplication
catalog = i18nCatalog("cura")
## A class for processing and managing the print times per build plate as well as the job name
#
# This class also mangles the current machine name and the filename of the first loaded mesh into a job name.
# This job name is requested by the JobSpecs qml file.
class PrintInformation(QObject):
UNTITLED_JOB_NAME = "Untitled"
def __init__(self, application: "CuraApplication", parent = None) -> None:
super().__init__(parent)
self._application = application
self.initializeCuraMessagePrintTimeProperties()
# Indexed by build plate number
self._material_lengths = {} # type: Dict[int, List[float]]
self._material_weights = {} # type: Dict[int, List[float]]
self._material_costs = {} # type: Dict[int, List[float]]
self._material_names = {} # type: Dict[int, List[str]]
self._pre_sliced = False
self._backend = self._application.getBackend()
if self._backend:
self._backend.printDurationMessage.connect(self._onPrintDurationMessage)
self._application.getController().getScene().sceneChanged.connect(self._onSceneChanged)
self._is_user_specified_job_name = False
self._base_name = ""
self._abbr_machine = ""
self._job_name = ""
self._active_build_plate = 0
self._initVariablesByBuildPlate(self._active_build_plate)
self._multi_build_plate_model = self._application.getMultiBuildPlateModel()
self._application.globalContainerStackChanged.connect(self._updateJobName)
self._application.globalContainerStackChanged.connect(self.setToZeroPrintInformation)
self._application.fileLoaded.connect(self.setBaseName)
self._application.workspaceLoaded.connect(self.setProjectName)
self._application.getMachineManager().rootMaterialChanged.connect(self._onActiveMaterialsChanged)
self._application.getInstance().getPreferences().preferenceChanged.connect(self._onPreferencesChanged)
self._multi_build_plate_model.activeBuildPlateChanged.connect(self._onActiveBuildPlateChanged)
self._material_amounts = [] # type: List[float]
self._onActiveMaterialsChanged()
def initializeCuraMessagePrintTimeProperties(self) -> None:
self._current_print_time = {} # type: Dict[int, Duration]
self._print_time_message_translations = {
"inset_0": catalog.i18nc("@tooltip", "Outer Wall"),
"inset_x": catalog.i18nc("@tooltip", "Inner Walls"),
"skin": catalog.i18nc("@tooltip", "Skin"),
"infill": catalog.i18nc("@tooltip", "Infill"),
"support_infill": catalog.i18nc("@tooltip", "Support Infill"),
"support_interface": catalog.i18nc("@tooltip", "Support Interface"),
"support": catalog.i18nc("@tooltip", "Support"),
"skirt": catalog.i18nc("@tooltip", "Skirt"),
"prime_tower": catalog.i18nc("@tooltip", "Prime Tower"),
"travel": catalog.i18nc("@tooltip", "Travel"),
"retract": catalog.i18nc("@tooltip", "Retractions"),
"none": catalog.i18nc("@tooltip", "Other")
}
self._print_times_per_feature = {} # type: Dict[int, Dict[str, Duration]]
def _initPrintTimesPerFeature(self, build_plate_number: int) -> None:
        # Pre-fill the per-feature durations using the keys from _print_time_message_translations
self._print_times_per_feature[build_plate_number] = {}
for key in self._print_time_message_translations.keys():
self._print_times_per_feature[build_plate_number][key] = Duration(None, self)
def _initVariablesByBuildPlate(self, build_plate_number: int) -> None:
if build_plate_number not in self._print_times_per_feature:
self._initPrintTimesPerFeature(build_plate_number)
if self._active_build_plate not in self._material_lengths:
self._material_lengths[self._active_build_plate] = []
if self._active_build_plate not in self._material_weights:
self._material_weights[self._active_build_plate] = []
if self._active_build_plate not in self._material_costs:
self._material_costs[self._active_build_plate] = []
if self._active_build_plate not in self._material_names:
self._material_names[self._active_build_plate] = []
if self._active_build_plate not in self._current_print_time:
self._current_print_time[self._active_build_plate] = Duration(parent = self)
currentPrintTimeChanged = pyqtSignal()
preSlicedChanged = pyqtSignal()
@pyqtProperty(bool, notify=preSlicedChanged)
def preSliced(self) -> bool:
return self._pre_sliced
def setPreSliced(self, pre_sliced: bool) -> None:
if self._pre_sliced != pre_sliced:
self._pre_sliced = pre_sliced
self._updateJobName()
self.preSlicedChanged.emit()
@pyqtProperty(Duration, notify = currentPrintTimeChanged)
def currentPrintTime(self) -> Duration:
return self._current_print_time[self._active_build_plate]
materialLengthsChanged = pyqtSignal()
@pyqtProperty("QVariantList", notify = materialLengthsChanged)
def materialLengths(self):
return self._material_lengths[self._active_build_plate]
materialWeightsChanged = pyqtSignal()
@pyqtProperty("QVariantList", notify = materialWeightsChanged)
def materialWeights(self):
return self._material_weights[self._active_build_plate]
materialCostsChanged = pyqtSignal()
@pyqtProperty("QVariantList", notify = materialCostsChanged)
def materialCosts(self):
return self._material_costs[self._active_build_plate]
materialNamesChanged = pyqtSignal()
@pyqtProperty("QVariantList", notify = materialNamesChanged)
def materialNames(self):
return self._material_names[self._active_build_plate]
# Get all print times (by feature) of the active buildplate.
def printTimes(self) -> Dict[str, Duration]:
return self._print_times_per_feature[self._active_build_plate]
def _onPrintDurationMessage(self, build_plate_number: int, print_times_per_feature: Dict[str, int], material_amounts: List[float]) -> None:
self._updateTotalPrintTimePerFeature(build_plate_number, print_times_per_feature)
self.currentPrintTimeChanged.emit()
self._material_amounts = material_amounts
self._calculateInformation(build_plate_number)
def _updateTotalPrintTimePerFeature(self, build_plate_number: int, print_times_per_feature: Dict[str, int]) -> None:
total_estimated_time = 0
if build_plate_number not in self._print_times_per_feature:
self._initPrintTimesPerFeature(build_plate_number)
for feature, time in print_times_per_feature.items():
if feature not in self._print_times_per_feature[build_plate_number]:
self._print_times_per_feature[build_plate_number][feature] = Duration(parent=self)
duration = self._print_times_per_feature[build_plate_number][feature]
if time != time: # Check for NaN. Engine can sometimes give us weird values.
duration.setDuration(0)
Logger.log("w", "Received NaN for print duration message")
continue
total_estimated_time += time
duration.setDuration(time)
if build_plate_number not in self._current_print_time:
self._current_print_time[build_plate_number] = Duration(None, self)
self._current_print_time[build_plate_number].setDuration(total_estimated_time)
def _calculateInformation(self, build_plate_number: int) -> None:
global_stack = self._application.getGlobalContainerStack()
if global_stack is None:
return
self._material_lengths[build_plate_number] = []
self._material_weights[build_plate_number] = []
self._material_costs[build_plate_number] = []
self._material_names[build_plate_number] = []
material_preference_values = json.loads(self._application.getInstance().getPreferences().getValue("cura/material_settings"))
for index, extruder_stack in enumerate(global_stack.extruderList):
if index >= len(self._material_amounts):
continue
amount = self._material_amounts[index]
            # Find the right extruder stack. As the list isn't sorted because it's an annoying generator, we do some
# list comprehension filtering to solve this for us.
density = extruder_stack.getMetaDataEntry("properties", {}).get("density", 0)
material = extruder_stack.material
radius = extruder_stack.getProperty("material_diameter", "value") / 2
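            # The amount arrives in mm^3 (see the note below); assuming density is
            # given in g/cm^3, dividing by 1000 converts the product to grams.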
weight = float(amount) * float(density) / 1000
cost = 0.
material_guid = material.getMetaDataEntry("GUID")
material_name = material.getName()
if material_guid in material_preference_values:
material_values = material_preference_values[material_guid]
if material_values and "spool_weight" in material_values:
weight_per_spool = float(material_values["spool_weight"])
else:
weight_per_spool = float(extruder_stack.getMetaDataEntry("properties", {}).get("weight", 0))
cost_per_spool = float(material_values["spool_cost"] if material_values and "spool_cost" in material_values else 0)
if weight_per_spool != 0:
cost = cost_per_spool * weight / weight_per_spool
else:
cost = 0
# Material amount is sent as an amount of mm^3, so calculate length from that
if radius != 0:
length = round((amount / (math.pi * radius ** 2)) / 1000, 2)
else:
length = 0
self._material_weights[build_plate_number].append(weight)
self._material_lengths[build_plate_number].append(length)
self._material_costs[build_plate_number].append(cost)
self._material_names[build_plate_number].append(material_name)
self.materialLengthsChanged.emit()
self.materialWeightsChanged.emit()
self.materialCostsChanged.emit()
self.materialNamesChanged.emit()
def _onPreferencesChanged(self, preference: str) -> None:
if preference != "cura/material_settings":
return
for build_plate_number in range(self._multi_build_plate_model.maxBuildPlate + 1):
self._calculateInformation(build_plate_number)
def _onActiveBuildPlateChanged(self) -> None:
new_active_build_plate = self._multi_build_plate_model.activeBuildPlate
if new_active_build_plate != self._active_build_plate:
self._active_build_plate = new_active_build_plate
self._updateJobName()
self._initVariablesByBuildPlate(self._active_build_plate)
self.materialLengthsChanged.emit()
self.materialWeightsChanged.emit()
self.materialCostsChanged.emit()
self.materialNamesChanged.emit()
self.currentPrintTimeChanged.emit()
def _onActiveMaterialsChanged(self, *args, **kwargs) -> None:
for build_plate_number in range(self._multi_build_plate_model.maxBuildPlate + 1):
self._calculateInformation(build_plate_number)
    # A manual override of the job name should also set the base name, so that when the printer prefix is updated,
    # the prefix can be added to the manually entered name instead of the old base name
@pyqtSlot(str, bool)
def setJobName(self, name: str, is_user_specified_job_name = False) -> None:
self._is_user_specified_job_name = is_user_specified_job_name
self._job_name = name
self._base_name = name.replace(self._abbr_machine + "_", "")
if name == "":
self._is_user_specified_job_name = False
self.jobNameChanged.emit()
jobNameChanged = pyqtSignal()
@pyqtProperty(str, notify = jobNameChanged)
def jobName(self):
return self._job_name
def _updateJobName(self) -> None:
if self._base_name == "":
self._job_name = self.UNTITLED_JOB_NAME
self._is_user_specified_job_name = False
self.jobNameChanged.emit()
return
base_name = self._stripAccents(self._base_name)
self._defineAbbreviatedMachineName()
# Only update the job name when it's not user-specified.
if not self._is_user_specified_job_name:
if self._pre_sliced:
self._job_name = catalog.i18nc("@label", "Pre-sliced file {0}", base_name)
elif self._application.getInstance().getPreferences().getValue("cura/jobname_prefix"):
# Don't add abbreviation if it already has the exact same abbreviation.
if base_name.startswith(self._abbr_machine + "_"):
self._job_name = base_name
else:
self._job_name = self._abbr_machine + "_" + base_name
else:
self._job_name = base_name
# In case there are several buildplates, a suffix is attached
if self._multi_build_plate_model.maxBuildPlate > 0:
connector = "_#"
suffix = connector + str(self._active_build_plate + 1)
if connector in self._job_name:
self._job_name = self._job_name.split(connector)[0] # get the real name
if self._active_build_plate != 0:
self._job_name += suffix
self.jobNameChanged.emit()
@pyqtSlot(str)
def setProjectName(self, name: str) -> None:
self.setBaseName(name, is_project_file = True)
baseNameChanged = pyqtSignal()
def setBaseName(self, base_name: str, is_project_file: bool = False) -> None:
self._is_user_specified_job_name = False
# Ensure that we don't use entire path but only filename
name = os.path.basename(base_name)
        # When a file is opened from the terminal, the filename comes from _onFileLoaded and still contains
        # its extension. This cuts the extension off if necessary.
check_name = os.path.splitext(name)[0]
filename_parts = os.path.basename(base_name).split(".")
# If it's a gcode, also always update the job name
is_gcode = False
if len(filename_parts) > 1:
# Only check the extension(s)
is_gcode = "gcode" in filename_parts[1:]
        # If this is a profile file, always update the job name.
        # check_name is "" when previously loaded meshes have all been deleted, so the naming should start again.
is_empty = check_name == ""
if is_gcode or is_project_file or (is_empty or (self._base_name == "" and self._base_name != check_name)):
            # Only take the file name part. Note: the file name itself may contain dots as well.
data = ""
try:
mime_type = MimeTypeDatabase.getMimeTypeForFile(name)
data = mime_type.stripExtension(name)
except MimeTypeNotFoundError:
Logger.log("w", "Unsupported Mime Type Database file extension %s", name)
if data is not None and check_name is not None:
self._base_name = data
else:
self._base_name = ""
# Strip the old "curaproject" extension from the name
OLD_CURA_PROJECT_EXT = ".curaproject"
if self._base_name.lower().endswith(OLD_CURA_PROJECT_EXT):
self._base_name = self._base_name[:len(self._base_name) - len(OLD_CURA_PROJECT_EXT)]
        # CURA-5896: Strip any number of trailing ".curaproject.3mf" extensions.
OLD_CURA_PROJECT_3MF_EXT = ".curaproject.3mf"
while self._base_name.lower().endswith(OLD_CURA_PROJECT_3MF_EXT):
self._base_name = self._base_name[:len(self._base_name) - len(OLD_CURA_PROJECT_3MF_EXT)]
self._updateJobName()
@pyqtProperty(str, fset = setBaseName, notify = baseNameChanged)
def baseName(self):
return self._base_name
    ## Creates an acronym-like abbreviated machine name from the currently
    # active machine name.
    # Called each time the global stack is switched.
def _defineAbbreviatedMachineName(self) -> None:
global_container_stack = self._application.getGlobalContainerStack()
if not global_container_stack:
self._abbr_machine = ""
return
active_machine_type_name = global_container_stack.definition.getName()
self._abbr_machine = self._application.getMachineManager().getAbbreviatedMachineName(active_machine_type_name)
## Utility method that strips accents from characters (eg: â -> a)
def _stripAccents(self, to_strip: str) -> str:
return ''.join(char for char in unicodedata.normalize('NFD', to_strip) if unicodedata.category(char) != 'Mn')
@pyqtSlot(result = "QVariantMap")
def getFeaturePrintTimes(self) -> Dict[str, Duration]:
result = {}
if self._active_build_plate not in self._print_times_per_feature:
self._initPrintTimesPerFeature(self._active_build_plate)
for feature, time in self._print_times_per_feature[self._active_build_plate].items():
if feature in self._print_time_message_translations:
result[self._print_time_message_translations[feature]] = time
else:
result[feature] = time
return result
    # Simulate a print duration message with zero time for every feature
def setToZeroPrintInformation(self, build_plate: Optional[int] = None) -> None:
if build_plate is None:
build_plate = self._active_build_plate
# Construct the 0-time message
temp_message = {}
if build_plate not in self._print_times_per_feature:
self._print_times_per_feature[build_plate] = {}
for key in self._print_times_per_feature[build_plate].keys():
temp_message[key] = 0
temp_material_amounts = [0.]
self._onPrintDurationMessage(build_plate, temp_message, temp_material_amounts)
## Listen to scene changes to check if we need to reset the print information
def _onSceneChanged(self, scene_node: SceneNode) -> None:
# Ignore any changes that are not related to sliceable objects
if not isinstance(scene_node, SceneNode)\
or not scene_node.callDecoration("isSliceable")\
or not scene_node.callDecoration("getBuildPlateNumber") == self._active_build_plate:
return
self.setToZeroPrintInformation(self._active_build_plate)
|
# Copyright (c) 2019-2021 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""\
Video to labels example.
"""
import argparse
import sys
import pyeddl.eddl as eddl
from pyeddl.tensor import Tensor
MEM_CHOICES = ("low_mem", "mid_mem", "full_mem")
def main(args):
size = 256 // 2
# Conv3D expects (B, C, dim1, dim2, dim3)
in_ = eddl.Input([3, 10, size, size])
layer = in_
layer = eddl.MaxPool3D(eddl.ReLu(eddl.Conv3D(
layer, 4, [1, 3, 3], [1, 1, 1], "same"
)), [1, 2, 2], [1, 2, 2], "same")
layer = eddl.MaxPool3D(eddl.ReLu(eddl.Conv3D(
layer, 8, [1, 3, 3], [1, 1, 1], "same"
)), [1, 2, 2], [1, 2, 2], "same")
layer = eddl.MaxPool3D(eddl.ReLu(eddl.Conv3D(
layer, 16, [1, 3, 3], [1, 1, 1], "same"
)), [1, 2, 2], [1, 2, 2], "same")
layer = eddl.GlobalMaxPool3D(layer)
layer = eddl.Reshape(layer, [-1])
layer = eddl.LSTM(layer, 128)
layer = eddl.Dense(layer, 100)
layer = eddl.ReLu(layer)
layer = eddl.Dense(layer, 2)
out = eddl.ReLu(layer)
net = eddl.Model([in_], [out])
eddl.build(
net,
eddl.adam(),
["mse"],
["mse"],
eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
)
eddl.summary(net)
seqImages = Tensor.randu([32, 10, 3, 10, size, size])
seqLabels = Tensor.randu([32, 7, 2])
eddl.fit(net, [seqImages], [seqLabels], 4, 2 if args.small else 10)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--gpu", action="store_true")
parser.add_argument("--small", action="store_true")
parser.add_argument("--mem", metavar="|".join(MEM_CHOICES),
choices=MEM_CHOICES, default="low_mem")
main(parser.parse_args(sys.argv[1:]))
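# Example invocation (assuming the script is saved as video_to_labels.py):
#   python3 video_to_labels.py --small              # quick CPU run (2 epochs)
#   python3 video_to_labels.py --gpu --mem full_mem # longer run on the GPU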
|
# Getting Started
# Try typing
print('this is a test, this is only a test') # in the REPL and you should see it printed as a reply.
# You can type math - try
2 + 2
# If you type more than one line, spaces are important - for instance if you define a function:
def Tell():
print('more testing')
# and then call the function
Tell() # and you should see the text printed.
# To run existing code in libraries, you have to "import". Try getting a directory listing with:
import os
os.listdir()  # and you should get a list of text strings returned.
# https://docs.micropython.org/en/latest/index.html
#------------------------- Loops
# There are many different kinds. The standard for loop going from 0 to 9 is just
for i in range(10):
    print(i)
# You can also pull from a list:
for i in [1, 3, 5, 9]:
    print(i)
# or run until a condition is met:
i = 0
while i < 10:
    print(i)
    i = i + 1
# or run forever:
import utime
i = 0
while True:
    print(i)
    i = i + 1
    utime.sleep(1)  # wait one second between prints so that you can hit CTRL-C and continue on
#------------------------- Conditionals
# You can have a standard if..then..else format:
A = 3
B = 1
def Test():
    if A < B:
        print('A Wins')
    else:
        if B:
            for i in ["1", "3", "5"]:
                print('i=', i)
        else:
            print('B Loses')
Test()
#------------------------- Running code that might not work...
# Use the "try: except:" options. A sample would be
from pyb import SPI
spi = SPI(2, SPI.SLAVE, polarity=0, phase=0)
spi.MSB  # bit-order constant; referencing it here has no effect on the bus
data = bytearray(4)
reply = bytearray(4)
data[0] = 2
data[1] = 1
data[2] = 3
data[3] = 11
def Test():
    try:
        spi.send_recv(data, reply)
        print(reply)
    except Exception as e:
        print(e)
        print('failed')
Test()
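# You can also catch a specific exception type and add cleanup code that always runs.
# (A minimal extra sketch for illustration - not part of the original sample.)
def SafeDivide(a, b):
    try:
        return a / b
    except ZeroDivisionError as e:
        print('cannot divide by zero:', e)
        return None
    finally:
        print('done')
SafeDivide(4, 2)
SafeDivide(1, 0)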
|
#!/usr/local/bin/python
from mqtt_fiware_bridge import MFB
class MockBridge(MFB.MqttFiwareBridge):
def __init__(self, **kwargs):
super(MockBridge, self).__init__(**kwargs)
def do_something(self, message):
self.log.info(f"Printing validated message from {__name__}")
print(f"Voila: {message}")
if __name__ == "__main__":
mocker_client = MockBridge()
mocker_client.connect()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Adjusts the SQL statements of the COBOL programs converted by EASY2COB
for every program in the directory given by the properties entry ['DIRSOUPGM'], with the extension ['EXTSOU'].
'pathProp.txt' is the default file pointing to the properties directory.
'''
import os
import ConfigParser
from collections import namedtuple
from HOFs import *
from utilities import *
from DirFileList import *
from Sql import Sql
class Sqlcob(object):
def __init__(self, properties='pathProp.txt'):
self.properties = properties
path = ''.join(open(self.properties).readline().replace("'", "").split())
config = ConfigParser.ConfigParser()
config.read(os.path.join(path, 'properties.cnf'))
self.diccnfg = {k: v for k, v in config.items('SQL')}
self.include = self.loadinclude()[0]
self.dcltable = self.loadinclude()[1]
self.tables = self.loadtables()
self.cmds = file(r'cblcmds.txt').read().splitlines()
self.sql = Sql(self.diccnfg, self.include, self.tables, self.cmds)
def sqlcob(self):
ispgm = lambda pgm: pgm[-3:].upper() == self.diccnfg['extsou']
dirfilelist = DirFileList()
dirfilelist.setDirFileList(self.diccnfg['dirsoupgm'])
pgmlist = dirfilelist.getDirFileList()
for pgm in filter(ispgm, pgmlist):
basename = os.path.basename(pgm)
print pgm
pgmwrite = open('{}'.format(os.path.join(self.diccnfg['dircnvpgm'], basename)), 'w')
pgmwrite.writelines(self.sql.sql(pgm))
pgmwrite.close()
def loadinclude(self):
lines = file('{}'.format(os.path.join(self.diccnfg['dirdatwor'], 'include.txt'))).readlines()
Attrs = namedtuple('Attrs', ['dclgen', 'prefix', 'declare'])
include = {}
dcltable = {}
for line in lines:
lis = line.split()
include[lis[0]] = Attrs(lis[1], '' if lis[2] == 'None' else lis[2], lis[3])
dcltable[lis[1]] = lis[0]
return include, dcltable
def loadtables(self):
isdcl = lambda dcl: dcl[-3:].upper() == self.diccnfg['extcpy']
dirfilelist = DirFileList()
dirfilelist.setDirFileList(self.diccnfg['dirsoudcl'])
dcllist = dirfilelist.getDirFileList()
Attrs = namedtuple('Attrs', ['datatype', 'isnull'])
tables = {}
for dcl in filter(isdcl, dcllist):
basename = os.path.basename(dcl)
fields = {}
lines = file(dcl).readlines()
lines = change({'(': ' ', ',': ' '}, lines)
n = 0
while 'EXEC SQL DECLARE' not in lines[n]:
n += 1
n += 1
while 'END-EXEC' not in lines[n]:
wrds = words(lines[n])[1]
fields[wrds[0]] = Attrs(wrds[1], False if 'NOT NULL' in lines[n] else True)
n += 1
tables[self.dcltable[basename[:-(len(self.diccnfg['extcpy']) + 1)]]] = fields
return tables
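# Minimal usage sketch (an illustrative assumption - the original module defines no entry point):
if __name__ == '__main__':
    Sqlcob().sqlcob()  # reads 'pathProp.txt' by default, then converts every matching program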
|
from collections import Counter, defaultdict
class Solution:
def frequencySort(self, s: str) -> str:
return self.counter_soln(s)
def defaultdict_soln(self, s: str) -> str:
"""
Runtime: O(nlogn)
Space: O(n)
"""
scounter = defaultdict(int)
for char in s:
scounter[char] += 1
sorted_chars = sorted(scounter, key=scounter.get, reverse=True)
return ''.join([char * scounter[char] for char in sorted_chars])
def counter_soln(self, s: str) -> str:
"""
Runtime: O(nlogn)
Space: O(n)
"""
scounter = Counter(s)
sorted_chars = sorted(scounter, key=scounter.get, reverse=True)
return ''.join([char * scounter[char] for char in sorted_chars])
"""
Both Counter and defaultdict solution
Runtime: O(nlogn)
Space: O(n)
Counter Solution:
Runtime: 32 ms, faster than 94.55% of Python3 online submissions for Sort Characters By Frequency.
Memory Usage: 13.9 MB, less than 50.00% of Python3 online submissions for Sort Characters By Frequency.
"""
|
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract test classes for FHIR-specific primitive_handler modules."""
import abc
import decimal
import json
import os
from typing import cast, Any, Type
from google.protobuf import message
from absl.testing import absltest
from google.fhir import _primitive_time_utils
from google.fhir import extensions
from google.fhir import fhir_errors
from google.fhir import primitive_handler
from google.fhir.testing import testdata_utils
from google.fhir.utils import path_utils
from google.fhir.utils import proto_utils
class PrimitiveWrapperPrimitiveHasNoValueTest(
absltest.TestCase, metaclass=abc.ABCMeta):
"""A suite of tests to ensure proper validation for PrimitiveHasNoValue."""
@property
@abc.abstractmethod
def primitive_handler(self) -> primitive_handler.PrimitiveHandler:
raise NotImplementedError('Subclasses *must* implement primitive_handler.')
def testPrimitiveHasNoValue_withValidBase64Binary_succeeds(self):
self.assert_set_valid_primitive_has_no_value_succeeds(
self.primitive_handler.base64_binary_cls)
def testPrimitiveHasNoValue_withInvalidBase64Binary_raises(self):
self.assert_set_invalid_primitive_has_no_value_raises(
self.primitive_handler.base64_binary_cls)
def testPrimitiveHasNoValue_withValidBoolean_succeeds(self):
self.assert_set_valid_primitive_has_no_value_succeeds(
self.primitive_handler.boolean_cls)
def testPrimitiveHasNoValue_withInvalidBoolean_raises(self):
self.assert_set_invalid_primitive_has_no_value_raises(
self.primitive_handler.boolean_cls)
def testPrimitiveHasNoValue_withValidCode_succeeds(self):
self.assert_set_valid_primitive_has_no_value_succeeds(
self.primitive_handler.code_cls)
def testPrimitiveHasNoValue_withInvalidCode_raises(self):
self.assert_set_invalid_primitive_has_no_value_raises(
self.primitive_handler.code_cls)
def testPrimitiveHasNoValue_withValidDate_succeeds(self):
self.assert_set_valid_primitive_has_no_value_succeeds(
self.primitive_handler.date_cls)
def testPrimitiveHasNoValue_withInvalidDate_raises(self):
self.assert_set_invalid_primitive_has_no_value_raises(
self.primitive_handler.date_cls)
def testPrimitiveHasNoValue_withValidDateTime_succeeds(self):
self.assert_set_valid_primitive_has_no_value_succeeds(
self.primitive_handler.date_time_cls)
def testPrimitiveHasNoValue_withInvalidDateTime_raises(self):
self.assert_set_invalid_primitive_has_no_value_raises(
self.primitive_handler.date_time_cls)
def testPrimitiveHasNoValue_withValidDecimal_succeeds(self):
self.assert_set_valid_primitive_has_no_value_succeeds(
self.primitive_handler.decimal_cls)
def testPrimitiveHasNoValue_withInvalidDecimal_raises(self):
self.assert_set_invalid_primitive_has_no_value_raises(
self.primitive_handler.decimal_cls)
def testPrimitiveHasNoValue_withValidId_succeeds(self):
self.assert_set_valid_primitive_has_no_value_succeeds(
self.primitive_handler.id_cls)
def testPrimitiveHasNoValue_withInvalidId_raises(self):
self.assert_set_invalid_primitive_has_no_value_raises(
self.primitive_handler.id_cls)
def testPrimitiveHasNoValue_withValidInstant_succeeds(self):
self.assert_set_valid_primitive_has_no_value_succeeds(
self.primitive_handler.instant_cls)
def testPrimitiveHasNoValue_withInvalidInstant_raises(self):
self.assert_set_invalid_primitive_has_no_value_raises(
self.primitive_handler.instant_cls)
def testPrimitiveHasNoValue_withValidInteger_succeeds(self):
self.assert_set_valid_primitive_has_no_value_succeeds(
self.primitive_handler.integer_cls)
def testPrimitiveHasNoValue_withInvalidInteger_raises(self):
self.assert_set_invalid_primitive_has_no_value_raises(
self.primitive_handler.integer_cls)
def testPrimitiveHasNoValue_withValidMarkdown_succeeds(self):
self.assert_set_valid_primitive_has_no_value_succeeds(
self.primitive_handler.markdown_cls)
def testPrimitiveHasNoValue_withInvalidMarkdown_raises(self):
self.assert_set_invalid_primitive_has_no_value_raises(
self.primitive_handler.markdown_cls)
def testPrimitiveHasNoValue_withValidOid_succeeds(self):
self.assert_set_valid_primitive_has_no_value_succeeds(
self.primitive_handler.oid_cls)
def testPrimitiveHasNoValue_withInvalidOid_raises(self):
self.assert_set_invalid_primitive_has_no_value_raises(
self.primitive_handler.oid_cls)
def testPrimitiveHasNoValue_withValidPositiveInt_succeeds(self):
self.assert_set_valid_primitive_has_no_value_succeeds(
self.primitive_handler.positive_int_cls)
def testPrimitiveHasNoValue_withInvalidPositiveInt_raises(self):
self.assert_set_invalid_primitive_has_no_value_raises(
self.primitive_handler.positive_int_cls)
def testPrimitiveHasNoValue_withValidString_succeeds(self):
self.assert_set_valid_primitive_has_no_value_succeeds(
self.primitive_handler.string_cls)
def testPrimitiveHasNoValue_withInvalidString_raises(self):
self.assert_set_invalid_primitive_has_no_value_raises(
self.primitive_handler.string_cls)
def testPrimitiveHasNoValue_withValidTime_succeeds(self):
self.assert_set_valid_primitive_has_no_value_succeeds(
self.primitive_handler.time_cls)
def testPrimitiveHasNoValue_withInvalidTime_raises(self):
self.assert_set_invalid_primitive_has_no_value_raises(
self.primitive_handler.time_cls)
def testPrimitiveHasNoValue_withValidUnsignedInt_succeeds(self):
self.assert_set_valid_primitive_has_no_value_succeeds(
self.primitive_handler.unsigned_int_cls)
def testPrimitiveHasNoValue_withInvalidUnsignedInt_raises(self):
self.assert_set_invalid_primitive_has_no_value_raises(
self.primitive_handler.unsigned_int_cls)
def testPrimitiveHasNoValue_withValidUri_succeeds(self):
self.assert_set_valid_primitive_has_no_value_succeeds(
self.primitive_handler.uri_cls)
def _set_primitive_has_no_value_extension(self, primitive: message.Message):
"""Sets the PrimitiveHasNoValue FHIR extension on the provided primitive."""
extensions_field = primitive.DESCRIPTOR.fields_by_name['extension']
primitive_has_no_value = extensions.create_primitive_has_no_value(
extensions_field.message_type)
proto_utils.set_value_at_field(primitive, 'extension',
[primitive_has_no_value])
def assert_set_valid_primitive_has_no_value_succeeds(
self, primitive_cls: Type[message.Message]):
"""Tests setting PrimitiveHasNoValue with other extensions present.
Having a PrimitiveHasNoValue extension is okay provided there are other
extensions set. This is expected not to raise any exceptions.
Args:
primitive_cls: The type of primitive to instantiate and test.
"""
primitive = primitive_cls()
self._set_primitive_has_no_value_extension(primitive)
# Add arbitrary extension
extensions_field = primitive.DESCRIPTOR.fields_by_name['extension']
extension = proto_utils.create_message_from_descriptor(
extensions_field.message_type)
# Silencing the type checkers as pytype doesn't fully support structural
# subtyping yet.
# pylint: disable=line-too-long
# See: https://mypy.readthedocs.io/en/stable/casts.html#casts-and-type-assertions.
# pylint: enable=line-too-long
# Soon: https://www.python.org/dev/peps/pep-0544/.
cast(Any, extension).url.value = 'abcd'
cast(Any, extension).value.boolean.value = True
proto_utils.append_value_at_field(primitive, 'extension', extension)
try:
self.primitive_handler.primitive_wrapper_from_primitive(primitive)
except fhir_errors.InvalidFhirError as e:
self.fail('PrimitiveHasNoValue validation failed for {}: {}.'.format(
primitive.DESCRIPTOR.full_name, e))
def assert_set_invalid_primitive_has_no_value_raises(
self, primitive_cls: Type[message.Message]):
"""Tests setting PrimitiveHasNoValue with no other fields present.
    Having a PrimitiveHasNoValue extension is only acceptable provided that no
    fields other than id or extension are set. This is expected to
raise an exception.
Args:
primitive_cls: The type of primitive to instantiate and test.
"""
primitive = primitive_cls()
self._set_primitive_has_no_value_extension(primitive)
with self.assertRaises(fhir_errors.InvalidFhirError) as fe:
self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertIsInstance(fe.exception, fhir_errors.InvalidFhirError)
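# A concrete FHIR version would typically subclass these abstract suites and
# supply its own handler, roughly like this (the module name below is a
# hypothetical placeholder, shown only for illustration):
#
#   class R4PrimitiveHasNoValueTest(PrimitiveWrapperPrimitiveHasNoValueTest):
#
#     @property
#     def primitive_handler(self) -> primitive_handler.PrimitiveHandler:
#       return r4_primitive_handler.PrimitiveHandler()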
class PrimitiveWrapperProtoValidationTest(
absltest.TestCase, metaclass=abc.ABCMeta):
"""Tests the primitive_wrapper classes for proto validation functionality.
Validation tests generally consist of two components:
1. Validation with a valid FHIR primitive (should succeed)
2. Validation with an *invalid* FHIR primitive (should raise)
  Note that these tests may *not* be applicable to every type. For example,
some types, like Boolean, don't have an invalid representation.
"""
_PROTO_DELIMITER = '\n---\n'
@property
@abc.abstractmethod
def primitive_handler(self) -> primitive_handler.PrimitiveHandler:
raise NotImplementedError('Subclasses *must* implement primitive_handler.')
@property
@abc.abstractmethod
def validation_dir(self) -> str:
raise NotImplementedError('Subclasses *must* implement validation_dir')
def testValidateWrapped_withValidBase64Binary_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.base64_binary_cls)
def testValidateWrapped_withValidBoolean_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.boolean_cls)
def testValidateWrapped_withValidCode_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.code_cls)
def testValidateWrapped_withInvalidCode_raises(self):
self.assert_validation_of_invalid_primitive_raises(
self.primitive_handler.code_cls)
def testValidateWrapped_withValidDate_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.date_cls)
def testValidateWrapped_withInvalidDate_raises(self):
self.assert_validation_of_invalid_primitive_raises(
self.primitive_handler.date_cls)
def testValidateWrapped_withValidDateTime_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.date_time_cls)
def testValidateWrapped_withInvalidDateTime_raises(self):
self.assert_validation_of_invalid_primitive_raises(
self.primitive_handler.date_time_cls)
def testValidateWrapped_withValidDecimal_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.decimal_cls)
def testValidateWrapped_withInvalidDecimal_raises(self):
self.assert_validation_of_invalid_primitive_raises(
self.primitive_handler.decimal_cls)
def testValidateWrapped_withValidId_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.id_cls)
def testValidateWrapped_withInvalidId_raises(self):
self.assert_validation_of_invalid_primitive_raises(
self.primitive_handler.id_cls)
def testValidateWrapped_withValidInstant_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.instant_cls)
def testValidateWrapped_withInvalidInstant_raises(self):
self.assert_validation_of_invalid_primitive_raises(
self.primitive_handler.instant_cls)
def testValidateWrapped_withValidInteger_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.integer_cls)
def testValidateWrapped_withValidMarkdown_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.markdown_cls)
def testValidateWrapped_withValidOid_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.oid_cls)
def testValidateWrapped_withInvalidOid_raises(self):
self.assert_validation_of_invalid_primitive_raises(
self.primitive_handler.oid_cls)
def testValidateWrapped_withValidPositiveInt_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.positive_int_cls)
def testValidateWrapped_withInvalidPositiveInt_raises(self):
self.assert_validation_of_invalid_primitive_raises(
self.primitive_handler.positive_int_cls)
def testValidateWrapped_withValidString_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.string_cls)
def testValidateWrapped_withValidTime_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.time_cls)
def testValidateWrapped_withInvalidTime_raises(self):
self.assert_validation_of_invalid_primitive_raises(
self.primitive_handler.time_cls)
def testValidateWrapped_withValidUnsignedInt_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.unsigned_int_cls)
def testValidateWrapped_withValidUri_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.uri_cls)
def testValidateWrapped_withValidXhtml_succeeds(self):
self.assert_validation_of_valid_primitive_succeeds(
self.primitive_handler.xhtml_cls)
def assert_validation_of_valid_primitive_succeeds(
self, primitive_cls: Type[message.Message]):
"""Performs a suite of validation tests on valid FHIR primitives."""
filename = path_utils.camel_case_to_snake_case(
primitive_cls.DESCRIPTOR.name)
valid_protos = testdata_utils.read_protos(
os.path.join(self.validation_dir, filename + '.valid.prototxt'),
primitive_cls, self._PROTO_DELIMITER)
for valid_proto in valid_protos:
try:
self.primitive_handler.primitive_wrapper_from_primitive(valid_proto)
except fhir_errors.InvalidFhirError as e:
self.fail('{} did not represent valid FHIR: {}.'.format(filename, e))
def assert_validation_of_invalid_primitive_raises(
self, primitive_cls: Type[message.Message]):
"""Performs a suite of validation tests on invalid FHIR primitives."""
filename = path_utils.camel_case_to_snake_case(
primitive_cls.DESCRIPTOR.name)
invalid_protos = testdata_utils.read_protos(
os.path.join(self.validation_dir, filename + '.invalid.prototxt'),
primitive_cls, self._PROTO_DELIMITER)
for invalid_proto in invalid_protos:
with self.assertRaises(fhir_errors.InvalidFhirError) as fe:
self.primitive_handler.primitive_wrapper_from_primitive(invalid_proto)
self.assertIsInstance(fe.exception, fhir_errors.InvalidFhirError)
class PrimitiveWrapperJsonValidationTest(
absltest.TestCase, metaclass=abc.ABCMeta):
"""Tests the primitive_wrapper classes for json parsing/validation."""
@property
@abc.abstractmethod
def primitive_handler(self) -> primitive_handler.PrimitiveHandler:
raise NotImplementedError('Subclasses *must* implement primitive_handler.')
@property
@abc.abstractmethod
def validation_dir(self) -> str:
raise NotImplementedError('Subclasses *must* implement validation_dir')
def testValidateWrapped_withValidBase64Binary_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.base64_binary_cls)
def testValidateWrapped_withInvalidBase64Binary_raises(self):
self.assert_json_validation_with_invalid_primitive_raises(
self.primitive_handler.base64_binary_cls)
def testValidateWrapped_withValidBoolean_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.boolean_cls)
def testValidateWrapped_withValidCode_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.code_cls)
def testValidateWrapped_withInvalidCode_raises(self):
self.assert_json_validation_with_invalid_primitive_raises(
self.primitive_handler.code_cls)
def testValidateWrapped_withValidDate_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.date_cls)
def testValidateWrapped_withInvalidDate_raises(self):
self.assert_json_validation_with_invalid_primitive_raises(
self.primitive_handler.date_cls)
def testValidateWrapped_withValidDateTime_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.date_time_cls)
def testValidateWrapped_withInvalidDateTime_raises(self):
self.assert_json_validation_with_invalid_primitive_raises(
self.primitive_handler.date_time_cls)
def testValidateWrapped_withValidDecimal_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.decimal_cls)
def testValidateWrapped_withInvalidDecimal_raises(self):
self.assert_json_validation_with_invalid_primitive_raises(
self.primitive_handler.decimal_cls)
def testValidateWrapped_withValidId_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.id_cls)
def testValidateWrapped_withInvalidId_raises(self):
self.assert_json_validation_with_invalid_primitive_raises(
self.primitive_handler.id_cls)
def testValidateWrapped_withValidInstant_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.instant_cls)
def testValidateWrapped_withInvalidInstant_raises(self):
self.assert_json_validation_with_invalid_primitive_raises(
self.primitive_handler.instant_cls)
def testValidateWrapped_withValidInteger_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.integer_cls)
def testValidateWrapped_withValidMarkdown_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.markdown_cls)
def testValidateWrapped_withValidOid_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.oid_cls)
def testValidateWrapped_withInvalidOid_raises(self):
self.assert_json_validation_with_invalid_primitive_raises(
self.primitive_handler.oid_cls)
def testValidateWrapped_withValidPositiveInt_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.positive_int_cls)
def testValidateWrapped_withInvalidPositiveInt_raises(self):
self.assert_json_validation_with_invalid_primitive_raises(
self.primitive_handler.positive_int_cls)
def testValidateWrapped_withValidString_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.string_cls)
def testValidateWrapped_withValidTime_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.time_cls)
def testValidateWrapped_withInvalidTime_raises(self):
self.assert_json_validation_with_invalid_primitive_raises(
self.primitive_handler.time_cls)
def testValidateWrapped_withValidUnsignedInt_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.unsigned_int_cls)
def testValidateWrapped_withValidUri_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.uri_cls)
def testValidateWrapped_withValidXhtml_succeeds(self):
self.assert_json_validation_with_valid_primitive_succeeds(
self.primitive_handler.xhtml_cls)
def assert_json_validation_with_valid_primitive_succeeds(
self, primitive_cls: Type[message.Message]):
"""Performs a suite of validation tests on valid FHIR primitives."""
filename = path_utils.camel_case_to_snake_case(
primitive_cls.DESCRIPTOR.name)
filepath = os.path.join(self.validation_dir, filename + '.valid.ndjson')
json_lines = testdata_utils.read_data(filepath, delimiter='\n')
json_values = [
json.loads(x, parse_float=decimal.Decimal, parse_int=decimal.Decimal)
for x in json_lines
]
for value in json_values:
self.primitive_handler.primitive_wrapper_from_json_value(
value, primitive_cls)
def assert_json_validation_with_invalid_primitive_raises(
self, primitive_cls: Type[message.Message]):
"""Performs a suite of validation tests on invalid FHIR primitives."""
filename = path_utils.camel_case_to_snake_case(
primitive_cls.DESCRIPTOR.name)
filepath = os.path.join(self.validation_dir, filename + '.invalid.ndjson')
json_lines = testdata_utils.read_data(filepath, delimiter='\n')
json_values = [
json.loads(x, parse_float=decimal.Decimal, parse_int=decimal.Decimal)
for x in json_lines
]
for value in json_values:
with self.assertRaises(fhir_errors.InvalidFhirError):
self.primitive_handler.primitive_wrapper_from_json_value(
value, primitive_cls)
class DateTimeWrapperTest(absltest.TestCase, metaclass=abc.ABCMeta):
"""Tests the DateTimeWrapper class on specific parsing/printing scenarios."""
@property
@abc.abstractmethod
def primitive_handler(self) -> primitive_handler.PrimitiveHandler:
raise NotImplementedError('Subclasses *must* implement primitive_handler.')
def testParseDateTime_withYearPrecision_succeeds(self):
datetime_str = '1971'
expected = self.primitive_handler.new_date_time(
value_us=31536000000000,
precision=_primitive_time_utils.DateTimePrecision.YEAR,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
datetime_str, self.primitive_handler.date_time_cls)
self.assertEqual(wrapper.string_value(), '1971')
self.assertEqual(wrapper.wrapped, expected)
expected_alt_timezone = self.primitive_handler.new_date_time(
value_us=31500000000000,
precision=_primitive_time_utils.DateTimePrecision.YEAR,
timezone='Australia/Sydney')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
datetime_str,
self.primitive_handler.date_time_cls,
default_timezone='Australia/Sydney')
self.assertEqual(wrapper.string_value(), '1971')
self.assertEqual(wrapper.wrapped, expected_alt_timezone)
def testParseDateTime_withMonthPrecision_succeeds(self):
datetime_str = '1970-02'
expected = self.primitive_handler.new_date_time(
value_us=2678400000000,
precision=_primitive_time_utils.DateTimePrecision.MONTH,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
datetime_str, self.primitive_handler.date_time_cls)
self.assertEqual(wrapper.string_value(), '1970-02')
self.assertEqual(wrapper.wrapped, expected)
expected_alt_timezone = self.primitive_handler.new_date_time(
value_us=2642400000000,
precision=_primitive_time_utils.DateTimePrecision.MONTH,
timezone='Australia/Sydney')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
datetime_str,
self.primitive_handler.date_time_cls,
default_timezone='Australia/Sydney')
self.assertEqual(wrapper.string_value(), '1970-02')
self.assertEqual(wrapper.wrapped, expected_alt_timezone)
def testParseDateTime_withDayPrecision_succeeds(self):
datetime_str = '1970-01-01'
expected = self.primitive_handler.new_date_time(
value_us=0,
precision=_primitive_time_utils.DateTimePrecision.DAY,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
datetime_str, self.primitive_handler.date_time_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01')
self.assertEqual(wrapper.wrapped, expected)
expected_alt_timezone = self.primitive_handler.new_date_time(
value_us=-36000000000,
precision=_primitive_time_utils.DateTimePrecision.DAY,
timezone='Australia/Sydney')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
datetime_str,
self.primitive_handler.date_time_cls,
default_timezone='Australia/Sydney')
self.assertEqual(wrapper.string_value(), '1970-01-01')
self.assertEqual(wrapper.wrapped, expected_alt_timezone)
def testParseDateTime_withSecondPrecision_succeeds(self):
datetime_str = '2014-10-09T14:58:00+11:00'
expected = self.primitive_handler.new_date_time(
value_us=1412827080000000,
precision=_primitive_time_utils.DateTimePrecision.SECOND,
timezone='+11:00')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
datetime_str, self.primitive_handler.date_time_cls)
self.assertEqual(wrapper.string_value(), '2014-10-09T14:58:00+11:00')
self.assertEqual(wrapper.wrapped, expected)
def testParseDateTime_withMillisecondPrecision_succeeds(self):
datetime_str = '1970-01-01T12:00:00.123Z'
expected = self.primitive_handler.new_date_time(
value_us=43200123000,
precision=_primitive_time_utils.DateTimePrecision.MILLISECOND,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
datetime_str, self.primitive_handler.date_time_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01T12:00:00.123Z')
self.assertEqual(wrapper.wrapped, expected)
datetime_str = '1970-01-01T12:00:00.123+00:00'
expected = self.primitive_handler.new_date_time(
value_us=43200123000,
precision=_primitive_time_utils.DateTimePrecision.MILLISECOND,
timezone='+00:00')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
datetime_str, self.primitive_handler.date_time_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01T12:00:00.123+00:00')
self.assertEqual(wrapper.wrapped, expected)
datetime_str = '1970-01-01T12:00:00.123-00:00'
expected = self.primitive_handler.new_date_time(
value_us=43200123000,
precision=_primitive_time_utils.DateTimePrecision.MILLISECOND,
timezone='-00:00')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
datetime_str, self.primitive_handler.date_time_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01T12:00:00.123-00:00')
self.assertEqual(wrapper.wrapped, expected)
def testParseDateTime_withMicrosecondPrecision_succeeds(self):
datetime_str = '1970-01-01T12:00:00.123456Z'
expected = self.primitive_handler.new_date_time(
value_us=43200123456,
precision=_primitive_time_utils.DateTimePrecision.MICROSECOND,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
datetime_str, self.primitive_handler.date_time_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01T12:00:00.123456Z')
self.assertEqual(wrapper.wrapped, expected)
datetime_str = '1970-01-01T12:00:00.123456+00:00'
expected = self.primitive_handler.new_date_time(
value_us=43200123456,
precision=_primitive_time_utils.DateTimePrecision.MICROSECOND,
timezone='+00:00')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
datetime_str, self.primitive_handler.date_time_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01T12:00:00.123456+00:00')
self.assertEqual(wrapper.wrapped, expected)
datetime_str = '1970-01-01T12:00:00.123000-00:00'
expected = self.primitive_handler.new_date_time(
value_us=43200123000,
precision=_primitive_time_utils.DateTimePrecision.MICROSECOND,
timezone='-00:00')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
datetime_str, self.primitive_handler.date_time_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01T12:00:00.123000-00:00')
self.assertEqual(wrapper.wrapped, expected)
def testPrintDateTime_withYearPrecision_succeeds(self):
primitive = self.primitive_handler.new_date_time(
value_us=0,
precision=_primitive_time_utils.DateTimePrecision.YEAR,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970')
primitive = self.primitive_handler.new_date_time(
value_us=-36000000000,
precision=_primitive_time_utils.DateTimePrecision.YEAR,
timezone='Australia/Sydney')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970')
def testPrintDateTime_withMonthPrecision_succeeds(self):
primitive = self.primitive_handler.new_date_time(
value_us=0,
precision=_primitive_time_utils.DateTimePrecision.MONTH,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970-01')
primitive = self.primitive_handler.new_date_time(
value_us=-36000000000,
precision=_primitive_time_utils.DateTimePrecision.MONTH,
timezone='Australia/Sydney')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970-01')
def testPrintDateTime_withDayPrecision_succeeds(self):
primitive = self.primitive_handler.new_date_time(
value_us=0,
precision=_primitive_time_utils.DateTimePrecision.DAY,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970-01-01')
primitive = self.primitive_handler.new_date_time(
value_us=-36000000000,
precision=_primitive_time_utils.DateTimePrecision.DAY,
timezone='Australia/Sydney')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970-01-01')
def testPrintDateTime_withSecondPrecision_succeeds(self):
primitive = self.primitive_handler.new_date_time(
value_us=1412827080000000,
precision=_primitive_time_utils.DateTimePrecision.SECOND,
timezone='+11:00')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '2014-10-09T14:58:00+11:00')
primitive = self.primitive_handler.new_date_time(
value_us=1412827080000000,
precision=_primitive_time_utils.DateTimePrecision.SECOND,
timezone='Australia/Sydney')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '2014-10-09T14:58:00+11:00')
def testPrintDateTime_withMillisecondPrecision_succeeds(self):
primitive = self.primitive_handler.new_date_time(
value_us=43200123000,
precision=_primitive_time_utils.DateTimePrecision.MILLISECOND,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970-01-01T12:00:00.123Z')
primitive = self.primitive_handler.new_date_time(
value_us=43200123000,
precision=_primitive_time_utils.DateTimePrecision.MILLISECOND,
timezone='UTC')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970-01-01T12:00:00.123Z')
primitive = self.primitive_handler.new_date_time(
value_us=43200123000,
precision=_primitive_time_utils.DateTimePrecision.MILLISECOND,
timezone='-00:00')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970-01-01T12:00:00.123-00:00')
primitive = self.primitive_handler.new_date_time(
value_us=43200123000,
precision=_primitive_time_utils.DateTimePrecision.MILLISECOND,
timezone='+00:00')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970-01-01T12:00:00.123+00:00')
def testPrintDateTime_withMicrosecondPrecision_succeeds(self):
primitive = self.primitive_handler.new_date_time(
value_us=43200123456,
precision=_primitive_time_utils.DateTimePrecision.MICROSECOND,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970-01-01T12:00:00.123456Z')
primitive = self.primitive_handler.new_date_time(
value_us=43200123456,
precision=_primitive_time_utils.DateTimePrecision.MICROSECOND,
timezone='UTC')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970-01-01T12:00:00.123456Z')
primitive = self.primitive_handler.new_date_time(
value_us=43200123456,
precision=_primitive_time_utils.DateTimePrecision.MICROSECOND,
timezone='-00:00')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970-01-01T12:00:00.123456-00:00')
primitive = self.primitive_handler.new_date_time(
value_us=43200123456,
precision=_primitive_time_utils.DateTimePrecision.MICROSECOND,
timezone='+00:00')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970-01-01T12:00:00.123456+00:00')
class DateWrapperTest(absltest.TestCase, metaclass=abc.ABCMeta):
"""Tests the DateWrapper class on specific parsing/printing scenarios."""
@property
@abc.abstractmethod
def primitive_handler(self) -> primitive_handler.PrimitiveHandler:
raise NotImplementedError('Subclasses *must* implement primitive_handler.')
def testParseDate_withYearPrecision_succeeds(self):
date_str = '1971'
expected = self.primitive_handler.new_date(
value_us=31536000000000,
precision=_primitive_time_utils.DateTimePrecision.YEAR,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
date_str, self.primitive_handler.date_cls)
self.assertEqual(wrapper.string_value(), '1971')
self.assertEqual(wrapper.wrapped, expected)
expected = self.primitive_handler.new_date(
value_us=31500000000000,
precision=_primitive_time_utils.DateTimePrecision.YEAR,
timezone='Australia/Sydney')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
date_str,
self.primitive_handler.date_cls,
default_timezone='Australia/Sydney')
self.assertEqual(wrapper.string_value(), '1971')
self.assertEqual(wrapper.wrapped, expected)
def testParseDate_withMonthPrecision_succeeds(self):
date_str = '1970-02'
expected = self.primitive_handler.new_date(
value_us=2678400000000,
precision=_primitive_time_utils.DateTimePrecision.MONTH,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
date_str, self.primitive_handler.date_cls)
self.assertEqual(wrapper.string_value(), '1970-02')
self.assertEqual(wrapper.wrapped, expected)
expected = self.primitive_handler.new_date(
value_us=2642400000000,
precision=_primitive_time_utils.DateTimePrecision.MONTH,
timezone='Australia/Sydney')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
date_str,
self.primitive_handler.date_cls,
default_timezone='Australia/Sydney')
self.assertEqual(wrapper.string_value(), '1970-02')
self.assertEqual(wrapper.wrapped, expected)
def testParseDate_withDayPrecision_succeeds(self):
date_str = '1970-01-01'
expected = self.primitive_handler.new_date(
value_us=0,
precision=_primitive_time_utils.DateTimePrecision.DAY,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
date_str, self.primitive_handler.date_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01')
self.assertEqual(wrapper.wrapped, expected)
expected = self.primitive_handler.new_date(
value_us=-36000000000,
precision=_primitive_time_utils.DateTimePrecision.DAY,
timezone='Australia/Sydney')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
date_str,
self.primitive_handler.date_cls,
default_timezone='Australia/Sydney')
self.assertEqual(wrapper.string_value(), '1970-01-01')
self.assertEqual(wrapper.wrapped, expected)
expected = self.primitive_handler.new_date(
value_us=18000000000,
precision=_primitive_time_utils.DateTimePrecision.DAY,
timezone='-05:00')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
date_str, self.primitive_handler.date_cls, default_timezone='-05:00')
self.assertEqual(wrapper.string_value(), '1970-01-01')
self.assertEqual(wrapper.wrapped, expected)
def testPrintDate_withYearPrecision_succeeds(self):
primitive = self.primitive_handler.new_date(
value_us=0,
precision=_primitive_time_utils.DateTimePrecision.YEAR,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970')
primitive = self.primitive_handler.new_date(
value_us=-36000000000,
precision=_primitive_time_utils.DateTimePrecision.YEAR,
timezone='Australia/Sydney')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970')
def testPrintDate_withMonthPrecision_succeeds(self):
primitive = self.primitive_handler.new_date(
value_us=0,
precision=_primitive_time_utils.DateTimePrecision.MONTH,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970-01')
primitive = self.primitive_handler.new_date(
value_us=-36000000000,
precision=_primitive_time_utils.DateTimePrecision.MONTH,
timezone='Australia/Sydney')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970-01')
def testPrintDate_withDayPrecision_succeeds(self):
primitive = self.primitive_handler.new_date(
value_us=0,
precision=_primitive_time_utils.DateTimePrecision.DAY,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970-01-01')
primitive = self.primitive_handler.new_date(
value_us=-36000000000,
precision=_primitive_time_utils.DateTimePrecision.DAY,
timezone='Australia/Sydney')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '1970-01-01')
class InstantWrapperTest(absltest.TestCase, metaclass=abc.ABCMeta):
"""Tests the InstantWrapper class on specific parsing/printing scenarios."""
@property
@abc.abstractmethod
def primitive_handler(self) -> primitive_handler.PrimitiveHandler:
raise NotImplementedError('Subclasses *must* implement primitive_handler.')
def testParseInstant_withSecondPrecision_succeeds(self):
instant_str = '1970-01-01T00:00:00Z'
expected = self.primitive_handler.new_instant(
value_us=0,
precision=_primitive_time_utils.TimePrecision.SECOND,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
instant_str, self.primitive_handler.instant_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01T00:00:00Z')
self.assertEqual(wrapper.wrapped, expected)
instant_str = '1970-01-01T00:00:00+00:00'
expected = self.primitive_handler.new_instant(
value_us=0,
precision=_primitive_time_utils.TimePrecision.SECOND,
timezone='+00:00')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
instant_str, self.primitive_handler.instant_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01T00:00:00+00:00')
self.assertEqual(wrapper.wrapped, expected)
instant_str = '1970-01-01T00:00:00-00:00'
expected = self.primitive_handler.new_instant(
value_us=0,
precision=_primitive_time_utils.TimePrecision.SECOND,
timezone='-00:00')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
instant_str, self.primitive_handler.instant_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01T00:00:00-00:00')
self.assertEqual(wrapper.wrapped, expected)
def testParseInstant_withMillisecondPrecision_succeeds(self):
instant_str = '1970-01-01T00:00:00.123Z'
expected = self.primitive_handler.new_instant(
value_us=123000,
precision=_primitive_time_utils.TimePrecision.MILLISECOND,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
instant_str, self.primitive_handler.instant_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01T00:00:00.123Z')
self.assertEqual(wrapper.wrapped, expected)
instant_str = '1970-01-01T00:00:00.123+00:00'
expected = self.primitive_handler.new_instant(
value_us=123000,
precision=_primitive_time_utils.TimePrecision.MILLISECOND,
timezone='+00:00')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
instant_str, self.primitive_handler.instant_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01T00:00:00.123+00:00')
self.assertEqual(wrapper.wrapped, expected)
instant_str = '1970-01-01T00:00:00.123-00:00'
expected = self.primitive_handler.new_instant(
value_us=123000,
precision=_primitive_time_utils.TimePrecision.MILLISECOND,
timezone='-00:00')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
instant_str, self.primitive_handler.instant_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01T00:00:00.123-00:00')
self.assertEqual(wrapper.wrapped, expected)
def testParseInstant_withMicrosecondPrecision_succeeds(self):
instant_str = '1970-01-01T00:00:00.123000Z'
expected = self.primitive_handler.new_instant(
value_us=123000,
precision=_primitive_time_utils.TimePrecision.MICROSECOND,
timezone='Z')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
instant_str, self.primitive_handler.instant_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01T00:00:00.123000Z')
self.assertEqual(wrapper.wrapped, expected)
instant_str = '1970-01-01T00:00:00.123000+00:00'
expected = self.primitive_handler.new_instant(
value_us=123000,
precision=_primitive_time_utils.TimePrecision.MICROSECOND,
timezone='+00:00')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
instant_str, self.primitive_handler.instant_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01T00:00:00.123000+00:00')
self.assertEqual(wrapper.wrapped, expected)
instant_str = '1970-01-01T00:00:00.123000-00:00'
expected = self.primitive_handler.new_instant(
value_us=123000,
precision=_primitive_time_utils.TimePrecision.MICROSECOND,
timezone='-00:00')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
instant_str, self.primitive_handler.instant_cls)
self.assertEqual(wrapper.string_value(), '1970-01-01T00:00:00.123000-00:00')
self.assertEqual(wrapper.wrapped, expected)
class TimeWrapperTest(absltest.TestCase, metaclass=abc.ABCMeta):
"""Tests the TimeWrapper class on specific parsing/printing scenarios."""
@property
@abc.abstractmethod
def primitive_handler(self) -> primitive_handler.PrimitiveHandler:
raise NotImplementedError('Subclasses *must* implement primitive_handler.')
def testParseTime_withSecondPrecision_succeeds(self):
timestamp = '12:00:00'
expected = self.primitive_handler.new_time(
value_us=43200000000,
precision=_primitive_time_utils.TimePrecision.SECOND)
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
timestamp, self.primitive_handler.time_cls)
self.assertEqual(wrapper.string_value(), '12:00:00')
self.assertEqual(wrapper.wrapped, expected)
def testParseTime_withMillisecondPrecision_succeeds(self):
timestamp = '12:00:00.123'
expected = self.primitive_handler.new_time(
value_us=43200123000,
precision=_primitive_time_utils.TimePrecision.MILLISECOND)
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
timestamp, self.primitive_handler.time_cls)
self.assertEqual(wrapper.string_value(), '12:00:00.123')
self.assertEqual(wrapper.wrapped, expected)
def testParseTime_withMicrosecondPrecision_succeeds(self):
timestamp = '12:00:00.123000'
expected = self.primitive_handler.new_time(
value_us=43200123000,
precision=_primitive_time_utils.TimePrecision.MICROSECOND)
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
timestamp, self.primitive_handler.time_cls)
self.assertEqual(wrapper.string_value(), '12:00:00.123000')
self.assertEqual(wrapper.wrapped, expected)
def testPrintTime_withSecondPrecision_succeeds(self):
primitive = self.primitive_handler.new_time(
value_us=43200000000,
precision=_primitive_time_utils.TimePrecision.SECOND)
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '12:00:00')
def testPrintTime_withMillisecondPrecision_succeeds(self):
primitive = self.primitive_handler.new_time(
value_us=43200123000,
precision=_primitive_time_utils.TimePrecision.MILLISECOND)
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '12:00:00.123')
def testPrintTime_withMicrosecondPrecision_succeeds(self):
primitive = self.primitive_handler.new_time(
value_us=43200123000,
precision=_primitive_time_utils.TimePrecision.MICROSECOND)
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '12:00:00.123000')
class DecimalWrapperTest(absltest.TestCase, metaclass=abc.ABCMeta):
"""Tests the DecimalWrapper class on specific parsing/printing scenarios."""
@property
@abc.abstractmethod
def primitive_handler(self) -> primitive_handler.PrimitiveHandler:
raise NotImplementedError('Subclasses *must* implement primitive_handler.')
def testParseDecimal_withPositiveInteger_succeeds(self):
expected = self.primitive_handler.new_decimal(value='185')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
decimal.Decimal('185'), self.primitive_handler.decimal_cls)
self.assertEqual(wrapper.string_value(), '185')
self.assertEqual(wrapper.wrapped, expected)
expected = self.primitive_handler.new_decimal(value='100')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
decimal.Decimal('100'), self.primitive_handler.decimal_cls)
self.assertEqual(wrapper.string_value(), '100')
self.assertEqual(wrapper.wrapped, expected)
def testParseDecimal_withNegativeInteger_succeeds(self):
expected = self.primitive_handler.new_decimal(value='-40')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
decimal.Decimal('-40'), self.primitive_handler.decimal_cls)
self.assertEqual(wrapper.string_value(), '-40')
self.assertEqual(wrapper.wrapped, expected)
def testParseDecimal_withPositiveReal_succeeds(self):
expected = self.primitive_handler.new_decimal(value='0.0099')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
decimal.Decimal('0.0099'), self.primitive_handler.decimal_cls)
self.assertEqual(wrapper.string_value(), '0.0099')
self.assertEqual(wrapper.wrapped, expected)
expected = self.primitive_handler.new_decimal(value='100.00')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
decimal.Decimal('100.00'), self.primitive_handler.decimal_cls)
self.assertEqual(wrapper.string_value(), '100.00')
self.assertEqual(wrapper.wrapped, expected)
def testParseDecimal_withZero_succeeds(self):
expected = self.primitive_handler.new_decimal(value='0')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
decimal.Decimal('0'), self.primitive_handler.decimal_cls)
self.assertEqual(wrapper.string_value(), '0')
self.assertEqual(wrapper.wrapped, expected)
def testParseDecimal_withHighPrecisionReal_succeeds(self):
expected = self.primitive_handler.new_decimal(value='1.00065022141624642')
wrapper = self.primitive_handler.primitive_wrapper_from_json_value(
decimal.Decimal('1.00065022141624642'),
self.primitive_handler.decimal_cls)
self.assertEqual(wrapper.string_value(), '1.00065022141624642')
self.assertEqual(wrapper.wrapped, expected)
def testPrintDecimal_withPositiveInteger_succeeds(self):
primitive = self.primitive_handler.new_decimal(value='185')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '185')
primitive = self.primitive_handler.new_decimal(value='100')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '100')
def testPrintDecimal_withNegativeInteger_succeeds(self):
primitive = self.primitive_handler.new_decimal(value='-40')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '-40')
def testPrintDecimal_withPositiveReal_succeeds(self):
primitive = self.primitive_handler.new_decimal(value='0.0099')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '0.0099')
primitive = self.primitive_handler.new_decimal(value='100.00')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '100.00')
def testPrintDecimal_withZero_succeeds(self):
primitive = self.primitive_handler.new_decimal(value='0')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '0')
primitive = self.primitive_handler.new_decimal(value='0.00')
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
self.assertEqual(wrapper.string_value(), '0.00')
|
from django.contrib.auth import get_user_model
from django.db import models
from django.urls import reverse
from google_address.models import Address
from jobboard.handlers.oracle import OracleHandler
from users.utils import company_member_role
class Company(models.Model):
contract_address = models.CharField(max_length=42,
blank=True,
null=True)
published = models.BooleanField(default=False)
created_by = models.ForeignKey('users.Member',
related_name='+',
on_delete=models.SET_NULL,
null=True)
logo = models.ImageField(null=True)
name = models.CharField(max_length=512,
null=False,
blank=False)
tax_number = models.CharField(max_length=64,
null=False,
blank=False)
legal_address = models.CharField(max_length=255)
work_sector = models.CharField(max_length=512)
date_created = models.DateField(null=True,
blank=True)
site = models.URLField(null=True,
blank=True)
description = models.TextField(blank=False,
null=False)
phone = models.CharField(max_length=31)
email = models.EmailField()
verified = models.BooleanField(default=False)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('company', kwargs={'pk': self.pk})
@property
def owners(self):
collaborators = self.collaborators
return collaborators.filter(pk__in=[member.id for member in collaborators if
company_member_role(self.contract_address,
member.contract_address) == 'owner'])
@property
def collaborators(self):
members = self.members
return members.filter(pk__in=[member.id for member in members if
company_member_role(self.contract_address, member.contract_address) != 'member'])
@property
def members(self):
_model = get_user_model()
if not self.contract_address:
return _model.objects.none()
oracle = OracleHandler()
members = oracle.get_company_members(self.contract_address)
return _model.objects.filter(contract_address__in=members)
class Office(models.Model):
company = models.ForeignKey(Company,
on_delete=models.CASCADE,
related_name='offices')
address = models.ForeignKey(Address,
on_delete=models.SET_NULL,
null=True)
main = models.BooleanField(default=False)
def __str__(self):
return self.address.address_line
class SocialLink(models.Model):
company = models.ForeignKey(Company,
blank=False,
null=False,
on_delete=models.CASCADE,
related_name='social_links')
link = models.URLField(blank=False,
null=False)
def __str__(self):
return '{}: {}'.format(self.company.name, self.link)
class RequestToCompany(models.Model):
company = models.ForeignKey(Company,
on_delete=models.CASCADE,
related_name='invites')
member = models.ForeignKey('users.Member',
on_delete=models.CASCADE,
related_name='+')
class Meta:
unique_together = (('company', 'member'),)
|
from proto import FieldType, Message, serializable, field
@serializable()
@field('message', FieldType.String)
class ErrorMessage(Message):
def __init__(self, message=''):
self.message = message
    def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.message)
|
from keras.layers import GlobalAveragePooling2D, Multiply, Dense
from keras import backend as K
def SqueezeExcite(x, ratio=16, name=''):
nb_chan = K.int_shape(x)[-1]
y = GlobalAveragePooling2D(name='{}_se_avg'.format(name))(x)
y = Dense(nb_chan // ratio, activation='relu', name='{}_se_dense1'.format(name))(y)
y = Dense(nb_chan, activation='sigmoid', name='{}_se_dense2'.format(name))(y)
y = Multiply(name='{}_se_mul'.format(name))([x, y])
return y
|
import pytest
from lib.utils.merge_dict import merge_dict
def test_merge_dict():
dict1 = {1: 'apple', 2: 'ball'}
dict2 = {'name':'Jack', 'age': 26, 'phone': ['number1', 'number2']}
output_dict = merge_dict(dict1, dict2)
expected_dict = {'name':'Jack', 'age': 26, 'phone': ['number1', 'number2'], 1: 'apple', 2: 'ball'}
assert output_dict == expected_dict
|
from .utils.fixtures import user_validator
from .utils.validator_functions import TestValidators
def test_field_validator_functions(user_validator):
"""
This test makes sure that the defined functions are present.
Note:
        Unfortunately we can't check dynamic functions like `is_gt_than`
        because they return closures, but knowing that the static
        functions are present in the field's validators list is good enough.
"""
email_field = user_validator.fields['email']
assert TestValidators.is_email in email_field.validators
def test_field_attribute_after_validation(user_validator):
assert getattr(user_validator, 'email', None) is None
user_validator.is_valid()
assert user_validator.email == user_validator.data['email']
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
DEVELOPMENT SERVER
------------------
Simple development server.
Copyright (c) 2014 Teun Zengerink
Licensed under MIT License.
See: https://raw.github.com/Mytho/groceries/master/LISENCE.md
"""
import os
import sys
sys.path.append(os.path.abspath('.'))
from application import app
def main():
app.run(host='0.0.0.0', port=8001, debug=app.config['DEBUG'])
if __name__ == "__main__":
main()
|
from mrhttp import app
@app.route('/')
def hello(r):
    if r.file is None:
return "No file uploaded"
#for f in r.files:
#print(f)
name = r.file['name']
typ = r.file['type']
body = r.file['body']
return name
app.run(cores=4)
# curl -i -X POST -F "data=@14_upload.py" http://localhost:8080/
|
from binaryninja import *
import base64
import copy
import json
def falcon_export(bv) :
filename = interaction.get_save_filename_input("Filename for Binja export")
segments = []
for segment in bv.segments :
segments.append({
'address': segment.start,
            'bytes': base64.b64encode(bv.read(segment.start, segment.length)).decode('ascii')
})
functions = []
for function in bv.functions :
functions.append({
'name': function.name,
'address': function.start,
})
    fh = open(filename, 'w')
fh.write(json.dumps({
'functions': functions,
'segments': segments,
'arch': bv.arch.name,
'entry': bv.entry_point
}))
fh.close()
PluginCommand.register("Export for Falcon",
"Export disassembly information for Falcon",
falcon_export)
|
from OpenGL.raw.osmesa._types import *
from OpenGL.raw.osmesa.mesa import *
|
import schedules
import weather
import email_handler
def main():
# introduce our variables from our modules
emails = email_handler.get_emails()
schedule = schedules.get_schedule()
forecast = weather.get_weather_forecast()
# send the emails with schedule and forecast included.
try:
email_handler.send_emails(emails, schedule, forecast)
print(str(len(emails))+' Emails were sent!')
    except Exception:
print('Something went wrong. Emails did not send.')
main()
|
# Generated by Django 3.0.5 on 2020-05-02 06:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app_backend', '0010_businessmodel_code'),
('app_sme12', '0007_auto_20200428_1731'),
]
operations = [
migrations.RemoveField(
model_name='formregister',
name='employ',
),
migrations.CreateModel(
name='Revenue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=100, null=True, verbose_name='รายได้ต่อปี')),
('code', models.CharField(blank=True, max_length=20, null=True, verbose_name='รหัส')),
('active', models.BooleanField(default=True, verbose_name='สถานะการใช้งาน')),
('business_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='app_backend.BusinessType')),
],
),
migrations.CreateModel(
name='Employment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=100, null=True, verbose_name='จำนวนการจ้างงาน')),
('code', models.CharField(blank=True, max_length=20, null=True, verbose_name='รหัส')),
('active', models.BooleanField(default=True, verbose_name='สถานะการใช้งาน')),
('business_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='app_backend.BusinessType')),
],
),
migrations.AddField(
model_name='formregister',
name='employment',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='app_sme12.Employment'),
),
migrations.AlterField(
model_name='formregister',
name='revenue',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='app_sme12.Revenue'),
),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
.. Licence MIT
.. codeauthor:: Jan Lipovský <janlipovsky@gmail.com>, janlipovsky.cz
"""
import pytest
@pytest.mark.parametrize("text, expected", [
(r"{\url{http://www.google.com}}",
["http://www.google.com"]),
(r"{\url{http://www.google.com/file.pdf}}",
["http://www.google.com/file.pdf"]),
(r"{\url{http://www.google.com/{file.pdf}}",
["http://www.google.com/{file.pdf"]),
("a(sa\"enclosure.net/bracketext\"as)asd",
['enclosure.net/bracketext']),
("<email@address.net>",
[]),
("`https://coala.io/200`",
['https://coala.io/200']),
("(enclosure.net/bracket)",
['enclosure.net/bracket']),
("enclosure.net)",
['enclosure.net']),
("(enclosure.net",
['enclosure.net']),
("(enclosure.net)",
['enclosure.net']),
("(encl)o(sure.net",
['sure.net']),
("enclosure.net/blah)o(blah",
['enclosure.net/blah)o(blah']),
("(enclosure.net/blah)o(blah",
['enclosure.net/blah']),
("stackoverflow.com)/my_account",
['stackoverflow.com']),
("{enclosure.net/curly}",
['enclosure.net/curly']),
("[enclosure.net/square]",
['enclosure.net/square']),
("\"enclosure.net/dqoute\"",
['enclosure.net/dqoute']),
("\\enclosure.net/slash\\",
['enclosure.net/slash']),
("'enclosure.net/qoute'",
['enclosure.net/qoute']),
("(( example.net)asd",
['example.net']),
("asd(enclosure.net/bracketext)asd",
['enclosure.net/bracketext']),
("Foo (http://de.wikipedia.org/wiki/Agilit%C3%A4t_(Management)) Bar",
["http://de.wikipedia.org/wiki/Agilit%C3%A4t_(Management)"]),
("asd(http://de.wikipedia.org/wiki/(Agilit%C3(%A4t_(Manag)ement))) Bar",
["http://de.wikipedia.org/wiki/(Agilit%C3(%A4t_(Manag)ement))"]),
("asd(enclosure.net/rbracketless",
['enclosure.net/rbracketless']),
("asd)enclosure.net/lbracketless",
['enclosure.net/lbracketless']),
("asd{enclosure.net",
['enclosure.net']),
("asd}enclosure.net",
['enclosure.net']),
("asd[enclosure.net",
['enclosure.net']),
("asd]enclosure.net",
['enclosure.net']),
("(enclo(sure.net",
['sure.net']),
('([smh.com.au])]',
['smh.com.au']),
('"some string with urls ( example.com/somepath)"',
['example.com/somepath']),
('"some string with urls example.com/somepa)th)"',
['example.com/somepa)th)']),
("asd( enclosure.net/bracketext)asd",
['enclosure.net/bracketext']),
])
def test_find_urls(urlextract, text, expected):
"""
Testing find_urls returning all URLs
:param fixture urlextract: fixture holding URLExtract object
:param str text: text in which we should find links
    :param list(str) expected: list of URLs that have to be found in the text
"""
assert urlextract.find_urls(text) == expected
def test_get_enclosures(urlextract):
assert urlextract._enclosure == urlextract.get_enclosures()
def test_add_enclosure(urlextract):
old_enclosure = urlextract.get_enclosures().copy()
old_enclosure.add(("%", "%"))
urlextract.add_enclosure("%", "%")
assert old_enclosure == urlextract.get_enclosures()
with pytest.raises(AssertionError):
urlextract.remove_enclosure("aa", "ss")
with pytest.raises(AssertionError):
urlextract.remove_enclosure("", "")
def test_remove_enclosure(urlextract):
old_enclosure = urlextract.get_enclosures().copy()
old_enclosure.remove(("%", "%"))
urlextract.remove_enclosure("%", "%")
assert old_enclosure == urlextract.get_enclosures()
with pytest.raises(AssertionError):
urlextract.remove_enclosure("asd", "dddsa")
with pytest.raises(AssertionError):
urlextract.remove_enclosure("", "")
|
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
import click
import json
from services import PredictDoc
from dotenv import load_dotenv
load_dotenv()
@click.command()
@click.option('-d','--doctype', prompt='Document Type Label',
help='The document type you would like use for prediction.')
@click.option('-s','--source', prompt='Blob Sas Url',
help='The document you would like to run a prediction on. This needs to be a blob sas url.')
def main(doctype, source):
"""Document Prediction """
logging.getLogger().setLevel(logging.INFO)
logging.info(f'Started document prediction for document type:{doctype} document:{source}')
try:
predict_doc = PredictDoc()
response = predict_doc.run(doctype, source)
return print(json.dumps(response))
except Warning as we:
        logging.warning(we)
except EnvironmentError as ee:
logging.error(ee)
except Exception as e:
logging.error(e)
if __name__ == "__main__":
main()
|
from talon.mac import applescript
from talon import Context, Module
import os
mod = Module()
@mod.action_class
class SystemPreferencesActions:
def bluetooth_focus():
"""Focuses on Bluetooth preferences"""
applescript.run(r"""tell application "System Preferences"
reveal pane "com.apple.preferences.Bluetooth"
end tell""")
def display_focus():
"""Focuses on display preferences"""
applescript.run(r"""tell application "System Preferences"
reveal pane "com.apple.preference.displays"
end tell""")
def keyboard_focus():
"""Focuses on keyboard preferences"""
applescript.run(r"""tell application "System Preferences"
reveal pane "com.apple.preference.keyboard"
end tell""")
def audio_focus():
"""Focuses on audio preferences"""
applescript.run(r"""tell application "System Preferences"
reveal pane "com.apple.preference.sound"
end tell""")
|
#coding:utf-8
import numpy as np
from scipy.linalg import norm
import cvxopt
import cvxopt.solvers
from pylab import *
"""
非線形SVM
cvxoptのQuadratic Programmingを解く関数を使用
"""
N = 100 # データ数
P = 3 # 多項式カーネルのパラメータ
SIGMA = 5.0 # ガウスカーネルのパラメータ
# 多項式カーネル
def polynomial_kernel(x, y):
return (1 + np.dot(x, y)) ** P
# ガウスカーネル
def gaussian_kernel(x, y):
return np.exp(-norm(x-y)**2 / (2 * (SIGMA ** 2)))
# どちらのカーネル関数を使うかここで指定
kernel = gaussian_kernel
# Sを渡してサポートベクトルだけで計算した方が早い
# サポートベクトルはa[n]=0なのでsumに足す必要ない
def f(x, a, t, X, b):
summation = 0.0
for n in range(N):
summation += a[n] * t[n] * kernel(x, X[n])
return summation + b
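# Hedged sketch (not used by the plotting code below): the support-vector-only
# variant suggested in the comment above. `S` is the list of support vector
# indices computed in the __main__ block.
def f_sv(x, a, t, X, b, S):
    summation = 0.0
    for n in S:  # a[n] is (numerically) zero outside S, so nothing is lost
        summation += a[n] * t[n] * kernel(x, X[n])
    return summation + b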
if __name__ == "__main__":
    # create the training data
    cls1 = []
    cls2 = []
    mean1 = [-1, 2]
    mean2 = [1, -1]
    mean3 = [4, -4]
    mean4 = [-4, 4]
    cov = [[1.0,0.8], [0.8, 1.0]]
    cls1.extend(np.random.multivariate_normal(mean1, cov, N // 4))
    cls1.extend(np.random.multivariate_normal(mean3, cov, N // 4))
    cls2.extend(np.random.multivariate_normal(mean2, cov, N // 4))
    cls2.extend(np.random.multivariate_normal(mean4, cov, N // 4))
    # build the data matrix X
    X = vstack((cls1, cls2))
    # build the label vector t
    t = []
    for i in range(N // 2):
        t.append(1.0)  # class 1
    for i in range(N // 2):
        t.append(-1.0)  # class 2
    t = array(t)
    # find the Lagrange multipliers by quadratic programming
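    # How the cvxopt call maps onto the SVM dual (descriptive note): cvxopt.solvers.qp
    # minimises (1/2) x^T Q x + p^T x subject to G x <= h and A x = b. With
    # Q[i,j] = t_i t_j k(x_i, x_j), p = -1, G = -I, h = 0, A = t^T and b = 0, this is
    # exactly the hard-margin dual: maximise sum(a) - (1/2) sum_{i,j} a_i a_j t_i t_j k(x_i, x_j)
    # subject to a >= 0 and sum(a_i t_i) = 0.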
K = np.zeros((N, N))
for i in range(N):
for j in range(N):
K[i, j] = t[i] * t[j] * kernel(X[i], X[j])
Q = cvxopt.matrix(K)
p = cvxopt.matrix(-np.ones(N))
G = cvxopt.matrix(np.diag([-1.0] * N))
h = cvxopt.matrix(np.zeros(N))
A = cvxopt.matrix(t, (1, N))
b = cvxopt.matrix(0.0)
sol = cvxopt.solvers.qp(Q, p, G, h, A, b)
a = array(sol['x']).reshape(N)
    # extract the indices of the support vectors
S = []
for n in range(len(a)):
if a[n] < 1e-5: continue
S.append(n)
    # compute the bias term b
summation = 0
for n in S:
temp = 0
for m in S:
temp += a[m] * t[m] * kernel(X[n], X[m])
summation += (t[n] - temp)
b = summation / len(S)
    print(S, b)
    # plot the training data
x1, x2 = np.array(cls1).transpose()
plot(x1, x2, 'rx')
x1, x2 = np.array(cls2).transpose()
plot(x1, x2, 'bx')
    # plot the support vectors
for n in S:
scatter(X[n,0], X[n,1], s=80, c='g', marker='o')
    # draw the decision boundary
X1, X2 = meshgrid(linspace(-6,6,50), linspace(-6,6,50))
w, h = X1.shape
X1.resize(X1.size)
X2.resize(X2.size)
Z = array([f(array([x1, x2]), a, t, X, b) for (x1, x2) in zip(X1, X2)])
X1.resize((w, h))
X2.resize((w, h))
Z.resize((w, h))
CS = contour(X1, X2, Z, [0.0], colors='k', linewidths=1, origin='lower')
for n in S:
        print(f(X[n], a, t, X, b))
xlim(-6, 6)
ylim(-6, 6)
show()
|
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import copy
import numpy as np
import math
class VisionTransformerUpHead(nn.Layer):
"""VisionTransformerUpHead
VisionTransformerUpHead is the decoder of SETR-PUP, Ref https://arxiv.org/pdf/2012.15840.pdf
Reference:
Sixiao Zheng, et al. *"Rethinking Semantic Segmentation from a Sequence-to-Sequence Perspective with Transformers"*
"""
def __init__(self, embed_dim=1024, num_conv=1, num_upsample_layer=1,
conv3x3_conv1x1=True, align_corners=False, num_classes=60):
super(VisionTransformerUpHead, self).__init__()
self.num_classes = num_classes
self.align_corners = align_corners
self.num_conv = num_conv
self.num_upsample_layer = num_upsample_layer
self.conv3x3_conv1x1 = conv3x3_conv1x1
norm_bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0.0))
self.norm = nn.LayerNorm(embed_dim, epsilon=1e-06, weight_attr=self.get_norm_weight_attr(), bias_attr=norm_bias_attr)
if self.num_conv == 2:
if self.conv3x3_conv1x1:
self.conv_0 = nn.Conv2D(embed_dim, 256, 3, stride=1, padding=1, bias_attr=True)
else:
self.conv_0 = nn.Conv2D(embed_dim, 256, 1, stride=1, bias_attr=True)
self.conv_1 = nn.Conv2D(256, self.num_classes, 1, stride=1)
self.syncbn_fc_0 = nn.SyncBatchNorm(256, weight_attr=self.get_norm_weight_attr(), bias_attr=norm_bias_attr)
elif self.num_conv == 4:
self.conv_0 = nn.Conv2D(embed_dim, 256, kernel_size=3, stride=1, padding=1)
self.conv_1 = nn.Conv2D(256, 256, kernel_size=3, stride=1, padding=1)
self.conv_2 = nn.Conv2D(256, 256, kernel_size=3, stride=1, padding=1)
self.conv_3 = nn.Conv2D(256, 256, kernel_size=3, stride=1, padding=1)
self.conv_4 = nn.Conv2D(256, self.num_classes, kernel_size=1, stride=1)
self.syncbn_fc_0 = nn.SyncBatchNorm(256, weight_attr=self.get_norm_weight_attr(), bias_attr=norm_bias_attr)
self.syncbn_fc_1 = nn.SyncBatchNorm(256, weight_attr=self.get_norm_weight_attr(), bias_attr=norm_bias_attr)
self.syncbn_fc_2 = nn.SyncBatchNorm(256, weight_attr=self.get_norm_weight_attr(), bias_attr=norm_bias_attr)
self.syncbn_fc_3 = nn.SyncBatchNorm(256, weight_attr=self.get_norm_weight_attr(), bias_attr=norm_bias_attr)
def get_norm_weight_attr(self):
return paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0))
def to_2D(self, x):
n, hw, c = x.shape
h = w = int(math.sqrt(hw))
x = x.transpose([0, 2, 1]).reshape([n, c, h, w])
return x
def forward(self, x):
x = self.norm(x)
# (b,hw,c) -> (b,c,h,w)
x = self.to_2D(x)
up4x_resolution = [ 4*item for item in x.shape[2:]]
up16x_resolution = [ 16*item for item in x.shape[2:]]
if self.num_conv == 2:
if self.num_upsample_layer == 2:
x = self.conv_0(x)
x = self.syncbn_fc_0(x)
x = F.relu(x)
x = F.interpolate(x, up4x_resolution, mode='bilinear', align_corners=self.align_corners)
x = self.conv_1(x)
x = F.interpolate(x, up16x_resolution, mode='bilinear', align_corners=self.align_corners)
elif self.num_upsample_layer == 1:
x = self.conv_0(x)
x = self.syncbn_fc_0(x)
x = F.relu(x)
x = self.conv_1(x)
x = F.interpolate(x, up16x_resolution, mode='bilinear', align_corners=self.align_corners)
elif self.num_conv == 4:
if self.num_upsample_layer == 4:
x = self.conv_0(x)
x = self.syncbn_fc_0(x)
x = F.relu(x)
up2x_resolution = [ 2*item for item in x.shape[2:]]
x = F.interpolate(x, up2x_resolution, mode='bilinear', align_corners=self.align_corners)
x = self.conv_1(x)
x = self.syncbn_fc_1(x)
x = F.relu(x)
up2x_resolution = [ 2*item for item in x.shape[2:]]
x = F.interpolate(x, up2x_resolution, mode='bilinear', align_corners=self.align_corners)
x = self.conv_2(x)
x = self.syncbn_fc_2(x)
x = F.relu(x)
up2x_resolution = [ 2*item for item in x.shape[2:]]
x = F.interpolate(x, up2x_resolution, mode='bilinear', align_corners=self.align_corners)
x = self.conv_3(x)
x = self.syncbn_fc_3(x)
x = F.relu(x)
x = self.conv_4(x)
up2x_resolution = [ 2*item for item in x.shape[2:]]
x = F.interpolate(x, up2x_resolution, mode='bilinear', align_corners=self.align_corners)
return x
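# Hedged usage sketch (shapes and the single-device assumption are illustrative,
# not part of the original model code): the head expects transformer tokens of
# shape (batch, h*w, embed_dim) on a square patch grid and returns per-class
# logits upsampled 16x; SyncBatchNorm is assumed to behave like plain batch norm
# when run on a single device.
if __name__ == '__main__':
    head = VisionTransformerUpHead(embed_dim=1024, num_conv=4,
                                   num_upsample_layer=4, num_classes=60)
    tokens = paddle.randn([1, 32 * 32, 1024])  # a 32x32 patch grid
    logits = head(tokens)
    print(logits.shape)  # expected: [1, 60, 512, 512]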
|
import click
from mlsteam import version
from .api import MyelindlApi, MyelindlApiError
@click.command()
def info():
try:
api = MyelindlApi()
server_ver = api.version()
click.echo("Version: {}".format(version.__version__))
click.echo("Server: {}".format(api.host))
click.echo("Server Version: {}".format(server_ver))
click.echo("Username: {}".format(api.username))
click.echo("Data Port: {}".format(api.data_port))
except MyelindlApiError as e:
click.echo('Fail due to {}'.format(e))
raise
|
# -*- coding: utf-8 -*-
import json, os
README_URL = os.environ['CLLNMBR__DOCS_URL']
# LEGIT_USER = os.environ['CLLNMBR__LEGIT_USER']
# LEGIT_USER_PASSWORD = os.environ['CLLNMBR__LEGIT_USER_PASSWORD']
# LEGIT_GROUPER_GROUPS = json.loads( os.environ['CLLNMBR__LEGIT_GROUPS_JSON'] )
# LEGIT_EPPNS = json.loads( os.environ['CLLNMBR__LEGIT_EPPNS_JSON'] )
## auth
SUPER_USERS = json.loads( os.environ['CLLNMBR__SUPER_USERS_JSON'] )
STAFF_USERS = json.loads( os.environ['CLLNMBR__STAFF_USERS_JSON'] ) # users permitted access to admin
STAFF_GROUP = os.environ['CLLNMBR__STAFF_GROUP'] # not grouper-group; rather, name of django-admin group for permissions
TEST_META_DCT = json.loads( os.environ['CLLNMBR__TEST_META_DCT_JSON'] )
POST_LOGIN_ADMIN_REVERSE_URL = os.environ['CLLNMBR__POST_LOGIN_ADMIN_REVERSE_URL'] # tricky; for a direct-view of a model, the string would be in the form of: `admin:APP-NAME_MODEL-NAME_changelist`
|
from flask import render_template, redirect, url_for, flash, request
from werkzeug.urls import url_parse
from flask_login import login_user, logout_user, current_user
from flask_babel import _
from app import db
from app.auth import bp
from app.auth.forms import LoginForm, RegistrationForm, \
ResetPasswordRequestForm, ResetPasswordForm
from app.models import User, Club
from app.auth.email import send_password_reset_email, notify_admin
from sqlalchemy import func
@bp.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = LoginForm()
if form.validate_on_submit():
# user = User.query.filter_by(username=form.username.data).first()
# club = Club.query.filter_by(clubnum=form.clubnum.data).first()
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
user = User.query.filter_by(usernum=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash('Invalid username or password','info')
return redirect(url_for('auth.login'))
username = user.username
login_user(user, remember=form.remember_me.data)
next_page = request.args.get('next')
if not next_page or url_parse(next_page).netloc != '':
next_page = url_for('main.index' )
return redirect(next_page)
return render_template('auth/login.html',title=_('Sign In'), form=form)
@bp.route('/logout')
def logout():
logout_user()
return redirect(url_for('main.index' ))
@bp.route('/register', methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
clubs = []
user = current_user
form = RegistrationForm()
clubrows = Club.query.all()
# for club in clubrows:
# clubs.append(club.clubnum)
# clubs.append(club.clubname)
form.club.choices = [(str(row.clubnum), row.clubname) for row in Club.query.all()]
    # print(form.club.data)
if form.validate_on_submit():
clubrow = Club.query.filter_by(clubname = form.club.data).first()
usernumber = int(form.club.data)
usernumber = (usernumber +1) * 1000000
nextuser = (db.session.query(func.max(User.usernum)).filter(User.usernum<usernumber).scalar() or 0)
nextuser = nextuser +1
user = User(username=form.username.data, email=form.email.data, usernum=nextuser,club=form.club.data)
# user = User(username=form.username.data, email=form.email.data, usernum=nextuser)
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
print(form.club.data)
admin_user = User.query.filter_by(club = form.club.data, adminuser = 1)
for admins in admin_user:
print(admin_user)
print(admins.usernum)
notify_admin(user=user,adminuser = admins)
flash('Congratulations, you are now a registered user! Logon to complete your profile','info')
return redirect(url_for('auth.login'))
elif request.method == 'GET':
print()
# clubrows = Club.query.all()
# for club in clubrows:
# clubs.append(club.clubnum)
# clubs.append(club.clubname)
# form.club.choices = [(row.clubnum, row.clubname) for row in Club.query.all()]
return render_template('auth/register.html', title=_('Register'), form=form)
@bp.route('/reset_password_request', methods=['GET', 'POST'])
def reset_password_request():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = ResetPasswordRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
send_password_reset_email(user)
flash('Check your email for the instructions to reset your password','info')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password_request.html',
title=_('Reset Password'), form=form)
@bp.route('/reset_password/<token>', methods=['GET', 'POST'])
def reset_password(token):
if current_user.is_authenticated:
return redirect(url_for('main.index'))
user = User.verify_reset_password_token(token)
if not user:
return redirect(url_for('main.index'))
form = ResetPasswordForm()
if form.validate_on_submit():
user.set_password(form.password.data)
db.session.commit()
flash('Your password has been reset.','info')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
|
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from django.db import models
from django.contrib.auth.models import User
from polymorphic.models import PolymorphicModel
class Tag(models.Model):
name = models.CharField(max_length=64, blank=True, null=True)
def __str__(self):
return self.name
class Meta:
db_table = 'tag'
class Instructor(PolymorphicModel):
name = models.CharField(max_length=128, blank=True, null=True)
link = models.TextField(blank=True, null=True)
def __str__(self):
return self.name
class Meta:
db_table = 'instructor'
class Content(models.Model):
owner = models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE)
is_public = models.BooleanField(default=False)
# remove null=True for these three
created_at = models.DateTimeField(auto_now_add=True, null=True)
updated_at = models.DateTimeField(auto_now=True, null=True)
class Meta:
abstract = True
class Item(PolymorphicModel, Content):
tag_set = models.ManyToManyField(Tag, blank=True)
title = models.CharField(max_length=128, blank=True, null=True)
folder = models.ForeignKey('Folder', null=True, blank=True, on_delete=models.CASCADE)
def type(self):
return self.__class__.__name__
def __str__(self):
return self.title
class Meta:
db_table = 'item'
ordering = ['id', ]
class FileItem(Item):
file = models.FileField(upload_to='files', null=True)
class Meta:
db_table = 'file'
class Log(models.Model):
datetime = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class UserItem(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
item = models.ForeignKey(Item, on_delete=models.CASCADE)
class Meta:
abstract = True
class UserItemActionLog(Log, UserItem):
# Defining actions
    VIEWED = 'VI'
COMPLETED = 'CP'
SHARE = 'SH'
ACTIONS = [
(VIEWED, 'View'),
(COMPLETED, 'Complete'),
(SHARE, 'Share')
]
action = models.CharField(max_length=2, choices=ACTIONS)
@staticmethod
def save_log(action, user, item):
user_item_log = UserItemActionLog()
user_item_log.action = action
user_item_log.user = user
user_item_log.item = item
user_item_log.save()
class Meta:
        db_table = 'user_item_log'
class RatedItem(models.Model):
rate = models.SmallIntegerField(blank=True, null=True)
class Meta:
db_table = 'rated_item'
class SharedItem(UserItem):
@staticmethod
def shared_with(user):
return [shared_item.item for shared_item in SharedItem.objects.filter(user=user)]
class Meta:
db_table = 'shared_item'
class Course(Item):
instructor_set = models.ManyToManyField(Instructor, blank=True)
resource_set = models.ManyToManyField(FileItem, blank=True)
release_date = models.DateField(blank=True, null=True)
class Meta:
db_table = 'course'
class Sheet(Item):
class Meta:
db_table = 'sheet'
class NoteBook(Item):
sheet_set = models.ManyToManyField(Sheet, blank=True)
class CourseElement(Sheet):
sequence = models.IntegerField(blank=True, null=True)
course_part = models.ForeignKey('CoursePart', on_delete=models.CASCADE)
class Meta:
db_table = 'course_element'
ordering = ['sequence', ]
class School(Instructor):
colloquial_name = models.CharField(max_length=8, blank=True, null=True)
class Meta:
db_table = 'school'
class CoursePart(models.Model):
label = models.CharField(max_length=32, default='Week')
title = models.CharField(max_length=64, blank=True, null=True)
course = models.ForeignKey(Course, models.CASCADE)
level = models.SmallIntegerField(default=1)
sequence = models.SmallIntegerField(default=1)
def __str__(self):
return self.title
class Meta:
db_table = 'course_part'
ordering = ['id', ]
class Cell(PolymorphicModel):
sequence = models.IntegerField(blank=False, null=False)
sheet = models.ForeignKey(Sheet, models.CASCADE)
def type(self):
return self.__class__.__name__[:-4]
class Meta:
db_table = 'cell'
ordering = ['sequence', ]
class MediaCell(Cell):
title = models.CharField(max_length=64, blank=True, null=True)
url = models.URLField(blank=True, null=True)
def __str__(self):
return self.title
class Meta:
abstract = True
class GraphicMediaCell(MediaCell):
scale = models.FloatField(default=1)
class Meta:
abstract = True
class MarkdownCell(Cell):
text = models.TextField(blank=True, null=True)
def __str__(self):
return (self.text[:75] + '...') if len(self.text) > 75 else self.text
class Meta:
db_table = 'markdown_cell'
class VideoCell(GraphicMediaCell):
class Meta:
db_table = 'video_cell'
class YoutubeCell(GraphicMediaCell):
class Meta:
db_table = 'youtube_cell'
class AudioCell(MediaCell):
class Meta:
db_table = 'audio_cell'
class FileCell(MediaCell):
class Meta:
db_table = 'file_cell'
class ImageCell(GraphicMediaCell):
class Meta:
db_table = 'image_cell'
class Folder(Content):
name = models.CharField(max_length=128, blank=True, null=True)
parent = models.ForeignKey('Folder', models.CASCADE, null=True)
def __str__(self):
return self.name
def siblings(self):
result = []
if self.parent:
result = [folder for folder in self.parent.folder_set.filter(owner=self.owner) if folder.id != self.id and folder.id != 1]
return result
def ascendants(self):
result = []
folder = self
while folder.id != 1:
result.append(folder.parent)
folder = folder.parent
result.reverse()
print(result)
return result
class Meta:
db_table = 'folder'
ordering = ['id', ]
class MultipleChoiceInputCell(Cell):
class Meta:
db_table = 'multiple_choice_input_cell'
class NumericalInputCell(Cell):
answer = models.FloatField(blank=True, null=True)
class Meta:
db_table = 'numerical_input_cell'
class OpenEndedInputCell(Cell):
answer = models.TextField(blank=True, null=True)
class Meta:
db_table = 'open_ended_input_cell'
class Proposition(models.Model):
input_cell = models.ForeignKey(MultipleChoiceInputCell, on_delete=models.CASCADE)
statement = models.TextField(blank=True, null=True)
is_true = models.BooleanField(default=False)
class Meta:
db_table = 'proposition'
|
from channels.routing import route
from OpsManage.djchannels import wssh,notices,chats
# The channel routing defines what channels get handled by what consumers,
# including optional matching on message attributes. Here each WebSocket path is
# routed to its class-based consumer from OpsManage.djchannels: the web terminal
# and the per-user notice channel (the chat route is currently disabled).
channel_routing = [
wssh.webterminal.as_route(path = r'^/ws/webssh/(?P<id>[0-9]+)/$'),
notices.WebNotice.as_route(path = r'^/ws/notice/(?P<username>.+)/$'),
# chats.WebChat.as_route(path = r'^/ws/chats/$'),
]
|
"""
Amatino API Python Bindings
Global Unit Module
Author: hugh@amatino.io
"""
from amatino.session import Session
from amatino.denomination import Denomination
from amatino.internal.url_target import UrlTarget
from amatino.internal.url_parameters import UrlParameters
from amatino.internal.api_request import ApiRequest
from amatino.internal.immutable import Immutable
from amatino.internal.http_method import HTTPMethod
from amatino.api_error import ApiError
from amatino.unexpected_response_type import UnexpectedResponseType
from typing import TypeVar
from typing import Type
from typing import Any
from typing import List
T = TypeVar('T', bound='GlobalUnit')
class GlobalUnit(Denomination):
"""
Global Units are standardised units of account available across
all Amatino Entities. For example, many major currencies are available
as Global Units.
Global Units cannot be modified by Amatino users.
"""
_PATH = '/units'
_URL_KEY = 'global_unit_id'
def __init__(
self,
code: str,
id_: int,
name: str,
priority: int,
description: str,
exponent: int
) -> None:
super().__init__(code, id_, name, priority, description, exponent)
return
@classmethod
def retrieve(cls: Type[T], session: Session, id_: int) -> T:
"""Retrieve a Global Unit"""
if not isinstance(id_, int):
raise TypeError('id_ must be of type `int`')
return GlobalUnit.retrieve_many(session, [id_])[0]
@classmethod
def retrieve_many(
cls: Type[T],
session: Session,
ids: List[int]
) -> List[T]:
"""Retrieve a set of Global Units"""
if not isinstance(session, Session):
raise TypeError('session must be of type `Session`')
if not isinstance(ids, list):
raise TypeError('ids must be of type `List[int]`')
if False in [isinstance(i, int) for i in ids]:
raise TypeError('ids must be of type `List[int]`')
targets = UrlTarget.from_many_integers(GlobalUnit._URL_KEY, ids)
parameters = UrlParameters.from_targets(targets)
request = ApiRequest(
path=GlobalUnit._PATH,
url_parameters=parameters,
credentials=session,
method=HTTPMethod.GET
)
units = GlobalUnit._decode_many(request.response_data)
return units
@classmethod
def _decode_many(cls: Type[T], data: Any) -> List[T]:
"""Return a list of Global Units decoded from raw API response data"""
if not isinstance(data, list):
raise ApiError('Unexpected non-list API data response')
def decode(unit_data: Any) -> T:
if not isinstance(unit_data, dict):
raise UnexpectedResponseType(unit_data, dict)
try:
unit = cls(
code=unit_data['code'],
id_=unit_data['global_unit_id'],
name=unit_data['name'],
priority=unit_data['priority'],
description=unit_data['description'],
exponent=unit_data['exponent']
)
except KeyError as error:
message = 'Expected key "{key}" missing from response data'
                message = message.format(key=error.args[0])
raise ApiError(message)
return unit
units = [decode(u) for u in data]
return units
def __eq__(self, other):
if isinstance(other, GlobalUnit) and other.id_ == self.id_:
return True
return False
class GlobalUnitConstants:
EUR = GlobalUnit('EUR', 2, 'Euro', 1, '', 2)
USD = GlobalUnit('USD', 5, 'US Dollar', 1, '', 2)
AUD = GlobalUnit('AUD', 11, 'Australian Dollar', 1, '', 2)
CAD = GlobalUnit('CAD', 35, 'Canadian Dollar', 1, '', 2)
CNY = GlobalUnit('CNY', 39, 'Yuan Renminbi', 1, '', 4)
NZD = GlobalUnit('NZD', 44, 'New Zealand Dollar', 1, '', 2)
GBP = GlobalUnit('GBP', 66, 'Pound Sterling', 1, '', 2)
JPY = GlobalUnit('JPY', 80, 'Yen', 1, '', 2)
CHF = GlobalUnit('CHF', 94, 'Swiss Franc', 1, '', 3)
SEK = GlobalUnit('SEK', 143, 'Swedish Krona', 1, '', 2)
PRIORITY_1_UNITS = (EUR, USD, AUD, CAD, CNY, NZD, GBP, JPY, CHF, SEK)
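# Hedged usage sketch (not part of the published module): retrieval requires an
# authenticated Session; the USD constant above carries the known global_unit_id,
# so it can be re-fetched from the API like this. The helper name is illustrative.
def _example_retrieve_usd(session: Session) -> GlobalUnit:
    """Fetch the US Dollar Global Unit using the id from GlobalUnitConstants."""
    return GlobalUnit.retrieve(session, GlobalUnitConstants.USD.id_)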
|
import time
from collections import Counter, defaultdict
from math import ceil
start = time.time()
with open("14.txt") as f:
rawInput = f.read().splitlines()
# part 1
curTemplate = list(rawInput[0])
rules = {}
for rule in rawInput[2:]:
k, v = rule.split(" -> ")
rules[k] = v
for _ in range(10):
newCurTemplate = []
for i in range(len(curTemplate) - 1):
char1 = curTemplate[i]
char2 = curTemplate[i + 1]
newCurTemplate.append(char1)
newCurTemplate.append(rules[char1 + char2])
newCurTemplate.append(curTemplate[-1])
curTemplate = newCurTemplate
# counting chars as you go along in a dict is definitely better but i forgot
# (a sketch of that idea follows the Part 1 answer below)
elemCount = Counter(curTemplate).most_common()
mostCommonMinusLeastCommon = elemCount[0][1] - elemCount[-1][1]
print("Part 1:", mostCommonMinusLeastCommon)
# part 2
rulesPart2 = {}
for k, v in rules.items():
rulesPart2[k] = [k[0] + v, v + k[1]]
curPairCount = defaultdict(int)
for i in range(len(curTemplate) - 1):
curPairCount[curTemplate[i] + curTemplate[i + 1]] += 1
for _ in range(30):
newPairCount = defaultdict(int)
for k, v in curPairCount.items():
i1, i2 = rulesPart2[k]
newPairCount[i1] += v
newPairCount[i2] += v
curPairCount = newPairCount
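# each character sits in two pairs (as left and right element), except the first
# and last characters of the template, which sit in only one; summing per-pair
# counts therefore double-counts everything but those two endpoints, and the
# ceil(v / 2) below recovers the true character counts.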
elemCount = defaultdict(int)
for k, v in curPairCount.items():
elemCount[k[0]] += v
elemCount[k[1]] += v
for k in elemCount.keys():
elemCount[k] = ceil(elemCount[k] / 2)
elemCount = Counter(elemCount).most_common()
mostCommonMinusLeastCommon = elemCount[0][1] - elemCount[-1][1]
print("Part 2:", mostCommonMinusLeastCommon)
end = time.time()
print(end - start)
|
import threading, time, config, measure, readht, readdust, requests, json
from datetime import datetime
#import urllib2
class Scheduler:
SENSOR = 22
PIN = 4
interval = 0
lock = ""
config_obj = ""
measure_obj = ""
def __init__(self, interval):
self.interval = interval
self.lock = threading.Lock()
self.config_obj = config.Config()
self.measure_obj = measure.Measure()
#self.url_config = self.config_obj.getUrl()
def isOnline(self,url):
try:
r = requests.get(url)
except IOError as err:
print("Url '%s' unreachable. Using local file '%s'" % (url, self.config_obj.filename))
return False
return True
def syncConfig(self):
_url = self.config_obj.getUrl()+"/config"
_id = "id="+str(self.config_obj.getId())
if self.isOnline(self.config_obj.getUrl()):
response = requests.get(url=_url, params=_id)
if response.status_code == 200:
print("Response data from %s: '%s'" % (_url,response.text))
try:
config_str = json.dumps(response.json())
except ValueError as valerr:
print("No JSON in response. Config wasn't updated.")
return False
else:
self.config_obj.updateConfig(config_str)
return True
return False
def syncMeasures(self, measures=5):
url_t = self.config_obj.getUrl()
id_t = self.config_obj.getId()
long_t = self.config_obj.getLong()
lat_t = self.config_obj.getLat()
timestamp = int(time.time())
print("Time: %i" % (timestamp))
dht22 = readht.getAll(measures)
humidity_t = dht22[0]
temperature_t = dht22[1]
sds011 = readdust.getAll(measures)
pm25_t = sds011[0]
pm10_t = sds011[1]
self.measure_obj.addFetch(humidity=humidity_t, temperature=temperature_t, pm25=pm25_t, pm10=pm10_t, id=id_t, long=long_t, lat=lat_t, ts=timestamp)
timestamp = int(time.time())
if self.isOnline(url_t):
response = requests.put(url=url_t, json=self.measure_obj.getJson())
#print("Debug: Status Code = %i" % (response.status_code))
#print("Debug: Response.text = %s" % (response.text))
if response.status_code == 200:
print("Success in sending file!")
self.measure_obj.deleteFile()
return True
print("File could not be sent due to failing connectivity. Measures are cached locally in file '%s' instead." % (self.measure_obj.filename))
return False
def syncConfigInterval(self):
while True:
self.lock.acquire()
self.config_obj.getConfig()
print("Syncing config...")
self.syncConfig()
wait = self.interval*60
waitm = self.interval
print("Syncing config sleeps %i seconds / %f minutes\n" % (wait, waitm))
self.lock.release()
time.sleep(wait)
def syncMeasuresInterval(self, measures=5):
while True:
self.lock.acquire()
self.config_obj.getConfig()
id = self.config_obj.getId()
print("Syncing measures...")
self.syncMeasures(measures)
wait = self.config_obj.getInterval()*60
waitm = wait/60
print("Syncing measures sleeps %i seconds / %f minutes\n" % (wait, waitm))
self.lock.release()
time.sleep(wait)
|
import os
import sys
sys.path.append(os.path.abspath(os.path.join(__file__, "../../..")))
import argparse
import json
import parameters
import utils.log as log
FAILED_COUNT = 0
PASSED_COUNT = 0
FAILED_COMMANDS = []
PASSED_COMMANDS = []
def exec_cmd(args):
rc = cli.rbd.exec_cmd(args)
if rc is False:
globals()["FAILED_COUNT"] += 1
FAILED_COMMANDS.append(args)
else:
globals()["PASSED_COUNT"] += 1
PASSED_COMMANDS.append(args)
return rc
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="RBD CLI Test")
parser.add_argument("-e", "--ec-pool-k-m", required=False)
args = parser.parse_args()
k_m = args.ec_pool_k_m
cli = parameters.CliParams(k_m=k_m, num_rep_pool=1, num_data_pool=1 if k_m else 0)
iterator = 0
# Simple Image Creation
combinations = cli.generate_combinations("image_size")
combinations = list(
filter(
lambda val: cli.search_param_val("-s", val).find("G") != -1, combinations
)
)
[
exec_cmd(
"rbd create {} {} {}/img{}".format(
combinations[0],
parameters.data_pool["arg"]
+ " "
+ parameters.data_pool["val"]["pool0"],
parameters.rep_pool["val"]["pool0"],
iterator,
)
)
for iterator in range(0, 2)
]
# Bench
combinations = cli.generate_combinations(
"io_type", "io_size", "io_threads", "io_total", "io_pattern"
)
if cli.ceph_version == 2:
[
exec_cmd(
"rbd bench-{} {}/img{}".format(
param, parameters.rep_pool["val"]["pool0"], iterator
)
)
for param in combinations
]
else:
[
exec_cmd(
"rbd bench {} {}/img{}".format(
param, parameters.rep_pool["val"]["pool0"], iterator
)
)
for param in combinations
]
# Snap Creation
iterator = 0
exec_cmd(
"rbd snap create {}/img{}@snapimg".format(
parameters.rep_pool["val"]["pool0"], iterator
)
)
# Disk Usage
exec_cmd(
"rbd du {} {}".format(
parameters.rep_pool["arg"], parameters.rep_pool["val"]["pool0"]
)
)
exec_cmd("rbd du {}/img{}".format(parameters.rep_pool["val"]["pool0"], iterator))
exec_cmd(
"rbd du {}/img{}@snapimg".format(parameters.rep_pool["val"]["pool0"], iterator)
)
# Add Lock
exec_cmd(
"rbd lock add {}/img{} 007".format(
parameters.rep_pool["val"]["pool0"], iterator
)
)
[
exec_cmd(
"rbd lock add --shared lock-tag {}/img{} {}".format(
parameters.rep_pool["val"]["pool0"], iterator + 1, lock_id
)
)
for lock_id in range(0, 2)
]
# List Lock
[
exec_cmd(
"rbd lock list {}/img{}".format(
parameters.rep_pool["val"]["pool0"], iterator
)
)
for iterator in range(0, 2)
]
# Remove Lock
for iterator in range(0, 2):
if exec_cmd(
"rbd lock list {}/img{} --format=json".format(
parameters.rep_pool["val"]["pool0"], iterator
)
):
json_output = json.loads(
exec_cmd(
"rbd lock list {}/img{} --format=json".format(
parameters.rep_pool["val"]["pool0"], iterator
)
)
)
if cli.ceph_version == 3:
[
exec_cmd(
"rbd lock remove {}/img{} {} {}".format(
parameters.rep_pool["val"]["pool0"],
iterator,
key,
val["locker"],
)
)
for key, val in json_output.items()
]
else:
[
exec_cmd(
"rbd lock remove {}/img{} {} {}".format(
parameters.rep_pool["val"]["pool0"],
iterator,
lock["id"],
lock["locker"],
)
)
for lock in json_output
]
# Mapping Images to block-device
iterator += 1
ubuntu_rel = cli.rbd.exec_cmd("lsb_release -is")
if isinstance(ubuntu_rel, str) and "ubuntu" in ubuntu_rel.lower():
exec_cmd("ceph osd crush tunables hammer")
exec_cmd(
"rbd create -s 5G --image-feature layering {}/img{}".format(
parameters.rep_pool["val"]["pool0"], iterator
)
)
exec_cmd(
"rbd snap create {}/img{}@snapmapimg".format(
parameters.rep_pool["val"]["pool0"], iterator
)
)
exec_cmd(
"sudo rbd map {}/img{}".format(parameters.rep_pool["val"]["pool0"], iterator)
)
exec_cmd(
"sudo rbd map --read-only {}/img{}@snapmapimg".format(
parameters.rep_pool["val"]["pool0"], iterator
)
)
# Listing Mapped Images
exec_cmd("rbd showmapped")
# Unmap Images
exec_cmd(
"sudo rbd unmap {}/img{}".format(parameters.rep_pool["val"]["pool0"], iterator)
)
exec_cmd(
"sudo rbd unmap {}/img{}@snapmapimg".format(
parameters.rep_pool["val"]["pool0"], iterator
)
)
# Clean Up
cli.rbd.clean_up(pools=parameters.rep_pool["val"])
if k_m:
cli.rbd.clean_up(pools=parameters.data_pool["val"], profile=cli.ec_profile)
log.info("Result".center(80, "-"))
log.info("Total Commands Executed: {}".format(PASSED_COUNT + FAILED_COUNT))
log.info("Commands Passed: {}".format(PASSED_COUNT))
log.info("Commands Failed: {}".format(FAILED_COUNT))
if FAILED_COUNT > 0:
log.info("Failed commands")
[log.info(fc) for fc in FAILED_COMMANDS]
log.info("Passed commands")
[log.info(fc) for fc in PASSED_COMMANDS]
exit(1)
exit(0)
|
# System documented in https://zulip.readthedocs.io/en/latest/subsystems/logging.html
from django.utils.timezone import now as timezone_now
from django.utils.timezone import utc as timezone_utc
import hashlib
import logging
import re
import traceback
from typing import Optional
from datetime import datetime, timedelta
from django.conf import settings
from logging import Logger
# Adapted http://djangosnippets.org/snippets/2242/ by user s29 (October 25, 2010)
class _RateLimitFilter:
last_error = datetime.min.replace(tzinfo=timezone_utc)
def filter(self, record):
# type: (logging.LogRecord) -> bool
from django.conf import settings
from django.core.cache import cache
# Track duplicate errors
duplicate = False
rate = getattr(settings, '%s_LIMIT' % self.__class__.__name__.upper(),
600) # seconds
if rate > 0:
# Test if the cache works
try:
cache.set('RLF_TEST_KEY', 1, 1)
use_cache = cache.get('RLF_TEST_KEY') == 1
except Exception:
use_cache = False
if use_cache:
if record.exc_info is not None:
tb = '\n'.join(traceback.format_exception(*record.exc_info))
else:
tb = str(record)
key = self.__class__.__name__.upper() + hashlib.sha1(tb.encode()).hexdigest()
duplicate = cache.get(key) == 1
if not duplicate:
cache.set(key, 1, rate)
else:
min_date = timezone_now() - timedelta(seconds=rate)
duplicate = (self.last_error >= min_date)
if not duplicate:
self.last_error = timezone_now()
return not duplicate
class ZulipLimiter(_RateLimitFilter):
pass
class EmailLimiter(_RateLimitFilter):
pass
class ReturnTrue(logging.Filter):
def filter(self, record):
# type: (logging.LogRecord) -> bool
return True
class ReturnEnabled(logging.Filter):
def filter(self, record):
# type: (logging.LogRecord) -> bool
return settings.LOGGING_NOT_DISABLED
class RequireReallyDeployed(logging.Filter):
def filter(self, record):
# type: (logging.LogRecord) -> bool
from django.conf import settings
return settings.PRODUCTION
def skip_200_and_304(record):
# type: (logging.LogRecord) -> bool
# Apparently, `status_code` is added by Django and is not an actual
# attribute of LogRecord; as a result, mypy throws an error if we
# access the `status_code` attribute directly.
if getattr(record, 'status_code') in [200, 304]:
return False
return True
IGNORABLE_404_URLS = [
re.compile(r'^/apple-touch-icon.*\.png$'),
re.compile(r'^/favicon\.ico$'),
re.compile(r'^/robots\.txt$'),
re.compile(r'^/django_static_404.html$'),
re.compile(r'^/wp-login.php$'),
]
def skip_boring_404s(record):
# type: (logging.LogRecord) -> bool
"""Prevents Django's 'Not Found' warnings from being logged for common
404 errors that don't reflect a problem in Zulip. The overall
result is to keep the Zulip error logs cleaner than they would
otherwise be.
Assumes that its input is a django.request log record.
"""
# Apparently, `status_code` is added by Django and is not an actual
# attribute of LogRecord; as a result, mypy throws an error if we
# access the `status_code` attribute directly.
if getattr(record, 'status_code') != 404:
return True
# We're only interested in filtering the "Not Found" errors.
if getattr(record, 'msg') != 'Not Found: %s':
return True
path = getattr(record, 'args', [''])[0]
for pattern in IGNORABLE_404_URLS:
if re.match(pattern, path):
return False
return True
def skip_site_packages_logs(record):
# type: (logging.LogRecord) -> bool
# This skips the log records that are generated from libraries
# installed in site packages.
# Workaround for https://code.djangoproject.com/ticket/26886
if 'site-packages' in record.pathname:
return False
return True
def find_log_caller_module(record):
# type: (logging.LogRecord) -> Optional[str]
'''Find the module name corresponding to where this record was logged.'''
# Repeat a search similar to that in logging.Logger.findCaller.
# The logging call should still be on the stack somewhere; search until
# we find something in the same source file, and that should give the
# right module name.
f = logging.currentframe() # type: ignore # Not in typeshed, and arguably shouldn't be
while f is not None:
if f.f_code.co_filename == record.pathname:
return f.f_globals.get('__name__')
f = f.f_back
return None
logger_nicknames = {
'root': '', # This one is more like undoing a nickname.
'zulip.requests': 'zr', # Super common.
}
def find_log_origin(record):
# type: (logging.LogRecord) -> str
logger_name = logger_nicknames.get(record.name, record.name)
if settings.LOGGING_SHOW_MODULE:
module_name = find_log_caller_module(record)
if module_name == logger_name or module_name == record.name:
# Abbreviate a bit.
return logger_name
else:
return '{}/{}'.format(logger_name, module_name or '?')
else:
return logger_name
log_level_abbrevs = {
'DEBUG': 'DEBG',
'INFO': 'INFO',
'WARNING': 'WARN',
'ERROR': 'ERR',
'CRITICAL': 'CRIT',
}
def abbrev_log_levelname(levelname):
# type: (str) -> str
# It's unlikely someone will set a custom log level with a custom name,
# but it's an option, so we shouldn't crash if someone does.
return log_level_abbrevs.get(levelname, levelname[:4])
class ZulipFormatter(logging.Formatter):
# Used in the base implementation. Default uses `,`.
default_msec_format = '%s.%03d'
def __init__(self):
# type: () -> None
super().__init__(fmt=self._compute_fmt())
def _compute_fmt(self):
# type: () -> str
pieces = ['%(asctime)s', '%(zulip_level_abbrev)-4s']
if settings.LOGGING_SHOW_PID:
pieces.append('pid:%(process)d')
pieces.extend(['[%(zulip_origin)s]', '%(message)s'])
return ' '.join(pieces)
def format(self, record):
# type: (logging.LogRecord) -> str
if not getattr(record, 'zulip_decorated', False):
# The `setattr` calls put this logic explicitly outside the bounds of the
# type system; otherwise mypy would complain LogRecord lacks these attributes.
setattr(record, 'zulip_level_abbrev', abbrev_log_levelname(record.levelname))
setattr(record, 'zulip_origin', find_log_origin(record))
setattr(record, 'zulip_decorated', True)
return super().format(record)
def create_logger(name, log_file, log_level, log_format="%(asctime)s %(levelname)-8s %(message)s"):
# type: (str, str, str, str) -> Logger
"""Creates a named logger for use in logging content to a certain
file. A few notes:
* "name" is used in determining what gets logged to which files;
see "loggers" in zproject/settings.py for details. Don't use `""`
-- that's the root logger.
* "log_file" should be declared in zproject/settings.py in ZULIP_PATHS.
"""
logging.basicConfig(format=log_format)
logger = logging.getLogger(name)
logger.setLevel(getattr(logging, log_level))
if log_file:
formatter = logging.Formatter(log_format)
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
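# A small usage sketch (the logger name and log path below are illustrative
# assumptions, not entries from zproject/settings.py); the helper is not called
# anywhere and exists purely as an example.
def _example_create_logger_usage() -> Logger:
    example_logger = create_logger('zulip.example', '/tmp/zulip-example.log', 'INFO')
    example_logger.info('example logger configured')
    return example_logger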
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from click.testing import CliRunner
from BioCompass import cli
def test_importcli():
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
#assert 'BioCompass.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
#assert 'Usage: main [OPTIONS] MGBFILE' in help_result.output
import pandas as pd
from BioCompass.BioCompass import find_category_from_product
def test_find_category_from_product():
df = pd.DataFrame(
{'product': ['permease', 'bla bla permease bla bla']})
out = find_category_from_product(df)
assert (out['category'] == 'transporter').all()
def test_find_category_from_product_hypothetical():
""" Uncataloged product should return hypothetical
"""
df = pd.DataFrame(
{'product': ['ipsisLitteris']})
out = find_category_from_product(df)
assert (out['category'] == 'hypothetical').all()
|
#!/usr/bin/env python
import argparse
import numpy as np
# parse command line options
parser = argparse.ArgumentParser(description="Creates a body with uniform segment lengths from a given body input", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--ds", type=float, dest="ds", help="segment length", default=0.004)
parser.add_argument("--infile", dest="infile", help="name of input file", default="snake_cross_section.txt")
parser.add_argument("--outfile", dest="outfile", help="prefix of output file generated", default="snake")
args = parser.parse_args()
x0 = np.arange(0)
y0 = np.arange(0)
nb = 0
f = open(args.infile, 'r')
a = f.readline().strip().split()
x0 = np.append(x0, float(a[0]))
y0 = np.append(y0, float(a[1]))
xmin = x0[nb]
xmax = x0[nb]
xc = 0.
yc = 0.
xc = x0[nb]
yc = y0[nb]
nb = nb+1
while True:
a = f.readline().strip().split()
if a==[]:
break
x0 = np.append(x0, float(a[0]))
y0 = np.append(y0, float(a[1]))
xc = xc + x0[nb]
yc = yc + y0[nb]
if x0[nb] < xmin:
xmin = x0[nb]
if x0[nb] > xmax:
xmax = x0[nb]
nb = nb+1
xc = xc/nb
yc = yc/nb
c = xmax - xmin
x0 = (x0 - xc)/c
y0 = (y0 - yc)/c
f.close()
ds = args.ds
cur = 0
nxt = 1
x = np.zeros(1)
y = np.zeros(1)
x[cur] = x0[0]
y[cur] = y0[0]
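# Walk the normalized input polygon and emit points spaced roughly ds apart
# along it: while the next input vertex is farther than ds away, step toward
# it in increments of ds; otherwise advance to the first vertex farther than
# ds and place the new point on the connecting segment (see the bisection
# below at "solve for the next point").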
while nxt < nb:
dist = np.sqrt( (x0[nxt]-x[cur])**2 + (y0[nxt]-y[cur])**2 )
while dist > ds:
x = np.append( x, x[cur] + (x0[nxt]-x[cur])*ds/dist )
y = np.append( y, y[cur] + (y0[nxt]-y[cur])*ds/dist )
cur = cur+1
dist = np.sqrt( (x0[nxt]-x[cur])**2 + (y0[nxt]-y[cur])**2 )
while dist < ds:
nxt = nxt+1
if nxt == nb:
break
dist = np.sqrt( (x0[nxt]-x[cur])**2 + (y0[nxt]-y[cur])**2 )
if nxt == nb:
break
# solve for the next point
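# Bisect on the interpolation parameter eta in [0, 1] until the point
# (1-eta)*p[nxt-1] + eta*p[nxt] lies a distance ds from the current point.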
#'''
upp = 1.0
low = 0.0
while abs(dist-ds)/ds > 1e-5:
eta = 0.5*(upp+low)
dist = np.sqrt( ( (1-eta)*x0[nxt-1] + eta*x0[nxt] - x[cur] )**2 + ( (1-eta)*y0[nxt-1] + eta*y0[nxt] - y[cur] )**2 )
if dist < ds:
low = eta
else:
upp = eta
'''
A = (x0[nxt]-x0[nxt-1])**2 + (y0[nxt]-y0[nxt-1])**2
B = - ( (x[cur]-x0[nxt-1])*(x0[nxt]-x0[nxt-1]) + (y[cur]-y0[nxt-1])*(y0[nxt]-y0[nxt-1]) )
C = (x0[nxt-1]**2 + x[cur]**2 + y0[nxt-1]**2 + y[cur]**2 - ds**2)
if B*B-4*A*C < 0:
print cur, A, B, C, B*B-4*A*C
break
eta = (-B + np.sqrt(B*B-4*A*C))/(2*A)
'''
x = np.append( x, (1-eta)*x0[nxt-1] + eta*x0[nxt] )
y = np.append( y, (1-eta)*y0[nxt-1] + eta*y0[nxt] )
cur = cur + 1
dist = np.sqrt( (x0[0]-x[cur])**2 + (y0[0]-y[cur])**2 )
while dist > ds:
x = np.append( x, x[cur] + (x0[0]-x[cur])*ds/dist )
y = np.append( y, y[cur] + (y0[0]-y[cur])*ds/dist )
cur = cur+1
dist = np.sqrt( (x0[0]-x[cur])**2 + (y0[0]-y[cur])**2 )
if dist < 0.5*ds:
x[cur] = 0.5*(x[cur-1] + x0[0])
y[cur] = 0.5*(y[cur-1] + y0[0])
print ""
print "input element width : %f" % ds
dist = np.sqrt( (x[0]-x[cur])**2 + (y[0]-y[cur])**2 )
print "width of last element : %f" % dist
dist = np.sqrt( (x[cur]-x[cur-1])**2 + (y[cur]-y[cur-1])**2 )
print "width of penultimate element : %f" % dist
dist = np.sqrt( (x[cur-1]-x[cur-2])**2 + (y[cur-1]-y[cur-2])**2 )
print "width of third last element : %f" % dist
print "\ntotal number of boundary points: %d\n" % len(x)
outFile = args.outfile + "_" + str(ds) + ".bdy"
g = open(outFile, 'w')
g.write("%d\n" % len(x))
for i in range(len(x)):
g.write( "%f\t%f\n" % (x[i], y[i]) )
g.close()
|
"""Test agrirouter/revoking/parameters.py"""
from agrirouter import RevokingParameter
from tests.constants import application_id
class TestRevokingParameter:
content_type = "json"
account_id = "111"
endpoint_ids = "endpoint_1"
time_zone = "+03:00"
utc_timestamp = "01-01-2021"
test_object = RevokingParameter(
application_id=application_id,
content_type=content_type,
account_id=account_id,
endpoint_ids=endpoint_ids,
utc_timestamp=utc_timestamp,
time_zone=time_zone
)
def test_get_header_params(self):
assert self.test_object.get_header_params()["application_id"] == application_id
assert self.test_object.get_header_params()["content_type"] == self.content_type
def test_get_body_params(self):
assert self.test_object.get_body_params()["account_id"] == self.account_id
assert self.test_object.get_body_params()["endpoint_ids"] == self.endpoint_ids
assert self.test_object.get_body_params()["utc_timestamp"] == self.utc_timestamp
assert self.test_object.get_body_params()["time_zone"] == self.time_zone
|
# Mathias Punkenhofer
# code.mpunkenhofer@gmail.com
#
from .board import GuiBoard
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.utils.io_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# Standard Imports
import tensorflow as tf
from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import
from tfx.utils import io_utils
class IoUtilsTest(tf.test.TestCase):
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(), 'base_dir')
file_io.create_dir(self._base_dir)
def tearDown(self):
file_io.delete_recursively(self._base_dir)
def testImportFunc(self):
source_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
test_fn_file = os.path.join(source_data_dir, 'test_fn.py')
test_fn = io_utils.import_func(test_fn_file, 'test_fn')
self.assertEqual(10, test_fn([1, 2, 3, 4]))
def testImportFuncMissingFile(self):
source_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
test_fn_file = os.path.join(source_data_dir, 'non_existing.py')
with self.assertRaises(IOError):
io_utils.import_func(test_fn_file, 'test_fn')
def testImportFuncMissingFunction(self):
source_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
test_fn_file = os.path.join(source_data_dir, 'test_fn.py')
with self.assertRaises(AttributeError):
io_utils.import_func(test_fn_file, 'non_existing')
def testCopyFile(self):
file_path = os.path.join(self._base_dir, 'temp_file')
io_utils.write_string_file(file_path, 'testing')
copy_path = os.path.join(self._base_dir, 'copy_file')
io_utils.copy_file(file_path, copy_path)
self.assertTrue(file_io.file_exists(copy_path))
f = file_io.FileIO(file_path, mode='r')
self.assertEqual('testing', f.read())
self.assertEqual(7, f.tell())
def testCopyDir(self):
old_path = os.path.join(self._base_dir, 'old', 'path')
new_path = os.path.join(self._base_dir, 'new', 'path')
io_utils.write_string_file(old_path, 'testing')
io_utils.copy_dir(os.path.dirname(old_path), os.path.dirname(new_path))
self.assertTrue(file_io.file_exists(new_path))
f = file_io.FileIO(new_path, mode='r')
self.assertEqual('testing', f.read())
self.assertEqual(7, f.tell())
def testGetOnlyFileInDir(self):
file_path = os.path.join(self._base_dir, 'file', 'path')
io_utils.write_string_file(file_path, 'testing')
self.assertEqual(file_path,
io_utils.get_only_uri_in_dir(os.path.dirname(file_path)))
def testGetOnlyDirInDir(self):
top_level_dir = os.path.join(self._base_dir, 'dir_1')
dir_path = os.path.join(top_level_dir, 'dir_2')
file_path = os.path.join(dir_path, 'file')
io_utils.write_string_file(file_path, 'testing')
self.assertEqual('dir_2', os.path.basename(
io_utils.get_only_uri_in_dir(top_level_dir)))
def testDeleteDir(self):
file_path = os.path.join(self._base_dir, 'file', 'path')
io_utils.write_string_file(file_path, 'testing')
self.assertTrue(tf.gfile.Exists(file_path))
io_utils.delete_dir(os.path.dirname(file_path))
self.assertFalse(tf.gfile.Exists(file_path))
def testAllFilesPattern(self):
self.assertEqual('model*', io_utils.all_files_pattern('model'))
def testLoadCsvColumnNames(self):
source_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
test_file = os.path.join(source_data_dir, 'test.csv')
column_names = io_utils.load_csv_column_names(test_file)
self.assertListEqual(['a', 'b', 'c', 'd'], column_names)
if __name__ == '__main__':
tf.test.main()
|
import io
import os
import logging
import datetime
from mproxy.core.model import CmdResult
from .throttle import ThrottlableMixin, throttle
from .job_status import JobStatus
from subprocess import Popen, PIPE
import tempfile
log = logging.getLogger(__name__)
class OpenSSHMachineConnection(ThrottlableMixin):
"""Perform operations on a remote machine with openssh"""
def __init__(
self, queue_system, hostname, remote_base_dir, min_wait_ms=1, max_wait_ms=2 ** 15
):
super().__init__(min_wait_ms, max_wait_ms)
self.remote_base_dir = remote_base_dir
self.queue_system = queue_system
self.queue_info={}
self.hostname=hostname
self.summary_status={}
self.queue_last_updated=datetime.datetime.now()
def _execute_command(self, command):
p = Popen(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)
output, errors = p.communicate()
return output, errors
def _checkForErrors(self, errorString, reportError=True):
if len(errorString.strip()) == 0 or (len(errorString.strip().split('\n')) == 1 and "Shared connection to" in errorString):
return False
else:
if (reportError): print("Error: "+errorString.strip())
return True
@throttle
def run(self, command, env=None):
cmd = "ssh -tt " + self.hostname+" \"cd "+self.remote_base_dir+" ; "+command+"\""
output, errors= self._execute_command(cmd)
errorRaised=self._checkForErrors(errors)
return CmdResult(stdout=output, stderr=errors, error=errorRaised)
@throttle
def put(self, src_bytes, dest):
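# Stage the bytes in a local temporary file, then copy it to the remote host
# with scp; destinations not starting with '/' are taken relative to
# remote_base_dir.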
if (dest.startswith("/")):
full_destination=dest
else:
full_destination=self.remote_base_dir+"/"+dest
temp = tempfile.NamedTemporaryFile()
temp.write(src_bytes)
temp.flush()
output, errors=self._execute_command("scp "+temp.name+" "+self.hostname+":"+full_destination)
self._checkForErrors(errors)
temp.close()
@throttle
def get(self, src):
if (src.startswith("/")):
full_src=src
else:
full_src=self.remote_base_dir+"/"+src
temp = tempfile.NamedTemporaryFile(mode="rb")
output, errors=self._execute_command("scp "+self.hostname+":"+full_src+" "+temp.name)
if not self._checkForErrors(errors):
read_bytes=temp.read()
temp.close()
return read_bytes
else:
return b''
@throttle
def upload(self, src_file, dest_file):
output, errors = self._execute_command("scp "+src_file+" "+self.hostname+":"+dest_file)
self._checkForErrors(errors)
@throttle
def download(self, src_file, dest_file):
output, errors = self._execute_command("scp "+self.hostname+":"+src_file+" "+dest_file)
self._checkForErrors(errors)
@throttle
def remote_copy(self, src_file, dest_machine, dest_file):
run_info = self.run("scp "+src_file+" "+dest_machine+":"+dest_file)
self._checkForErrors(run_info.stderr)
def checkForUpdateToQueueData(self):
elapsed=datetime.datetime.now() - self.queue_last_updated
if not self.queue_info or elapsed.total_seconds() > 600:
self.updateQueueInfo()
def updateQueueInfo(self):
status_command=self.queue_system.getQueueStatusSummaryCommand()
run_info=self.run(status_command)
if not self._checkForErrors(run_info.stderr):
self.queue_info=self.queue_system.parseQueueStatus(run_info.stdout)
self.summary_status=self.queue_system.getSummaryOfMachineStatus(self.queue_info)
self.queue_last_updated=datetime.datetime.now()
print("Updated status information")
@throttle
def getstatus(self):
self.checkForUpdateToQueueData()
if (self.summary_status):
return "Connected (Q="+str(self.summary_status["QUEUED"])+",R="+str(self.summary_status["RUNNING"])+")"
else:
return "Error, can not connect"
@throttle
def getDetailedStatus(self):
str_to_return=""
self.checkForUpdateToQueueData()
for value in list(self.queue_info.values()):
str_to_return+=value.toString()+"\n"
return str_to_return
@throttle
def getHistoricalStatus(self, start_time, end_time):
status_command=self.queue_system.getQueueCommandForHistoricalStatus(start_time, end_time)
run_info=self.run(status_command)
if not self._checkForErrors(run_info.stderr):
return self.queue_system.parseHistorialStatus(run_info.stdout)
@throttle
def getJobStatus(self, queue_ids):
status_command=self.queue_system.getQueueStatusForSpecificJobsCommand(queue_ids)
run_info=self.run(status_command)
to_return={}
if not self._checkForErrors(run_info.stderr):
parsed_jobs=self.queue_system.parseQueueStatus(run_info.stdout)
for queue_id in queue_ids:
if (queue_id in parsed_jobs):
status=parsed_jobs[queue_id]
to_return[queue_id]=[status.getStatus(), status.getWalltime(), status.getQueueTime(), status.getRunTime()]
self.queue_info[queue_id]=status # Update general machine status information too with this
return to_return
@throttle
def cancelJob(self, queue_id):
deletion_command=self.queue_system.getJobDeletionCommand(queue_id)
run_info=self.run(deletion_command)
self._checkForErrors(run_info.stderr)
@throttle
def submitJob(self, num_nodes, requested_walltime, directory, executable):
command_to_run = ""
if len(directory) > 0:
command_to_run += "cd "+directory+" ; "
command_to_run+=self.queue_system.getSubmissionCommand(executable)
run_info=self.run(command_to_run)
if not self._checkForErrors(run_info.stderr):
return [self.queue_system.doesSubmissionReportContainQueueId(run_info.stdout), self.queue_system.extractQueueIdFromSubmissionReport(run_info.stdout)]
else:
return [False, run_info.stderr]
@throttle
def cd(self, dir):
pass #self.sftp.chdir(dir)
@throttle
def getcwd(self):
return "" #self.sftp.getcwd()
@throttle
def ls(self, d="."):
run_info=self.run("ls -l "+d)
self._checkForErrors(run_info.stderr)
line_info=[]
for line in run_info.stdout.splitlines():
if len(line.strip()) > 0:
line_info.append(line)
return line_info
@throttle
def mkdir(self, d, args=""):
if len(args) > 0: args+=" "
run_info=self.run("mkdir "+args+d)
self._checkForErrors(run_info.stderr)
@throttle
def rm(self, file):
run_info=self.run("rm "+file)
self._checkForErrors(run_info.stderr)
@throttle
def rmdir(self, dir):
run_info=self.run("rmdir "+dir)
self._checkForErrors(run_info.stderr)
@throttle
def mv(self, src, dest):
run_info=self.run("mv "+src+" "+dest)
self._checkForErrors(run_info.stderr)
@throttle
def cp(self, src, dest, args=""):
if len(args) > 0: args+=" "
run_info=self.run("cp "+args+src+" "+dest)
self._checkForErrors(run_info.stderr)
pass
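# Illustrative usage sketch (assumes a reachable SSH host and a queue_system
# object implementing the command/parse hooks used above; names are hypothetical):
#
#     conn = OpenSSHMachineConnection(my_queue_system, "login.example.org",
#                                     "/work/project")
#     print(conn.getstatus())
#     print(conn.ls("."))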
|
import requests
import bs4
from .notloggedinexception import NotLoggedInException
class Session:
BASE_URL = 'https://sds.smus.ca/index.php'
PAGE_PARAMS = {
'login': {'next_page': 'login.php'},
'student_information': {'next_page': 'student_sds/student_information.php'},
'course_summary': {'next_page': 'student_sds/course_summary.php', 'course_id': ''},
'course_assignments': {'next_page': 'student_sds/course_assignments.php', 'course_id': ''},
'course_assignment_marks': {'next_page': 'student_sds/course_assignment_marks.php', 'course_id': ''}
}
RPC_URL = 'https://sds.smus.ca/rpc.php'
RPC_PARAMS = {
'student_menu': {'action': 'modern_menu', 'level': '1', 'id': 'Student Menu'},
'my_courses': {'action': 'modern_menu', 'level': '2', 'id': 'My Courses'}
}
def __init__(self, session_id=None, username=None, password=None):
if session_id is not None:
self.cookie_jar = dict(PHPSESSID=session_id)
elif username is not None and password is not None:
self.cookie_jar = self.get_logged_in_cookie_jar(username, password)
else:
self.cookie_jar = dict()
def get_logged_in_cookie_jar(self, username, password):
r = requests.get(self.BASE_URL, params=self.PAGE_PARAMS['login'])
cookie_jar = r.cookies
page = bs4.BeautifulSoup(r.text, 'lxml')
csrf_token = page.select('input[name=CSRFtoken]')[0]['value']
r = requests.post(self.BASE_URL, cookies=cookie_jar, data={
'user_name': username,
'password': password,
'CSRFtoken': csrf_token,
'next_page': 'login.php',
'validator': 'login.php'
})
print(r.text)
return cookie_jar
def is_logged_in(self):
r = self.get(self.BASE_URL, ignore_logged_in=True)
return 'You are:' in r.text
def get(self, url, params=None, ignore_logged_in=False):
if (not ignore_logged_in) and (not self.is_logged_in()):
raise NotLoggedInException
return requests.get(url, params=params, cookies=self.cookie_jar)
def post(self, url, data=None, params=None, ignore_logged_in=False):
if (not ignore_logged_in) and (not self.is_logged_in()):
raise NotLoggedInException
return requests.post(url, data=data, params=params, cookies=self.cookie_jar)
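# Illustrative usage sketch (hypothetical credentials):
#
#     session = Session(username='student', password='secret')
#     if session.is_logged_in():
#         r = session.get(Session.BASE_URL,
#                         params=Session.PAGE_PARAMS['student_information'])
#         print(r.status_code)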
|
from mypy import moduleinfo
from mypy.myunit import (
Suite, assert_equal, assert_true, assert_false
)
class ModuleInfoSuite(Suite):
def test_is_in_module_collection(self) -> None:
assert_true(moduleinfo.is_in_module_collection({'foo'}, 'foo'))
assert_true(moduleinfo.is_in_module_collection({'foo'}, 'foo.bar'))
assert_false(moduleinfo.is_in_module_collection({'foo'}, 'fo'))
assert_true(moduleinfo.is_in_module_collection({'foo.bar'}, 'foo.bar'))
assert_true(moduleinfo.is_in_module_collection({'foo.bar'}, 'foo.bar.zar'))
assert_false(moduleinfo.is_in_module_collection({'foo.bar'}, 'foo'))
|
from app.doc import JWT_ACCESS_TOKEN, parameter
FACILITY_REPORT_POST = {
'tags': ['Report'],
'description': 'Facility fault report',
'parameters': [
JWT_ACCESS_TOKEN,
parameter('room', 'Room number', type_='int'),
parameter('content', 'Details of the facility fault report')
],
'responses': {
'201': {
'description': 'The facility fault report was submitted successfully; responds with the ID of the uploaded report.'
},
'403': {
'description': 'Permission denied'
}
}
}
|
# Copyright 2017 <thenakliman@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import netaddr
from susan.common import exceptions
from susan.db.rdbms import dhcp as dhcp_db
from susan.lib.dhcp import dhcp
from susan.lib.dhcp import constants as const
LOG = logging.getLogger(__name__)
# This class provides extra processing needs to be done for requirement
# specific processing.
class DHCP(dhcp.DHCPServer):
def __init__(self, *args, **kwargs):
super(DHCP, self).__init__(*args, **kwargs)
self.db = dhcp_db.DHCPDB()
def handle_discover(self, pkt, datapath, in_port):
"""Handle discover
This method provides hooks for extra processing, needs to be done
for specific to requirement. If no extra information is needed then
call super method.
"""
LOG.info("Handling dhcp 'discover' from %s datapath on %s port",
datapath.id, in_port)
return super(DHCP, self).handle_discover(pkt, datapath, in_port)
def handle_request(self, pkt, datapath, in_port):
"""Handle request
This method provides hooks for extra processing, needs to be done
for specific to requirement. If no extra information is needed then
call super method.
"""
LOG.info("Handling dhcp 'Request' from %s datapath on %s port",
datapath.id, in_port)
return super(DHCP, self).handle_request(pkt, datapath, in_port)
def get_subnet_mac(self, datapath, port):
try:
subnet_id = self.db.get_subnet_id(datapath=datapath,
port=port)
except exceptions.SubnetNotDefinedException:
LOG.error("%s port on %s datapath does not belong to any subnet.",
port, datapath)
raise
return subnet_id, self.db.get_mac_from_port(datapath, port)
def get_ip(self, datapath, port, mac):
subnet_id = self.get_subnet_id(datapath, port)
return self.db.get_reserve_ip(subnet_id=subnet_id, mac=mac)
def reserve_ip(self, datapath_id, in_port, ip, mac):
subnet_id = self.get_subnet_id(datapath_id, in_port)
self.db.reserve_ip(ip=ip, subnet_id=subnet_id, mac=mac)
LOG.info("Reserving %s ip for %s mac in %s subnet",
ip, mac, subnet_id)
def get_committed_ip(self, datapath, port, mac):
try:
return self.get_ip(datapath, port, mac)
except exceptions.CommittedIPNotFoundException:
LOG.error("IP for %s port on %s datapath not found",
port, datapath)
raise
def get_available_ip(self, datapath, port, mac):
"""Gets the free available IP"""
try:
reserved_ip = self.get_ip(datapath, port, mac)
except exceptions.CommittedIPNotFoundException:
reserved_ip = None
if reserved_ip:
return reserved_ip
else:
subnet_id = self.get_subnet_id(datapath, port)
allocated_ips = self.db.get_reserved_ip_in_subnet(subnet_id)
ranges = self.db.get_ranges_in_subnet(subnet_id)
if not ranges:
LOG.error("Ranges for %s subnet is not defined", subnet_id)
raise exceptions.RangeNotFoundException(subnet_id=subnet_id)
allocated_set = set()
for allocated_ip in allocated_ips:
allocated_set.add(int(netaddr.IPAddress(allocated_ip)))
# TODO(thenakliman): Ranges can be merged to process more effectively
for start, end in ranges:
for ip in range(int(netaddr.IPAddress(start)),
int(netaddr.IPAddress(end))):
if ip not in allocated_set:
return str(netaddr.IPAddress(ip))
LOG.error("IP Could not be found in %s subnet", subnet_id)
raise exceptions.IPNotAvailableException(subnet_id=subnet_id)
def get_subnet_id(self, datapath, port):
try:
subnet_id = self.db.get_subnet_id(datapath=datapath,
port=port)
except exceptions.SubnetNotDefinedException:
LOG.error("%s port on %s datapath does not belong to any subnet.",
port, datapath)
raise
return subnet_id
def get_parameters(self, datapath, port, mac):
"""Returns host data for the request"""
try:
subnet_id = self.get_subnet_id(datapath, port)
except exceptions.SubnetNotDefinedException:
LOG.error("%s port on %s datapath does not belong to any "
"subnet.", port, datapath)
raise exceptions.ParameterNotFoundException(datapath_id=datapath,
port=port,
mac=mac)
try:
return self.db.get_parameter(subnet_id, mac)
except exceptions.ParameterNotFoundException:
LOG.error("Parameter for %s mac in %s subnet not found",
mac, subnet_id)
return dict()
def get_dhcp_server_info(self, datapath, port):
"""Returns mac and ip of the dhcp server being used"""
try:
subnet_id = self.get_subnet_id(datapath, port)
(dhcp_mac, dhcp_ip) = self.db.get_dhcp_server_info(subnet_id)
except exceptions.SubnetNotDefinedException:
LOG.error("%s port on %s datapath does not belong to any subnet.",
port, datapath)
raise exceptions.ParameterNotFoundException(
datapath_id=datapath,
port=port,
mac=None)
return (dhcp_mac, dhcp_ip)
def get_next_server_ip(self, datapath, port):
"""Get next server ip."""
try:
return self.db.get_next_server(datapath, port)
except exceptions.NextServerNotDefinedException:
LOG.error("Next server is not defined for %s port on %s datapath",
port, datapath)
raise
def get_lease_time(self, datapath, port, mac):
"""Get lease time for a host"""
parameter = self.get_parameters(datapath, port, mac)
return ((parameter and parameter.get(const.OPTIONS.LEASE_TIME,
const.DEFAULT_LEASE_TIME)
) or const.DEFAULT_LEASE_TIME)
def commit_ip(self, datapath_id, in_port, mac, ip):
subnet_id = self.get_subnet_id(datapath=datapath_id,
port=in_port)
self.db.commit_ip(subnet_id=subnet_id, mac=mac, ip=ip)
LOG.info("Committed %s ip for %s mac in %s subnet", ip, mac, subnet_id)
|
from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='testuser@gmail.com',password='user1234'):
"""create a sample user"""
return get_user_model().objects.create_user(email,password)
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating a new user with an email as successful"""
#django default user model expects username not gmail
email='user@gmail.com'
password='user1234'
user=get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email,email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test the email for a new user is normalized (the domain part of an email address is case insensitive)"""
email='user@GMAIL.COM'
user=get_user_model().objects.create_user(email,'user1234')
self.assertEqual(user.email,email.lower())
def test_new_user_invalid_email(self):
"""Test creating user with no email raises error"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None,'user1234')
# anything we run here should raise a value error,if not test fails.
def test_create_new_superuser(self):
"""Test creating a new superuser"""
user=get_user_model().objects.create_superuser(
'user@gmail.com',
'user1234'
)
self.assertTrue(user.is_superuser) #comes from permissionMixin
self.assertTrue(user.is_staff)
##########################
def test_tag_str(self):
"""Test tag string representation"""
tag=models.Tag.objects.create(
user=sample_user(),
name="sample tag name"
)
self.assertEqual(str(tag),tag.name)
##########################
def test_ingredient_str(self):
"""Test ingredient string representation"""
ingredient=models.Ingredient.objects.create(
user=sample_user(),
name="tomato"
)
self.assertEqual(str(ingredient),ingredient.name)
##########################
def test_recipe_str(self):
"""Test recipe string representation"""
recipe=models.Recipe.objects.create(
user=sample_user(),
title='veges and bone soup',
time_minutes=5,
price=5.00
)
self.assertEqual(str(recipe),recipe.title)
@patch('uuid.uuid4')
def test_recipe_file_name_uuid(self, mock_uuid):
"""Test that image is saved in the correct location"""
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.recipe_image_file_path(None, 'myimage.jpg')
exp_path = f'uploads/recipe/{uuid}.jpg'
self.assertEqual(file_path, exp_path)
|
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
from torch.optim import Adam
from torch.nn.parallel import DataParallel # , DistributedDataParallel
from models.select_network import define_G
from models.model_base import ModelBase
from models.network_sr import RealSRMD, RealSRMD_RRDB
class ModelPrediction(ModelBase):
def __init__(self, opt):
super(ModelPrediction, self).__init__(opt)
# ------------------------------------
# define network
# ------------------------------------
self.netG = define_G(opt).to(self.device)
self.netG = DataParallel(self.netG)
try:
self.netEncoder = RealSRMD(in_nc=54, out_nc=3, nc=128, nb=12, upscale=4,
act_mode='R', upsample_mode='pixelshuffle')
self.netEncoder.load_state_dict(torch.load(opt['path_RealSRMD']), strict=True)
except Exception:
self.netEncoder = RealSRMD_RRDB(in_nc=54, out_nc=3, gc= 32, nc=64, nb=8, upscale=4, act_mode='R',
upsample_mode='upconv')
self.netEncoder.load_state_dict(torch.load(opt['path_RealSRMD']), strict=True)
self.netEncoder.eval()
for k, v in self.netEncoder.named_parameters():
v.requires_grad = False
self.netEncoder.to(self.device)
self.netEncoder = self.netEncoder.kernel_encoder
"""
# ----------------------------------------
# Preparation before training with data
# Save model during training
# ----------------------------------------
"""
# ----------------------------------------
# initialize training
# ----------------------------------------
def init_train(self):
self.opt_train = self.opt['train'] # training option
self.load() # load model
self.netG.train() # set training mode,for BN
self.define_loss() # define loss
self.define_optimizer() # define optimizer
self.define_scheduler() # define scheduler
self.log_dict = OrderedDict() # log
# ----------------------------------------
# load pre-trained G model
# ----------------------------------------
def load(self):
load_path_G = self.opt['path']['pretrained_netG']
if load_path_G is not None:
print('Loading model for G [{:s}] ...'.format(load_path_G))
self.load_network(load_path_G, self.netG)
# ----------------------------------------
# save model
# ----------------------------------------
def save(self, iter_label):
self.save_network(self.save_dir, self.netG, 'G', iter_label)
# ----------------------------------------
# define loss
# ----------------------------------------
def define_loss(self):
G_loss_type = self.opt_train['G_loss_type']
if G_loss_type == 'l1':
self.G_loss = nn.L1Loss().to(self.device)
elif G_loss_type == 'l2':
self.G_loss = nn.MSELoss().to(self.device)
else:
raise NotImplementedError('Loss type [{:s}] is not found.'.format(G_loss_type))
self.G_loss_weight = self.opt_train['G_loss_weight']
# ----------------------------------------
# define optimizer
# ----------------------------------------
def define_optimizer(self):
G_optim_params = []
for k, v in self.netG.named_parameters():
if v.requires_grad:
G_optim_params.append(v)
else:
print('Params [{:s}] will not optimize.'.format(k))
self.G_optimizer = Adam(G_optim_params, lr=self.opt_train['G_optimizer_lr'], weight_decay=0)
# ----------------------------------------
# define scheduler, only "MultiStepLR"
# ----------------------------------------
def define_scheduler(self):
self.schedulers.append(lr_scheduler.MultiStepLR(self.G_optimizer,
self.opt_train['G_scheduler_milestones'],
self.opt_train['G_scheduler_gamma']
))
"""
# ----------------------------------------
# Optimization during training with data
# Testing/evaluation
# ----------------------------------------
"""
# ----------------------------------------
# feed L/H data
# ----------------------------------------
def feed_data(self, data, need_H=True):
self.L = data['L'].to(self.device)
self.k = data['kernel'].to(self.device)
# ----------------------------------------
# update parameters and get loss
# ----------------------------------------
def optimize_parameters(self, current_step):
self.G_optimizer.zero_grad()
self.k_pred = self.netG(self.L)
self.k_true_encoded = self.netEncoder(self.k)
G_loss = self.G_loss_weight * self.G_loss(self.k_pred, self.k_true_encoded)
G_loss.backward()
self.G_optimizer.step()
self.log_dict['G_loss'] = G_loss.item()
# ----------------------------------------
# test / inference
# ----------------------------------------
def test(self):
self.netG.eval()
with torch.no_grad():
self.k_pred = self.netG(self.L)
self.netG.train()
self.k_true_encoded = self.netEncoder(self.k)
# ----------------------------------------
# get log_dict
# ----------------------------------------
def current_log(self):
return self.log_dict
# ----------------------------------------
# get L, E, H batch images
# ----------------------------------------
def current_results(self, need_H=True):
out_dict = OrderedDict()
out_dict['L'] = self.L.detach().float().cpu()
out_dict['k_pred'] = self.k_pred.detach().float().cpu()
out_dict['k'] = self.k.detach().float().cpu()
return out_dict
"""
# ----------------------------------------
# Information of netG
# ----------------------------------------
"""
# ----------------------------------------
# print network
# ----------------------------------------
def print_network(self):
msg = self.describe_network(self.netG)
print(msg)
# ----------------------------------------
# print params
# ----------------------------------------
def print_params(self):
msg = self.describe_params(self.netG)
print(msg)
# ----------------------------------------
# network information
# ----------------------------------------
def info_network(self):
msg = self.describe_network(self.netG)
return msg
# ----------------------------------------
# params information
# ----------------------------------------
def info_params(self):
msg = self.describe_params(self.netG)
return msg
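# Illustrative training-loop sketch (the opt dict, data loader and checkpointing
# are assumed to be set up by the surrounding framework; names are hypothetical):
#
#     model = ModelPrediction(opt)
#     model.init_train()
#     for step, batch in enumerate(train_loader, start=1):
#         model.feed_data(batch)
#         model.optimize_parameters(step)
#         if step % 100 == 0:
#             print(step, model.current_log())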
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
annom.errors
This module holds project defined errors
Author: Jacob Reinhold (jacob.reinhold@jhu.edu)
Created on: Mar 11, 2018
"""
class AnnomError(Exception):
pass
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""FAT and BPB parsing for files."""
import errno
import itertools
import math
import struct
import threading
import warnings
from contextlib import contextmanager
from io import BufferedReader, open
from typing import Union
from pyfatfs import FAT_OEM_ENCODING, _init_check
from pyfatfs.EightDotThree import EightDotThree
from pyfatfs.FATDirectoryEntry import FATDirectoryEntry, FATLongDirectoryEntry
from pyfatfs.FATHeader import FATHeader, FAT32Header, FAT12Header
from pyfatfs._exceptions import PyFATException, NotAFatEntryException
def _readonly_check(func):
def _wrapper(*args, **kwargs):
read_only = args[0].is_read_only
if read_only is False:
return func(*args, **kwargs)
else:
raise PyFATException("Filesystem has been opened read-only, not "
"able to perform a write operation!")
return _wrapper
class PyFat(object):
"""PyFAT base class, parses generic filesystem information."""
#: Used as fat_type if unable to detect FAT type
FAT_TYPE_UNKNOWN = 0
#: Used as fat_type if FAT12 fs has been detected
FAT_TYPE_FAT12 = 12
#: Used as fat_type if FAT16 fs has been detected
FAT_TYPE_FAT16 = 16
#: Used as fat_type if FAT32 fs has been detected
FAT_TYPE_FAT32 = 32
#: Maps fat_type to BS_FilSysType from FS header information
FS_TYPES = {FAT_TYPE_UNKNOWN: b"FAT ",
FAT_TYPE_FAT12: b"FAT12 ",
FAT_TYPE_FAT16: b"FAT16 ",
FAT_TYPE_FAT32: b"FAT32 "}
#: Possible cluster values for FAT12 partitions
FAT12_CLUSTER_VALUES = {'FREE_CLUSTER': 0x000,
'MIN_DATA_CLUSTER': 0x002,
'MAX_DATA_CLUSTER': 0xFEF,
'BAD_CLUSTER': 0xFF7,
'END_OF_CLUSTER_MIN': 0xFF8,
'END_OF_CLUSTER_MAX': 0xFFF}
FAT12_SPECIAL_EOC = 0xFF0
#: Possible cluster values for FAT16 partitions
FAT16_CLUSTER_VALUES = {'FREE_CLUSTER': 0x0000,
'MIN_DATA_CLUSTER': 0x0002,
'MAX_DATA_CLUSTER': 0xFFEF,
'BAD_CLUSTER': 0xFFF7,
'END_OF_CLUSTER_MIN': 0xFFF8,
'END_OF_CLUSTER_MAX': 0xFFFF}
#: Possible cluster values for FAT32 partitions
FAT32_CLUSTER_VALUES = {'FREE_CLUSTER': 0x0000000,
'MIN_DATA_CLUSTER': 0x0000002,
'MAX_DATA_CLUSTER': 0x0FFFFFEF,
'BAD_CLUSTER': 0xFFFFFF7,
'END_OF_CLUSTER_MIN': 0xFFFFFF8,
'END_OF_CLUSTER_MAX': 0xFFFFFFF}
#: Maps fat_type to possible cluster values
FAT_CLUSTER_VALUES = {FAT_TYPE_FAT12: FAT12_CLUSTER_VALUES,
FAT_TYPE_FAT16: FAT16_CLUSTER_VALUES,
FAT_TYPE_FAT32: FAT32_CLUSTER_VALUES}
#: BPB header layout in struct formatted string
bpb_header_layout = "<3s8sHBHBHHBHHHLL"
#: BPB header fields when extracted with bpb_header_layout
bpb_header_vars = ["BS_jmpBoot", "BS_OEMName", "BPB_BytsPerSec",
"BPB_SecPerClus", "BPB_RsvdSecCnt", "BPB_NumFATS",
"BPB_RootEntCnt", "BPB_TotSec16", "BPB_Media",
"BPB_FATSz16", "BPB_SecPerTrk", "BPB_NumHeads",
"BPB_HiddSec", "BPB_TotSec32"]
#: FAT16 bit mask for clean shutdown bit
FAT16_CLEAN_SHUTDOWN_BIT_MASK = 0x8000
#: FAT16 bit mask for volume error bit
FAT16_DRIVE_ERROR_BIT_MASK = 0x4000
#: FAT32 bit mask for clean shutdown bit
FAT32_CLEAN_SHUTDOWN_BIT_MASK = 0x8000000
#: FAT32 bit mask for volume error bit
FAT32_DRIVE_ERROR_BIT_MASK = 0x4000000
#: Dirty bit in FAT header
FAT_DIRTY_BIT_MASK = 0x01
def __init__(self,
encoding: str = 'ibm437',
offset: int = 0):
"""Set up PyFat class instance.
:param encoding: Define encoding to use for filenames
:param offset: Offset of the FAT partition in the given file
:type encoding: str
:type offset: int
"""
self.__fp = None
self.__fp_offset = offset
self._fat_size = 0
self.bpb_header = None
self.fat_header: FATHeader = FATHeader()
self.root_dir = None
self.root_dir_sector = 0
self.root_dir_sectors = 0
self.bytes_per_cluster = 0
self.first_data_sector = 0
self.first_free_cluster = 0
self.fat_type = self.FAT_TYPE_UNKNOWN
self.fat = {}
self.initialised = False
self.encoding = encoding
self.is_read_only = True
self.__lock = threading.Lock()
def __set_fp(self, fp):
if isinstance(self.__fp, BufferedReader):
raise PyFATException("Cannot overwrite existing file handle, "
"create new class instance of PyFAT.")
self.__fp = fp
def __seek(self, address: int):
"""Seek to given address with offset."""
if self.__fp is None:
raise PyFATException("Cannot seek without a file handle!",
errno=errno.ENXIO)
self.__fp.seek(address + self.__fp_offset)
@_init_check
def read_cluster_contents(self, cluster: int) -> bytes:
"""Read contents of given cluster.
:param cluster: Cluster number to read contents from
:returns: Contents of cluster as `bytes`
"""
sz = self.bytes_per_cluster
cluster_address = self.get_data_cluster_address(cluster)
with self.__lock:
self.__seek(cluster_address)
return self.__fp.read(sz)
def __get_clean_shutdown_bitmask(self):
"""Get clean shutdown bitmask for current FS.
:raises: AttributeError
"""
return getattr(self, f"FAT{self.fat_type}_CLEAN_SHUTDOWN_BIT_MASK")
def _is_dirty(self) -> bool:
"""Check whether or not the partition currently is dirty."""
try:
clean_shutdown_bitmask = self.__get_clean_shutdown_bitmask()
except AttributeError:
# Bit not set on FAT12
dos_dirty = False
else:
dos_dirty = (self.fat[1] &
clean_shutdown_bitmask) != clean_shutdown_bitmask
nt_dirty = (self.fat_header["BS_Reserved1"] &
self.FAT_DIRTY_BIT_MASK) == self.FAT_DIRTY_BIT_MASK
return dos_dirty or nt_dirty
def _mark_dirty(self):
"""Mark partition as not cleanly unmounted.
Apparently the dirty bit in FAT[1] is used by DOS,
while BS_Reserved1 is used by NT. Always set both.
"""
try:
clean_shutdown_bitmask = self.__get_clean_shutdown_bitmask()
except AttributeError:
pass
else:
# Only applicable for FAT16/32
self.fat[1] = (self.fat[1] & ~clean_shutdown_bitmask) | \
(0 & clean_shutdown_bitmask)
self.flush_fat()
self.fat_header["BS_Reserved1"] |= self.FAT_DIRTY_BIT_MASK
self._write_fat_header()
def _mark_clean(self):
"""Mark partition as cleanly unmounted."""
try:
clean_shutdown_bitmask = self.__get_clean_shutdown_bitmask()
except AttributeError:
pass
else:
self.fat[1] |= clean_shutdown_bitmask
self.flush_fat()
self.fat_header["BS_Reserved1"] = (self.fat_header["BS_Reserved1"]
& ~self.FAT_DIRTY_BIT_MASK) | \
(0 & self.FAT_DIRTY_BIT_MASK)
self._write_fat_header()
def open(self, filename: str, read_only: bool = False):
"""Open filesystem for usage with PyFat.
:param filename: `str`: Name of file to open for usage with PyFat.
:param read_only: `bool`: Force read-only mode of filesystem.
"""
self.is_read_only = read_only
if read_only is True:
mode = 'rb'
else:
mode = 'rb+'
try:
self.__set_fp(open(filename, mode=mode))
except OSError as ex:
raise PyFATException(f"Cannot open given file \'{filename}\'.",
errno=ex.errno)
# Parse BPB & FAT headers of given file
self.parse_header()
# Parse FAT
self._parse_fat()
# Check for clean shutdown
if self._is_dirty():
warnings.warn("Filesystem was not cleanly unmounted on last "
"access. Check for data corruption.")
if not self.is_read_only:
self._mark_dirty()
# Parse root directory
# TODO: Inefficient to always recursively parse the root dir.
# It would make sense to parse it on demand instead.
self.parse_root_dir()
@_init_check
def get_fs_location(self):
"""Retrieve path of opened filesystem."""
return self.__fp.name
@_init_check
def _get_total_sectors(self):
"""Get total number of sectors for all FAT sizes."""
if self.bpb_header["BPB_TotSec16"] != 0:
return self.bpb_header["BPB_TotSec16"]
return self.bpb_header["BPB_TotSec32"]
def _get_fat_size_count(self):
"""Get BPB_FATsz value."""
if self.bpb_header["BPB_FATSz16"] != 0:
return self.bpb_header["BPB_FATSz16"]
try:
return self._parse_fat_header(force_fat32=True)["BPB_FATSz32"]
except KeyError:
raise PyFATException("Invalid FAT size of 0 detected in header, "
"cannot continue")
@_init_check
def _parse_fat(self):
"""Parse information in FAT."""
# Read all FATs
fat_size = self.bpb_header["BPB_BytsPerSec"]
fat_size *= self._fat_size
# Seek FAT entries
first_fat_bytes = self.bpb_header["BPB_RsvdSecCnt"]
first_fat_bytes *= self.bpb_header["BPB_BytsPerSec"]
fats = []
for i in range(self.bpb_header["BPB_NumFATS"]):
with self.__lock:
self.__seek(first_fat_bytes + (i * fat_size))
fats += [self.__fp.read(fat_size)]
if len(fats) < 1:
raise PyFATException("Invalid number of FATs configured, "
"cannot continue")
elif len(set(fats)) > 1:
warnings.warn("One or more FATs differ, filesystem most "
"likely corrupted. Using first FAT.")
# Parse first FAT
self.bytes_per_cluster = self.bpb_header["BPB_BytsPerSec"] * \
self.bpb_header["BPB_SecPerClus"]
if len(fats[0]) != self.bpb_header["BPB_BytsPerSec"] * self._fat_size:
raise PyFATException("Invalid length of FAT")
# FAT12: 12 bits (1.5 bytes) per FAT entry
# FAT16: 16 bits (2 bytes) per FAT entry
# FAT32: 32 bits (4 bytes) per FAT entry
fat_entry_size = self.fat_type / 8
total_entries = int(fat_size // fat_entry_size)
self.fat = [None] * total_entries
curr = 0
cluster = 0
incr = self.fat_type / 8
while curr < fat_size:
offset = curr + incr
if self.fat_type == self.FAT_TYPE_FAT12:
fat_nibble = fats[0][int(curr):math.ceil(offset)]
fat_nibble = fat_nibble.ljust(2, b"\0")
try:
self.fat[cluster] = struct.unpack("<H", fat_nibble)[0]
except IndexError:
# Out of bounds, FAT size is not cleanly divisible by 3
# Do not touch last clusters
break
if cluster % 2 == 0:
# Even: Keep low 12-bits of word
self.fat[cluster] &= 0x0FFF
else:
# Odd: Keep high 12-bits of word
self.fat[cluster] >>= 4
if math.ceil(offset) == (fat_size - 1):
# Sector boundary case for FAT12
del self.fat[-1]
break
elif self.fat_type == self.FAT_TYPE_FAT16:
self.fat[cluster] = struct.unpack("<H",
fats[0][int(curr):
int(offset)])[0]
elif self.fat_type == self.FAT_TYPE_FAT32:
self.fat[cluster] = struct.unpack("<L",
fats[0][int(curr):
int(offset)])[0]
# Ignore first four bits, FAT32 clusters are
# actually just 28bits long
self.fat[cluster] &= 0x0FFFFFFF
else:
raise PyFATException("Unknown FAT type, cannot continue")
curr += incr
cluster += 1
if None in self.fat:
raise AssertionError("Unknown error during FAT parsing, please "
"report this error.")
@_init_check
def __bytes__(self):
"""Represent current state of FAT as bytes.
:returns: `bytes` representation of FAT.
"""
b = b''
if self.fat_type == self.FAT_TYPE_FAT12:
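# FAT12 packs two 12-bit entries into three bytes: each even entry is first
# written as a provisional little-endian 16-bit word, then the following odd
# entry is merged in by rewriting the shared middle byte (low nibble from the
# even entry's top bits, high nibble from the odd entry's low bits) and
# appending the odd entry's top eight bits as the third byte.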
for i, e in enumerate(self.fat):
if i % 2 == 0:
b += struct.pack("<H", e)
else:
nibble = b[-1:]
nibble = struct.unpack("<B", nibble)[0]
b = b[:-1]
b += struct.pack("<BB", ((e & 0xF) << 4) | nibble, e >> 4)
else:
if self.fat_type == self.FAT_TYPE_FAT16:
fmt = "H"
else:
# FAT32
fmt = "L"
b = struct.pack(f"<{fmt * len(self.fat)}",
*self.fat)
return b
@_init_check
@_readonly_check
def _write_data_to_address(self, data: bytes,
address: int):
"""Write given data directly to the filesystem.
Directly writes to the filesystem without any consistency check.
**Use with caution**
:param data: `bytes`: Data to write to address
:param address: `int`: Offset to write data to.
"""
with self.__lock:
self.__seek(address)
self.__fp.write(data)
@_init_check
@_readonly_check
def free_cluster_chain(self, cluster: int):
"""Mark a cluster(chain) as free in FAT.
:param cluster: `int`: Cluster to mark as free
"""
_freeclus = self.FAT_CLUSTER_VALUES[self.fat_type]['FREE_CLUSTER']
with self.__lock:
tmp_fat = self.fat.copy()
for cl in self.get_cluster_chain(cluster):
tmp_fat[cl] = _freeclus
self.first_free_cluster = min(cl, self.first_free_cluster)
self.fat = tmp_fat
@_init_check
@_readonly_check
def write_data_to_cluster(self, data: bytes,
cluster: int,
extend_cluster: bool = True,
erase: bool = False) -> None:
"""Write given data to cluster.
Extends cluster chain if needed.
:param data: `bytes`: Data to write to cluster
:param cluster: `int`: Cluster to write data to.
:param extend_cluster: `bool`: Automatically extend cluster chain
if not enough space is available.
:param erase: `bool`: Erase cluster contents before writing.
This is useful when writing `FATDirectoryEntry` data.
"""
data_sz = len(data)
cluster_sz = 0
last_cluster = None
for c in self.get_cluster_chain(cluster):
cluster_sz += self.bytes_per_cluster
last_cluster = c
if cluster_sz >= data_sz:
break
if data_sz > cluster_sz:
if extend_cluster is False:
raise PyFATException("Cannot write data to cluster, "
"not enough space available.",
errno=errno.ENOSPC)
new_chain = self.allocate_bytes(data_sz - cluster_sz,
erase=erase)[0]
self.fat[last_cluster] = new_chain
# Fill rest of data with zeroes if erase is set to True
if erase:
new_sz = max(1, math.ceil(data_sz / self.bytes_per_cluster))
new_sz *= self.bytes_per_cluster
data += b'\0' * (new_sz - data_sz)
# Write actual data
bytes_written = 0
for c in self.get_cluster_chain(cluster):
b = self.get_data_cluster_address(c)
t = bytes_written
bytes_written += self.bytes_per_cluster
self._write_data_to_address(data[t:bytes_written], b)
if bytes_written >= len(data):
break
@_init_check
@_readonly_check
def flush_fat(self) -> None:
"""Flush FAT(s) to disk."""
fat_size = self.bpb_header["BPB_BytsPerSec"]
fat_size *= self._fat_size
first_fat_bytes = self.bpb_header["BPB_RsvdSecCnt"]
first_fat_bytes *= self.bpb_header["BPB_BytsPerSec"]
with self.__lock:
binary_fat = bytes(self)
for i in range(self.bpb_header["BPB_NumFATS"]):
self.__seek(first_fat_bytes + (i * fat_size))
self.__fp.write(binary_fat)
def calc_num_clusters(self, size: int = 0) -> int:
"""Calculate the number of required clusters.
:param size: `int`: required bytes to allocate
:returns: Number of required clusters
"""
num_clusters = size / self.bytes_per_cluster
num_clusters = math.ceil(num_clusters)
return num_clusters
@_init_check
@_readonly_check
def allocate_bytes(self, size: int, erase: bool = False) -> list:
"""Try to allocate a cluster (-chain) in FAT for `size` bytes.
:param size: `int`: Size in bytes to try to allocate.
:param erase: `bool`: If set to true, the newly allocated
space is zeroed-out for clean allocation.
:returns: List of newly-allocated clusters.
"""
free_clus = self.FAT_CLUSTER_VALUES[self.fat_type]["FREE_CLUSTER"]
min_clus = self.FAT_CLUSTER_VALUES[self.fat_type]["MIN_DATA_CLUSTER"]
max_clus = self.FAT_CLUSTER_VALUES[self.fat_type]["MAX_DATA_CLUSTER"]
num_clusters = self.calc_num_clusters(size)
# Fill list of found free clusters
free_clusters = []
for i in range(self.first_free_cluster, len(self.fat)):
if min_clus > i or i > max_clus:
# Ignore out of bound entries
continue
if num_clusters == len(free_clusters):
# Allocated enough clusters!
break
if self.fat[i] == free_clus:
if i == self.FAT_CLUSTER_VALUES[self.fat_type]["BAD_CLUSTER"]:
# Do not allocate a BAD_CLUSTER
continue
if self.fat_type == self.FAT_TYPE_FAT12 and \
i == self.FAT12_SPECIAL_EOC:
# Do not allocate special EOC marker on FAT12
continue
free_clusters += [i]
else:
free_space = len(free_clusters) * self.bytes_per_cluster
raise PyFATException(f"Not enough free space to allocate "
f"{size} bytes ({free_space} bytes free)",
errno=errno.ENOSPC)
self.first_free_cluster = i
# Allocate cluster chain in FAT
eoc_max = self.FAT_CLUSTER_VALUES[self.fat_type]["END_OF_CLUSTER_MAX"]
for i, _ in enumerate(free_clusters):
try:
self.fat[free_clusters[i]] = free_clusters[i+1]
except IndexError:
self.fat[free_clusters[i]] = eoc_max
if erase is True:
with self.__lock:
self.__seek(self.get_data_cluster_address(i))
self.__fp.write(b'\0' * self.bytes_per_cluster)
return free_clusters
@_init_check
@_readonly_check
def update_directory_entry(self, dir_entry: FATDirectoryEntry) -> None:
"""Update directory entry on disk.
Special handling is required, since the root directory
on FAT12/16 is on a fixed location on disk.
:param dir_entry: `FATDirectoryEntry`: Directory to write to disk
"""
is_root_dir = False
extend_cluster_chain = True
if self.root_dir == dir_entry:
if self.fat_type != self.FAT_TYPE_FAT32:
# FAT12/16 has a fixed-size root directory region rather than
# a cluster chain, so it cannot be extended
extend_cluster_chain = False
is_root_dir = True
# Gather all directory entries
dir_entries = b''
d, f, s = dir_entry.get_entries()
for d in list(itertools.chain(d, f, s)):
dir_entries += bytes(d)
# Write content
if not is_root_dir or self.fat_type == self.FAT_TYPE_FAT32:
# FAT32 and non-root dir entries can be handled normally
self.write_data_to_cluster(dir_entries,
dir_entry.get_cluster(),
extend_cluster=extend_cluster_chain,
erase=True)
else:
# FAT12/16 does not have a root directory cluster
root_dir_addr = self.root_dir_sector * \
self.bpb_header["BPB_BytsPerSec"]
root_dir_sz = self.root_dir_sectors * \
self.bpb_header["BPB_BytsPerSec"]
if len(dir_entries) > root_dir_sz:
raise PyFATException("Cannot create directory, maximum number "
"of root directory entries exhausted!",
errno=errno.ENOSPC)
# Overwrite empty space as well
dir_entries += b'\0' * (root_dir_sz - len(dir_entries))
self._write_data_to_address(dir_entries, root_dir_addr)
def _fat12_parse_root_dir(self):
"""Parse FAT12/16 root dir entries.
FAT12/16 has a fixed location of root directory entries
and is therefore size limited (BPB_RootEntCnt).
"""
root_dir_byte = self.root_dir_sector * \
self.bpb_header["BPB_BytsPerSec"]
self.root_dir.set_cluster(self.root_dir_sector //
self.bpb_header["BPB_SecPerClus"])
max_bytes = self.bpb_header["BPB_RootEntCnt"] * \
FATDirectoryEntry.FAT_DIRECTORY_HEADER_SIZE
# Parse all directory entries in root directory
subdirs, _ = self.parse_dir_entries_in_address(root_dir_byte,
root_dir_byte +
max_bytes)
for dir_entry in subdirs:
self.root_dir.add_subdirectory(dir_entry)
def _fat32_parse_root_dir(self):
"""Parse FAT32 root dir entries.
FAT32 actually has its root directory entries distributed
across a cluster chain that we need to follow
"""
root_cluster = self.fat_header["BPB_RootClus"]
self.root_dir.set_cluster(root_cluster)
# Follow root directory cluster chain
for dir_entry in self.parse_dir_entries_in_cluster_chain(root_cluster):
self.root_dir.add_subdirectory(dir_entry)
def parse_root_dir(self):
"""Parse root directory entry."""
root_dir_sfn = EightDotThree()
root_dir_sfn.set_str_name("")
dir_attr = FATDirectoryEntry.ATTR_DIRECTORY
self.root_dir = FATDirectoryEntry(DIR_Name=root_dir_sfn,
DIR_Attr=dir_attr,
DIR_NTRes=0,
DIR_CrtTimeTenth=0,
DIR_CrtTime=0,
DIR_CrtDate=0,
DIR_LstAccessDate=0,
DIR_FstClusHI=0,
DIR_WrtTime=0,
DIR_WrtDate=0,
DIR_FstClusLO=0,
DIR_FileSize=0,
encoding=self.encoding)
if self.fat_type in [self.FAT_TYPE_FAT12, self.FAT_TYPE_FAT16]:
self._fat12_parse_root_dir()
else:
self._fat32_parse_root_dir()
def parse_lfn_entry(self,
lfn_entry: FATLongDirectoryEntry = None,
address: int = 0):
"""Parse LFN entry at given address."""
dir_hdr_sz = FATDirectoryEntry.FAT_DIRECTORY_HEADER_SIZE
with self.__lock:
self.__seek(address)
lfn_dir_data = self.__fp.read(dir_hdr_sz)
lfn_hdr_layout = FATLongDirectoryEntry.FAT_LONG_DIRECTORY_LAYOUT
lfn_dir_hdr = struct.unpack(lfn_hdr_layout, lfn_dir_data)
lfn_dir_hdr = dict(zip(FATLongDirectoryEntry.FAT_LONG_DIRECTORY_VARS,
lfn_dir_hdr))
lfn_entry.add_lfn_entry(**lfn_dir_hdr)
def __parse_dir_entry(self, address):
"""Parse directory entry at given address."""
with self.__lock:
self.__seek(address)
dir_hdr_size = FATDirectoryEntry.FAT_DIRECTORY_HEADER_SIZE
dir_data = self.__fp.read(dir_hdr_size)
dir_hdr = struct.unpack(FATDirectoryEntry.FAT_DIRECTORY_LAYOUT,
dir_data)
dir_hdr = dict(zip(FATDirectoryEntry.FAT_DIRECTORY_VARS, dir_hdr))
return dir_hdr
def parse_dir_entries_in_address(self,
address: int = 0,
max_address: int = 0,
tmp_lfn_entry: FATLongDirectoryEntry =
None):
"""Parse directory entries in address range."""
if tmp_lfn_entry is None:
tmp_lfn_entry = FATLongDirectoryEntry()
dir_hdr_size = FATDirectoryEntry.FAT_DIRECTORY_HEADER_SIZE
if max_address == 0:
max_address = FATDirectoryEntry.FAT_DIRECTORY_HEADER_SIZE
dir_entries = []
for hdr_addr in range(address, max_address, dir_hdr_size):
# Parse each entry
dir_hdr = self.__parse_dir_entry(hdr_addr)
dir_sn = EightDotThree(encoding=self.encoding)
dir_first_byte = dir_hdr["DIR_Name"][0]
try:
dir_sn.set_byte_name(dir_hdr["DIR_Name"])
except NotAFatEntryException as ex:
# Not a directory of any kind, invalidate temporary LFN entries
tmp_lfn_entry = FATLongDirectoryEntry()
if ex.free_type == FATDirectoryEntry.FREE_DIR_ENTRY_MARK:
# Empty directory entry,
continue
elif ex.free_type == FATDirectoryEntry.LAST_DIR_ENTRY_MARK:
# Last directory entry, do not parse any further
break
else:
dir_hdr["DIR_Name"] = dir_sn
# Long File Names
if FATLongDirectoryEntry.is_lfn_entry(dir_first_byte,
dir_hdr["DIR_Attr"]):
self.parse_lfn_entry(tmp_lfn_entry, hdr_addr)
continue
# Normal directory entries
if not tmp_lfn_entry.is_lfn_entry_complete():
# Ignore incomplete LFN entries altogether
tmp_lfn_entry = None
dir_entry = FATDirectoryEntry(encoding=self.encoding,
lfn_entry=tmp_lfn_entry,
**dir_hdr)
dir_entries += [dir_entry]
if dir_entry.is_directory() and not dir_entry.is_special():
# Iterate all subdirectories except for dot and dotdot
cluster = dir_entry.get_cluster()
subdirs = self.parse_dir_entries_in_cluster_chain(cluster)
for d in subdirs:
dir_entry.add_subdirectory(d)
# Reset temporary LFN entry
tmp_lfn_entry = FATLongDirectoryEntry()
return dir_entries, tmp_lfn_entry
def parse_dir_entries_in_cluster_chain(self, cluster):
"""Parse directory entries while following given cluster chain."""
dir_entries = []
tmp_lfn_entry = FATLongDirectoryEntry()
max_bytes = (self.bpb_header["BPB_SecPerClus"] *
self.bpb_header["BPB_BytsPerSec"])
for c in self.get_cluster_chain(cluster):
# Parse all directory entries in chain
b = self.get_data_cluster_address(c)
ret = self.parse_dir_entries_in_address(b, b+max_bytes,
tmp_lfn_entry)
tmp_dir_entries, tmp_lfn_entry = ret
dir_entries += tmp_dir_entries
return dir_entries
def get_data_cluster_address(self, cluster: int) -> int:
"""Get offset of given cluster in bytes.
:param cluster: Cluster number as `int`
:returns: Bytes address location of cluster
"""
# First two cluster entries are reserved
sector = (cluster - 2) * self.bpb_header["BPB_SecPerClus"] + \
self.first_data_sector
return sector * self.bpb_header["BPB_BytsPerSec"]
@_init_check
def get_cluster_chain(self, first_cluster):
"""Follow a cluster chain beginning with the first cluster address."""
cluster_vals = self.FAT_CLUSTER_VALUES[self.fat_type]
min_data_cluster = cluster_vals["MIN_DATA_CLUSTER"]
max_data_cluster = cluster_vals["MAX_DATA_CLUSTER"]
eoc_min = cluster_vals["END_OF_CLUSTER_MIN"]
eoc_max = cluster_vals["END_OF_CLUSTER_MAX"]
i = first_cluster
while i < len(self.fat):
if min_data_cluster <= self.fat[i] <= max_data_cluster:
# Normal data cluster, follow chain
yield i
elif self.fat_type == self.FAT_TYPE_FAT12 and \
self.fat[i] == self.FAT12_SPECIAL_EOC:
# Special EOC
yield i
return
elif eoc_min <= self.fat[i] <= eoc_max:
# End of cluster, end chain
yield i
return
elif self.fat[i] == cluster_vals["BAD_CLUSTER"]:
# Bad cluster, cannot follow chain, file broken!
raise PyFATException("Bad cluster found in FAT cluster "
"chain, cannot access file")
elif self.fat[i] == cluster_vals["FREE_CLUSTER"]:
# FREE_CLUSTER mark when following a chain is treated an error
raise PyFATException("FREE_CLUSTER mark found in FAT cluster "
"chain, cannot access file")
else:
raise PyFATException("Invalid or unknown FAT cluster "
"entry found with value "
"\'{}\'".format(hex(self.fat[i])))
i = self.fat[i]
@_init_check
def close(self):
"""Close session and free up all handles."""
if not self.is_read_only:
self._mark_clean()
self.__fp.close()
self.initialised = False
def __del__(self):
"""Try to close open handles."""
try:
self.close()
except PyFATException:
pass
def __determine_fat_type(self) -> Union["PyFat.FAT_TYPE_FAT12",
"PyFat.FAT_TYPE_FAT16",
"PyFat.FAT_TYPE_FAT32"]:
"""Determine FAT type.
An internal method to determine whether this volume is FAT12,
FAT16 or FAT32.
returns: `str`: Any of PyFat.FAT_TYPE_FAT12, PyFat.FAT_TYPE_FAT16
or PyFat.FAT_TYPE_FAT32
"""
if self.bpb_header["BPB_TotSec16"] != 0:
total_sectors = self.bpb_header["BPB_TotSec16"]
else:
total_sectors = self.bpb_header["BPB_TotSec32"]
rsvd_sectors = self.bpb_header["BPB_RsvdSecCnt"]
fat_sz = self.bpb_header["BPB_NumFATS"] * self._fat_size
root_dir_sectors = self.root_dir_sectors
data_sec = total_sectors - (rsvd_sectors + fat_sz + root_dir_sectors)
count_of_clusters = data_sec // self.bpb_header["BPB_SecPerClus"]
if count_of_clusters < 4085:
msft_fat_type = self.FAT_TYPE_FAT12
elif count_of_clusters < 65525:
msft_fat_type = self.FAT_TYPE_FAT16
else:
msft_fat_type = self.FAT_TYPE_FAT32
if self.bpb_header["BPB_FATSz16"] == 0:
fat_hdr = self._parse_fat_header(force_fat32=True)
if fat_hdr["BPB_FATSz32"] != 0:
linux_fat_type = self.FAT_TYPE_FAT32
else:
linux_fat_type = msft_fat_type
elif count_of_clusters >= 4085:
linux_fat_type = self.FAT_TYPE_FAT16
else:
linux_fat_type = self.FAT_TYPE_FAT12
if msft_fat_type != linux_fat_type:
warnings.warn(f"Unable to reliably determine FAT type, "
f"guessing either FAT{msft_fat_type} or "
f"FAT{linux_fat_type}. Opting for "
f"FAT{linux_fat_type}.")
return linux_fat_type
@_readonly_check
def _write_fat_header(self):
with self.__lock:
self.__seek(36)
self.__fp.write(bytes(self.fat_header))
def _parse_fat_header(self, force_fat32: bool = False) -> FATHeader:
"""Parse FAT header."""
with self.__lock:
self.__seek(0)
boot_sector = self.__fp.read(512)
if self.fat_type in [self.FAT_TYPE_FAT12, self.FAT_TYPE_FAT16] \
and not force_fat32:
fat_header = FAT12Header()
fat_header.parse_header(boot_sector[36:][:26])
return fat_header
elif self.fat_type == self.FAT_TYPE_FAT32 or force_fat32:
# FAT32, probably - probe for it
if self.bpb_header["BPB_FATSz16"] != 0:
raise PyFATException(f"Invalid BPB_FATSz16 value of "
f"'{self.bpb_header['BPB_FATSz16']}', "
f"filesystem corrupt?",
errno=errno.EINVAL)
fat_header = FAT32Header()
fat_header.parse_header(boot_sector[36:][:54])
return fat_header
raise PyFATException("Unknown FAT filesystem type, "
"filesystem corrupt?", errno=errno.EINVAL)
def parse_header(self):
"""Parse BPB & FAT headers in opened file."""
with self.__lock:
self.__seek(0)
boot_sector = self.__fp.read(512)
header = struct.unpack(self.bpb_header_layout, boot_sector[:36])
self.bpb_header = dict(zip(self.bpb_header_vars, header))
# Verify BPB headers
self.__verify_bpb_header()
# Determine FAT type
self._fat_size = self._get_fat_size_count()
self.fat_type = self.__determine_fat_type()
# Parse FAT type specific header
self.fat_header = self._parse_fat_header()
# Calculate root directory sectors and starting point of root directory
root_entries = self.bpb_header["BPB_RootEntCnt"]
hdr_size = FATDirectoryEntry.FAT_DIRECTORY_HEADER_SIZE
bytes_per_sec = self.bpb_header["BPB_BytsPerSec"]
rsvd_secs = self.bpb_header["BPB_RsvdSecCnt"]
num_fats = self.bpb_header["BPB_NumFATS"]
self.root_dir_sectors = ((root_entries * hdr_size) +
(bytes_per_sec - 1)) // bytes_per_sec
self.root_dir_sector = rsvd_secs + (self._fat_size * num_fats)
# Calculate first data sector
self.first_data_sector = (rsvd_secs + (num_fats * self._fat_size) +
self.root_dir_sectors)
# Check signature
with self.__lock:
self.__seek(510)
signature = struct.unpack("<H", self.__fp.read(2))[0]
if signature != 0xAA55:
raise PyFATException(f"Invalid signature: \'{hex(signature)}\'.")
# Initialisation finished
self.initialised = True
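    # Worked example (illustrative): with 512 bytes per sector, 224 root
    # entries, 32-byte directory entries, 1 reserved sector and 2 FATs of
    # 9 sectors each (a classic 1.44 MB floppy layout), root_dir_sectors is
    # (224 * 32 + 511) // 512 = 14 and first_data_sector is
    # 1 + (2 * 9) + 14 = 33.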
def __verify_bpb_header(self):
"""Verify BPB header for correctness."""
if self.bpb_header["BS_jmpBoot"][0] == 0xEB:
if self.bpb_header["BS_jmpBoot"][2] != 0x90:
raise PyFATException("Boot code must end with 0x90")
elif self.bpb_header["BS_jmpBoot"][0] == 0xE9:
pass
else:
raise PyFATException("Boot code must start with 0xEB or "
"0xE9. Is this a FAT partition?")
#: 512,1024,2048,4096: As per fatgen103.doc
byts_per_sec_range = [2**x for x in range(9, 13)]
if self.bpb_header["BPB_BytsPerSec"] not in byts_per_sec_range:
raise PyFATException(f"Expected one of {byts_per_sec_range} "
f"bytes per sector, got: "
f"\'{self.bpb_header['BPB_BytsPerSec']}\'.")
#: 1,2,4,8,16,32,64,128: As per fatgen103.doc
sec_per_clus_range = [2**x for x in range(8)]
if self.bpb_header["BPB_SecPerClus"] not in sec_per_clus_range:
raise PyFATException(f"Expected one of {sec_per_clus_range} "
f"sectors per cluster, got: "
f"\'{self.bpb_header['BPB_SecPerClus']}\'.")
bytes_per_cluster = self.bpb_header["BPB_BytsPerSec"]
bytes_per_cluster *= self.bpb_header["BPB_SecPerClus"]
if bytes_per_cluster > 32768:
warnings.warn("Bytes per cluster should not be more than 32K, "
"but got: {}K. Trying to continue "
"anyway.".format(bytes_per_cluster // 1024), Warning)
if self.bpb_header["BPB_RsvdSecCnt"] == 0:
raise PyFATException("Number of reserved sectors must not be 0")
if self.bpb_header["BPB_Media"] not in [0xf0, 0xf8, 0xf9, 0xfa, 0xfb,
0xfc, 0xfd, 0xfe, 0xff]:
raise PyFATException("Invalid media type")
if self.bpb_header["BPB_NumFATS"] < 1:
raise PyFATException("At least one FAT expected, None found.")
root_entry_count = self.bpb_header["BPB_RootEntCnt"] * 32
root_entry_count %= self.bpb_header["BPB_BytsPerSec"]
if self.bpb_header["BPB_RootEntCnt"] != 0 and root_entry_count != 0:
raise PyFATException("Root entry count does not cleanly align with"
" bytes per sector!")
if self.bpb_header["BPB_TotSec16"] == 0 and \
self.bpb_header["BPB_TotSec32"] == 0:
raise PyFATException("16-Bit and 32-Bit total sector count "
"value empty.")
@staticmethod
@contextmanager
def open_fs(filename: str, offset: int = 0,
encoding=FAT_OEM_ENCODING):
"""Context manager for direct use of PyFAT."""
pf = PyFat(encoding=encoding, offset=offset)
pf.open(filename)
yield pf
pf.close()
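# --- Usage sketch (illustrative, not part of the library) ------------------
# A minimal example of the open_fs() context manager above. "fat.img" is a
# hypothetical path to an existing FAT-formatted image, and this assumes
# open() parses the headers so that fat_type and bpb_header (both populated
# by parse_header() above) are available.
if __name__ == "__main__":
    with PyFat.open_fs("fat.img") as fs:
        print(f"FAT type: FAT{fs.fat_type}")
        print(f"Bytes per sector: {fs.bpb_header['BPB_BytsPerSec']}")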
|
from rest_framework import routers
from api.base.auth.viewsets import (
RegisterViewsets,
LoginViewsets
)
from api.base.products.viewsets import (
ProductViewsets
)
router = routers.DefaultRouter()
router.register('auth/register', RegisterViewsets, basename='register-viewsets')
router.register('auth/login', LoginViewsets, basename='login-viewsets')
router.register('products', ProductViewsets, basename='products-viewsets')
api_v1 = router.urls
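# Illustrative only: router.urls is a list of URL patterns, so a project
# urls.py (hypothetical location) could mount this API version like so:
#
#   from django.urls import include, path
#   from api.base.routers import api_v1  # hypothetical module path
#
#   urlpatterns = [
#       path("api/v1/", include(api_v1)),
#   ]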
|
import argparse
import os
import cv2
from CountsPerSec import CountsPerSec
from VideoGet import VideoGet
from VideoShow import VideoShow
def putIterationsPerSec(frame, iterations_per_sec):
"""
Add iterations per second text to lower-left corner of a frame.
"""
cv2.putText(frame, "{:.0f} iterations/sec".format(iterations_per_sec),
(10, 450), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
return frame
def noThreading(source=0):
"""Grab and show video frames without multithreading."""
cap = cv2.VideoCapture(source)
cps = CountsPerSec().start()
while True:
grabbed, frame = cap.read()
if not grabbed or cv2.waitKey(1) == ord("q"):
break
frame = putIterationsPerSec(frame, cps.countsPerSec())
cv2.imshow("Video", frame)
cps.increment()
def threadVideoGet(source=0):
"""
Dedicated thread for grabbing video frames with VideoGet object.
Main thread shows video frames.
"""
video_getter = VideoGet(source).start()
cps = CountsPerSec().start()
while True:
if (cv2.waitKey(1) == ord("q")) or video_getter.stopped:
video_getter.stop()
break
frame = video_getter.frame
frame = putIterationsPerSec(frame, cps.countsPerSec())
cv2.imshow("Video", frame)
cps.increment()
def threadVideoShow(source=0):
"""
Dedicated thread for showing video frames with VideoShow object.
Main thread grabs video frames.
"""
cap = cv2.VideoCapture(source)
(grabbed, frame) = cap.read()
video_shower = VideoShow(frame).start()
cps = CountsPerSec().start()
while True:
(grabbed, frame) = cap.read()
if not grabbed or video_shower.stopped:
video_shower.stop()
break
frame = putIterationsPerSec(frame, cps.countsPerSec())
video_shower.frame = frame
cps.increment()
def threadBoth(folder):
    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Dedicated thread for showing video frames with VideoShow object.
    Main thread serves only to pass frames between VideoGet and
    VideoShow objects/threads. Handles up to three videos from the
    given folder.
    """
    print(folder)
    video_getter = []
    video_shower = []
    cps = []
    frame = [0, 0, 0]
    for filename, x in zip(os.listdir(folder), range(3)):
        source = os.path.join(folder, filename)
        print(x, ' ', source)
        video_getter.append(VideoGet(source).start())
        print(x, ' started')
        video_shower.append(VideoShow(video_getter[x].frame, source).start())
        cps.append(CountsPerSec().start())
while True:
for x in range(len(video_shower)):
if video_getter[x].stopped or video_shower[x].stopped:
video_shower[x].stop()
video_getter[x].stop()
break
frame[x] = video_getter[x].frame
frame[x] = putIterationsPerSec(frame[x], cps[x].countsPerSec())
video_shower[x].frame = frame[x]
def main():
'''
ap = argparse.ArgumentParser()
ap.add_argument("--source", "-s", default=0,
help="Path to video file or integer representing webcam index"
+ " (default 0).")
ap.add_argument("--thread", "-t", default="none",
help="Threading mode: get (video read in its own thread),"
+ " show (video show in its own thread), both"
+ " (video read and video show in their own threads),"
+ " none (default--no multithreading)")
args = vars(ap.parse_args())
if (
isinstance(args["source"], str)
and args["source"].isdigit()
and not os.path.isfile(args["source"])
):
args["source"] = int(args["source"])
if args["thread"] == "both":
threadBoth(args["source"])
elif args["thread"] == "get":
threadVideoGet(args["source"])
elif args["thread"] == "show":
threadVideoShow(args["source"])
else:
noThreading(args["source"])
'''
folder = "D:\\Users\\aaoob\\Desktop\\projects\\samples"
threadBoth(folder)
if __name__ == "__main__":
main()
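# --- Illustrative sketch (not part of this script) --------------------------
# VideoGet and VideoShow are imported from modules that are not shown here.
# Based only on how they are used above (.start(), .frame, .stopped, .stop()),
# a minimal VideoGet-style frame grabber might look roughly like this:
#
#   from threading import Thread
#   import cv2
#
#   class MinimalVideoGet:
#       def __init__(self, src=0):
#           self.stream = cv2.VideoCapture(src)
#           self.grabbed, self.frame = self.stream.read()
#           self.stopped = False
#
#       def start(self):
#           Thread(target=self.get, daemon=True).start()
#           return self
#
#       def get(self):
#           while not self.stopped:
#               self.grabbed, self.frame = self.stream.read()
#               if not self.grabbed:
#                   self.stop()
#
#       def stop(self):
#           self.stopped = True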
|
import json
import logging
import os
from curation_utils import scraping
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s:%(asctime)s:%(module)s:%(lineno)d %(message)s")
configuration = {}
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'local_config.json'), 'r') as handle:
configuration = json.load(handle)
site_configuration = configuration['vedanidhi']
def get_logged_in_browser(headless=True):
"""Sometimes headless browser fails with selenium.common.exceptions.ElementClickInterceptedException: Message: element click intercepted . Then, non-headless browser works fine! Or can try https://stackoverflow.com/questions/48665001/can-not-click-on-a-element-elementclickinterceptedexception-in-splinter-selen """
browser = scraping.get_selenium_chrome(headless=headless)
browser.get("https://vaakya.vedanidhi.in/login/")
username = browser.find_element_by_id("username")
username.send_keys(site_configuration["user"])
browser.find_element_by_id("password").send_keys(site_configuration["pass"])
browser.find_element_by_id("submit_button").click()
browser.get("https://vaakya.vedanidhi.in/browse/?lang=En")
return browser
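# Illustrative usage (assumes local_config.json exists alongside this file
# with a 'vedanidhi' entry containing 'user' and 'pass', and that
# scraping.get_selenium_chrome returns a standard Selenium WebDriver):
#
#   browser = get_logged_in_browser(headless=False)
#   logging.info("Landed on: %s", browser.current_url)
#   browser.quit()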
|
#! /usr/bin/env python2
'''
Register an mDNS/DNS-SD alias name for your computer using the Avahi daemon.
This script will register an alternate CNAME alias besides your hostname,
which can be useful, for example, when serving several HTTP virtual hosts to
your friends on the local network and you don't want to make them configure
their /etc/hosts.
Why a CNAME? You could also publish your current address with avahi-publish-address,
but on a multihomed host (connected via wifi0 and eth0, perhaps) a single
address will not be valid on both networks. So this publishes a CNAME to your
hostname, which, by default, is already published by Avahi.
The domain should almost always be .local.
The cname is not restricted to ASCII; it will be encoded as IDNA.
The alias will stay published for as long as the script runs.
'''
import avahi, dbus
from encodings.idna import ToASCII
TTL = 60
# Got these from /usr/include/avahi-common/defs.h
CLASS_IN = 0x01
TYPE_CNAME = 0x05
def publish_cname(cname):
bus = dbus.SystemBus()
server = dbus.Interface(bus.get_object(avahi.DBUS_NAME, avahi.DBUS_PATH_SERVER),
avahi.DBUS_INTERFACE_SERVER)
group = dbus.Interface(bus.get_object(avahi.DBUS_NAME, server.EntryGroupNew()),
avahi.DBUS_INTERFACE_ENTRY_GROUP)
if not u'.' in cname:
cname = cname + '.local'
cname = encode_cname(cname)
rdata = encode_rdata(server.GetHostNameFqdn())
rdata = avahi.string_to_byte_array(rdata)
group.AddRecord(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC, dbus.UInt32(0),
cname, CLASS_IN, TYPE_CNAME, TTL, rdata)
group.Commit()
def encode_cname(name):
return '.'.join( ToASCII(p) for p in name.split('.') if p )
def encode_rdata(name):
def enc(part):
a = ToASCII(part)
return chr(len(a)), a
return ''.join( '%s%s' % enc(p) for p in name.split('.') if p ) + '\0'
if __name__ == '__main__':
import time, sys, locale
if len(sys.argv)<2:
script_name = sys.argv[0]
print "Usage: %s hostname.local [hostname2.local] [hostname3.local]" % script_name
sys.exit(1)
for each in sys.argv[1:]:
name = unicode(each, locale.getpreferredencoding())
publish_cname(name)
try:
while True: time.sleep(60)
except KeyboardInterrupt:
print "Exiting"
sys.exit(0)
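# Example invocation (illustrative; requires a running Avahi daemon plus the
# python-avahi and python-dbus bindings, and Python 2 as per the shebang):
#
#   ./publish_cname.py myalias.local another-alias.local
#
# The script name above is hypothetical; aliases stay registered until the
# process is interrupted.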
|
default_app_config = 'workflow.apps.WorkflowAppConfig'
|
from twilio.rest import TwilioRestClient
# put your own credentials here
ACCOUNT_SID = 'ACCOUNT DATA'
AUTH_TOKEN = 'ACCOUNT DATA'
client = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN)
client.messages.create(
to = 'TO_NUMBER',
from_ = 'FROM_NUMBER',
body = 'MESSAGE',
)
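# Note: TwilioRestClient is the client from older twilio-python releases
# (5.x and earlier). On current versions of the library the rough equivalent
# is:
#
#   from twilio.rest import Client
#
#   client = Client(ACCOUNT_SID, AUTH_TOKEN)
#   client.messages.create(to='TO_NUMBER', from_='FROM_NUMBER', body='MESSAGE')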
|