content stringlengths 5 1.05M |
|---|
import numpy as np
import pickle
import matplotlib.pyplot as plt
import random
from encoder import get_encoder_layer
from decoder import decoding_layer,process_decoder_input
import tensorflow as tf
import os
device_name = "/gpu:0"
#def corrupt_noise(traj, rate_noise, factor):
# new_traj={}
# for count, key in enumerate(traj):
# if count%500==0:
# print('count:',count)
# new_traj[key] = traj[key]
# for i in range(len(traj[key])):
# seed = random.random()
# if seed < rate_noise:
# #adding gauss noise
# for col in range(46):
# new_traj[key][i][col] = traj[key][i][col] + factor * random.gauss(0,1)
# return new_traj
#
#def corrupt_drop(traj, rate_drop):
# new_traj={}
# for count, key in enumerate(traj):
# if count%500==0:
# print('count:',count)
# new_traj[key] = traj[key]
# droprow = []
# for i in range(len(traj[key])):
# seed = random.random()
# if seed < rate_drop:
# #dropping
# droprow.append(i)
# new_traj[key] = np.delete(new_traj[key], droprow, axis = 0)
#
# return new_traj
def get_inputs():
    '''
    model inputs tensor

    Builds the TF1 placeholders for the seq2seq graph and returns them:
      embed_seq: float32 (batch, time, 20) pre-embedded source sequence
                 -- assumes a 20-dim embedding; TODO confirm against embed_mat
      inputs / targets: int32 (batch, time) token-id matrices
      learning_rate: scalar float32
      target_sequence_length / source_sequence_length: int32 per-example lengths
      max_target_sequence_length: reduce_max over target lengths (a tensor, not
        a placeholder -- it is derived inside the graph)
    '''
    embed_seq = tf.placeholder(tf.float32, [None, None, 20], name='embed_seq')
    inputs = tf.placeholder(tf.int32, [None, None], name='inputs')
    targets = tf.placeholder(tf.int32, [None, None], name='targets')
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    # define target sequence length (target_sequence_length and source_sequence_length are used as parameters of feed_dict)
    target_sequence_length = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
    max_target_sequence_length = tf.reduce_max(target_sequence_length, name='max_target_len')
    source_sequence_length = tf.placeholder(tf.int32, (None,), name='source_sequence_length')
    return embed_seq, inputs, targets, learning_rate, target_sequence_length, max_target_sequence_length, source_sequence_length
def seq2seq_model(embed_seq,
                  input_data,
                  targets,
                  lr,
                  target_sequence_length,
                  max_target_sequence_length,
                  source_sequence_length,
                  source_vocab_size,
                  target_vocab_size,
                  encoder_embedding_size,
                  decoder_embedding_size,
                  rnn_size,
                  num_layers):
    """Assemble the encoder/decoder training and inference graph.

    Returns (training_decoder_output, predicting_decoder_output).

    Bug fix: the original body ignored the `encoder_embedding_size` and
    `decoder_embedding_size` parameters and read the module-level globals
    `encoding_embedding_size` / `decoding_embedding_size` instead. The caller
    passes those same globals, so using the parameters is behavior-identical
    while making the function usable outside this script.

    NOTE(review): this function still reads the module-level globals
    `target_letter_to_int` and `batch_size` (defined in the __main__ block);
    it cannot be called before those exist -- TODO thread them through as
    parameters in a follow-up.
    """
    # Encoder: only the final hidden state is handed to the decoder.
    _, encoder_state = get_encoder_layer(embed_seq,
                                         input_data,
                                         rnn_size,
                                         num_layers,
                                         source_sequence_length,
                                         source_vocab_size,
                                         encoder_embedding_size)
    #tf.add_to_collection("encoder_state",encoder_state)
    print(encoder_state)
    print('Done encoder state')
    # decoder input after Data_Preprocessing (prepends <GO>, drops last token)
    decoder_input = process_decoder_input(targets, target_letter_to_int, batch_size)
    # state vector and input to decoder
    training_decoder_output, predicting_decoder_output = decoding_layer(target_letter_to_int,
                                                                        decoder_embedding_size,
                                                                        num_layers,
                                                                        rnn_size,
                                                                        target_sequence_length,
                                                                        max_target_sequence_length,
                                                                        encoder_state,
                                                                        decoder_input,
                                                                        batch_size)
    return training_decoder_output, predicting_decoder_output
def pad_sentence_batch(sentence_batch, pad_int):
    '''
    Right-pad every sentence in the batch with `pad_int` so that all
    sentences share the length of the longest one.
    parameters:
        - sentence_batch: list of token-id lists
        - pad_int: the integer id of the <PAD> token
    '''
    longest = max(len(sentence) for sentence in sentence_batch)
    padded = []
    for sentence in sentence_batch:
        fill = [pad_int] * (longest - len(sentence))
        padded.append(sentence + fill)
    return padded
def get_batches(targets, sources, batch_size, source_pad_int, target_pad_int, embed_mat):
    """Yield training batches as (embed, padded_targets, padded_sources,
    target_lengths, source_lengths). Any trailing partial batch is dropped."""
    num_batches = len(sources) // batch_size
    for batch_i in range(num_batches):
        start = batch_i * batch_size
        src_batch = sources[start:start + batch_size]
        tgt_batch = targets[start:start + batch_size]
        # Pad so every row of the batch has the same sequence length.
        padded_src = np.array(pad_sentence_batch(src_batch, source_pad_int))
        padded_tgt = np.array(pad_sentence_batch(tgt_batch, target_pad_int))
        # Unpadded lengths, used by the dynamic RNN / loss mask downstream.
        tgt_lens = [len(t) for t in tgt_batch]
        src_lens = [len(s) for s in src_batch]
        # Look up the embedding row for every (padded) source token id.
        embed_batch = np.array([[embed_mat[token] for token in row] for row in padded_src])
        yield embed_batch, padded_tgt, padded_src, tgt_lens, src_lens
def extract_character_vocab(data, UNIQUE_WORDS):
    '''
    build data mapping

    Builds bidirectional mappings between segment frozensets and their corpus
    ids, then appends the four special tokens after the largest corpus id.

    Bug fix: the original test was `if frozenset(segs) in UNIQUE_WORDS == False:`,
    a chained comparison equivalent to
    `(frozenset(segs) in UNIQUE_WORDS) and (UNIQUE_WORDS == False)`, which is
    always False -- so unknown segments raised KeyError instead of printing the
    warning. Also removed the unused `special_words` local.
    '''
    vocab_to_int = {}
    int_to_vocab = {}
    for plays in data:
        for segs in plays:
            key = frozenset(segs)
            if key not in UNIQUE_WORDS:
                print('No this segment! please build it.')
            else:
                vocab_to_int[key] = UNIQUE_WORDS[key]
                int_to_vocab[UNIQUE_WORDS[key]] = key
    # Special tokens take the four ids directly above the corpus maximum.
    base = max(UNIQUE_WORDS.values())
    for offset, token in enumerate(['<PAD>', '<UNK>', '<GO>', '<EOS>'], start=1):
        vocab_to_int[token] = base + offset
        int_to_vocab[base + offset] = token
    return int_to_vocab, vocab_to_int
def mapping_source_int(cor_ogm_train_data, UNIQUE_WORDS):
    """Map each play (a sequence of segments) to its list of corpus ids."""
    return [[UNIQUE_WORDS[frozenset(segment)] for segment in play]
            for play in cor_ogm_train_data]
def mapping_target_int(ogm_train_data, UNIQUE_WORDS):
    """Map each play to its corpus ids and append the <EOS> id.

    Bug fix: the original read the module-level global `target_letter_to_int`
    for the <EOS> id, so the function only worked inside the training script.
    `extract_character_vocab` always assigns <EOS> = max(UNIQUE_WORDS.values()) + 4,
    so compute the same id directly from the argument.
    """
    eos_id = max(UNIQUE_WORDS.values()) + 4
    target_int = []
    for plays in ogm_train_data:
        temp = [UNIQUE_WORDS[frozenset(word)] for word in plays]
        temp.append(eos_id)
        target_int.append(temp)
    return target_int
if __name__ == '__main__':
    # Let TF grow GPU memory on demand instead of reserving it all up front.
    TF_CONFIG_ = tf.ConfigProto()
    TF_CONFIG_.gpu_options.allow_growth = True
    sess = tf.Session(config=TF_CONFIG_)
    print('autoencoder')
    path1=r'TrainedData/'
    #cor_ogm_train_data = ogm_train_data=pickle.load(open(path1+'drop_ogm_train_data', 'rb'), encoding='bytes') #for drop version
    # Denoising-autoencoder setup: corrupted sequences as input, clean as target.
    cor_ogm_train_data = ogm_train_data=pickle.load(open(path1+'noise_ogm_train_data', 'rb'), encoding='bytes') #for noise version
    ogm_train_data=pickle.load(open(path1+'ogm_train_data', 'rb'), encoding='bytes')
    embed_mat=pickle.load(open(path1+'embed_mat', 'rb'), encoding='bytes')
    # Append four random embedding rows for <PAD>/<UNK>/<GO>/<EOS>.
    embed_mat = np.r_[embed_mat,np.random.rand(len(embed_mat[0])).reshape(1,-1)]
    embed_mat = np.r_[embed_mat,np.random.rand(len(embed_mat[0])).reshape(1,-1)]
    embed_mat = np.r_[embed_mat,np.random.rand(len(embed_mat[0])).reshape(1,-1)]
    embed_mat = np.r_[embed_mat,np.random.rand(len(embed_mat[0])).reshape(1,-1)]
    UNIQUE_WORDS=pickle.load(open(path1+'corpus', 'rb'), encoding='bytes')
    # Build int<->token mappings for source (corrupted) and target (clean) data.
    source_int_to_letter, source_letter_to_int = extract_character_vocab(cor_ogm_train_data, UNIQUE_WORDS)
    target_int_to_letter, target_letter_to_int = extract_character_vocab(ogm_train_data, UNIQUE_WORDS)
    source_int = mapping_source_int(cor_ogm_train_data, UNIQUE_WORDS)
    target_int = mapping_target_int(ogm_train_data, UNIQUE_WORDS)
    #look transform
    print('source', source_int[:5])
    print('target', target_int[:5])
    # Number of Epochs
    epochs = 10
    # Batch Size
    batch_size = 10
    # RNN Size
    rnn_size = 50
    # Number of Layers
    num_layers = 2
    # Embedding Size
    encoding_embedding_size = 50
    decoding_embedding_size = 50
    # Learning Rate
    learning_rate = 0.01
    with tf.device(device_name):
        #building graph
        train_graph = tf.Graph()
        with train_graph.as_default():
            embed_seq, input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length = get_inputs()
            training_decoder_output, predicting_decoder_output = seq2seq_model(embed_seq,
                                                                               input_data,
                                                                               targets,
                                                                               lr,
                                                                               target_sequence_length,
                                                                               max_target_sequence_length,
                                                                               source_sequence_length,
                                                                               len(source_letter_to_int),
                                                                               len(target_letter_to_int),
                                                                               encoding_embedding_size,
                                                                               decoding_embedding_size,
                                                                               rnn_size,
                                                                               num_layers)
            training_logits = tf.identity(training_decoder_output.rnn_output, 'logits')
            predicting_logits = tf.identity(predicting_decoder_output.sample_id, name='predictions')
            # Mask out padding positions so they do not contribute to the loss.
            masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
            with tf.name_scope("optimization"):
                # Loss function
                cost = tf.contrib.seq2seq.sequence_loss(
                    training_logits,
                    targets,
                    masks)
                # Optimizer
                optimizer = tf.train.AdamOptimizer(lr)
                # Gradient Clipping
                gradients = optimizer.compute_gradients(cost)
                capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]
                train_op = optimizer.apply_gradients(capped_gradients)
    print('done building graph')
    # train and validation
    train_source = source_int[batch_size:]
    train_target = target_int[batch_size:]
    # leave one batch for validation
    valid_source = source_int[:batch_size]
    valid_target = target_int[:batch_size]
    (valid_embed_batch, valid_targets_batch, valid_sources_batch, valid_targets_lengths, valid_sources_lengths) = next(get_batches(valid_target, valid_source, batch_size,
                                                                                                                                   source_letter_to_int['<PAD>'],
                                                                                                                                   target_letter_to_int['<PAD>'],
                                                                                                                                   embed_mat))
    display_step = 5
    checkpoint = path1 + "model_1/trained_model.ckpt"
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
    with tf.Session(graph=train_graph, config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(tf.global_variables_initializer())
        for epoch_i in range(1, epochs+1):
            for batch_i, (embed_batch, targets_batch, sources_batch, targets_lengths, sources_lengths) in enumerate(
                    get_batches(train_target, train_source, batch_size,
                                source_letter_to_int['<PAD>'],
                                target_letter_to_int['<PAD>'],
                                embed_mat)):
                _ , loss = sess.run(
                    [train_op, cost],
                    {embed_seq: embed_batch,
                     input_data: sources_batch,
                     targets: targets_batch,
                     lr: learning_rate,
                     target_sequence_length: targets_lengths,
                     source_sequence_length: sources_lengths})
                if batch_i % display_step == 0:
                    # validation loss
                    validation_loss = sess.run(
                        [cost],
                        {embed_seq:valid_embed_batch,
                         input_data: valid_sources_batch,
                         targets: valid_targets_batch,
                         lr: learning_rate,
                         target_sequence_length: valid_targets_lengths,
                         source_sequence_length: valid_sources_lengths})
                    print('Epoch {:>3}/{} Batch {:>4}/{} - Training Loss: {:>6.3f} - Validation loss: {:>6.3f}'
                          .format(epoch_i,
                                  epochs,
                                  batch_i,
                                  len(train_source) // batch_size,
                                  loss,
                                  validation_loss[0]))
        # save model
        saver = tf.train.Saver()
        saver.save(sess, checkpoint)
        print('Model Trained and Saved')
from summarizer import commons, graph, keywords, pagerank_weighted, \
summarizer, syntactic_unit, textrank
|
#
# Licensed to Big Data Genomics (BDG) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The BDG licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdgenomics.adam.adamContext import ADAMContext
from bdgenomics.adam.models import ReferenceRegion
from bdgenomics.adam.test import SparkTestCase
class ADAMContextTest(SparkTestCase):
    """Smoke tests: each ADAMContext loader should read the expected record count."""

    def _assert_counts(self, dataset, expected):
        # Check both the DataFrame view and the wrapped JVM dataset agree.
        self.assertEqual(dataset.toDF().count(), expected)
        self.assertEqual(dataset._jvmDataset.jrdd().count(), expected)

    def test_load_alignments(self):
        sam_path = self.resourceFile("small.sam")
        ctx = ADAMContext(self.ss)
        self._assert_counts(ctx.loadAlignments(sam_path), 20)

    def test_load_indexed_bam(self):
        bam_path = self.resourceFile("indexed_bams/sorted.bam")
        ctx = ADAMContext(self.ss)
        regions = [ReferenceRegion("chr2", 100, 101),
                   ReferenceRegion("3", 10, 17)]
        reads = ctx.loadIndexedBam(bam_path, regions)
        self.assertEqual(reads.toDF().count(), 2)

    def test_load_gtf(self):
        gtf_path = self.resourceFile("Homo_sapiens.GRCh37.75.trun20.gtf")
        ctx = ADAMContext(self.ss)
        self._assert_counts(ctx.loadFeatures(gtf_path), 15)

    def test_load_bed(self):
        bed_path = self.resourceFile("gencode.v7.annotation.trunc10.bed")
        ctx = ADAMContext(self.ss)
        self._assert_counts(ctx.loadFeatures(bed_path), 10)

    def test_load_narrowPeak(self):
        peak_path = self.resourceFile("wgEncodeOpenChromDnaseGm19238Pk.trunc10.narrowPeak")
        ctx = ADAMContext(self.ss)
        self._assert_counts(ctx.loadFeatures(peak_path), 10)

    def test_load_interval_list(self):
        interval_path = self.resourceFile("SeqCap_EZ_Exome_v3.hg19.interval_list")
        ctx = ADAMContext(self.ss)
        self._assert_counts(ctx.loadFeatures(interval_path), 369)

    def test_load_coverage(self):
        coverage_path = self.resourceFile("sample_coverage.bed")
        ctx = ADAMContext(self.ss)
        self.assertEqual(ctx.loadCoverage(coverage_path).toDF().count(), 3)

    def test_load_genotypes(self):
        vcf_path = self.resourceFile("small.vcf")
        ctx = ADAMContext(self.ss)
        self._assert_counts(ctx.loadGenotypes(vcf_path), 18)

    def test_load_variants(self):
        vcf_path = self.resourceFile("small.vcf")
        ctx = ADAMContext(self.ss)
        self._assert_counts(ctx.loadVariants(vcf_path), 6)

    def test_load_slices(self):
        fasta_path = self.resourceFile("HLA_DQB1_05_01_01_02.fa")
        ctx = ADAMContext(self.ss)
        self._assert_counts(ctx.loadSlices(fasta_path, 10000), 1)

    def test_load_dna_sequences(self):
        fasta_path = self.resourceFile("HLA_DQB1_05_01_01_02.fa")
        ctx = ADAMContext(self.ss)
        self._assert_counts(ctx.loadDnaSequences(fasta_path), 1)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from franka_tut_reasoning.ontology_manager import TaskSemanticManager
from franka_tut_reasoning.main import Reasoning
from franka_tut_reasoning.grounding_action_server import GroundingActionServer
from franka_tut_reasoning.task_server import TaskActionServer
from franka_tut_msgs.msg import *
from franka_tut_msgs.srv import *
from std_msgs.msg import Empty
def select_planner_mode():
    """Instantiate the planner selected by the ROS parameter ``~mode``.

    Returns an AutomatedPlanner, InteractivePlanner or CollaborativePlanner.
    Raises ValueError for any other mode value.
    """
    mode = rospy.get_param("~mode")
    # Parenthesized single-argument print is valid on both Python 2 and 3.
    print("SELECTED MODE IS : {}".format(mode))
    # Planner imports are deferred so only the selected backend is loaded.
    if mode == "automated":
        from franka_tut_reasoning.planner import AutomatedPlanner
        result = AutomatedPlanner()
    elif mode == "interactive":
        from franka_tut_reasoning.planner import InteractivePlanner
        result = InteractivePlanner()
    elif mode == "collaborative":
        from franka_tut_reasoning.planner import CollaborativePlanner
        result = CollaborativePlanner()
    else:
        # Bug fix: previously an unknown mode fell through to `return result`
        # and raised UnboundLocalError; fail fast with a clear message instead.
        raise ValueError("Unknown planner mode: {!r}".format(mode))
    return result
if __name__ == '__main__':
    try:
        #rospy.init_node('reasoning_node', log_level=rospy.DEBUG)
        rospy.init_node('reasoning_node', log_level=rospy.DEBUG)
        # Core reasoning stack: ontology manager, planner, action servers.
        knowledge_manager = TaskSemanticManager()
        planner = select_planner_mode()
        server = TaskActionServer('task_action')
        reasoner = Reasoning(knowledge_manager, planner, server)
        grounding_server = GroundingActionServer('grounding_action', knowledge_manager)
        #Initialize subscribers
        rospy.Subscriber("franka_gripper_feedback", Action, server.actuator_cb)
        #rospy.Subscriber("franka_sensor_feedback", Action, server.sensor_feedback_cb)
        rospy.Subscriber("processed_inputs", Instruction, reasoner.callback)
        rospy.Subscriber("grounding_triple", Instruction, grounding_server.new_triple_cb)
        rospy.Subscriber("end_grounding", Empty, grounding_server.grounding_end_cb)
        rospy.Subscriber("grounding_skill_link", Instructions, grounding_server.relate_skills)
        rospy.Subscriber("human_done", Empty, reasoner.human_callback)
        # Block until node shutdown; callbacks run on subscriber threads.
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
|
#!/usr/bin/python
'''
This script collects the running times of the tests present in the current
directory.
It does not run the tests but reads the already present output files.
Usage:
python performance.py
Dependencies:
gnuplot http://www.gnuplot.info/
'''
import time
from subprocess import check_output
from subprocess import check_call

# Identify the current commit so the output file name records what was measured.
# Bug fix: check_output returns bytes on Python 3; decode before string ops.
git_output = check_output(['git', 'show', 'HEAD']).decode()
commit = git_output.split('\n')[0]
# "commit <sha>..." -> drop the leading "commit " prefix (7 characters).
commit_id = time.strftime('%Y_%m_%d__%H_%M_%S')+'__'+commit[7:]
# Collect every "Runtime decision procedure" line from test outputs in this tree.
process = check_output(['grep', '^Runtime decision procedure', '-R']).decode()
file_name = 'performance_'+commit_id+'.out'
# Bug fix: was a Python 2 print statement while the rest of the script used
# the function form; now consistently Python 3.
print('writing to file', file_name)
with open(file_name, 'w') as f:
    f.write(process)
print('drawing to file', file_name+'.png')
check_call(['gnuplot', '-e', 'file="'+file_name+'"', '-e',
            'outputfile="'+file_name+'.png"', 'performance_draw.gp'])
|
# Generated by Django 2.2 on 2020-10-13 10:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds is_admin/is_dealer flags to Account and re-points Plotter.dealer
    at accounts.Dealer with CASCADE deletion."""

    dependencies = [
        ('accounts', '0001_initial'),
    ]

    operations = [
        # Both role flags default to False, so existing rows need no backfill.
        migrations.AddField(
            model_name='account',
            name='is_admin',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='account',
            name='is_dealer',
            field=models.BooleanField(default=False),
        ),
        # Deleting a Dealer cascades to its plotters.
        migrations.AlterField(
            model_name='plotter',
            name='dealer',
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                related_name='owner',
                to='accounts.Dealer'),
        ),
    ]
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for keras models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core import compat
from tensorflowjs.converters import converter as tfjs_converter
DEFAULT_SCALE, DEFAULT_ZERO_POINT = 0, 0
def set_batch_size(model, batch_size):
    """Sets batch size for the model."""
    for tensor in model.inputs:
        # Replace only the leading (batch) dimension; keep the rest intact.
        reshaped = [batch_size] + tensor.shape[1:]
        tensor.set_shape(reshaped)
def _create_temp_dir(convert_from_saved_model):
"""Creates temp dir, if True is given."""
if convert_from_saved_model:
return tempfile.TemporaryDirectory()
else:
return DummyContextManager()
class DummyContextManager(object):
    """No-op context manager; the `with` target is always None."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        # Returning None (falsy) means exceptions are never suppressed.
        return None
def export_tflite(model,
                  tflite_filepath,
                  quantization_config=None,
                  convert_from_saved_model_tf2=False,
                  preprocess=None,
                  supported_ops=(tf.lite.OpsSet.TFLITE_BUILTINS,)):
    """Converts the retrained model to tflite format and saves it.
    Args:
      model: model to be converted to tflite.
      tflite_filepath: File path to save tflite model.
      quantization_config: Configuration for post-training quantization.
      convert_from_saved_model_tf2: Convert to TFLite from saved_model in TF 2.x.
      preprocess: A preprocess function to apply on the dataset.
        # TODO(wangtz): Remove when preprocess is split off from CustomModel.
      supported_ops: A list of supported ops in the converted TFLite file.
    Raises:
      ValueError: If `tflite_filepath` is None.
    """
    if tflite_filepath is None:
        raise ValueError(
            "TFLite filepath couldn't be None when exporting to tflite.")
    # Under TF1 behavior the v1 converter must be used.
    if compat.get_tf_behavior() == 1:
        lite = tf.compat.v1.lite
    else:
        lite = tf.lite
    # Saved-model round trip is mandatory under TF1, opt-in under TF2.
    convert_from_saved_model = (
        compat.get_tf_behavior() == 1 or convert_from_saved_model_tf2)
    with _create_temp_dir(convert_from_saved_model) as temp_dir_name:
        if temp_dir_name:
            # Serialize to a saved model in the temp dir, then convert from it.
            save_path = os.path.join(temp_dir_name, 'saved_model')
            model.save(save_path, include_optimizer=False, save_format='tf')
            converter = lite.TFLiteConverter.from_saved_model(save_path)
        else:
            # DummyContextManager yields None: convert directly from Keras.
            converter = lite.TFLiteConverter.from_keras_model(model)
        if quantization_config:
            converter = quantization_config.get_converter_with_quantization(
                converter, preprocess)
        converter.target_spec.supported_ops = supported_ops
        tflite_model = converter.convert()
    with tf.io.gfile.GFile(tflite_filepath, 'wb') as f:
        f.write(tflite_model)
def get_lite_runner(tflite_filepath, model_spec):
    """Gets `LiteRunner` from file path to TFLite model and `model_spec`."""
    # A model_spec may optionally provide hooks to reorder the TFLite
    # input/output details into the Keras model's ordering.
    reorder_input_details_fn = getattr(model_spec, 'reorder_input_details', None)
    reorder_output_details_fn = getattr(model_spec, 'reorder_output_details', None)
    return LiteRunner(tflite_filepath, reorder_input_details_fn,
                      reorder_output_details_fn)
def _get_input_tensor(input_tensors, input_details, i):
"""Gets input tensor in `input_tensors` that maps `input_detail[i]`."""
if isinstance(input_tensors, dict):
# Gets the mapped input tensor.
input_detail = input_details[i]
for input_tensor_name, input_tensor in input_tensors.items():
if input_tensor_name in input_detail['name']:
return input_tensor
raise ValueError('Input tensors don\'t contains a tensor that mapped the '
'input detail %s' % str(input_detail))
else:
return input_tensors[i]
class LiteRunner(object):
    """Runs inference with the TFLite model."""

    def __init__(self,
                 tflite_filepath,
                 reorder_input_details_fn=None,
                 reorder_output_details_fn=None):
        """Initializes Lite runner with tflite model file.
        Args:
          tflite_filepath: File path to the TFLite model.
          reorder_input_details_fn: Function to reorder the input details to map the
            order of keras model.
          reorder_output_details_fn: Function to reorder the output details to map
            the order of keras model.
        """
        with tf.io.gfile.GFile(tflite_filepath, 'rb') as f:
            tflite_model = f.read()
        self.interpreter = tf.lite.Interpreter(model_content=tflite_model)
        self.interpreter.allocate_tensors()
        # Get the details (indices, dtypes, quantization) of the input tensors,
        # optionally reordered to match the Keras model's input order.
        self.input_details = self.interpreter.get_input_details()
        if reorder_input_details_fn is not None:
            self.input_details = reorder_input_details_fn(self.input_details)
        self.output_details = self.interpreter.get_output_details()
        if reorder_output_details_fn is not None:
            self.output_details = reorder_output_details_fn(self.output_details)

    def run(self, input_tensors):
        """Runs inference with the TFLite model.
        Args:
          input_tensors: List / Dict of the input tensors of the TFLite model. The
            order should be the same as the keras model if it's a list. It also
            accepts tensor directly if the model has only 1 input.
        Returns:
          List of the output tensors for multi-output models, otherwise just
          the output tensor. The order should be the same as the keras model.
        """
        # Normalize a single bare tensor into a one-element list.
        if not isinstance(input_tensors, list) and \
           not isinstance(input_tensors, tuple) and \
           not isinstance(input_tensors, dict):
            input_tensors = [input_tensors]
        interpreter = self.interpreter
        for i, input_detail in enumerate(self.input_details):
            input_tensor = _get_input_tensor(input_tensors, self.input_details, i)
            if input_detail['quantization'] != (DEFAULT_SCALE, DEFAULT_ZERO_POINT):
                # Quantize the input: float -> fixed point using the tensor's
                # (scale, zero_point), then cast to the interpreter's dtype.
                scale, zero_point = input_detail['quantization']
                input_tensor = input_tensor / scale + zero_point
                input_tensor = np.array(input_tensor, dtype=input_detail['dtype'])
            interpreter.set_tensor(input_detail['index'], input_tensor)
        interpreter.invoke()
        output_tensors = []
        for output_detail in self.output_details:
            output_tensor = interpreter.get_tensor(output_detail['index'])
            if output_detail['quantization'] != (DEFAULT_SCALE, DEFAULT_ZERO_POINT):
                # Dequantize the output back to float32.
                scale, zero_point = output_detail['quantization']
                output_tensor = output_tensor.astype(np.float32)
                output_tensor = (output_tensor - zero_point) * scale
            output_tensors.append(output_tensor)
        if len(output_tensors) == 1:
            return output_tensors[0]
        return output_tensors
def export_tfjs(keras_or_saved_model, output_dir, **kwargs):
    """Exports saved model to tfjs.
    https://www.tensorflow.org/js/guide/conversion?hl=en
    Args:
      keras_or_saved_model: Keras or saved model.
      output_dir: Output TF.js model dir.
      **kwargs: Other options.
    """
    # A Keras model must be serialized to a saved model first; a saved-model
    # path can be converted directly.
    needs_save = isinstance(keras_or_saved_model, tf.keras.Model)
    with _create_temp_dir(needs_save) as temp_dir:
        if needs_save:
            keras_or_saved_model.save(
                temp_dir, include_optimizer=False, save_format='tf')
            model_path = temp_dir
        else:
            model_path = keras_or_saved_model
        tfjs_converter.dispatch_keras_saved_model_to_tensorflowjs_conversion(
            model_path, output_dir, **kwargs)
def load_tfjs_keras_model(model_path):
    """Loads tfjs keras model from path."""
    loader = tfjs_converter.keras_tfjs_loader
    return loader.load_keras_model(model_path, load_weights=True)
|
import pandas as pd
from gensim.models import Word2Vec
import os
import time
import datetime
def format_time(elapsed):
    """Render an elapsed time in seconds as an H:MM:SS string."""
    whole_seconds = int(round(elapsed))
    return str(datetime.timedelta(seconds=whole_seconds))
# Training pairs: two sentences, joined into one document with a " [SEP] " marker.
train_data = pd.read_csv("../tianchi_datasets/track3_round1_newtrain3.tsv", sep="\t", header=None,
                         names=["sentence1", "sentence2", "labels"])
train_data["document"] = train_data["sentence1"].str.cat(train_data["sentence2"], sep = " [SEP] ")
test_data = pd.read_csv("../tianchi_datasets/track3_round1_testA.tsv", sep="\t", header=None,
                        names=["sentence1", "sentence2"])
test_data["document"] = test_data["sentence1"].str.cat(test_data["sentence2"], sep=" [SEP] ")
print("train Word2Vec model .......")
t0 = time.time()
# Train on train + test documents together (unsupervised embedding training).
all_data = pd.concat([train_data["document"], test_data["document"]])
path = "./Word2Vec"
model_name = "Word2Vec_400.model"
## size: embedding dimension; window: prediction window size; min_count: ignore
## words whose total frequency is below this value; sg = 1 selects skip-gram.
## (These parameter names are the pre-4.0 gensim API.)
model = Word2Vec([[word for word in document.split(" ")] for document in all_data.values],
                 size = 400, window = 5, iter = 10, workers=3, seed = 2021, min_count = 2, sg = 1)
## Save the model and the word vectors in binary format.
model.save(os.path.join(path, model_name)) #### only a model saved this way can be trained further
model.wv.save_word2vec_format(os.path.join(path, "Word2Vec_400.bin"), binary= True) ## this saves only the word vectors
train_time = format_time(time.time() - t0)
print(f"train time consume {train_time}")
from discord.ext import commands
from app import errors
from app.classes.bot import Bot
from app.i18n import t_
async def not_disabled(ctx: commands.Context) -> bool:
    """Global check: block commands disabled by the guild's moderators.

    DMs always pass, as do members with Manage Server permission. Raises
    AllCommandsDisabled / CommandDisabled otherwise.
    """
    if ctx.guild is None:
        return True
    if ctx.channel.permissions_for(ctx.message.author).manage_guild:
        return True
    guild_config = await ctx.bot.db.guilds.get(ctx.guild.id)
    if not guild_config["allow_commands"]:
        raise errors.AllCommandsDisabled()
    name = ctx.command.qualified_name
    if name in guild_config["disabled_commands"]:
        message = t_(
            "The command {0} has been disabled "
            "by the moderators of this server."
        ).format(name)
        raise errors.CommandDisabled(message)
    return True
# Checks applied to every command invocation on this bot.
GLOBAL_CHECKS = [
    not_disabled,
    commands.bot_has_permissions(send_messages=True),
]
def setup(bot: Bot) -> None:
    """Extension entry point: register each global check on the bot."""
    for check in GLOBAL_CHECKS:
        bot.add_check(check)
|
#!/opt/mvsd/bin/python3
## this script will take asset tags in DeviceWipeAsset.csv and using an API call
## to Snipe retrieve the serial numbers for those devices and put them in a new
## CSV, DeviceWipeSerial.csv then it takes the serial numbers from
## DeviceWipeSerial.csv and issues an API command to WS1 (Airwatch) to wipe
## those devices then outputs the wiped device serial numbers to a file named
## %Y%m%d-%H%M%S_WipedDevices.csv
## no checks are in place, use at your own risk
## make sure you have the follow modules installed, then fill in your
## authentication information for bearer token, airwatch tenant code, and airwatch basic auth
import csv
from datetime import datetime
import requests
import credentials
# Snipe-IT endpoint: look up a hardware record by asset tag.
SNIPEURL = "https://snipe.domain.com/api/v1/hardware/bytag/"
# WS1 (AirWatch) command endpoint; the wipe reason is appended at call time.
WS1URL = "https://awconsole.domain.com/api/mdm/devices/commands?command=DeviceWipe&reason="
# API auth headers are kept out of source control in credentials.py.
SNIPEAPIHEADERS = credentials.snipeAPI
WS1HEADERS = credentials.ws1API
CSVHEADERS = [ 'serialnumber' ]
def get_today():
    """Return the current local time as a YYYYmmdd-HHMMSS stamp."""
    now = datetime.now()
    return now.strftime("%Y%m%d-%H%M%S")
# Output file stamped with the run time, e.g. 20240101-120000_WipedDevices.csv.
filename = "%s_%s.%s" % (get_today() , "WipedDevices","csv")
# Pass 1: resolve each asset tag to a serial number via the Snipe API.
with open('DeviceWipeAsset.csv', 'r') as infile, open("DeviceWipeSerial.csv", 'w') as outfile:
    csv_writer = csv.writer(outfile)
    csv_reader = csv.DictReader(infile, delimiter=',')
    csv_writer.writerow(CSVHEADERS)
    for lines in csv_reader:
        # assumes the input CSV has an 'asset' column header -- TODO confirm
        response = requests.get(SNIPEURL+lines['asset'], headers=SNIPEAPIHEADERS).json()
        csv_writer.writerow([response['serial']])
# Pass 2: issue a DeviceWipe command per serial and record the outcome.
with open('DeviceWipeSerial.csv', 'r') as infile, open (filename, 'w') as outfile:
    csv_writer = csv.writer(outfile)
    csv_reader = csv.DictReader(infile, delimiter=',')
    csv_writer.writerow(CSVHEADERS)
    for lines in csv_reader:
        response = requests.post(WS1URL+'mdm migration'+'&searchBy=Serialnumber&id='+lines['serialnumber'], headers=WS1HEADERS)
        # HTTP 202 Accepted means WS1 queued the wipe command.
        if response.status_code == 202:
            # NOTE(review): these rows have two columns but CSVHEADERS declares
            # only one ('serialnumber') -- header/row mismatch in the output file.
            csv_writer.writerow([lines['serialnumber'], 'OK'])
        else:
            csv_writer.writerow([lines['serialnumber'], 'X'])
|
import pytest
import time
from common.exceptions import PlenumTypeError, PlenumValueError
from stp_core.ratchet import Ratchet
from plenum.common.throttler import Throttler
def test_throttler_init_invalid_args():
    """Constructor must reject non-int and non-positive window sizes."""
    for bad_type in (None, '5', [4]):
        with pytest.raises(PlenumTypeError):
            Throttler(bad_type)
    for bad_value in (-1, 0):
        with pytest.raises(PlenumValueError):
            Throttler(bad_value)
def test_throttler_case1():
    """
    Tests throttler with default delay function
    """
    windowSize = 3
    throttler = Throttler(windowSize)
    testIterations = windowSize * 5
    for i in range(testIterations):
        hasAcquired, timeToWait = throttler.acquire()
        if i % windowSize == 0:
            # The first acquire of each window succeeds with no wait.
            assert hasAcquired
            assert round(timeToWait) == 0
        else:
            # Later attempts are throttled for the remainder of the window.
            assert not hasAcquired
            assert windowSize - i % windowSize == round(timeToWait)
        # Real 1s sleep: the throttler's window is wall-clock based.
        time.sleep(1)
def test_throttler_case2():
    """
    Tests throttler with custom delay function
    """
    windowSize = 10
    testIterations = windowSize - 2
    # Ratchet supplies a growing delay curve capped at `peak` -- so repeated
    # acquisitions should see non-decreasing cooldowns.
    ratchet = Ratchet(a=2, b=0.05, c=1, base=2, peak=windowSize)
    throttler = Throttler(windowSize, ratchet.get)
    # `time.sleep(1) or ...` sleeps first, then records the cooldown.
    cooldowns = [time.sleep(1) or throttler.acquire()[1]
                 for i in range(testIterations)]
    middle = len(cooldowns) // 2
    firstIteration, secondIteration = cooldowns[:middle], cooldowns[middle:]
    for a, b in zip(firstIteration, secondIteration):
        # Pairwise: the later half must show strictly larger (or both-zero) delays.
        if not a == b == 0:
            assert b > a
|
import numpy as np
import torch
try:
from isaacgym import gymapi
from isaacgym import gymutil
from isaacgym import gymtorch
except Exception:
print("ERROR: gym not loaded, this is okay when generating docs")
from .helpers import load_struct_from_dict
from .gym_wrapper import GymWrapper
class CartpoleWrapper(GymWrapper):
    """Isaac Gym wrapper for a cartpole asset: sets up the viewer camera,
    configures DOF drive modes, and applies effort control to the cart joint."""
    def __init__(self, gym_instance, asset_root='', sim_urdf='', asset_options='', init_state=None,
                 tensor_args={'device':torch.device('cpu'), 'dtype':torch.float32}, **kwargs):
        # NOTE(review): the dict default for tensor_args is a shared mutable
        # default -- safe only as long as callers never mutate it; consider
        # None + in-body fallback.
        super(CartpoleWrapper, self).__init__(gym_instance, asset_root, sim_urdf, asset_options,
                                              init_state, tensor_args, **kwargs)
        # Point the viewer camera at the cartpole.
        cam_pos = gymapi.Vec3(0, 10, 10)
        cam_target = gymapi.Vec3(0, 2, 0)
        self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
    def _init_assets(self, env_ptr, asset_ptr):
        """Configure DOF properties: DOF 0 (cart) is effort-driven, DOF 1 (pole) passive."""
        dof_props = self.gym.get_actor_dof_properties(env_ptr, asset_ptr)
        dof_props['driveMode'][0] = gymapi.DOF_MODE_EFFORT
        dof_props['driveMode'][1] = gymapi.DOF_MODE_NONE
        # Zero internal stiffness/damping: motion is driven purely by applied efforts.
        dof_props['stiffness'][:] = 0.0
        dof_props['damping'][:] = 0.0
        self.gym.set_actor_dof_properties(env_ptr, asset_ptr, dof_props)
        #self.gym.set_rigid_body_color(env_ptr, asset_ptr, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.9, 0.6, 0.2))
        #self.gym.set_rigid_body_color(env_ptr, asset_ptr, 1, gymapi.MESH_VISUAL, gymapi.Vec3(0.3, 0.5, 0.7))
        #self.gym.set_rigid_body_color(env_ptr, asset_ptr, 2, gymapi.MESH_VISUAL, gymapi.Vec3(0.1, 0.1, 0.3))
    def set_control(self, action, env_ids=torch.tensor([0])):
        """Apply effort `action` to the selected envs' DOFs.

        NOTE(review): the tensor default for env_ids is created once at import
        time (mutable-default pattern); passing env_ids=None explicitly selects
        ALL envs instead of the default env 0.
        """
        if env_ids is None:
            env_ids = torch.from_numpy(np.arange(self.num_envs))
        env_ids = env_ids.to(dtype=torch.long, device=self.device)
        action = action.to(**self.tensor_args)
        if action.shape[0] != env_ids.shape[0]*self.num_dof:
            # Per-env scalar actions: scatter onto each env's first DOF,
            # leaving the remaining DOFs with zero effort.
            actions_tensor = torch.zeros(env_ids.shape[0]*self.num_dof, **self.tensor_args)
            actions_tensor[::self.num_dof] = action.squeeze()
        else:
            actions_tensor = action
        env_ids_int32 = gymtorch.unwrap_tensor(env_ids.to(dtype=torch.int32))
        efforts = gymtorch.unwrap_tensor(actions_tensor)
        self.gym.set_dof_actuation_force_tensor_indexed(self.sim,
                                                        efforts,
                                                        env_ids_int32,
                                                        env_ids_int32.shape[0])
|
import torch
import torch.nn as nn
from torch.autograd import Variable
from data_loading import *
from rdkit import Chem
'''
the model
'''
class generative_model(nn.Module):
    """Character-level LSTM language model: embedding -> multi-layer LSTM -> linear logits."""

    def __init__(self, vocabs_size, hidden_size, output_size, embedding_dimension, n_layers):
        super(generative_model, self).__init__()
        self.vocabs_size = vocabs_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.embedding_dimension = embedding_dimension
        self.n_layers = n_layers
        self.embedding = nn.Embedding(vocabs_size, embedding_dimension)
        self.rnn = nn.LSTM(embedding_dimension, hidden_size, n_layers, dropout=0.2)
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, input, hidden):
        """One time step: `input` is a (batch,) tensor of token ids; returns (logits, new hidden)."""
        batch_size = input.size(0)
        input = self.embedding(input)
        # nn.LSTM expects (seq_len, batch, features); we feed a single step.
        output, hidden = self.rnn(input.view(1, batch_size, -1), hidden)
        output = self.linear(output.view(batch_size, -1))
        return output, hidden

    def init_hidden(self, batch_size):
        """Zero-initialised (h0, c0). torch.autograd.Variable is deprecated (a no-op
        since PyTorch 0.4), so plain tensors are returned instead."""
        return (torch.zeros(self.n_layers, batch_size, self.hidden_size),
                torch.zeros(self.n_layers, batch_size, self.hidden_size))
# Training vocabulary and SMILES corpus; `data` becomes a set so smiles_in_db()
# gets O(1) membership tests.
data,vocabs=load_data()
data = set(list(data))
vocabs = list(vocabs)
vocabs_size = len(vocabs)
output_size = len(vocabs)
batch_size = 128
cuda = True
hidden_size = 1024
embedding_dimension = 248
n_layers=3
# A space terminates a generated SMILES sample.
end_token = ' '
# Rebuild the architecture, load trained weights from disk, then switch to
# inference mode (model.eval() disables the LSTM dropout).
model = generative_model(vocabs_size,hidden_size,output_size,embedding_dimension,n_layers)
model.load_state_dict(torch.load('mytraining.pt'))
if cuda:
    model = model.cuda()
model.eval()
def evaluate(prime_str='!', temperature=0.4):
    """Sample one string from the trained model, starting from `prime_str`.

    Sampling stops at `end_token` (space) or after max_length characters.
    Lower temperature makes sampling greedier.
    """
    max_length = 200
    inp = Variable(tensor_from_chars_list(prime_str, vocabs, cuda))
    if cuda:
        # Honour the module-level cuda flag instead of calling .cuda() unconditionally,
        # which crashed when cuda was False.
        inp = inp.cuda()
    batch_size = inp.size(0)
    hidden = model.init_hidden(batch_size)
    if cuda:
        hidden = (hidden[0].cuda(), hidden[1].cuda())
    predicted = prime_str
    while True:
        output, hidden = model(inp, hidden)
        # Sample from the network as a multinomial distribution over characters,
        # with logits scaled by the temperature.
        output_dist = output.data.view(-1).div(temperature).exp()
        top_i = torch.multinomial(output_dist, 1)[0]
        predicted_char = vocabs[top_i]
        # Stop on the end token or when the string is long enough.
        if predicted_char == end_token or len(predicted) > max_length:
            return predicted
        predicted += predicted_char
        inp = Variable(tensor_from_chars_list(predicted_char, vocabs, cuda))
        if cuda:
            inp = inp.cuda()
def valid_smile(smile):
    """A SMILES string is valid iff RDKit can parse it into a molecule."""
    mol = Chem.MolFromSmiles(smile)
    return mol is not None
def get_canonical_smile(smile):
    """Round-trip through RDKit to obtain the canonical SMILES form."""
    mol = Chem.MolFromSmiles(smile)
    return Chem.MolToSmiles(mol)
def valid_smiles_at_temp(temp):
    """Fraction of 100 sampled strings that parse as valid SMILES; prints each valid one."""
    range_test = 100
    valid_count = 0
    for _ in range(range_test):
        # Drop the leading '!' prime character from the sample.
        sample = evaluate(prime_str='!', temperature=temp)[1:]
        if valid_smile(sample):
            print(sample)
            valid_count += 1
    return float(valid_count) / range_test
def smiles_in_db(smile):
    """True if the canonicalised SMILES, framed as stored ('!' prefix, ' ' suffix),
    appears in the training data set."""
    smile = '!' + get_canonical_smile(smile) + ' '
    # Return the membership test directly instead of if/return True/return False.
    return smile in data
def percentage_variety_of_valid_at_temp(temp):
    """Among 100 samples, the fraction of valid SMILES that are NOT in the
    training set (novelty ratio)."""
    range_test = 100
    c_v = 0
    c_nd = 0
    for _ in range(range_test):
        s = evaluate(prime_str='!', temperature=temp)[1:]  # remove the first character '!'
        if valid_smile(s):
            c_v += 1
            if not smiles_in_db(s):
                c_nd += 1
    # Guard the division: with zero valid samples the novelty ratio is 0.0,
    # not a ZeroDivisionError.
    if c_v == 0:
        return 0.0
    return float(c_nd) / c_v
|
import os
CONFIG_FOLDER_PATH = os.path.dirname(__file__)
|
from django.conf import settings
def display_username(user: settings.AUTH_USER_MODEL) -> str:
    """Render a user as an '@username' handle."""
    return "@" + user.username
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import unittest
from io import StringIO
from unittest.mock import MagicMock, patch
import mycroft.configuration
import mycroft.stt
from mycroft.client.speech.listener import RecognizerLoop
from mycroft.util.log import LOG
from ovos_stt_plugin_vosk import VoskKaldiSTT
from test.util import base_config
class TestSTT(unittest.TestCase):
    """Tests for the STT factory and the fallback-STT plumbing in RecognizerLoop."""

    def test_factory(self):
        """STTFactory accepts both a bare stt config and one nested under 'stt'."""
        config = {'module': 'mycroft',
                  'mycroft': {'uri': 'https://test.com'}}
        stt = mycroft.stt.STTFactory.create(config)
        self.assertEqual(type(stt), mycroft.stt.MycroftSTT)
        # Same config nested under the 'stt' key must work identically.
        config = {'stt': config}
        stt = mycroft.stt.STTFactory.create(config)
        self.assertEqual(type(stt), mycroft.stt.MycroftSTT)

    @patch.object(mycroft.configuration.Configuration, 'get')
    def test_factory_from_config(self, mock_get):
        """With no explicit argument, the factory reads the module from Configuration.get()."""
        # Stub out the remote API so no network calls are made.
        mycroft.stt.STTApi = MagicMock()
        config = base_config()
        config.merge(
            {
                'stt': {
                    'module': 'mycroft',
                    "fallback_module": "ovos-stt-plugin-vosk",
                    'mycroft': {'uri': 'https://test.com'}
                },
                'lang': 'en-US'
            })
        mock_get.return_value = config
        stt = mycroft.stt.STTFactory.create()
        self.assertEqual(type(stt), mycroft.stt.MycroftSTT)

    @patch.object(mycroft.configuration.Configuration, 'get')
    def test_mycroft_stt(self, mock_get):
        """MycroftSTT.execute() delegates recognition to STTApi."""
        mycroft.stt.STTApi = MagicMock()
        config = base_config()
        config.merge(
            {
                'stt': {
                    'module': 'mycroft',
                    'mycroft': {'uri': 'https://test.com'}
                },
                'lang': 'en-US'
            })
        mock_get.return_value = config
        stt = mycroft.stt.MycroftSTT()
        audio = MagicMock()
        stt.execute(audio, 'en-us')
        self.assertTrue(mycroft.stt.STTApi.called)

    @patch.object(mycroft.configuration.Configuration, 'get')
    def test_fallback_stt(self, mock_get):
        """A configured fallback_module resolves to the matching plugin class."""
        config = base_config()
        config.merge(
            {
                'stt': {
                    'module': 'mycroft',
                    "fallback_module": "ovos-stt-plugin-vosk",
                    'mycroft': {'uri': 'https://test.com'}
                },
                'lang': 'en-US'
            })
        mock_get.return_value = config
        # check class matches
        fallback_stt = RecognizerLoop.get_fallback_stt()
        self.assertEqual(fallback_stt, VoskKaldiSTT)

    @patch.object(mycroft.configuration.Configuration, 'get')
    @patch.object(LOG, 'error')
    @patch.object(LOG, 'warning')
    def test_invalid_fallback_stt(self, mock_warn, mock_error, mock_get):
        """An unknown fallback plugin yields None plus a warning and an error log."""
        config = base_config()
        config.merge(
            {
                'stt': {
                    'module': 'mycroft',
                    'fallback_module': 'invalid',
                    'mycroft': {'uri': 'https://test.com'}
                },
                'lang': 'en-US'
            })
        mock_get.return_value = config
        fallback_stt = RecognizerLoop.get_fallback_stt()
        self.assertIsNone(fallback_stt)
        mock_warn.assert_called_with("Could not find plugin: invalid")
        mock_error.assert_called_with("Failed to create fallback STT")

    @patch.object(mycroft.configuration.Configuration, 'get')
    @patch.object(LOG, 'error')
    @patch.object(LOG, 'warning')
    def test_fallback_stt_not_set(self, mock_warn, mock_error, mock_get):
        """fallback_module=None yields None plus a 'not configured' warning."""
        config = base_config()
        config.merge(
            {
                'stt': {
                    'module': 'mycroft',
                    'fallback_module': None,
                    'mycroft': {'uri': 'https://test.com'}
                },
                'lang': 'en-US'
            })
        mock_get.return_value = config
        fallback_stt = RecognizerLoop.get_fallback_stt()
        self.assertIsNone(fallback_stt)
        mock_warn.assert_called_with("No fallback STT configured")
        mock_error.assert_called_with("Failed to create fallback STT")
|
import json
import re
import os
import requests
import traceback
from datetime import datetime
from hashlib import md5
from pathlib import Path
from queue import Queue
from random import randint
from subprocess import call, PIPE
from threading import Thread, Lock
from time import sleep
from time import time
from types import SimpleNamespace
from typing import List, Tuple, Optional, Dict, Union, Callable, Any, Set, Iterable
from uuid import uuid4 as uuid
from dotenv import load_dotenv
from flask import Flask, Response, render_template, redirect, url_for, request
from routeros_api import RouterOsApiPool
from routeros_api.api import RouterOsApi
from gevent.pywsgi import WSGIServer
from flask_httpauth import HTTPBasicAuth
# Notifications are optional: degrade to a no-op when the module is absent.
try:
    import notification as notification_module
except ImportError:
    notification_module = None
# Load settings from a .env file into the process environment.
load_dotenv()
# (ip, comment, is_bound) tuples for DHCP clients.
CachedRequestActiveClientsCache = List[Tuple[str, Optional[str], bool]]
# ip -> (download, upload) rates as produced by get_net_usage_by_ip.
CachedRequestNetUsageByIPCache = Dict[str, Tuple[int, int]]
# (name, target, download, upload, expiry-timestamp-or-None) per managed queue.
RequestLimits = List[Tuple[str, str, float, float, Optional[int]]]
class CachedRequest(SimpleNamespace):
    """One throttled cache slot: the last query result plus refresh/backoff state."""
    # Most recent result served to API clients.
    cache: Union[CachedRequestActiveClientsCache, CachedRequestNetUsageByIPCache]
    # Unix time before which no new router query should be started.
    nextRequestTime: float
    # Current adaptive backoff delay between refreshes, in seconds.
    nextRequestDelay: float
    # Held by the background thread currently refreshing this entry.
    lock: Lock
# Shared throttled caches, one per expensive router query.
CACHE: Dict[str, CachedRequest] = {
    'clients': CachedRequest(
        cache=[],
        nextRequestTime=0.0,
        nextRequestDelay=0.0,
        lock=Lock()
    ),
    'net-usage-by-ip': CachedRequest(
        cache={},
        nextRequestTime=0.0,
        nextRequestDelay=0.0,
        lock=Lock()
    ),
}
app = Flask(__name__, static_folder='static', template_folder='html')
auth = HTTPBasicAuth()
# Router / network settings pulled from the environment (.env).
ROUTER_ADDRESS = os.getenv('ROUTER_ADDRESS')
LOCAL_NETWORK = os.getenv('LOCAL_NETWORK')
WEB_PORT = os.getenv('WEB_UI_PORT')
DoH_SERVER = os.getenv('AUTO_DoH_SERVER')
DNS_TRUSTED_SERVERS = os.getenv('TRUSTED_DNS_SERVERS')
# Optional log sinks; None disables the corresponding file logging.
FILE_ROUTER_LOG = Path(os.getenv('ROUTER_LOG')) if os.getenv('ROUTER_LOG') is not None else None
LOCK_ROUTER_LOG = Lock()
FILE_SELF_LOG = Path(os.getenv('LOG')) if os.getenv('LOG') is not None else None
DNS_MONITOR_DOMAINS_FILE = os.getenv('DNS_MONITOR_DOMAINS_FILE')
# Web UI basic-auth credentials; None means the check is not enforced.
UI_USER: Optional[str] = os.getenv('UI_USER')
UI_PASSWORD: Optional[str] = os.getenv('UI_PASSWORD')
# Lines queued by log() for the file-writer thread.
SELF_LOG_QUEUE = Queue(maxsize=2048)
def rt(data: Any) -> Response:
    """Serialise `data` as a JSON HTTP response.

    The annotation previously used the builtin ``any`` function instead of
    ``typing.Any`` (already imported at the top of this file).
    """
    return Response(json.dumps(data), mimetype='application/json')
def get_login_credentials() -> Optional[Tuple[str, str]]:
    """Read router credentials from the environment; None when either is missing/empty."""
    username = os.getenv('ROUTER_USER')
    password = os.getenv('ROUTER_PASSWORD')
    if username and password:
        return username, password
    return None
def get_api() -> Tuple[RouterOsApi, RouterOsApiPool]:
    """Open a RouterOS API session; the caller must disconnect() the returned pool."""
    username, password = get_login_credentials()
    pool = RouterOsApiPool(
        ROUTER_ADDRESS,
        username=username,
        password=password,
        use_ssl=True,
        ssl_verify=False,
        plaintext_login=True,
    )
    return pool.get_api(), pool
def retry_on_error(f: Callable) -> Callable:
    """Decorator: keep calling `f` until it succeeds, logging and sleeping 60s on failure.

    Generalised to forward positional/keyword arguments (the original wrapper
    took none) and to preserve the wrapped function's metadata via functools.wraps.
    """
    from functools import wraps

    @wraps(f)
    def wrapper(*args, **kwargs) -> Any:
        while True:
            # noinspection PyBroadException
            try:
                return f(*args, **kwargs)
            except Exception:
                exc = traceback.format_exc()
                log('[ERROR] Retrying')
                log('[TRACEBACK]', exc.replace('\n', '\n '))
                sleep(60)
    return wrapper
def ping(host: str) -> bool:
    """True when `ping -c 3 host` succeeds within a 5-minute budget."""
    from subprocess import TimeoutExpired
    try:
        return call(['ping', '-c', '3', host], timeout=300, stdout=PIPE, stdin=PIPE, stderr=PIPE) == 0
    except (TimeoutExpired, TimeoutError):
        # subprocess raises TimeoutExpired (a SubprocessError), NOT the builtin
        # TimeoutError, so the original `except TimeoutError` never fired and a
        # timeout crashed the caller instead of returning False.
        return False
def is_dns_healthy() -> bool:
    """Heuristic DNS health check.

    Healthy when either (a) raw-IP pings to 1.1.1.1 and 8.8.8.8 both fail —
    i.e. the uplink itself is down, so DNS cannot be blamed — or (b) a random,
    never-cached hostname under local.devmonthor.eu resolves and answers ping.
    """
    return (not ping("1.1.1.1") and not ping("8.8.8.8")) or ping(f"{uuid().hex}.local.devmonthor.eu")
def set_doh_enabled(enabled: bool, reset_after: Optional[int] = None) -> None:
    """
    Enables/disables DoH
    :param enabled: True if DoH should be enabled
    :param reset_after: if not None, then opposite state is set after given seconds
    :return: None
    """
    api, conn = get_api()
    if enabled:
        # Route DNS through the configured DoH server with certificate checks on.
        api.get_resource('/ip/dns').call('set', arguments={
            'use-doh-server': DoH_SERVER,
            'verify-doh-cert': 'yes',
            'servers': '' if DNS_TRUSTED_SERVERS is None else DNS_TRUSTED_SERVERS
        })
    else:
        # Fall back to plain DNS; public resolvers unless trusted ones are configured.
        api.get_resource('/ip/dns').call('set', arguments={
            'use-doh-server': '',
            'servers': '1.1.1.1,1.0.0.1,8.8.8.8,8.4.4.8' if DNS_TRUSTED_SERVERS is None else DNS_TRUSTED_SERVERS
        })
    conn.disconnect()
    if reset_after is not None:
        # Schedule the opposite state on a background thread.
        def reset() -> None:
            sleep(reset_after)
            set_doh_enabled(not enabled)
        Thread(target=reset).start()
def limit_get_names() -> Iterable[str]:
    """Names of all simple queues configured on the router.

    Returns an eager list: the original returned a lazy ``map`` object, which
    is single-use and silently empty on any second iteration.
    """
    api, conn = get_api()
    names = [queue['name'] for queue in api.get_resource('/queue/simple').get()]
    conn.disconnect()
    return names
def limit_remove(name: str) -> None:
    """Delete the simple queue whose name matches exactly."""
    api, conn = get_api()
    queue_resource = api.get_resource('/queue/simple')
    matching = queue_resource.get(name=name)
    queue_resource.remove(id=matching[0]['id'])
    conn.disconnect()
def limit_add(name: str, target: str, upload: float, download: float) -> None:
    """
    :param name: name of the queue
    :param target: IP address, /32 is added
    :param upload: in MiB
    :param download: in MiB
    :return: None
    """
    for existing_limit_name in limit_get_names():
        # Match on "_{target}_" (note the trailing underscore): a bare
        # "_{target}" prefix also matched longer addresses, e.g. a limit for
        # 10.0.0.1 would remove the one for 10.0.0.10.
        if existing_limit_name == name or existing_limit_name.startswith(f"_{target}_"):
            limit_remove(existing_limit_name)
            break
    api, conn = get_api()
    api.get_resource('/queue/simple').call('add', arguments={
        'name': name,
        'target': f"{target}/32" if target != "EVERYONE" else LOCAL_NETWORK,
        'max-limit': "%.2fM/%.2fM" % (upload * 8, download * 8)
    })
    conn.disconnect()
def limits_fetch() -> RequestLimits:
    """Parse managed queues (named "_{target}_{ttl}") into
    (name, target, download, upload, expiry) tuples."""
    api, conn = get_api()
    r: RequestLimits = []
    for limit in api.get_resource('/queue/simple').get():
        name: str = limit.get('name')
        # Only queues created by this app (leading underscore) are managed.
        if not name.startswith('_'):
            continue
        _, target, timeout = name.split('_')
        # max-limit is "<upload>/<download>"; presumably bits/s as written by
        # limit_add — the /8000000 converts back to the MiB-ish input units.
        # TODO confirm units against the router.
        upload, download = limit.get('max-limit').split('/')
        # 'EVER' encodes "no expiry".
        timeout = int(timeout) if timeout != 'EVER' else None
        r.append((name, str(target), int(download) / 8000000, int(upload) / 8000000, timeout))
    conn.disconnect()
    return r
def log(*args) -> None:
    """Print a timestamped log line and queue it for the file-writer thread.

    The queue put is non-blocking: when no writer thread drains the queue
    (e.g. the LOG env var is unset, so thread_write_log returns immediately),
    a blocking put would eventually fill the 2048-slot queue and deadlock
    every caller of log().
    """
    from queue import Full
    date = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    line = ' '.join([str(x) for x in args])
    # Indent continuation lines so multi-line messages align under the timestamp.
    line_offset = " " * (len(date) + 1)
    line = line.replace('\n', '\n' + line_offset)
    line = f"{date}: {line}"
    print(line)
    try:
        SELF_LOG_QUEUE.put_nowait(line)
    except Full:
        # Drop the line for the file log rather than block; stdout already has it.
        pass
@retry_on_error
def get_updates_available() -> bool:
    """True when the router reports that a package update is available."""
    api, conn = get_api()
    result = api.get_resource('/system/package/update').call('check-for-updates')
    conn.disconnect()
    status = result[-1]['status'].lower()
    return 'available' in status
@retry_on_error
def get_log() -> List[Dict[str, str]]:
    """Fetch the router's log as a list of record dicts."""
    api, conn = get_api()
    records = api.get_resource('/log').get()
    conn.disconnect()
    return records
@retry_on_error
def get_sniffer_running() -> bool:
    """True when the router's packet sniffer tool is currently running."""
    api, conn = get_api()
    state = api.get_resource('/tool/sniffer').get()[0]['running']
    conn.disconnect()
    return state == 'true'
@retry_on_error
def get_clients() -> CachedRequestActiveClientsCache:
    """DHCP leases as (ip, comment, is_bound) tuples, skipping the router's own
    address and disabled leases."""
    api, conn = get_api()
    leases = api.get_resource('/ip/dhcp-server/lease').get()
    conn.disconnect()
    clients: List[Tuple[str, Optional[str], bool]] = []
    for lease in leases:
        if lease.get('address', '') == ROUTER_ADDRESS:
            continue
        if lease.get('disabled', 'false') == 'true':
            continue
        clients.append((lease.get('address'), lease.get('comment'), lease.get('status', '') == 'bound'))
    return clients
@retry_on_error
def get_net_usage_by_ip() -> CachedRequestNetUsageByIPCache:
    """Per-IP (download, upload) rates sampled from the router's packet sniffer.

    Values are derived from the sniffer's 'rate' field divided by 8*1024 —
    presumably bits/s converted to KiB/s; confirm against RouterOS docs.
    """
    api, conn = get_api()
    ip_speed: Dict[str, Tuple[int, int]] = {}
    # The sniffer must be running for per-host rate data to exist.
    if not get_sniffer_running():
        api.get_resource('/tool/sniffer').call('start')
    packets = api.get_resource('/tool/sniffer/host').get()
    conn.disconnect()
    for packet in packets:
        ip_from: str = packet.get('address', '')
        # 'rate' comes as "<rx>/<tx>".
        speed: Tuple[str, str] = tuple(packet.get('rate', '0/0').split('/'))
        # Only local 10.x addresses are of interest.
        if not ip_from.startswith('10.'):
            continue
        speed_down, speed_up = int(speed[0]) // (8 * 1024), int(speed[1]) // (8 * 1024)
        # Skip idle hosts.
        if speed_up + speed_down == 0:
            continue
        ip_speed[ip_from] = (speed_down, speed_up)
    router_ip = ROUTER_ADDRESS
    server_ip = os.getenv('LOCAL_ADDRESS')
    if router_ip in ip_speed and server_ip in ip_speed:
        # Traffic between this server and the router is counted on both sides;
        # subtract the mirrored portion. NOTE(review): the down/up cross-swap
        # (router's upload offsets the server's download) looks deliberate —
        # confirm intent before changing.
        router_down, router_up = ip_speed[router_ip]
        server_down, server_up = ip_speed[server_ip]
        ip_speed[server_ip] = (max(0, server_down - router_up), max(0, server_up - router_down))
        ip_speed[router_ip] = (max(0, router_up - server_down), max(0, router_down - server_up))
        for ip in (server_ip, router_ip):
            # Drop entries that became all-zero after the correction.
            if sum(ip_speed[ip]) <= 0:
                del ip_speed[ip]
    return ip_speed
@auth.verify_password
def verify_password(username: str, password: str):
    """Accept the login when every configured credential (if any) matches."""
    user_ok = UI_USER is None or username == UI_USER
    password_ok = UI_PASSWORD is None or password == UI_PASSWORD
    return user_ok and password_ok
@app.route('/')
@auth.login_required
def web_root() -> str:
    """Render the single-page UI, passing the router address for display/links."""
    return render_template('index.html', router_address=ROUTER_ADDRESS)
@app.route('/api/clients')
@auth.login_required
def api_clients() -> Response:
    """Serve the cached DHCP client list; refresh it in the background with
    adaptive, jittered throttling."""
    entry = CACHE['clients']
    time_to_next_request = entry.nextRequestTime - time()
    lock: Lock = entry.lock
    # Refresh only when due AND no other refresh is already in flight.
    if time_to_next_request < 0 and lock.acquire(blocking=False):
        if entry.nextRequestTime and time_to_next_request < -5 * 60:
            # Nobody polled for >5 minutes: reset the backoff.
            entry.nextRequestDelay = 30
        else:
            # Active polling: grow the delay gradually (with jitter), capped at 60s.
            entry.nextRequestDelay = min(entry.nextRequestDelay + 0.2 + randint(0, 10) / 10, 60)
        entry.nextRequestTime = int(time()) + entry.nextRequestDelay
        def job():
            try:
                entry.cache = get_clients()
            finally:
                # Always release, even if the router query raised.
                lock.release()
        Thread(target=job, daemon=True).start()
    # Answer immediately from the (possibly stale) cache.
    return rt(entry.cache)
@app.route('/api/net-usage-by-ip')
@auth.login_required
def api_net_usage_by_ip() -> Response:
    """Serve the cached per-IP usage map; refresh it in the background with
    adaptive, jittered throttling (same pattern as api_clients, tighter timing)."""
    entry = CACHE['net-usage-by-ip']
    time_to_next_request = entry.nextRequestTime - time()
    lock: Lock = entry.lock
    # Refresh only when due AND no other refresh is already in flight.
    if time_to_next_request < 0 and lock.acquire(blocking=False):
        if entry.nextRequestTime and time_to_next_request < -5 * 60:
            # Nobody polled for >5 minutes: reset the backoff.
            entry.nextRequestDelay = 5
        else:
            # Active polling: grow the delay gradually (with jitter), capped at 30s.
            entry.nextRequestDelay = min(entry.nextRequestDelay + 0.5 + randint(0, 20) / 10, 30)
        entry.nextRequestTime = int(time()) + entry.nextRequestDelay
        def job():
            try:
                entry.cache = get_net_usage_by_ip()
            finally:
                # Always release, even if the router query raised.
                lock.release()
        Thread(target=job, daemon=True).start()
    # Answer immediately from the (possibly stale) cache.
    return rt(entry.cache)
@app.route('/api/new-limit', methods=['POST'])
@auth.login_required
def api_new_limit() -> Response:
    """Create a bandwidth limit for a target IP (or EVERYONE), optionally
    expiring at a form-supplied date and/or time."""
    target = request.form.get('target')
    if not target:
        return rt({'error': 'No target specified'})
    # Clamp rates to a 0.1 minimum. NOTE(review): missing or non-numeric
    # 'upload'/'download' fields raise here and surface as a 500 — confirm acceptable.
    upload = max(float(request.form.get('upload')), 0.1)
    download = max(float(request.form.get('download')), 0.1)
    until_date = request.form.get('date')
    until_time = request.form.get('time')
    # Encode the expiry into the queue name: 'EVER' = never, else a unix timestamp.
    if not until_date and not until_time:
        ttl = 'EVER'
    elif not until_time and until_date:
        ttl = str(int(datetime.strptime(until_date, '%Y-%m-%d').timestamp()))
    elif until_time and not until_date:
        # Time-only: relative to today's midnight. NOTE(review):
        # .replace(hour=0, minute=0) keeps current seconds/microseconds, so the
        # timestamp is off by up to a minute — confirm whether that matters.
        hours, minutes = until_time.split(':')
        ttl = str(int(datetime.now().replace(hour=0, minute=0).timestamp() + (int(hours) * 3600) + (int(minutes) * 60)))
    else:
        ttl = str(int(datetime.strptime(f"{until_date} {until_time}", '%Y-%m-%d %H:%M').timestamp()))
    limit_add(f"_{target}_{ttl}", target, upload, download)
    return redirect(request.referrer if request.referrer else url_for('web_root'))
@app.route('/api/limit-remove', methods=['POST'])
@auth.login_required
def api_limit_remove() -> Response:
    """Remove a named traffic limit, then bounce back to the referring page."""
    name = request.form.get('name')
    if not name:
        # `assert` is stripped under `python -O`, so it must not guard input;
        # respond with an explicit error (same style as api_new_limit).
        return rt({'error': 'No name specified'})
    limit_remove(name)
    return redirect(request.referrer if request.referrer else url_for('web_root'))
@app.route('/api/limits')
@auth.login_required
def api_limits() -> Response:
    """List the currently managed traffic limits (fetched live from the router)."""
    return rt(limits_fetch())
def send_notification(msg: str) -> bool:
    """Forward `msg` to the optional notification module; False when it is unavailable."""
    if notification_module is not None:
        return notification_module.send_notification(msg)
    return False
@retry_on_error
def thread_stop_sniffer() -> None:
    """Background loop: stop the packet sniffer once nobody has requested
    net-usage data for 10+ minutes (saves router CPU)."""
    while True:
        if CACHE['net-usage-by-ip'].nextRequestTime > 0 and \
                CACHE['net-usage-by-ip'].nextRequestTime - time() < -600 and get_sniffer_running():
            api, conn = get_api()
            api.get_resource('/tool/sniffer').call('stop')
            conn.disconnect()
        sleep((5 + randint(0, 10)) * 60)
@retry_on_error
def thread_check_updates() -> None:
    """Background loop: notify when router package updates are available
    (checked roughly daily, with jitter)."""
    while True:
        if get_updates_available():
            send_notification('Router updates available')
        sleep((24 + randint(0, 24)) * 3600)
@retry_on_error
def thread_notif_logged_errors() -> None:
    """Background loop: mirror router log records to a file and notify on new
    error entries.

    Deduplicates via an md5 over (message + time-of-day); the first pass only
    seeds the seen-set so pre-existing errors do not re-alert on startup.
    """
    message_hashes: Set[str] = set()
    first_load = True
    while True:
        message_hashes_curr: Set[str] = set()
        for rec in get_log():
            rec_time: str = rec.get('time', '')
            rec_message: str = rec.get('message', '')
            # Strip the date part (before the space) so the hash keys on
            # time-of-day + message only.
            rec_hash_input = (rec_message + (rec_time if ' ' not in rec_time else rec_time.split(' ', 1)[1]))
            rec_hash: str = md5(rec_hash_input.encode('utf8')).hexdigest()
            # Router record ids look like '*1A2B'; parse the hex part after '*'.
            rec_id = int(rec.get('id', '*-1')[1:], 16)
            message_hashes_curr.add(rec_hash)
            # Skip records already seen, and everything on the seeding pass.
            if rec_hash in message_hashes or first_load:
                continue
            topics: List[str] = rec.get('topics', '').split(',')
            if FILE_ROUTER_LOG:
                # Append the raw record as a JSON line to the router log file.
                with LOCK_ROUTER_LOG:
                    try:
                        with FILE_ROUTER_LOG.open('a') as f:
                            rec_log_data = {'timestamp': int(time())}
                            rec_log_data.update(rec)
                            f.write(json.dumps(rec_log_data) + '\n')
                    except PermissionError:
                        log('[FATAL] [LOG] cannot write log to a file')
            if 'error' not in topics:
                continue
            if 'DoH server connection error: ' in rec_message:
                # disable DoH until it works again
                log("[DoH]", f"error in log - {rec_message}")
                set_doh_enabled(False, 5 * 60 + randint(0, 120))
                continue
            message = f"Router error {rec_id} @ {rec_time}: {rec_message}"
            log("[LOG]", message)
            send_notification(message)
        message_hashes = message_hashes_curr
        first_load = False
        sleep(600 + randint(0, 600))
@retry_on_error
def thread_test_dns() -> None:
    """Background loop: when DNS looks broken, fall back to plain DNS; once
    healthy again, re-enable DoH."""
    while True:
        if not is_dns_healthy():
            log('[DNS HEALTH] Restoring DNS')
            set_doh_enabled(False)
            # Give the fallback resolvers time to take effect.
            sleep(5 * 60)
            if not is_dns_healthy():
                # Still broken — leave DoH off and re-check soon.
                sleep(30)
                continue
            set_doh_enabled(True)
        sleep(30)
@retry_on_error
def thread_check_cpu() -> None:
    """Background loop: scrape the router's CPU graph page and notify when
    current usage exceeds 65%."""
    while True:
        # noinspection HttpUrlsUsage
        html = requests.get(f'http://{ROUTER_ADDRESS}/graphs/cpu/', timeout=60).text
        # Pull the "Current: N%" figure out of the graph page.
        for r in re.finditer(r'Max:\s+[0-9]+%;\s+Average:\s+[0-9]+%;\s+Current:\s+([0-9]+)%', html, re.I):
            current_usage = int(r.group(1))
            if current_usage > 65:
                msg = f"High router CPU usage ({current_usage}%)"
                log("[CPU]", msg)
                send_notification(msg)
            # Only the first match on the page matters.
            break
        sleep(5 * 60 + randint(30, 50))
@retry_on_error
def thread_remove_old_limits() -> None:
    """Background loop: delete traffic limits whose expiry timestamp has passed."""
    while True:
        # Collect expired names first, then remove, so we never mutate while iterating.
        expired = [limit[0] for limit in limits_fetch() if limit[4] and limit[4] < time()]
        for limit_name in expired:
            limit_remove(limit_name)
        sleep(60 + randint(30, 50))
@retry_on_error
def thread_write_log() -> None:
    """Background loop: drain SELF_LOG_QUEUE into the configured log file.

    A no-op (returns immediately) when the LOG env var is unset.
    """
    if not FILE_SELF_LOG:
        return
    while True:
        line = SELF_LOG_QUEUE.get()
        try:
            with FILE_SELF_LOG.open('a') as f:
                f.write(line + '\n')
        except PermissionError:
            # print() directly: calling log() here would feed back into this queue.
            print(f'[LOG] Fatal: Cannot access log file "{FILE_SELF_LOG}"')
@retry_on_error
def thread_monitor_dns() -> None:
    """Background loop: watch the router's DNS cache for blocklisted domains and
    notify once per sighting streak (no repeat alert while a domain stays cached)."""
    if DNS_MONITOR_DOMAINS_FILE is None:
        return
    file_bad_domains = Path(DNS_MONITOR_DOMAINS_FILE)
    # One domain (substring) per line in the blocklist file.
    filtered_bad_domains: Set[str] = set()
    with file_bad_domains.open('r') as f:
        filtered_bad_domains.update(map(lambda x: x.strip(), f.readlines()))
    seen_bad_domains_last: Set[str] = set()
    while True:
        api, conn = get_api()
        cache = api.get_resource('/ip/dns/cache').get()
        seen_bad_domains_now: Set[str] = set()
        for record in cache:
            name: str = record['name']
            data: str = record['data']
            for bad_domain in filtered_bad_domains:
                if bad_domain in name or bad_domain in data:
                    seen_bad_domains_now.add(bad_domain)
                    # Only alert on domains NOT seen in the previous sweep.
                    if bad_domain in seen_bad_domains_last:
                        continue
                    message = f"[DNS MONITOR] Bad domain accessed '{name}' -> '{data}'"
                    log(message)
                    send_notification(message)
        conn.disconnect()
        seen_bad_domains_last = seen_bad_domains_now
        sleep(5 * 60 + randint(0, 280))
def main() -> int:
    """Validate configuration, start all background workers, then serve the web
    UI (blocking). Returns a process exit status."""
    log("[MAIN] starting up")
    if not get_login_credentials():
        log("[MAIN] Error: Login credentials are missing!")
        return 1
    if not WEB_PORT or not LOCAL_NETWORK or not ROUTER_ADDRESS:
        log("[MAIN] Error: Some required settings are missing")
        return 1
    # All workers are daemon threads: they die together with the main thread.
    Thread(target=thread_notif_logged_errors, daemon=True).start()
    Thread(target=thread_check_updates, daemon=True).start()
    Thread(target=thread_stop_sniffer, daemon=True).start()
    Thread(target=thread_check_cpu, daemon=True).start()
    Thread(target=thread_write_log, daemon=True).start()
    Thread(target=thread_remove_old_limits, daemon=True).start()
    Thread(target=thread_monitor_dns, daemon=True).start()
    if DoH_SERVER is not None:
        # DNS-over-HTTPS management only runs when a DoH server is configured.
        set_doh_enabled(True)
        Thread(target=thread_test_dns, daemon=True).start()
    log(f"[MAIN] Starting web server @ http://127.0.0.1:{WEB_PORT}")
    http_server = WSGIServer(('', int(WEB_PORT)), app)
    try:
        http_server.serve_forever()
    except KeyboardInterrupt:
        log("[MAIN] Shutting down")
    return 0
if __name__ == '__main__':
    # Propagate the service's exit status to the shell.
    exit(main())
|
import configparser
import os
import unittest
import json
from joplin_api import JoplinApi
from jong_toolkit.core import JongToolKitCollector, JongToolKitImporter
class TestStringMethods(unittest.TestCase):
    """Sanity checks for the settings.ini shipped next to this test module."""

    # Every key that must exist in the [JOPLIN_CONFIG] section. configparser
    # values are always str, which the original asserted key by key.
    EXPECTED_KEYS = (
        'JOPLIN_DEFAULT_TAG',
        'PYPANDOC_MARKDOWN',
        'JOPLIN_IMPORT_FOLDER',
        'JOPLIN_PROFILE_PATH',
        'JOPLIN_DEFAULT_FOLDER',
        'JOPLIN_WEBCLIPPER',
        'JOPLIN_WEBCLIPPER_TOKEN',
        'JOPLIN_BIN_PATH',
    )

    def setUp(self):
        """Load settings.ini from the folder containing this test module."""
        current_folder = os.path.dirname(__file__)
        self.config = configparser.ConfigParser()
        self.config.read(os.path.join(current_folder, 'settings.ini'))

    def test_config(self):
        """The JOPLIN_CONFIG section exists and contains every expected string key."""
        self.assertIn('JOPLIN_CONFIG', self.config)
        section = self.config['JOPLIN_CONFIG']
        # One subTest per key so a single missing key reports precisely.
        for key in self.EXPECTED_KEYS:
            with self.subTest(key=key):
                self.assertIn(key, section)
                self.assertIsInstance(section[key], str)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
#!/usr/bin/python3
# vim: set fileencoding=utf-8 :
# Version 1.1.1
from os import path, readlink, remove, walk
from re import escape, match, sub
from subprocess import call
from sys import argv, exit
from uuid import uuid4 as uuid
from hashlib import md5
# Parallel transfers need multiprocessing; degrade gracefully to serial mode.
try:
    from multiprocessing import Queue, Lock, Pool, cpu_count, current_process
    from time import sleep
    mp = True
except ModuleNotFoundError:
    print('No multiprocessing module found, parallel processing disabled.')
    mp = False
old_args = ''
# Wrong argument count: remember what was given and force the help text.
if not(len(argv) == 7) and not(len(argv) == 8):
    old_args = '\nYou gave:\n ' + ' '.join(argv)
    argv[1:] = ['-h']
if ('-h' in argv) or ('--help' in argv):
    print('This script is to be used in this way:')
    print(argv[0] + ' /source/name/space/path /source/path [user@]destination.server[:port]'
          ' /destination/name/space/path /destination/path user:group [number of threads]' + old_args)
    exit(0)
# Positional arguments: source namespace, source data dir, destination server,
# destination namespace, destination data dir, owner spec.
s_ns = argv[1]
src = argv[2]
# Unique run id: salts the ssh control-socket names so parallel runs don't collide.
src_id = md5((src + str(uuid())).encode('utf-8')).hexdigest()
d_server = argv[3]
d_ns = argv[4]
dest = argv[5]
fileog = argv[6]
dtransfers = 1
illegals = []
sync_dirs_only = ['--include=*/', '--exclude=*']
usermatch = r'[a-z][a-z0-9\\\-]+'
# Worker count: explicit 8th argument, else 2x CPUs, else serial.
if len(argv) == 8 and mp and match(r'^\d+$', str(argv[7])):
    mp_threads = int(argv[7])
elif mp:
    mp_threads = int(cpu_count() * 2)
else:
    mp_threads = 1
if mp_threads < 2:
    mp = False
if not match(usermatch + ':' + usermatch, fileog):
    exit(fileog + ' is not a valid user:group definition.')
# Split optional ":port" and "user@" parts off the destination server spec.
if ':' in list(d_server):
    d_server, port = d_server.split(':')
else:
    port = '22'
if '@' in list(d_server):
    user, d_server = d_server.split('@')
else:
    user = 'root'
def ssh(socket_id=src_id):
    """Base ssh command (argv list) using a persistent control-master socket."""
    control_path = '/dev/shm/.xrd-drain-' + socket_id + '.socket'
    return [
        '/usr/bin/ssh',
        '-o', 'ControlMaster=auto',
        '-o', 'ControlPath=' + control_path,
        '-o', 'ControlPersist=1200',
        '-o', 'Compression=no',
        '-x',
        '-T',
        '-p', port,
        '-l', user,
    ]
def sshf(socket_id_f=src_id):
    """The ssh command as a single space-joined string (for rsync -e)."""
    parts = ssh(socket_id_f)
    return ' '.join(parts) + ' '
class noop(object):
    """No-op context manager; stands in for a Lock when multiprocessing is unavailable."""

    def __init__(self, *args):
        pass

    def __enter__(self, *args):
        return None

    def __exit__(self, *args):
        # Returning None lets exceptions propagate, like a real lock's __exit__.
        return None
def cdnf(cdir):
    """Exit the script unless *cdir* is an existing directory."""
    if path.isdir(cdir):
        return
    exit(cdir + ' should be directory but it is not!')
def flatten(s):
    """Flatten arbitrarily nested lists into one flat list.

    Iterative implementation: the original recursed once per element
    (flatten(s[1:])), which is quadratic and exceeds Python's recursion
    limit on lists of roughly a thousand items.
    """
    flat = []
    stack = [iter(s)]
    while stack:
        for item in stack[-1]:
            if isinstance(item, list):
                # Descend into the nested list; resume the outer one afterwards.
                stack.append(iter(item))
                break
            flat.append(item)
        else:
            # Current level exhausted.
            stack.pop()
    return flat
def explode(filestring, start='/'):
    """Return every directory on the way from *filestring* down to (excluding) *start*.

    e.g. explode('/a/b/c', '/a') -> ['/a/b/c', '/a/b'].
    """
    members = []
    # escape() the prefix: it is a filesystem path, not a regex, so characters
    # like '.' or '+' in a directory name must match literally.
    stop_pattern = escape(start) + '/*$'
    while not match(stop_pattern, filestring + '/'):
        members.append(filestring)
        filestring = path.dirname(filestring)
    return members
def rds(string):
    """Collapse each '//' occurrence into a single '/' (one left-to-right pass)."""
    return string.replace('//', '/')
def rsync(cmd, rsocket_name=src_id):
    """Run rsync -a over the shared ssh control socket; returns the exit code.
    Pass only options/targets in *cmd*."""
    full_cmd = flatten(['/usr/bin/rsync', '-a', '-e', sshf(rsocket_name), cmd])
    return call(full_cmd)
def migrate(lin, fil, m_transfers, ilock, worker_id):
    """Migrate one (namespace link, data file) pair to the destination, then delete the source.

    lin: namespace link path; fil: data file path; m_transfers: transfer number
    (string, for progress output); ilock: lock serialising prints; worker_id:
    suffix for this worker's private ssh control socket.
    """
    # Create destination 'addresses'
    d_file = rds(sub(escape(src), dest + '/', fil))
    d_link = rds(sub(escape(s_ns), d_ns + '/', lin))
    cmd = [rds(fil), rds(d_server + ':/' + d_file)]
    d_filedir = path.dirname(d_file)
    d_linkdir = path.dirname(d_link)
    # get all dest dirs up to d_ns and dest
    d_linkdir_members = ' '.join(explode(d_linkdir, d_ns))
    d_filedir_members = ' '.join(explode(d_filedir, dest))
    if mp:
        # separate ssh socket for each worker
        socket_name = src_id + '-' + worker_id
        # Sleep during first 110% of mp_threads transfers up to ~10 seconds to not to overwhelm the destinations sshd
        if int(m_transfers) < (mp_threads + max((int(mp_threads/10)),1)):
            sleep(int(m_transfers)/max((int(mp_threads/10), 1)))
    else:
        socket_name = src_id
    # create directory structure on destination
    if call(flatten([ssh(socket_name), d_server, '/bin/mkdir -p ' + d_filedir + ' ' + d_linkdir])) == 0:
        # Rsync data file
        if rsync(cmd, socket_name) == 0:
            # Create link on destination and set owner:group
            if call(flatten([ssh(socket_name), d_server, '/bin/ln -sf ' + d_file + ' ' + d_link + ' && /bin/chown -h '
                             + fileog + ' ' + d_link + ' ' + d_filedir_members
                             + ' ' + d_linkdir_members])) == 0:
                # Remove source data
                remove(lin)
                remove(fil)
                with ilock:
                    print('Done migrating file %s: %s' % (m_transfers.rjust(10), lin))
            else:
                with ilock:
                    # Link failure
                    print('Failed to create link ' + d_server + ':' + d_link + ' or to set permissions! File: '
                          + m_transfers.rjust(10) + ': ' + lin)
        else:
            with ilock:
                # Data failure
                print('Failed to copy file: ' + fil + ' to: ' + d_server + ':' + d_file + '. File: '
                      + m_transfers.rjust(10) + ': ' + lin)
    else:
        with ilock:
            print('Failed to create directories: ' + d_filedir + ' and/or ' + d_linkdir + '. File: '
                  + m_transfers.rjust(10) + ': ' + lin)
def mp_process(mp_queue, mp_iolock):
    """Worker loop: pull (link, file, number) jobs from the queue and migrate them.

    A (None, None, None) tuple is the shutdown sentinel.
    """
    # Zero-padded worker index from the process name -- assumes Pool worker
    # names of the form "...Worker-N" (TODO confirm across start methods).
    worker_id = '{0:0>4}'.format(int(current_process().name.split('-')[1]))
    while True:
        mp_link, mp_file, mp_number = mp_queue.get()
        if mp_link is None or mp_file is None or mp_number is None:
            break
        migrate(mp_link, mp_file, mp_number, mp_iolock, worker_id)
def clean_empty_dirs(directory_to_clean):
    """Finds (depth first) all empty dirs and deletes them (via `find -empty -delete`)."""
    call(['/bin/find', directory_to_clean, '-mindepth', '1', '-type', 'd', '-empty', '-delete'])
if __name__ == '__main__':
    # Check if name space and source are dirs
    cdnf(s_ns)
    cdnf(src)
    # Verify we can write and chown test files on both destination paths before
    # doing any real work.
    testfile = '/.xrd-drain-testfile_55c4e792761ddeb2dca627ffadca546f82359' + src_id
    testfile = [d_ns + testfile, dest + testfile]
    for f in testfile:
        returncode = call(flatten([ssh(), d_server, '/bin/touch ' + f + ' && /bin/chown ' + fileog
                                   + ' ' + f + ' && /bin/rm -f ' + f]))
        if returncode != 0:
            exit('Writing of test files to ' + d_ns + ' and '
                 + dest + ' failed!\n Is ' + fileog + ' defined on ' + d_server + '?')
    if mp:
        # Setup the multiprocess pool and queue
        m_queue = Queue(maxsize=mp_threads)
        iolock = Lock()
        pool = Pool(mp_threads, initializer=mp_process, initargs=(m_queue, iolock))
    else:
        m_queue = None
        pool = None
        iolock = noop()
    # Find all valid links and corresponding files
    for root, dirs, files in walk(s_ns, topdown=False):
        for filename in files:
            # Create file name
            filepath = path.join(root, filename)
            # Check if it is link
            if path.islink(filepath):
                # Get link target
                target = readlink(filepath)
                # Check if link address is absolute
                if not path.isabs(target):
                    # Create absolute link address
                    target = path.abspath(path.join(path.dirname(filepath), target))
                # Select only links belonging to src
                if match(escape(src), target):
                    if not path.exists(target):
                        # Delete all matching dead links
                        remove(filepath)
                    else:
                        # Migrate all data
                        with iolock:
                            print('Start migrating file %s: %s' % (str(dtransfers).rjust(10), filepath))
                        if mp:
                            m_queue.put((filepath, target, str(dtransfers)))
                        else:
                            migrate(filepath, target, str(dtransfers), iolock, src_id)
                        dtransfers += 1
            else:
                # Add to illegal files if file is not a link
                illegals.append(filepath)
    if mp:
        # Finish and close queues
        # One (None, None, None) sentinel per worker shuts the pool down.
        for _ in range(mp_threads):
            m_queue.put((None, None, None))
        pool.close()
        pool.join()
    print('Data migration done')
    # Count all illegals
    icount = len(illegals)
    if icount > 0:
        d = ''
        # Ask what to do about all illegal files
        # Ignore it with 'q'
        # NOTE(review): `d != 'q' or d != 'Q'` is always True (should be `and`);
        # harmless here because every exit path uses an explicit break.
        while d != 'q' or d != 'Q':
            print('Found %d illegal (not links) entries in namespace.\nWhat would you like to do about it?' % icount)
            d = input('(D)elete entries\n(L)ist entries\n(Q)uit and do nothing about it\n')
            # Delete illegals
            if d == 'D' or d == 'd':
                for f in illegals:
                    remove(f)
                print('Illegal entries were deleted.')
                break
            # List all illegal files
            elif d == 'L' or d == 'l':
                print(illegals)
            elif d == 'Q' or d == 'q':
                break
            else:
                print('Unknown choice "' + d + '"!')
    # Clean all empty dirs in src and s_ns
    print('Cleaning empty directories in ' + src + ' and ' + s_ns)
    clean_empty_dirs(src)
    clean_empty_dirs(s_ns)
    print('Migration finished')
    exit(0)
|
from aiopg.sa import create_engine
from sqlalchemy import (
MetaData, Table, Column, ForeignKey,
Integer, String, Date
)
# Shared SQLAlchemy metadata object that both tables register with.
meta = MetaData()
# Poll questions: text plus publication date.
question = Table(
    'question', meta,
    Column('id', Integer, primary_key=True),
    Column('question_text', String(200), nullable=False),
    Column('pub_date', Date, nullable=False)
)
# Answer choices with a vote counter (DB-side default "0"); deleting a
# question cascades to its choices via the foreign key.
choice = Table(
    'choice', meta,
    Column('id', Integer, primary_key=True),
    Column('choice_text', String(200), nullable=False),
    Column('votes', Integer, server_default="0", nullable=False),
    Column('question_id',
           Integer,
           ForeignKey('question.id', ondelete='CASCADE'))
)
async def init_pg(app):
    """Create the aiopg engine from app['config']['postgres'] and store it
    on the application under the 'db' key."""
    pg_conf = app['config']['postgres']
    # Forward exactly these config keys to create_engine as keyword args.
    keys = ('database', 'user', 'password', 'host', 'port', 'minsize', 'maxsize')
    app['db'] = await create_engine(**{key: pg_conf[key] for key in keys})
async def close_pg(app):
    """Close the engine stored under app['db'] and wait until it shuts down."""
    engine = app['db']
    engine.close()
    await engine.wait_closed()
|
# Map terse ERA5 variable codes to descriptive names; the suffix encodes the
# unit (_m metres, _pa pascals, _k kelvin, _ms metres per second).
mapping = {
    "sf": "snowfall_m",
    "sp": "pressure_pa",
    "t2m": "temperature_k",
    "tp": "precipitation_m",
    "u10": "wind_ms"
}
# NOTE(review): `era5` is defined elsewhere (presumably an xarray Dataset whose
# rename() accepts a mapping) — confirm.  Trailing bare expression displays the
# result in a notebook cell.
era5_renamed = era5.rename(mapping)
era5_renamed
from threading import Lock
# BUG FIX: a `global` statement at module level is a no-op and was removed;
# the names bot, cryptos_json and CMC_API_KEY are bound by initilize() below.
# Lock serializing access to the log from handler threads.
log_lock = Lock()
def initilize(telebot, cryptos_json_other, cmc_api_key):
    """Store the bot handle, crypto configuration and CoinMarketCap API key
    as module-level globals used by the rest of this module.

    NOTE(review): the function name is misspelled ("initilize") — kept as-is
    for backward compatibility with existing callers.
    """
    global bot, cryptos_json, CMC_API_KEY
    bot = telebot
    cryptos_json = cryptos_json_other
    CMC_API_KEY = cmc_api_key
|
import pytest
class TestClassAddition():
    """Checks Arithmetic.addition against a known sum."""
    @pytest.mark.parametrize(
        "params,expected",
        [
            [
                {
                    'a': 3,
                    'b': 3
                },
                6  # BUG FIX: 3 + 3 is 6, not 9 (copy-paste from multiplication)
            ]
        ]
    )
    def test_return(self, params, expected):
        import tests.example_module as example_module
        assert example_module.Arithmetic().addition(params['a'], params['b']) == expected
class TestClassDivision():
    """Checks Arithmetic.division against a known quotient."""
    @pytest.mark.parametrize(
        "params,expected",
        [
            [
                {
                    'a': 3,
                    'b': 3
                },
                1  # BUG FIX: 3 / 3 is 1, not 9 (1 == 1.0 also covers true division)
            ]
        ]
    )
    def test_return(self, params, expected):
        import tests.example_module as example_module
        assert example_module.Arithmetic().division(params['a'], params['b']) == expected
class TestClassMultiplication():
    """Checks Arithmetic.multiplication against a known product."""
    @pytest.mark.parametrize(
        "params,expected",
        [
            ({'a': 3, 'b': 3}, 9),
        ]
    )
    def test_return(self, params, expected):
        import tests.example_module as example_module
        product = example_module.Arithmetic().multiplication(params['a'], params['b'])
        assert product == expected
class TestClassSubtraction():
    """Checks Arithmetic.subtraction against a known difference."""
    @pytest.mark.parametrize(
        "params,expected",
        [
            [
                {
                    'a': 3,
                    'b': 3
                },
                0  # BUG FIX: 3 - 3 is 0, not 9 (copy-paste from multiplication)
            ]
        ]
    )
    def test_return(self, params, expected):
        import tests.example_module as example_module
        assert example_module.Arithmetic().subtraction(params['a'], params['b']) == expected
|
#!/usr/bin/env python
import sys
from cbapi.response.models import Alert
from cbapi.example_helpers import build_cli_parser, get_cb_response_object
import time
def main():
    """Resolve every unresolved Cb Response alert matching a user-supplied query.

    Returns None, which sys.exit() in the caller maps to exit status 0.
    """
    parser = build_cli_parser("Bulk resolve alerts")
    parser.add_argument("--query", action="store", default="", required=True,
                        help="The query string of alerts to resolve. All matching alerts will be resolved.")
    args = parser.parse_args()
    cb = get_cb_response_object(args)
    # Only alerts not already resolved, further narrowed by the user's query.
    alert_query = cb.select(Alert).where("-status:Resolved")
    alert_query = alert_query.where(args.query)
    alert_count = len(alert_query)
    if alert_count > 0:
        # CONSISTENCY FIX: reuse alert_count instead of calling
        # len(alert_query) again — each len() may re-issue the server query.
        print("Resolving {0:d} alerts...".format(alert_count))
        alert_query.change_status("Resolved")
        print("Waiting for alert changes to take effect...")
        time.sleep(25)
        print("Complete. Resolved {0:d} alerts.".format(alert_count))
    else:
        print("Congratulations! You have no unresolved alerts!")
# Script entry point: exit status propagates main()'s return value (None -> 0).
if __name__ == "__main__":
    sys.exit(main())
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : Feb-01-20 06:51
# @Author : Your Name (you@example.org)
# @Link : https://www.kaggle.com/uysimty/keras-cnn-dog-or-cat-classification
import numpy as np
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator, load_img
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import random
import os
import pickle
import tensorflow as tf
from keras import backend as K
from model import simple_CNN
def auc(y_true, y_pred):
    """Streaming AUC metric for Keras (TF1-style graph mode).

    tf.metrics.auc returns (value, update_op); index [1] is the update op
    that Keras evaluates each batch.  Running tf.local_variables_initializer()
    afterwards is the usual workaround to initialize the metric's internal
    accumulator variables before first use.
    NOTE(review): relies on TF1 sessions (K.get_session) — confirm TF version.
    """
    auc = tf.metrics.auc(y_true, y_pred)[1]
    K.get_session().run(tf.local_variables_initializer())
    return auc
# data path
TRAIN_DATA_DIR = "./data/train/"  # training images, named <category>.<id>.jpg
MODEL_SAVES_DIR = "./models-simpleCNN/"  # checkpoint output directory
# constants
IF_FAST_RUN = True  # when True, train only 3 epochs (smoke test)
EPOCHS_OVER_NIGHT = 50  # full training length when IF_FAST_RUN is False
IMAGE_WIDTH = IMAGE_HEIGHT = 128  # square inputs for the CNN
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3  # RGB
BATCH_SIZE = 15
def main():
    """Train the simple CNN donkey-vs-rabbit classifier.

    Builds the model, optionally resumes from a hard-coded checkpoint,
    prepares augmented train / plain validation generators from
    TRAIN_DATA_DIR, fits, then saves the history, final weights and a
    loss/accuracy plot.
    """
    """ Dir """
    if not os.path.exists(MODEL_SAVES_DIR):
        os.mkdir(MODEL_SAVES_DIR)
    """ Create Model """
    model_type = "simpleCNN"
    model = simple_CNN(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS)
    model.summary()
    print(model_type)
    print("Continuing training...")
    # model_ckpt = "model-" + model_type + ".h5"
    # Resume from this specific checkpoint if it exists on disk.
    model_ckpt = os.path.join(MODEL_SAVES_DIR, "model_24-val_acc-0.7852.h5")
    if os.path.isfile(model_ckpt):
        print("loading existed model...")
        model.load_weights(model_ckpt)
    from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
    # NOTE(review): earlystop is created but never added to `callbacks`
    # below, so early stopping is effectively disabled — confirm intent.
    earlystop = EarlyStopping(patience=10)
    # Halve the learning rate whenever val_acc plateaus for 2 epochs.
    learning_rate_reduction = ReduceLROnPlateau(monitor="val_acc",
                                                patience=2,
                                                verbose=1,
                                                factor=0.5,
                                                min_lr=0.00001)
    filename = "model_{epoch:02d}-val_acc-{val_acc:.4f}.h5"
    # Save a checkpoint every epoch, tagged with epoch number and val_acc.
    checkpoint = ModelCheckpoint(
        filepath=os.path.join(MODEL_SAVES_DIR, filename), monitor="val_acc", verbose=1, period=1)
    callbacks = [learning_rate_reduction, checkpoint]
    """Prepare Data Frame"""
    # Labels are derived from the filename prefix: donkey -> 1, anything else -> 0.
    filenames = os.listdir(TRAIN_DATA_DIR)
    categories = []
    for filename in filenames:
        category = filename.split('.')[0]
        if category == 'donkey':  # donkey 1
            categories.append(1)
        else:  # rabbit 0
            categories.append(0)
    df = pd.DataFrame({
        'filename': filenames,
        'category': categories
    })
    print(df.head())
    print(df.tail())
    # df['category'].value_counts().plot.bar()
    # plt.show()
    """Sample Image"""
    # sample = random.choice(filenames)
    # image = load_img("./data/train/"+sample)
    # plt.imshow(image)
    # plt.show()
    """Prepare data"""
    # flow_from_dataframe with class_mode='categorical' needs string labels.
    df["category"] = df["category"].replace({0: 'rabbit', 1: 'donkey'})
    """Automatically split the data into train and validation sets."""
    train_df, validate_df = train_test_split(
        df, test_size=0.20, random_state=42)
    train_df = train_df.reset_index(drop=True)
    validate_df = validate_df.reset_index(drop=True)
    # train_df['category'].value_counts().plot.bar()
    total_train = train_df.shape[0]
    total_validate = validate_df.shape[0]
    """Training Generator"""
    # Augmentation is applied to training images only.
    train_datagen = ImageDataGenerator(
        rotation_range=15,
        rescale=1./255,
        shear_range=0.1,
        zoom_range=0.2,
        horizontal_flip=True,
        width_shift_range=0.1,
        height_shift_range=0.1
    )
    train_generator = train_datagen.flow_from_dataframe(
        train_df,
        TRAIN_DATA_DIR,
        x_col='filename',
        y_col='category',
        target_size=IMAGE_SIZE,
        class_mode='categorical',
        batch_size=BATCH_SIZE,
        shuffle=True
    )
    """Validation Generator"""
    # Validation images are only rescaled, never augmented.
    validation_datagen = ImageDataGenerator(rescale=1./255)
    validation_generator = validation_datagen.flow_from_dataframe(
        validate_df,
        TRAIN_DATA_DIR,
        x_col='filename',
        y_col='category',
        target_size=IMAGE_SIZE,
        class_mode='categorical',
        batch_size=BATCH_SIZE,
        shuffle=True
    )
    """Example Generation"""
    example_df = train_df.sample(n=1).reset_index(drop=True)
    example_generator = train_datagen.flow_from_dataframe(
        example_df,
        TRAIN_DATA_DIR,
        x_col='filename',
        y_col='category',
        target_size=IMAGE_SIZE,
        class_mode='categorical'
    )
    """Example Generation Plotting"""
    # plt.figure(figsize=(12, 12))
    # for i in range(0, 15):
    #     plt.subplot(5, 3, i+1)
    #     for X_batch, Y_batch in example_generator:
    #         image = X_batch[0]
    #         plt.imshow(image)
    #         break
    # plt.tight_layout()
    # plt.show()
    """Fit Model"""
    epochs = 3 if IF_FAST_RUN else EPOCHS_OVER_NIGHT
    # NOTE(review): fit_generator is deprecated in newer Keras — confirm version.
    history = model.fit_generator(
        train_generator,
        epochs=epochs,
        validation_data=validation_generator,
        validation_steps=total_validate//BATCH_SIZE,
        steps_per_epoch=total_train//BATCH_SIZE,
        callbacks=callbacks
    )
    print("Save history")
    with open('./history', 'wb') as pickle_file:
        pickle.dump(history.history, pickle_file)
    print("Save model...")
    model.save_weights("model-" + model_type + ".h5")
    print("Visualize training...")
    # Two stacked panels: loss curves on top, accuracy curves below.
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 12))
    ax1.plot(history.history['loss'], color='b', label="Training loss")
    ax1.plot(history.history['val_loss'], color='r', label="validation loss")
    ax1.set_xticks(np.arange(1, epochs, 1))
    ax1.set_yticks(np.arange(0, 1, 0.1))
    ax2.plot(history.history['acc'], color='b', label="Training accuracy")
    ax2.plot(history.history['val_acc'], color='r',
             label="Validation accuracy")
    ax2.set_xticks(np.arange(1, epochs, 1))
    legend = plt.legend(loc='best', shadow=True)
    plt.tight_layout()
    # TODO plot.save
    plt.show()
# Script entry point.
if __name__ == "__main__":
    main()
|
import numpy as np
# dictionary describing options available to tune this algorithm
# Each entry: user-facing purpose text, default value, expected type, and
# (optionally) whether an 'auto' setting is supported.
options = {
    "peak_size": {"purpose": "Estimate of the peak size, in pixels. If 'auto', attempts to determine automatically. Otherwise, this should be an integer.",
                  "default": "auto",
                  "type": "int",
                  "has_auto": True},
    "refine_positions": {"purpose": "TODO",
                         "default": False,
                         "type": "bool"},
    "progress_object": {"purpose": "Object used to present a progress bar to the user. For definition, see UI_interface folder.",
                        "default": None},
}
def run(data):
    """Stub peak finder: ignores *data* and returns four all-zero (row, col)
    coordinates as a (4, 2) array.

    TODO: need to actually implement this peak finder.
    """
    return np.zeros(shape=(4, 2))
"""This module contains the general information for FirmwareActivity ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FirmwareActivityConsts:
    """Allowed string values for FirmwareActivity.servers_power_state."""
    SERVERS_POWER_STATE_NONE = "none"
    SERVERS_POWER_STATE_OFF = "off"
    SERVERS_POWER_STATE_OFF_NOWAIT = "off-nowait"
    SERVERS_POWER_STATE_ON = "on"
class FirmwareActivity(ManagedObject):
    """This is FirmwareActivity class.

    Auto-generated managed-object wrapper; the metadata tables below drive
    XML (de)serialization and validation in the UCS SDK.
    """
    # Holder for the enum values of serversPowerState.
    consts = FirmwareActivityConsts()
    # No properties participate in this object's relative name.
    naming_props = set([])
    # Class-level metadata: XML class id, rn prefix "fw-activity", first
    # supported version, access info, dirty mask, privileges and parents.
    mo_meta = MoMeta("FirmwareActivity", "firmwareActivity", "fw-activity", VersionMeta.Version251a, "InputOutput", 0x1f, [], ["admin"], [u'equipmentChassis'], [], [None])
    # Per-property metadata: python name, XML name, type, minimum version,
    # access, dirty-mask bit, length bounds, value regex, enum values, ranges.
    prop_meta = {
        "activity_trigger_time": MoPropertyMeta("activity_trigger_time", "activityTriggerTime", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
        "chassis_comp_in_activation_dn": MoPropertyMeta("chassis_comp_in_activation_dn", "chassisCompInActivationDn", "string", VersionMeta.Version251a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version251a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version251a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version251a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version311e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "server_comp_in_activation_dn": MoPropertyMeta("server_comp_in_activation_dn", "serverCompInActivationDn", "string", VersionMeta.Version251a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
        "servers_power_state": MoPropertyMeta("servers_power_state", "serversPowerState", "string", VersionMeta.Version251a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["none", "off", "off-nowait", "on"], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version251a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "upgrade_priority_info": MoPropertyMeta("upgrade_priority_info", "upgradePriorityInfo", "string", VersionMeta.Version251a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|cmc-update|cmc-activate|board-controller|storage-controller|chassis-adaptor-update|chassis-adaptor-activate|cmc-right-update|cmc-right-activate|chassis-adaptor-right-update|chassis-adaptor-right-activate|sas-expander-update|sas-expander-activate|sas-expander-right-update|sas-expander-right-activate|board-controller-right),){0,15}(none|cmc-update|cmc-activate|board-controller|storage-controller|chassis-adaptor-update|chassis-adaptor-activate|cmc-right-update|cmc-right-activate|chassis-adaptor-right-update|chassis-adaptor-right-activate|sas-expander-update|sas-expander-activate|sas-expander-right-update|sas-expander-right-activate|board-controller-right){0,1}""", [], []),
    }
    # Mapping from XML attribute names to python attribute names.
    prop_map = {
        "activityTriggerTime": "activity_trigger_time",
        "chassisCompInActivationDn": "chassis_comp_in_activation_dn",
        "childAction": "child_action",
        "dn": "dn",
        "rn": "rn",
        "sacl": "sacl",
        "serverCompInActivationDn": "server_comp_in_activation_dn",
        "serversPowerState": "servers_power_state",
        "status": "status",
        "upgradePriorityInfo": "upgrade_priority_info",
    }
    def __init__(self, parent_mo_or_dn, **kwargs):
        # All properties start unset; ManagedObject.__init__ applies kwargs.
        # (dn/rn are not assigned here — presumably handled by the
        # ManagedObject base class; confirm before relying on it.)
        self._dirty_mask = 0
        self.activity_trigger_time = None
        self.chassis_comp_in_activation_dn = None
        self.child_action = None
        self.sacl = None
        self.server_comp_in_activation_dn = None
        self.servers_power_state = None
        self.status = None
        self.upgrade_priority_info = None
        ManagedObject.__init__(self, "FirmwareActivity", parent_mo_or_dn, **kwargs)
|
'''
Given a string A consisting of lowercase characters.
We need to tell minimum characters to be appended (insertion at end) to make the string A a palindrome.
Example Input
Input 1:
A = "abede"
Input 2:
A = "aabb"
Example Output
Output 1:
2
Output 2:
2
'''
def min_append_to_palindrome(A: str) -> int:
    """Return the minimum number of characters to append to A so that the
    whole string becomes a palindrome.

    Finds the longest palindromic suffix; the i characters preceding it must
    be mirrored and appended, so the answer is that suffix's start index.
    Worst-case O(n^2).

    Fixes vs original: `len_A - (len_A - i)` simplified to `i`, and the
    unreachable-for-non-empty fallback `return 1` replaced with `return 0`
    (an empty string is already a palindrome and needs no appends).
    """
    n = len(A)
    for i in range(n):
        suffix = A[i:]
        if suffix == suffix[::-1]:
            return i
    return 0  # empty string: nothing to append
# Demo entry point: expected output is 2 ("aabb" -> "aabbaa").
if __name__ == "__main__":
    A = "aabb"
    print(min_append_to_palindrome(A))
import os
# Prefer the development build of CatBoost when it is installed.
try:
    import catboost_dev as catboost
    from catboost_dev import CatBoost, CatBoostRegressor, Pool
# BUG FIX: narrowed the bare `except:` (which also swallowed SystemExit and
# KeyboardInterrupt) to the only failure expected here.
except ImportError:
    import catboost
    from catboost import CatBoost, CatBoostRegressor, Pool
## CatBoost tutorial: Categorical features parameters
#
#*Mastering the parameters you didn't know exist*
#
#|  |
#|:--:|
#| *Categorical feature example: cat's face shape* |
#
#
#CatBoost is an open-sourced gradient boosting library. One of the differences between CatBoost and other gradient boosting libraries is its advanced processing of the categorical features (in fact "Cat" in the package name stands not for a 🐱 but for "CATegorical").
#
#CatBoost deals with the categorical data quite well out-of-the-box. However, it also has a huge number of training parameters, which provide fine control over the categorical features preprocessing. In this tutorial, we are going to learn how to use these parameters for the greater good. The tutorial is split into the following sections:
#
#1. **Introduction: categorical features in machine learning**
#2. **Categorical features processing in CatBoost**
#3. **Experiment: How the categorical features settings affect accuracy in predicting the prices of the old cars**.
#
## 1. Introduction: categorical features in machine learning
#**Categorical feature** is a feature that has a discrete set of values called *categories* that are *not comparable* by < or > to each other. In real-world datasets, we quite often deal with categorical data. The cardinality of a categorical feature, i.e. the number of different values that the feature can take, varies drastically among features and datasets -- from just a few to thousands and millions of distinct values. The values of a categorical feature can be distributed almost uniformly, and there might be values with frequencies differing by orders of magnitude. To be used in gradient boosting, categorical features need to be transformed to some form that can be handled by a decision tree, for example to numbers. In the next section, we are going to briefly go through the most popular machine-learning methods of transforming categorical feature values into numbers.
#Standard approaches to categorical features preprocessing
#* **One-hot Encoding** consists in creating a binary feature for each category. The main problem of the method is that features with huge cardinalities (such as user id for example) lead to a huge number of features.
#* **Label Encoding** maps each category, i.e. each value that a categorical feature can take, to an arbitrary number. This does not make a great deal of sense, and it does not work very well in practice either.
#* **Hash Encoding** converts string type features into a fixed dimension vector using a hash function.
#* **Frequency Encoding** consists in replacing categorical feature values with the frequency of the category in the dataset.
#* **Target Encoding** replaces the values of the categorical feature with a number that is calculated from the distribution of the target values for that particular value of the categorical variable. The most straightforward approach sometimes referred to as **Greedy Target Encoding** is to use the mean value of target on the objects belonging to the category. However, this method leads to target leakage and overfitting. One possible solution to these problems is **Holdout Target Encoding** -- one part of the training dataset is used to compute the target statistics for each category, and the training is performed on the rest of the training data. It solves the target leakage problem but requires us to sacrifice part of our precious training data. For this reason, the most popular in practice solutions are **K-Fold Target Encoding** and **Leave-One-Out Target Encoding**. The idea behind K-Fold Target Encoding is very similar to K-Fold Cross Validation -- we split the training data into several folds in each fold we replace the categorical feature values with the target statistics for the category calculated on the other folds. Leave-One-Out Target Encoding is a special case of K-Fold Encoding where K is equal to the length of training data. K-Fold Encoding and Leave-One-Out Target Encoding can also lead to overfitting. Consider the following example: in a training dataset, we have a single categorical feature with a single value and 5 objects of class 0 and 6 objects of class 1. Obviously feature that has only one possible value is useless, however, if we use Leave-One-Out Target Encoding with mean function for all the objects of class 0 the feature value will be encoded into 0.6 while for all the objects of class 1 the feature encoding value will be 0.5. This will allow a decision tree classifier to choose a split at 0.55 and achieve 100% accuracy on the training set.
#
## 2. Categorical features processing in CatBoost
#CatBoost supports some traditional methods of categorical data preprocessing, such as One-hot Encoding and Frequency Encoding. However one of the signatures of this package is its original solution for categorical features encoding.
#
#The core idea behind CatBoost categorical features preprocessing is **Ordered Target Encoding**: a random permutation of the dataset is performed and then target encoding of some type (for example just computing mean of the target for objects of this category) is performed on each example *using only the objects that are placed before the current object*.
#
#Generally transforming categorical features to numerical features in CatBoost includes the following steps:
#
#1. **Permutation** of the training objects in random order.
#2. **Quantization** i.e. converting the target value from a floating point to an integer depending on the task type:
# * Classification - Possible values for target value are “0” (doesn't belong to the specified target class) and “1” (belongs to the specified target class).
# * Multiclassification - The target values are integer identifiers of target classes (starting from “0”).
# * Regression - Quantization is performed on the label value. **The mode and number of buckets are set in the starting parameters**. All values located inside a single bucket are assigned a label value class – an integer in the range defined by the formula: <bucket ID – 1>.
#3. **Encoding** the categorical feature values.
#
#CatBoost creates four permutations of the training objects and for each permutation, a separate model is trained. Three models are used for the tree structure selection and the fourth is used to compute the leaves values of the final model that we save. At each iteration one of the three models is chosen randomly; this model is used to choose the new tree structure and to calculate the leaves values for all the four models.
#
#Using several models for tree structure selection enhances the robustness of the categorical features encoding. If in one permutation an object is close to the beginning of the dataset and the statistics for encoding are calculated on a small number of objects in the other two permutations it may be closer to the end of the dataset and many objects will be used to compute the statistics.
#
#Another important point is that **CatBoost can create new categorical features combining the existing ones**. And it will actually do so unless you explicitly tell it not to :) Treatment of the original features and the created features can be controlled separately by the settings `simple_ctr` and `combinations_ctr` respectively (we will talk about them in detail).
#
## 3. Categorical features parameters in practice: the old cars prices prediction
#
#
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import KFold
### Dataset
#
#For the experiments in this tutorial, we are going to use https://www.kaggle.com/lepchenkov/usedcarscatalog
#
#This dataset consists of the old cars descriptions and their characteristics - both numerical, such as mileage, production year, etc and categorical, such as color, manufacturer name, model name, etc.
#
#Our goal is to solve the *regression* task, i.e. to predict the price of an old car.
# Download the dataset by shelling out to wget.
# NOTE(review): os.system with a fixed URL — prefer subprocess.run([...]) or
# urllib.request for error handling; assumes wget is on PATH — confirm.
os.system("wget https://gist.githubusercontent.com/garkavem/2613ca62c9ace338b217eccc99fe1f6f/raw/981dae5111bb2a30e20dca888a99020d5c4f5899/gistfile1.txt -O cars.csv")
df = pd.read_csv('cars.csv')
df
categorical_features_names = ['manufacturer_name', 'model_name', 'transmission', 'color', 'engine_fuel',
                              'engine_type', 'body_type', 'state', 'drivetrain','location_region']
#Let's see how many unique values each categorical variable has:
df[categorical_features_names].nunique()
#Here is the target value distribution:
sns.distplot(df.price_usd.values)
np.median(df.price_usd.values)
#First, we are going to roughly estimate the number of trees and the learning rate required that are sufficient for this task.
# Simple shuffled 2/3 train / 1/3 test split for the rough estimate below.
df_ = df.sample(frac=1., random_state=0)
df_train = df_.iloc[: 2 * len(df) // 3]
df_test = df_.iloc[2 * len(df) // 3 :]
train_pool = Pool(df_train.drop(['price_usd'], 1),
                  label=df_train.price_usd,
                  cat_features=categorical_features_names)
test_pool = Pool(df_test.drop(['price_usd'], 1),
                 label=df_test.price_usd,
                 cat_features=categorical_features_names)
model = CatBoostRegressor(custom_metric= ['R2', 'RMSE'], learning_rate=0.95, n_estimators=100)
model.fit(train_pool, eval_set=test_pool, verbose=500, plot=True)
#Now we are going to write a simple function that tests CatBoost performance on 3-fold cross-validation given the parameters and returns the full list of parameters for the last model. Optionally this function compares the model's metrics with the results of the model trained with the default categorical features parameters.
#
#We will fix the number of estimators at 100 and the learning rate at 0.95 (matching DEFAULT_PARAMETERS below).
# 3-fold CV splitter used by score_catboost_model below.
kf = KFold(n_splits=3, shuffle=True)
# Fixed across all experiments so only the categorical settings vary.
DEFAULT_PARAMETERS = {'n_estimators' : 100, 'learning_rate' : 0.95}
# Baseline metrics, filled in by score_catboost_model(..., update_defaults=True).
DEFAULT_MODEL_METRICS = {}
def score_catboost_model(catboost_parameters, update_defaults=False):
    """Evaluate CatBoost parameters with 3-fold CV (globals kf, df).

    Prints mean (std) R2 and RMSE.  With update_defaults=True the metrics are
    stored in DEFAULT_MODEL_METRICS as the baseline; otherwise they are
    reported relative to that baseline, which must have been recorded first
    (a missing baseline raises KeyError).

    Returns the full parameter dict of the model trained on the last fold.
    """
    r2_values = []
    rmse_values = []
    # BUG FIX: merge instead of mutating the caller's dict in place;
    # DEFAULT_PARAMETERS still override overlapping keys, as before.
    parameters = {**catboost_parameters, **DEFAULT_PARAMETERS}
    for train_index, test_index in kf.split(df):
        train_pool = Pool(df.iloc[train_index].drop(['price_usd'], axis=1),
                          label=df.iloc[train_index].price_usd,
                          cat_features=categorical_features_names)
        test_pool = Pool(df.iloc[test_index].drop(['price_usd'], axis=1),
                         label=df.iloc[test_index].price_usd,
                         cat_features=categorical_features_names)
        model = CatBoost(parameters)
        model.fit(train_pool, verbose=False)
        r2_values.append(r2_score(df.iloc[test_index].price_usd.values, model.predict(test_pool)))
        rmse_values.append(mean_squared_error(df.iloc[test_index].price_usd.values,
                                              model.predict(test_pool),
                                              squared=False))
    if update_defaults:
        DEFAULT_MODEL_METRICS['R2'] = np.mean(r2_values)
        DEFAULT_MODEL_METRICS['RMSE'] = np.mean(rmse_values)
        print('R2 score: {:.4f}({:.4f})'.format(np.mean(r2_values), np.std(r2_values)))
        print('RMSE score: {:.0f}({:.0f})'.format(np.mean(rmse_values), np.std(rmse_values)))
    else:
        # Raises KeyError if the baseline has not been recorded yet.
        default_r2 = DEFAULT_MODEL_METRICS['R2']
        default_rmse = DEFAULT_MODEL_METRICS['RMSE']
        r2_change = 100 * (np.mean(r2_values) - default_r2) / default_r2
        rmse_change = 100 * (np.mean(rmse_values) - default_rmse) / default_rmse
        print('R2 score: {:.4f}({:.4f}) {:+.1f}% compared to default parameters'.format(
            np.mean(r2_values), np.std(r2_values), r2_change))
        print('RMSE score: {:.0f}({:.0f}) {:+.1f}% compared to default parameters'.format(
            np.mean(rmse_values), np.std(rmse_values), rmse_change))
    return model.get_all_params()
## Categorical features encoding parameters in CatBoost
#
#The amount of parameters related to categorical features processing in CatBoost is overwhelming. Here is a hopefully the full list:
#* `one_hot_max_size` (int) - use one-hot encoding for all categorical features with a number of different values less than or equal to the given parameter value. No complex encoding is performed for such features. *Default for regression task is 2.*
#* `model_size_reg` (float from 0 to inf) - The model size regularization coefficient. The larger the value, the smaller the model size. Refer to the Model size regularization coefficient section for details. This regularization is needed only for models with categorical features (other models are small). Models with categorical features might weight tens of gigabytes or more if categorical features have a lot of values. If the value of the regularizer differs from zero, then the usage of categorical features or feature combinations with a lot of values has a penalty, so fewer of them are used in the resulting model. *Default value is 0.5*
#* `max_ctr_complexity` - The maximum number of features that can be combined. Each resulting combination consists of one or more categorical features and can optionally contain binary features in the following form: “numeric feature > value”. *For regression task on CPU the default value is 4.*
#* `has_time` (bool) - if `true`, the 1-st step of categorical features processing, permutation, is not performed. Useful when the objects in your dataset are ordered by time. For our dataset, we don't need it. *Default value is* `False`
#* `simple_ctr` - Quantization settings for simple categorical features.
#* `combinations_ctr` - Quantization settings for combinations of categorical features.
#* `per_feature_ctr` - Per-feature quantization settings for categorical features.
#* `counter_calc_method` determines whether to use validation dataset(provided through parameter `eval_set` of `fit` method) to estimate categories frequencies with `Counter`. By default, it is `Full` and the objects from validation dataset are used; Pass `SkipTest` value to ignore the objects from the validation set
#* `ctr_target_border_count` - The maximum number of borders to use in target quantization for categorical features that need it. *Default for regression task is 1.*
#* `ctr_leaf_count_limit` - The maximum number of leaves with categorical features. *Default value is None i.e. no limit.*
#* `store_all_simple_ctr` - If the previous parameter `ctr_leaf_count_limit` at some point gradient boosting tree can no longer make splits by categorical features. With *Default value* `False` the limitation applies both to original categorical features and the features, that CatBoost creates by combining different features. If this parameter is set to `True` only the number of splits made on combination features is limited.
#
#The three parameters `simple_ctr`, `combinations_ctr`, and `per_feature_ctr` are complex parameters that control the second and the third steps of categorical features processing. We will talk about them more in the next sections.
#
### Default parameters
#First, we test the out-of-the-box CatBoost categorical features processing.
# Baseline run with default categorical-feature settings; update_defaults=True
# records its metrics in DEFAULT_MODEL_METRICS for all later comparisons.
last_model_params = score_catboost_model({}, True)
#We will save the metrics of the model with the default categorical features parameters for the further comparison.
### One-Hot Encoding Max Size
#The first thing we try is to make CatBoost use one-hot encoding for all our categorical features (the max categorical feature cardinality in our dataset is 1118 < 2000). The documentation says, that for the features for which one-hot encoding is used no other encodings are computed.
#
#*Default value is:*
#* N/A if training is performed on CPU in Pairwise scoring mode
#* 255 if training is performed on GPU and the selected Ctr types require target data that is not available during the training
#* 10 if training is performed in Ranking mode
#* 2 if none of the conditions above is met
# Force one-hot encoding for every categorical feature (max cardinality in
# this dataset is 1118 < 2000), disabling the more complex CTR encodings.
model_params = score_catboost_model({'one_hot_max_size' : 2000})
#As can be seen on our dataset it works quite well. The main problem of one-hot encoding, however, is that it is simply impossible to use it on categorical features with really huge cardinality.
### Model Size Regularization
#*This parameter influences the model size if training data has categorical features.*
#
#*The information regarding categorical features makes a great contribution to the final size of the model. The mapping from the categorical feature value hash to some statistic values is stored for each categorical feature that is used in the model. The size of this mapping for a particular feature depends on the number of unique values that this feature takes.*
#
#*Therefore, the potential weight of a categorical feature can be taken into account in the final model when choosing a split in a tree to reduce the final size of the model. When choosing the best split, all split scores are calculated and then the split with the best score is chosen. But before choosing the split with the best score, all scores change according to the following formula:*
#
#
#
#*$s^{new}$ is the new score for the split by some categorical feature or combination feature,
#$s^{old}$ is the old score for the split by the feature, $u$ is the number of unique values of the feature, $U$ is the maximum of all $u$ values among all features and $M$ is the value of the `model_size_reg` parameter.*
#
#This regularization works slightly differently on GPU: feature combinations are regularized more aggressively than on CPU. For CPU cost of a combination is equal to number of different feature values in this combinations that are present in training dataset. On GPU cost of a combination is equal to number of all possible different values of this combination. For example, if combination contains two categorical features c1 and c2, then the cost will be #categories in c1 * #categories in c2, even though many of the values from this combination might not be present in the dataset.
#
#Let us try to set model size regularization coefficient to 0 - thus we allow our model to use as many categorical features and its combinations as it wants.
# No model-size regularization: categorical features/combinations unrestricted.
model_params = score_catboost_model({'model_size_reg': 0})
# Strong regularization, for comparison.
model_params = score_catboost_model({'model_size_reg': 1})
#To check how the size of the model is affected by this setting we will write a function that given parameters dict will train a model, save it in a file and return the model's weight:
from pathlib import Path
def weight_model(catboost_parameters):
    """Train a CatBoost model, save it to disk and return its size in bytes.

    Parameters
    ----------
    catboost_parameters : dict
        Extra CatBoost parameters; merged with the module-level
        DEFAULT_PARAMETERS (defaults win on key clashes, matching the
        original ``update`` semantics).

    Returns
    -------
    int
        Size of the saved model file in bytes.
    """
    # Merge into a copy instead of calling update() on the argument:
    # the original mutated the caller's dict as a side effect.
    params = dict(catboost_parameters)
    params.update(DEFAULT_PARAMETERS)
    model = CatBoost(params)
    model.fit(train_pool, verbose=False)
    model.save_model('model_tmp')
    # Model weight = size of the serialized file on disk.
    return Path('model_tmp').stat().st_size
# Weigh both models on disk and compute the size ratio (reg=0 vs reg=1);
# a ratio > 1 means the unregularized model is larger.
model_size_reg_0 = weight_model({'model_size_reg': 0})
model_size_reg_1 = weight_model({'model_size_reg': 1})
model_size_reg_0/model_size_reg_1
#As we can see the model with the strong regularization is almost 13 times smaller than the model without regularization.
### Number of Features Combined
#
#**Feature combinations**: Note that any combination of several categorical features could be considered
#as a new one. For example, assume that the task is music recommendation and we have two categorical
#features: user ID and musical genre. Some user prefers, say, rock music. When we convert user ID
#and musical genre to numerical features we lose this information. A combination of two features solves this problem and gives a new powerful feature. However, the number of combinations grows exponentially with the number of categorical features in dataset and it is not possible to consider all of them in the algorithm. When constructing a new split for the current tree, CatBoost considers combinations in a greedy way. No combinations are considered for the first split in the tree. For the next splits CatBoost combines all combinations and categorical features present in current tree with all categorical features in dataset. Combination values are converted to numbers on the fly. CatBoost also generates combinations of numerical and categorical features in the following way: all the splits selected in the tree are considered as categorical with two values and used in
#combinations in the same way as categorical ones.
#
#*The maximum number of features that can be combined. Each resulting combination consists of one or more categorical features and can optionally contain binary features in the following form: “numeric feature > value”. For regression task on CPU the default value is 4.*
#
#Although it is not mentioned in the documentation, this parameter value has to be $\le 15$.
# Compare accuracy with up to 6 combined categorical features vs. no
# feature combinations at all.
model_params = score_catboost_model({'max_ctr_complexity': 6})
model_params = score_catboost_model({'max_ctr_complexity': 0})
#As we can see on our dataset the difference in the model's accuracy is not significant. To check how the size of the model is affected we will use our function that weights a model.
model_size_max_ctr_6 = weight_model({'max_ctr_complexity': 6})
model_size_max_ctr_0 = weight_model({'max_ctr_complexity': 0})
# Disk-size ratio of the combining model vs. the non-combining one.
model_size_max_ctr_6/model_size_max_ctr_0
#As can be seen, the model that can combine up to 6 features weights 6 times more than the model that does not combine features at all.
### Has Time
#
#With this setting on we do not perform random permutations during the Transforming categorical features to numerical. This might be useful when the objects of our dataset are already ordered by time. If a Timestamp type column is present in the input data it is used to determine the order of objects.
model_params = score_catboost_model({'has_time': True})
### `simple_ctr` and `combinations_ctr`
#
#Both `simple_ctr` and `combinations_ctr` are complex parameters that provide regulation of the categorical features encodings types. While `simple_ctr` is responsible for processing the categorical features initially present in the dataset, `combinations_ctr` affects the encoding of the new features, that CatBoost creates by combining the existing features. The available methods of encodings and possible values of `simple_ctr` and `combinations_ctr` are the same, so we are not going to look at them separately. But of course, you can always tune them separately on your task!
#
#### Encodings without target quantization
#
#**Target quantization** is transforming *float* target values to *int* target values using some borders. We will first consider the target encoding methods that do not require such a transformation.
#
##### FloatTargetMeanValue (GPU only)
#The first option *FloatTargetMeanValue* is the most straightforward approach. Each value of the categorical variable is replaced with the mean of the target over the objects of the same category that are placed before the current object.
# FloatTargetMeanValue: replace each category value with the running mean of
# the target over the preceding objects of the same category (GPU only).
model_params = score_catboost_model({'simple_ctr' : 'FloatTargetMeanValue',
                                     'combinations_ctr' : 'FloatTargetMeanValue',
                                     'task_type' : 'GPU'})
##### FeatureFreq (GPU only)
#The second option is *FeatureFreq*. The categorical feature values are replaced with the frequencies of the category in the dataset. Again only the objects placed before the current objects are used.
model_params = score_catboost_model({'simple_ctr' : 'FeatureFreq',
                                     'combinations_ctr' : 'FeatureFreq',
                                     'task_type' : 'GPU'})
##### Counter
#
#`Counter` method is very similar to the traditional Frequency Encoding described in the introduction and is defined by the following formula:
#
#$curCount$ is the number of objects of the current category, $maxCount$ is the number of objects of the most frequent category and $prior$ is a number defined by the parameter `prior`.
model_params = score_catboost_model({'simple_ctr' : 'Counter', 'combinations_ctr' : 'Counter'})
##### `CtrBorderCount` parameter
#
#Let us say we have calculated encodings for our categorical variable. These encodings are floats and they are comparable: in case of `Counter` the larger encoding value corresponds to the more frequent category. However, if we have a large number of categories the difference between close categories encodings may be caused by noise and we do not want our model to differentiate between close categories. For this reason we transform our float encoding into int encoding $i \in [0, l]$. By default `CtrBorderCount=15` setting means that $l=14(15-1)$. We can try to use bigger value:
# Raise CtrBorderCount from the default 15 to 40 quantization borders for
# the Counter encodings (with Prior=0.5/1).
model_params = score_catboost_model({'combinations_ctr':
                                     ['Counter:CtrBorderCount=40:Prior=0.5/1'],
                                     'simple_ctr':
                                     ['Counter:CtrBorderCount=40:Prior=0.5/1']})
##### Binarized Target Mean Value
#The second method `BinarizedTargetMeanValue` is very similar to target encoding, except that instead of the sum over the exact target values we use the sum of the values of the bins, which corresponds to the following formula:
#
#
#
#where:
#* countInClass is the ratio of the sum of the label value integers for this categorical feature to the maximum label value integer ().
#* totalCount is the total number of objects that have a feature value matching the current one.
#* prior is a number (constant) defined by the starting parameters.
# BinarizedTargetMeanValue: target-encoding variant built from binarized
# (bucketed) target values instead of the raw targets.
model_params = score_catboost_model({'combinations_ctr': 'BinarizedTargetMeanValue',
                                     'simple_ctr': 'BinarizedTargetMeanValue'})
#While using the `BinarizedTargetMeanValue` method we can also finetune `Prior` and `CtrBorderCount`(the number of borders for quantization the category feature encoding). By default `CtrBorderCount`=15 and 0, 0.5 and 1 `Prior` values are used to build three different encodings.
### Encodings with Target Quantization
#### Buckets and Borders
#
#
#Now we proceed to the settings of the encodings methods that require target quantization. The first choice is `Borders` vs. `Buckets`. The difference between the two is pretty simple. Both are described by the following formula:
#
#for $i \in [0, k-1]$ in case of `Borders` and for $i \in [0, k]$ in case of `Buckets`:
#
#
#where $k$ is the number of borders regulated by parameter `TargetBorderCount`, $totalCount$ is the number of objects of the same category. $prior$ is defined by the parameter $prior$. The only difference is that for `Borders` $countInClass$ is the number of the objects of the category with the discretized target value **greater** than $i$ while for `Buckets` $countInClass$ is the number of the objects of the category with the discretized target value **equal** to $i$.
#
#
#
#Let us see a small example: we have objects of two categories shown as suns and moons. We will compute the categorical feature encodings in case of borders and buckets.
#Borders:
#We have two borders(which corresponds to `TargetBorderCount=2`), so we need to calculate 2 encodings. Let us say our Prior is 0.5
#* Border k=0: there are 2 objects of category sun and 4 objects of category moon with target values greater than the border. In total there are 3 suns and 5 moons. So our encodings are:
#
#$encoding^{0}_{sun} = \frac{2 + 0.5}{3} = 0.83 $ and $encoding^{0}_{moon} = \frac{4 + 0.5}{5} = 0.9 $
#
#* Border k=1:
#
#$ encoding^{1}_{sun} = \frac{0 + 0.5}{3} = 0.17 $ and $ encoding^{1}_{moon} = \frac{3 + 0.5}{5} = 0.7 $
#
#Buckets:
#$i \in [0, k]$ creates $k+1$ buckets. So the same value of `TargetBorderCount=2` creates more features from each categorical feature if we choose `Buckets`.
#* Bucket k=0: there is 1 object of category sun and 1 object of category moon with target values within Bucket k=0. In total there are 3 suns and 5 moons. So our encodings are:
#
#$encoding^{0}_{sun} = \frac{1 + 0.5}{3} = 0.5 $ and $encoding^{0}_{moon} = \frac{1 + 0.5}{5} = 0.3 $
#
#* Bucket k=1:
#
#$ encoding^{1}_{sun} = \frac{2 + 0.5}{3} = 0.83 $ and $ encoding^{1}_{moon} = \frac{1 + 0.5}{5} = 0.3 $
#
#* Bucket k=2:
#
#$ encoding^{2}_{sun} = \frac{0 + 0.5}{3} = 0.17 $ and $ encoding^{2}_{moon} = \frac{3 + 0.5}{5} = 0.7 $
#| Categorical feature value | Borders features | | Buckets features | | |
#|---|---|---|---|---|--|
#| | | | | | |
#| sun | 0.83 | 0.17 | 0.5 | 0.83 | 0.17 |
#| moon | 0.9 | 0.7 | 0.3 | 0.3 | 0.7|
#
#
#*Important note! This example just serves to illustrate the difference between `Borders` and `Buckets`, and the whole dataset is used to compute $countInClass$ and $totalCount$. In reality, CatBoost uses only the objects placed before the current object.*
#
#Let us see if it makes any difference in practice:
# Compare the two target-quantization schemes head to head.
model_params = score_catboost_model({'combinations_ctr': 'Borders',
                                     'simple_ctr': 'Borders'})
model_params = score_catboost_model({'combinations_ctr': 'Buckets',
                                     'simple_ctr': 'Buckets'})
#An attentive reader may remember that by default CatBoost creates some features using `Borders` splits and also some features using `Counter` method. When we explicitly pass the `Borders` option, `Counter` method is not used.
#
#Generally, it is recommended to use `Borders` for the regression task and `Buckets` for the multiclassification task.
#### Treatment of missing values and new categories
#
#1. What happens if there is a **new category in the test set** that never appeared in the training set? The answer is, that since $𝑐𝑜𝑢𝑛𝑡𝐼𝑛𝐶𝑙𝑎𝑠𝑠$ is equal to zero, the prior is used to compute the encoding:
#
#$$ctr_i=\frac{prior}{totalCount + 1}$$
#
#2. Meanwhile, missing values in the categorical feature are replaced with `"None"` string. Then all the objects with the missing feature value are treated as a new category.
#### Number of target borders
#
#The number of borders or buckets can be controlled with the `TargetBorderCount` parameter. By default we have only one border, let us see if having more borders helps:
# Use 4 target borders instead of the default 1 for the Borders encoding.
model_params = score_catboost_model({'combinations_ctr': 'Borders:TargetBorderCount=4',
                                     'simple_ctr': 'Borders:TargetBorderCount=4'})
#### Default value of `simple_ctr` and `combinations_ctr`
#
#By default, CatBoost uses several encoding techniques to encode each categorical feature.
#
#
#* First it uses `Borders` method with one target border `TargetBorderCount`=1 (in our example for each categorical feature we just want to see if it makes the car more expensive). The obtained float encodings are further discretized into `CtrBorderCount`=15 different values. Three values of `Prior` parameter are used to create 3 three different encodings: `Prior=0/1:Prior=0.5/1:Prior=1/1`
#
#* Also for each categorical feature, we create an encoding with `Counter` method. The number of categorical encoding value borders `CtrBorderCount` is also equal to 15, and only one value of `Prior=0/1` is used.
#
#We can always check the parameters used by our model with `get_all_params()` method.
# Train with default settings and inspect which ctr descriptions CatBoost
# actually used (second argument asks score_catboost_model for all params).
last_model_params = score_catboost_model({}, True)
last_model_params['simple_ctr']
last_model_params['combinations_ctr']
### Individual features control via `per_feature_ctr` parameter
#
#The next thing I would like to talk about in this tutorial is using different encoding methods for different features with the parameter `per_feature_ctr`. It might be useful in cases when you know that one of your features is more important than the others. We can, for example, increase the number of target borders for model_name feature:
model_params = score_catboost_model({'per_feature_ctr': ['1:Borders:TargetBorderCount=10:Prior=0/1'] })
### Other parameters
#### Counter Calculation Method
#
#The parameter determines whether to use the validation dataset (provided through the `eval_set` parameter of the `fit` method) to estimate categories' frequencies with `Counter`. By default, it is `Full` and the objects from the validation dataset are used; pass the `SkipTest` value to ignore the objects from the validation set. In our `score_catboost_model` function we don't give CatBoost the validation dataset at all during training, so to check this method's effect we will use a train/test split.
# Compare the two counter_calc_method options with an explicit validation set.
# Fixes vs. the original cell:
#  * sklearn's mean_squared_error returns the MSE, but the printout labels it
#    "RMSE" -- take the square root so the label is truthful;
#  * predict() was called twice per model -- predict once and reuse;
#  * the two nearly identical training cells are folded into one loop.
for method in ('Full', 'SkipTest'):
    model = CatBoostRegressor(custom_metric=['R2', 'RMSE'], learning_rate=0.95,
                              n_estimators=100, counter_calc_method=method)
    model.fit(train_pool, eval_set=test_pool, verbose=False)
    predictions = model.predict(test_pool)
    r2_res = r2_score(df_test.price_usd.values, predictions)
    rmse_res = mean_squared_error(df_test.price_usd.values, predictions) ** 0.5
    print('Counter Calculation Method {}: R2={:.4f} RMSE={:.0f}'.format(
        method, r2_res, rmse_res))
#### Number of Borders for Target Quantization
#
#*The maximum number of borders to use in target quantization for categorical features that need it. Default for regression task is 1.*
#
#Let us try a rather big number of borders:
model_params = score_catboost_model({'ctr_target_border_count': 10})
#It is worth noticing that this setting seems to have a huge impact on training time -- on my machine, it increased almost 5 times.
#### Categorical Values Limit
#
#This parameter regulates the number of the most common categorical feature values that are used by the model. If we have $n$ unique categories and `ctr_leaf_count_limit`=$m$ we preserve the categorical feature value only for objects from $m$ most frequent categories. For the objects from the remaining $n-m$ categories, we replace categorical feature value with `None`.
#
#The default value of this parameter is `None` -- all the categorical features values are preserved.
model_params = score_catboost_model({'ctr_leaf_count_limit' : 5})
#Oops! On our dataset, it ruins the model performance.
#### Store Simple Categorical Features
#With this setting on the previous parameter `ctr_leaf_count_limit` affects only the categorical features, that CatBoost creates by combining the initial features and the initial categorical features present in the dataset are not affected. When parameter `ctr_leaf_count_limit` is `None` parameter `store_all_simple_ctr` has no effect.
model_params = score_catboost_model({'store_all_simple_ctr' : True, 'ctr_leaf_count_limit' : 5})
### Internal feature importance
#
#It is quite common to use several encodings for a categorical feature. For instance, CatBoost creates 4 different encodings for each categorical feature by default (see "Default value of simple_ctr and combinations_ctr" section). When we call `get_feature_importances` method we get aggregated across all the encodings importance for the categorical feature. That is because in practice we usually just want to compare the overall usefulness of the different features present in our dataset.
#
#However, what if we want to know which encodings worked best for us? For that we would need to get **Internal Feature Importance.** Currently, it is available only in the command-line version of CatBoost library. You can find details about the installation [here](https://catboost.ai/docs/concepts/cli-installation.html) and an example of how to train a model with the command-line version in [this tutorial](https://github.com/catboost/catboost/blob/master/catboost/tutorials/cmdline_tutorial/cmdline_tutorial.md).
#
#To train a model with the command-line version we first need to create a column description file:
# Build a column-description file for the command-line CatBoost tool:
# each categorical column is marked 'Categ', everything else 'Auxiliary'.
descr = ['Categ' if i in categorical_features_names else 'Auxiliary' for i in df.columns]
# Column 14 is the target -- assumes price_usd is the 15th column of df; TODO confirm.
descr[14] = 'Target'
# NOTE(review): recent pandas versions expect header=False rather than
# header=None here -- confirm against the installed pandas.
pd.Series(descr).to_csv('train.cd', sep='\t', header=None)
#Then train a model:
#
#`catboost fit --learn-set cars.csv --loss-function RMSE --learning-rate 0.1 --iterations 4500 --delimiter=',' --has-header --column-description train.cd`
#
#And then create an Internal Feature Importance file:
#`catboost fstr -m model.bin --cd train.cd --fstr-type InternalFeatureImportance -o feature_strength.tsv`
#
#The contents of this file in our case are the following:
#
#`9.318442186 transmission
#7.675430604 {model_name} prior_num=1 prior_denom=1 targetborder=0 type=Borders
#3.04782682 {model_name} prior_num=0 prior_denom=1 targetborder=0 type=Borders
#2.951546528 {model_name} prior_num=0.5 prior_denom=1 targetborder=0 type=Borders
#2.939078189 {body_type} prior_num=0 prior_denom=1 targetborder=0 type=Borders
#2.666138982 {state, transmission} prior_num=0.5 prior_denom=1 targetborder=0 type=Borders
#2.431465565 {body_type} prior_num=1 prior_denom=1 targetborder=0 type=Borders
#2.059354431 {manufacturer_name} prior_num=0 prior_denom=1 targetborder=0 type=Counter
#1.946443049 {state} prior_num=1 prior_denom=1 targetborder=0 type=Borders
#1.932116622 {color} prior_num=1 prior_denom=1 targetborder=0 type=Borders
#1.633469855 {color} prior_num=0.5 prior_denom=1 targetborder=0 type=Borders
#1.561168441 {manufacturer_name} prior_num=0.5 prior_denom=1 targetborder=0 type=Borders
#1.419944596 {manufacturer_name} prior_num=0 prior_denom=1 targetborder=0 type=Borders
#1.3323198 {body_type} prior_num=0 prior_denom=1 targetborder=0 type=Counter
#1.068973258 {color} prior_num=0 prior_denom=1 targetborder=0 type=Counter
#1.038663366 {manufacturer_name} prior_num=1 prior_denom=1 targetborder=0 type=Borders
#1.001434874 {manufacturer_name, body_type} prior_num=0 prior_denom=1 targetborder=0 type=Counter
#0.9012036663 {body_type} prior_num=0.5 prior_denom=1 targetborder=0 type=Borders
#0.8805961369 {manufacturer_name, body_type} prior_num=1 prior_denom=1 targetborder=0 type=Borders
#0.8796937131 {drivetrain} prior_num=0 prior_denom=1 targetborder=0 type=Borders
#...
#...
#1.476546485e-05 {engine_fuel, engine_type} prior_num=0 prior_denom=1 targetborder=0 type=Borders
#7.417408934e-06 {engine_type, body_type, state, location_region} prior_num=0.5 prior_denom=1 targetborder=0 type=Borders
#`
#
#* We can see that the most important feature is transmission;
#* then we have the 3 `Borders` type encodings for `model_name` categorical feature with different priors;
#* then an encoding for `body_type` feature;
#* then we have a categorical feature created by CatBoost from the combination of `state` and `transmission` features
#
#An interesting observation is that for some features like `model_name` the most useful are the encodings of `Border` type, while for other features e.g. `manufacturer_name` the most useful encoding is obtained with `Counter` method.
### `logging_level=Info`
#
#Another way of getting some insight into how your model works is training with `logging_level=Info` parameter. This setting allows us to see the feature splits chosen for each tree:
# Train a tiny 5-tree model with logging_level='Info' so the chosen feature
# splits are printed for every tree.
model = CatBoostRegressor(custom_metric= ['R2', 'RMSE'], learning_rate=0.3, n_estimators=5)
model.fit(train_pool, eval_set=test_pool, logging_level='Info')
#For numeric features the format is the following:
#
#feature name, index of the chosen split, split score
#
#Example: `year_produced, bin=47 score 669154.1979`
#
#Format for categorical features is:
#
#feature name, prior, target border, encoding type, categorical feature border, split score
#
#Example: `{state} pr1 tb0 type0, border=12 score 901338.6173`
#
#For convenience, categorical features names are written in brackets \{\}
### Parameter tuning for binary classification and multiclassification tasks.
#
#In our tutorial, we were working on the regression task, so I would like to make several notes on categorical parameter tuning on binary classification and multiclassification tasks.
#
#* For **binary classification** parameter tuning is very similar to regression task, except the fact, that it is usually useless to increase the `TargetBorderCount` parameter value (unless you pass probabilities as a target).
#
#* In **multiclassification** task we should keep in mind that usually, we do not have any natural order on classes so the use of `FloatTargetMeanValue` or `BinarizedTargetMeanValue` encodings is not recommended. If your training takes too long you can try to set `TargetBorderCount` to a lower value than the default n_classes - 1 if there is a way to unite some of your classes.
### Conclusion
#
#
#
#Congratulations to everyone who finished reading this tutorial :) As we saw the number of tunable parameters related to categorical features processing in CatBoost package is quite impressive. We learned how to control all of them, and I very much hope that this knowledge will help you to achieve the best results on your tasks involving categorical data!
|
#!/usr/bin/env python3
import json
import os
import re
import subprocess
import sys
# Inboxes matching this pattern switch the menu-bar icon to "new mail".
notify_me_on_inbox = re.compile("(.*(Personal|Work)-Inbox|.*0-GitHUB)")
# Only inboxes matching this pattern are listed (default: everything).
only_show_inbox = re.compile(r".*")
# Drop the leading "account/" part of each inbox name before display.
remove_gmail_folder = True
# When True, caught subprocess errors are printed for troubleshooting.
DEBUG = False
def get_inbox_statuses():
    """Query Emacs for per-inbox unread-mail counts.

    Runs ``my-is-there-any-mail-out-there-json`` through emacsclient and
    parses its output into a dict mapping inbox name -> count. Returns an
    empty dict when emacsclient fails or its output is not valid JSON.
    """
    try:
        emacsclient_output = subprocess.run(
            "emacsclient --eval \"(my-is-there-any-mail-out-there-json)\"",
            stderr=subprocess.PIPE,
            shell=True,
            check=True,
            stdout=subprocess.PIPE)
    except subprocess.CalledProcessError as e:
        if DEBUG:
            print(e)
        return {}
    # emacsclient prints an elisp string literal: strip the surrounding
    # quote characters and trailing newline ([1:-2]) and unescape the
    # embedded double quotes before handing the payload to json.
    raw = emacsclient_output.stdout.decode()[1:-2].replace('\\"', '"')
    try:
        return json.loads(raw)
    except json.JSONDecodeError as e:
        # Malformed payload (e.g. the elisp function is undefined) used to
        # crash the whole script; treat it as "no mail info available".
        if DEBUG:
            print(e)
        return {}
def main():
    """Render the menu-bar dropdown for the current inbox statuses."""
    statuses = get_inbox_statuses()
    script_path = os.path.realpath(__file__)
    if not statuses:
        # No data from Emacs at all.
        print("⬛")
        return

    icon = "📪"
    lines = ["---"]
    for inbox, count in statuses.items():
        name = inbox.split("/")[-1] if remove_gmail_folder else inbox
        if not only_show_inbox.match(name):
            continue
        if notify_me_on_inbox.match(name):
            # At least one "important" inbox has mail.
            icon = "💌"
        lines.append(
            f"{name} ({count}) | bash=\"{script_path} {name}\" terminal=false")

    if len(lines) == 1:
        # Every inbox was filtered out.
        print("⬛")
        return

    print("\n".join([icon] + lines))
    print("\n--\nRefresh | refresh=true")
def goto_inbox(inbox):
    """Focus the given inbox group in Emacs, then raise the Emacs window.

    ``inbox`` comes straight from the command line (a menu-bar click), so
    both commands are passed as argument lists with shell=False instead of
    interpolating the value into a shell string -- this closes a
    shell-injection hole the original f-string command had.
    """
    # NOTE(review): inbox is still embedded in the elisp string literal; a
    # double quote in an inbox name would break the expression -- confirm
    # inbox names can never contain quotes.
    subprocess.run(
        ["emacsclient", "--eval",
         f'(my-is-there-any-mail-out-there-focus-group "{inbox}")'],
        stderr=subprocess.PIPE,
        check=True,
        stdout=subprocess.PIPE)
    # Best effort: jump to the Emacs window; failure is ignored (check=False).
    subprocess.run(["jumpapp", "-X", "emacs27"],
                   stderr=subprocess.PIPE,
                   check=False,
                   stdout=subprocess.PIPE)
if __name__ == '__main__':
    # With an inbox name as argv[1] (a menu-bar click), jump to that inbox;
    # with no arguments, render the menu output.
    if len(sys.argv) > 1:
        try:
            goto_inbox(sys.argv[1])
        except subprocess.CalledProcessError:
            # Emacs not running / eval failed -- nothing useful to do.
            pass
    else:
        main()
|
from django.conf.urls import url, handler404, handler500, handler403
# from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
import blog.views
from django.views import static
from DjBlog import settings
from blog import urls as blog_url
from users import urls as users_url
from society import urls as society_url
from users.views import page_not_found, page_error, permission_denied
# Custom error handlers; Django looks these names up in the root URLconf.
handler403 = permission_denied
handler404 = page_not_found
handler500 = page_error

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', blog.views.index),
    path('blog/', include((blog_url, 'blog'), namespace='blog')),
    path('users/', include((users_url, 'users'), namespace='users')),
    # NOTE(review): app_name 'users' paired with the society URLs looks like
    # a copy-paste slip -- presumably it should be 'society'; check reverse()
    # / {% url %} usages before changing.
    path('society/', include((society_url, 'users'), namespace='society')),
    # Serve static resources through Django (typically development only).
    url(r'^static/(?P<path>.*)$', static.serve,
        {'document_root': settings.STATIC_ROOT}, name='static'),
]
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette.by import By
from gaiatest import GaiaTestCase
from gaiatest.apps.browser.app import Browser
from gaiatest.apps.persona.app import Persona
from gaiatest.mocks.persona_test_user import PersonaTestUser
class TestPersonaCookie(GaiaTestCase):
    """Smoke test: a Persona login cookie survives a page refresh in 123done."""

    # Sign-in button shown while logged out.
    _logged_out_button_locator = (By.CSS_SELECTOR, '#signinhere .btn-persona')
    # Element shown only once the user is logged in.
    _logged_in_button_locator = (By.ID, 'loggedin')

    def setUp(self):
        GaiaTestCase.setUp(self)
        self.connect_to_network()

        # Generate a verified PersonaTestUser account against the test
        # browserid/verifier endpoints (the original comment said
        # "unverified", but verified=True is passed).
        self.user = PersonaTestUser().create_user(
            verified=True, env={"browserid": "firefoxos.persona.org", "verifier": "firefoxos.123done.org"}
        )

    def test_persona_cookie(self):
        """
        Smoketest of cookie handling/Persona integration.
        Log in with a Persona user; after refreshing, 123done should still
        be logged in (cookie retained).
        """
        browser = Browser(self.marionette)
        browser.launch()
        browser.go_to_url('http://firefoxos.123done.org', timeout=120)
        browser.switch_to_content()

        # Wait for the page, then start the Persona login flow.
        self.wait_for_element_displayed(*self._logged_out_button_locator, timeout=120)
        login_button = self.marionette.find_element(*self._logged_out_button_locator)
        login_button.tap()

        persona = Persona(self.marionette)
        persona.switch_to_persona_frame()
        persona.login(self.user.email, self.user.password)

        # Back to browser content; confirm the logged-in state appears.
        browser.switch_to_content()
        self.wait_for_element_displayed(*self._logged_in_button_locator)
        browser.switch_to_chrome()

        # Refresh the page.
        browser.tap_go_button()

        # Now we expect B2G to retain the Persona cookie and remain logged in.
        browser.switch_to_content()
        self.wait_for_element_displayed(*self._logged_in_button_locator)
|
#####################################################################
# Code to produce statistical outputs
# For use with test / training datasets : HAPT-data-set-DU
# Author : Benjamin Jones, b.t.jones@durham.ac.uk, zkbb46
# Based on code from the CV2 open source library and code provided by Dr Toby Breckon,
# https://github.com/tobybreckon/python-examples-ml
# Copyright (c) 2014 / 2016 School of Engineering & Computing Science,
# Durham University, UK
# License : LGPL - http://www.gnu.org/licenses/lgpl.html
#####################################################################
#!! WARNING !! - This code depends on the sklearn package. Will not run without it.
# This code is not necessary for the assignment as all results are outputted to text files.
# The code is included for interest only.
#####################################################################
import csv
import cv2
import os
import numpy as np
import matplotlib.pyplot as pyplot
# Activity label -> numeric class id used by the classifiers (HAPT dataset).
classes = {'WALKING' : 1, 'WALKING_UPSTAIRS' : 2, 'WALKING_DOWNSTAIRS' : 3, 'SITTING' : 4, 'STANDING' : 5, 'LAYING' : 6,'STAND_TO_SIT' : 7, 'SIT_TO_STAND' : 8, 'SIT_TO_LIE' : 9, 'LIE_TO_SIT' : 10, 'STAND_TO_LIE' : 11, 'LIE_TO_STAND' :12}
# Reverse lookup: numeric class id -> activity label.
inv_classes = {v: k for k, v in classes.items()}
def print_accuracy_results(predicted_labels, true_labels):
    """Print per-class prediction counts and the overall accuracy.

    Parameters
    ----------
    predicted_labels, true_labels : parallel sequences of numeric class ids
        (values of the module-level ``classes`` mapping). Every 100th
        example is echoed so long runs show progress.
    """
    if len(predicted_labels) != len(true_labels):
        print("WARNING: PREDICTED_LABELS MUST BE SAME LENGTH AS TRUE_LABELS")

    # One zeroed counter per known class. Replaces three hand-written
    # 12-entry dict literals that had to be kept in sync with ``classes``.
    class_prediction_count = dict.fromkeys(classes, 0)
    class_correct_count = dict.fromkeys(classes, 0)
    class_incorrect_count = dict.fromkeys(classes, 0)

    for i in range(len(true_labels)):
        predicted_name = inv_classes[int(predicted_labels[i])]
        # Cast the true label as well: the original cast only predictions,
        # so float-typed true labels raised KeyError in the progress print.
        true_name = inv_classes[int(true_labels[i])]
        class_prediction_count[predicted_name] += 1
        if predicted_labels[i] == true_labels[i]:
            class_correct_count[predicted_name] += 1
            verdict = "CORRECT"
        else:
            class_incorrect_count[predicted_name] += 1
            verdict = "INCORRECT"
        if (i + 1) % 100 == 0:
            print("Test data example : {:5} : result = {:20s} : actual = {:20s} -----> {}".format(
                (i + 1), predicted_name, true_name, verdict))

    # Sanity check: correct + incorrect must add up to the number of rows.
    number_of_correct = sum(class_correct_count.values())
    number_of_results = number_of_correct + sum(class_incorrect_count.values())
    print("Number of results = ", number_of_results, "\nNumber of data rows = ", len(predicted_labels))
    print("class_prediction_count: ", class_prediction_count)
    print("class_correct_count: ", class_correct_count)
    print("class_incorrect_count: ", class_incorrect_count)
    if number_of_results:
        print("Accuracy : {}%".format(str(round(number_of_correct / number_of_results * 100, 2))))
    else:
        # Guard against ZeroDivisionError on empty input.
        print("Accuracy : n/a (no results)")
def gen_report(predicted_labels, true_labels):
    """Print and return (confusion_matrix, classification_report).

    Both are computed with scikit-learn (imported lazily, since sklearn
    is an optional dependency for this module).
    """
    from sklearn.metrics import classification_report, confusion_matrix

    confusion = confusion_matrix(true_labels, predicted_labels)
    report = classification_report(true_labels, predicted_labels)
    print(confusion)
    print(report)
    return (confusion, report)
import os
from flask import Flask, request, g, url_for, json
from flask_cors import CORS
from flask_restful import reqparse, Resource, Api
from flask_sqlalchemy import SQLAlchemy
from flask_httpauth import HTTPBasicAuth
from passlib.apps import custom_app_context as pwd_context
from itsdangerous import (TimedJSONWebSignatureSerializer
as Serializer, BadSignature, SignatureExpired)
# Flask application and extension setup.
app = Flask(__name__)
# Used to sign auth tokens -- replace with a real secret in production.
app.config['SECRET_KEY'] = 'something really really secret'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
# NOTE(review): SQLALCHEMY_COMMIT_ON_TEARDOWN is removed in recent
# Flask-SQLAlchemy releases -- confirm the installed version honours it.
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
auth = HTTPBasicAuth()
db = SQLAlchemy(app)
# REST API wrapper around the Flask app.
api = Api(app)
# Enable cross-origin requests for all routes.
CORS(app)
class User(db.Model):
    """Application user: login credentials plus an optional group id."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): indexed but not unique -- duplicate usernames are
    # possible at the DB level; confirm whether unique=True was intended.
    username = db.Column(db.String(32), index=True, nullable=False)
    password_hash = db.Column(db.String(64), nullable=False)
    group_id = db.Column(db.Integer)

    def hash_password(self, password):
        """
        Given a password, hash and store under user.password_hash
        """
        # NOTE(review): passlib's encrypt() is a deprecated alias of hash().
        self.password_hash = pwd_context.encrypt(password)

    def verify_password(self, password):
        """
        Check a password against the hash stored with this user
        """
        return pwd_context.verify(password, self.password_hash)

    def generate_auth_token(self, expiration=600):
        """
        Generate a signed, time-limited auth token embedding this user's id.
        """
        s = Serializer(app.config['SECRET_KEY'], expires_in=expiration)
        return s.dumps({'id': self.id})

    @staticmethod
    def verify_auth_token(token):
        """
        Given a token, return the matching User if valid or None if invalid
        """
        s = Serializer(app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except SignatureExpired:
            return None  # valid token, but expired
        except BadSignature:
            return None  # invalid token
        user = User.query.get(data['id'])
        return user
class UserPerm(db.Model):
    """Association row linking a user directly to a permission."""
    __tablename__ = 'users_perm'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, nullable=False)
    perm_id = db.Column(db.Integer, nullable=False)
class Group(db.Model):
    """A named group that users may belong to (via User.group_id)."""
    __tablename__ = 'groups'
    id = db.Column(db.Integer, primary_key=True)
    group_name = db.Column(db.String(32), index=True, nullable=False)
class GroupPerm(db.Model):
    """Association row linking a group to a permission."""
    __tablename__ = 'groups_perm'
    id = db.Column(db.Integer, primary_key=True)
    group_id = db.Column(db.Integer, nullable=False)
    perm_id = db.Column(db.Integer, nullable=False)
class Permission(db.Model):
    """A named permission that users/groups can be granted."""
    __tablename__ = 'permissions'
    id = db.Column(db.Integer, primary_key=True)
    perm_name = db.Column(db.String(32), index=True, nullable=False)
class ObjectPerm(db.Model):
    """Association row mapping a protected resource path to a permission."""
    __tablename__ = "object_perm"
    id = db.Column(db.Integer, primary_key=True)
    perm_id = db.Column(db.Integer, nullable=False)
    object_path = db.Column(db.String(128), nullable=False)
# create db for shitty testing
# NOTE(review): tables are only created when the db file is absent; schema
# changes to an existing database are never migrated.
if not os.path.exists('db.sqlite'):
    db.create_all()
@auth.verify_password
def verify_password(username_or_token, password):
    """Flask-HTTPAuth callback: accept either a token or username/password.

    On success, stores the authenticated User on flask.g and returns True.
    """
    # Token authentication takes precedence.
    token_user = User.verify_auth_token(username_or_token)
    if token_user:
        g.user = token_user
        return True
    # Fall back to username/password authentication.
    candidate = User.query.filter_by(username=username_or_token).first()
    if candidate is None or not candidate.verify_password(password):
        return False
    g.user = candidate
    return True
class Users(Resource):
    """
    Resources for working with users
    """
    def get(self):
        """
        Return a list of all users (id and username only).
        """
        users = User.query.all()
        user_list = list()
        for user in users:
            # BUGFIX: build a fresh dict per user; the original reused one
            # dict, so every list entry aliased the last user's data.
            user_list.append({'user_id': user.id,
                              'username': user.username})
        return {'users' : user_list}
    def post(self):
        """
        Add a new user to the db.  Required: username, password.
        Optional: group (must exist), permissions_list (names must exist).
        """
        options = ['username',
                   'password',
                   'group',
                   'permissions_list']
        data = request.get_json()
        for k in data.keys():
            if k not in options:
                return {'error': 'unknown option: {}'.format(k)}, 400
        username = data.get('username')
        password = data.get('password')
        group = data.get('group')
        group_id = None
        permissions_list = data.get('permissions_list')
        perm_id_list = list()
        if username is None or password is None:
            # required fields
            return {'error': 'missing arguments'}, 400
        if User.query.filter_by(username=username).first() is not None:
            # user with name already in DB
            return {'error': 'existing user'}, 400
        if group is not None:
            group_query = Group.query.filter_by(group_name=group).first()
            if group_query is None:
                # group with name does not exist
                return {'error': 'group does not exist'}, 400
            else:
                group_id = group_query.id
        if permissions_list is not None:
            # iterate through permissions, make sure they all exist
            for permission in permissions_list:
                # BUGFIX: the original never assigned perm_query before using
                # it below, raising NameError on every valid permission.
                perm_query = Permission.query.filter_by(perm_name=permission).first()
                if perm_query is None:
                    return {'error': 'permission {} does not exist'.format(permission)}
                else:
                    perm_id_list.append(perm_query.id)
        user = User(username=username, group_id=group_id)
        user.hash_password(password)
        db.session.add(user)
        db.session.commit()
        # create references for permissions that do exist
        if permissions_list is not None:
            for perm_id in perm_id_list:
                user_perm = UserPerm(user_id=user.id, perm_id=perm_id)
                db.session.add(user_perm)
            db.session.commit()
        return {'user_id': user.id}
class UsersByID(Resource):
    """
    Resources for working with individual users
    """
    def get(self, user_id):
        """
        Get all fields for a specific user
        """
        user = User.query.filter_by(id=user_id).first()
        if user is None:
            return {'error': 'user not found'}, 400
        permissions_id_list = list()
        permissions_list = list()
        # Resolve the user's permission ids, then map each id to its name.
        for assoc_perm in UserPerm.query.filter_by(user_id=user.id).all():
            permissions_id_list.append(assoc_perm.perm_id)
        for perm_id in permissions_id_list:
            perm = Permission.query.filter_by(id=perm_id).first()
            permissions_list.append(perm.perm_name)
        return {'user_id': user.id,
                'username': user.username,
                'group_id': user.group_id,
                'permissions_list': permissions_list}, 200
    def patch(self, user_id):
        """
        Update the info for an existing user.  All fields optional; omitted
        fields keep their current values.
        """
        user = User.query.filter_by(id=user_id).first()
        if user is None:
            return {'error': 'user not found'}, 400
        options = ['username',
                   'password',
                   'group',
                   'permissions_list']
        data = request.get_json()
        for k in data.keys():
            if k not in options:
                return {'error': 'unknown option: {}'.format(k)}, 400
        username = data.get('username')
        password = data.get('password')
        group_name = data.get('group')
        group_id = 0
        permissions_list = data.get('permissions_list')
        # check username
        if username is not None and username != user.username:
            if User.query.filter_by(username=username).first() is not None:
                return {'error': 'username already exists'}
        else:
            username = user.username
        # check group
        if group_name is not None:
            group = Group.query.filter_by(group_name=group_name).first()
            if group is None:
                return {'error': 'group does not exist'}
            else:
                group_id = group.id
        else:
            group_id = user.group_id
        # check permissions list
        perm_id_list = list()
        if permissions_list is not None:
            for permission in permissions_list:
                perm_query = Permission.query.filter_by(perm_name=permission).first()
                if perm_query is None:
                    return {'error': 'permission {} does not exist'.format(permission)}
                else:
                    perm_id_list.append(perm_query.id)
        # update perms (replace-all semantics: old associations are removed)
        if permissions_list is not None:
            # remove old perms
            for perm in UserPerm.query.filter_by(user_id=user_id).all():
                db.session.delete(perm)
            db.session.commit()
            # add new perms
            for perm_id in perm_id_list:
                user_perm = UserPerm(user_id=user.id, perm_id=perm_id)
                db.session.add(user_perm)
            db.session.commit()
        # update password
        if password is not None:
            user.hash_password(password)
        # update group_id
        user.group_id = group_id
        # update username
        user.username = username
        db.session.commit()
        return self.get(user_id)
class Groups(Resource):
    """
    Resources for working with groups
    """
    def get(self):
        """
        Returns a list of all group names
        """
        groups = Group.query.all()
        group_list = list()
        for group in groups:
            # BUGFIX: build a fresh dict per group; the original reused one
            # dict, so every list entry aliased the last group's data.
            group_list.append({'group_id': group.id,
                               'group_name': group.group_name})
        return {'groups' : group_list}
    def post(self):
        """
        Registers a new group.  Required: group_name.
        Optional: permissions_list (names must exist).
        """
        options = ['group_name',
                   'permissions_list']
        data = request.get_json()
        for k in data.keys():
            if k not in options:
                return {'error': 'unknown option: {}'.format(k)}, 400
        group_name = data.get('group_name')
        permissions_list = data.get('permissions_list')
        perm_id_list = list()
        if group_name is None:
            # required fields
            return {'error': 'missing arguments'}, 400
        if Group.query.filter_by(group_name=group_name).first() is not None:
            # group with name already in DB
            # BUGFIX: message previously said 'existing user'
            return {'error': 'existing group'}, 400
        if permissions_list is not None:
            # iterate through permissions, make sure they all exist
            for permission in permissions_list:
                perm_query = Permission.query.filter_by(perm_name=permission).first()
                if perm_query is None:
                    return {'error': 'permission {} does not exist'.format(permission)}
                else:
                    perm_id_list.append(perm_query.id)
        group = Group(group_name=group_name)
        db.session.add(group)
        db.session.commit()
        # create references for permissions that do exist
        if permissions_list is not None:
            for perm_id in perm_id_list:
                group_perm = GroupPerm(group_id=group.id, perm_id=perm_id)
                db.session.add(group_perm)
            db.session.commit()
        return {'group_id': group.id}
class GroupsByID(Resource):
    """
    Resources for working with individual groups
    """
    def get(self, group_id):
        """
        Get all fields for a specific group
        """
        group = Group.query.filter_by(id=group_id).first()
        if group is None:
            return {'error': 'group not found'}, 400
        permissions_id_list = list()
        permissions_list = list()
        # Resolve the group's permission ids, then map each id to its name.
        for assoc_perm in GroupPerm.query.filter_by(group_id=group.id).all():
            permissions_id_list.append(assoc_perm.perm_id)
        for perm_id in permissions_id_list:
            perm = Permission.query.filter_by(id=perm_id).first()
            permissions_list.append(perm.perm_name)
        return {'group_id': group.id,
                'group_name': group.group_name,
                'permissions_list': permissions_list}, 200
    def patch(self, group_id):
        """
        Update the info for an existing group.  All fields optional; omitted
        fields keep their current values.
        """
        group = Group.query.filter_by(id=group_id).first()
        if group is None:
            return {'error': 'group not found'}, 400
        options = ['group_name',
                   'permissions_list']
        data = request.get_json()
        for k in data.keys():
            if k not in options:
                return {'error': 'unknown option: {}'.format(k)}, 400
        group_name = data.get('group_name')
        permissions_list = data.get('permissions_list')
        # check group_name
        if group_name is not None and group_name != group.group_name:
            if Group.query.filter_by(group_name=group_name).first() is not None:
                return {'error': 'group_name already exists'}
        else:
            group_name = group.group_name
        # check permissions list
        perm_id_list = list()
        if permissions_list is not None:
            for permission in permissions_list:
                perm_query = Permission.query.filter_by(perm_name=permission).first()
                if perm_query is None:
                    return {'error': 'permission {} does not exist'.format(permission)}
                else:
                    perm_id_list.append(perm_query.id)
        # update perms (replace-all semantics: old associations are removed)
        if permissions_list is not None:
            # remove old perms
            for perm in GroupPerm.query.filter_by(group_id=group_id).all():
                db.session.delete(perm)
            db.session.commit()
            # add new perms
            for perm_id in perm_id_list:
                group_perm = GroupPerm(group_id=group.id, perm_id=perm_id)
                db.session.add(group_perm)
            db.session.commit()
        # update group_name
        group.group_name = group_name
        db.session.commit()
        return self.get(group_id)
class Permissions(Resource):
    """
    Resources for working with permissions
    """
    def get(self):
        """
        Returns a list of registered permissions
        """
        perms = Permission.query.all()
        perm_list = list()
        for perm in perms:
            # BUGFIX: build a fresh dict per permission; the original reused
            # one dict, so every list entry aliased the last permission.
            perm_list.append({'perm_id': perm.id,
                              'perm_name': perm.perm_name})
        return {'perms' : perm_list}
    def post(self):
        """
        Registers a new permission.  Required: perm_name.
        Optional: object_path_list of resource paths to protect.
        """
        options = ['perm_name',
                   'object_path_list']
        data = request.get_json()
        for k in data.keys():
            if k not in options:
                return {'error': 'unknown option: {}'.format(k)}, 400
        perm_name = data.get('perm_name')
        object_path_list = data.get('object_path_list')
        if perm_name is None:
            # required fields
            return {'error': 'missing arguments'}, 400
        if Permission.query.filter_by(perm_name=perm_name).first() is not None:
            # perm with name already in DB
            return {'error': 'existing permission'}, 400
        perm = Permission(perm_name=perm_name)
        db.session.add(perm)
        db.session.commit()
        if object_path_list is not None:
            # add object/permissions associations
            for object_path in object_path_list:
                object_perm_assoc = ObjectPerm(perm_id=perm.id, object_path=object_path)
                db.session.add(object_perm_assoc)
            db.session.commit()
        return {'perm_id': perm.id}
class PermissionsByID(Resource):
    """
    Resources for working with individual permissions
    """
    def get(self, perm_id):
        """
        Get all fields for a specific permission
        """
        perm = Permission.query.filter_by(id=perm_id).first()
        if perm is None:
            return {'error': 'permission not found'}, 400
        object_path_list = list()
        for assoc_object in ObjectPerm.query.filter_by(perm_id=perm.id).all():
            object_path_list.append(assoc_object.object_path)
        return {'perm_id': perm.id,
                'perm_name': perm.perm_name,
                'object_path_list': object_path_list}, 200
    def patch(self, perm_id):
        """
        Update the info for an existing permission.  All fields optional;
        omitted fields keep their current values.
        """
        perm = Permission.query.filter_by(id=perm_id).first()
        if perm is None:
            return {'error': 'permission not found'}, 400
        options = ['perm_name',
                   'object_path_list']
        data = request.get_json()
        for k in data.keys():
            if k not in options:
                return {'error': 'unknown option: {}'.format(k)}, 400
        perm_name = data.get('perm_name')
        object_path_list = data.get('object_path_list')
        # check perm_name
        if perm_name is not None and perm_name != perm.perm_name:
            if Permission.query.filter_by(perm_name=perm_name).first() is not None:
                return {'error': 'perm_name already exists'}
        else:
            perm_name = perm.perm_name
        # update objects (replace-all semantics: old associations are removed)
        if object_path_list is not None:
            #remove old objects
            for object_perm in ObjectPerm.query.filter_by(perm_id=perm.id).all():
                db.session.delete(object_perm)
            db.session.commit()
            # add new objects
            for object_path in object_path_list:
                object_perm = ObjectPerm(perm_id=perm.id, object_path=object_path)
                db.session.add(object_perm)
            db.session.commit()
        # update perm_name
        perm.perm_name = perm_name
        db.session.commit()
        return self.get(perm_id)
class AuthToken(Resource):
    """
    Handles token generation
    """
    @auth.login_required
    def post(self):
        # get a token for the already-authenticated user (g.user is set by
        # the verify_password callback)
        token = g.user.generate_auth_token(600)
        # NOTE(review): assumes the serializer returns bytes — .decode would
        # fail if it returned str; confirm against the itsdangerous version.
        return {'token': token.decode('ascii'), 'duration': 600}
class AuthResource(Resource):
    """
    Authenticates user with resources
    """
    @auth.login_required
    def post(self):
        """
        Return has_access True or False depending on permissions.
        Group permissions are checked before per-user permissions.
        """
        options = ['resource_path']
        data = request.get_json()
        for k in data.keys():
            if k not in options:
                return {'error': 'unknown option: {}'.format(k)}, 400
        resource_path = data.get('resource_path')
        user_id = g.user.id
        # check if this API is even registered under a permission
        assoc_object_perm_list = ObjectPerm.query.filter_by(object_path=resource_path).all()
        # BUGFIX: .all() returns an empty list (never None) when nothing
        # matches, so the original None check could never deny access here.
        if not assoc_object_perm_list:
            return {'has_access': False}, 400
        # if it does, list the matching ids
        perm_id_list = [assoc.perm_id for assoc in assoc_object_perm_list]
        # if the user has a group, check the permissions there first
        if g.user.group_id is not None:
            for perm_id in perm_id_list:
                if GroupPerm.query.filter_by(group_id=g.user.group_id, perm_id=perm_id).first() is not None:
                    return {'has_access': True}, 200
        # check user perms next
        for perm_id in perm_id_list:
            # BUGFIX: the original wrote `.first` without parentheses, which
            # compares the bound method (always non-None) and granted access
            # to every authenticated user.
            if UserPerm.query.filter_by(user_id=user_id, perm_id=perm_id).first() is not None:
                return {'has_access': True}, 200
        # at this point it fails
        return {'has_access': False}, 400
class AuthPermission(Resource):
    """
    Authenticates user with permissions by name
    """
    @auth.login_required
    def post(self):
        """
        Return has_access True or False depending on permissions.
        Group permissions are checked before per-user permissions.
        """
        options = ['perm_name']
        data = request.get_json()
        for k in data.keys():
            if k not in options:
                return {'error': 'unknown option: {}'.format(k)}, 400
        perm_name = data.get('perm_name')
        user_id = g.user.id
        # check if the permission even exists
        perm_query = Permission.query.filter_by(perm_name=perm_name).first()
        perm_id = 0
        if perm_query is None:
            return {'has_access': False}, 400
        else:
            perm_id = perm_query.id
        # if the user has a group, check the permissions there first
        if g.user.group_id is not None:
            if GroupPerm.query.filter_by(group_id=g.user.group_id, perm_id=perm_id).first() is not None:
                return {'has_access': True}, 200
        # check user perms next
        if UserPerm.query.filter_by(user_id=user_id, perm_id=perm_id).first() is not None:
            return {'has_access': True}, 200
        # at this point it fails
        return {'has_access': False}, 400
# api resources
# Route table: collection endpoints, per-id endpoints, and auth helpers.
api.add_resource(Users, '/port/users')
api.add_resource(UsersByID, '/port/users/<int:user_id>')
api.add_resource(Groups, '/port/groups')
api.add_resource(GroupsByID, '/port/groups/<int:group_id>')
api.add_resource(Permissions, '/port/permissions')
api.add_resource(PermissionsByID, '/port/permissions/<int:perm_id>')
api.add_resource(AuthToken, '/port/auth/token')
api.add_resource(AuthResource, '/port/auth/resource')
api.add_resource(AuthPermission, '/port/auth/permission')
|
from bs4 import BeautifulSoup as bs
class Scraper:
    """
    Its only purpose is to get all links and titles from search results
    """
    def __init__(self, site_string, service):
        self.service = service
        # BUGFIX: self.soup was left undefined when site_string was empty,
        # making get_search_results() raise AttributeError. Default to None.
        self.soup = None
        if len(site_string) > 0:
            self.soup = bs(site_string, 'html.parser')
    def get_search_results(self):
        """Return the list of offer URLs found for the configured service.

        Returns an empty list when no HTML was supplied or the service is
        not recognised.
        """
        links = []
        if self.soup is None:
            return links
        if self.service == "otodom":
            for tag in self.soup.find_all("article", {"class": "offer-item"}):
                if tag.name == "article":
                    links.append(tag["data-url"])
        elif self.service == "gratka":
            for tag in self.soup.find_all("a", {"class": "teaser"}):
                if tag.name == "a":
                    links.append(tag['href'])
        return links
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 0.8.2
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# language_info:
# codemirror_mode:
# name: ipython
# version: 3
# file_extension: .py
# mimetype: text/x-python
# name: python
# nbconvert_exporter: python
# pygments_lexer: ipython3
# version: 3.6.5
# nteract:
# version: 0.11.6
# ---
# %load_ext autoreload
# %autoreload 2
import sys
sys.path.append("..")
# ### You can get extra information from the profiler if you pass verbose=True to Optimus
# Create optimus
from optimus import Optimus
op = Optimus(master="local[*]", app_name = "optimus" , checkpoint= True)
df = op.load.csv("data/Meteorite_Landings.csv").h_repartition()
df.table(10)
# ### Profiler dump mode (faster): handles each column's data type as present in the dataframe
op.profiler.run(df, "name", infer=False)
# ### Profiler smart mode (slower): tries to infer each column's data type and presents extra data accordingly. For example, datetime columns get extra histograms about minutes, day, week and month. It can also detect array types in data.
op.profiler.run(df, "*",infer=True)
# ### Plot profile for a specific column
op.profiler.run(df, "reclat")
# ### Output a json file
# ### Plot histogram for multiple columns
df.plots.hist(["id", "reclong"], 20)
df.plots.frequency(["id", "reclong"], 10)
# NOTE(review): `df.plot` here vs `df.plots` above — confirm which accessor is correct.
df.plot.correlation(["id","mass (g)", "reclat"])
df.correlation(["id","mass (g)", "reclat"], output="array")
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from collections import OrderedDict
from knack.log import get_logger
logger = get_logger(__name__)
def import_pipeline_output_format(result):
    """Format import-pipeline command output as table rows."""
    return _output_format(result, _import_pipeline_format_group)
def export_pipeline_output_format(result):
    """Format export-pipeline command output as table rows."""
    return _output_format(result, _export_pipeline_format_group)
def pipeline_run_output_format(result):
    """Format pipeline-run command output as table rows."""
    return _output_format(result, _pipeline_run_format_group)
def _import_pipeline_format_group(item):
    """Build one ordered table row for an import pipeline resource."""
    return OrderedDict([
        ('NAME', _get_value(item, 'name')),
        ('PROVISIONING_STATE', _get_value(item, 'provisioningState')),
        ('STORAGE_ACCOUNT', _get_value(item, 'source', 'uri')),
        ('SOURCE_TRIGGER', _get_value(item, 'trigger', 'sourceTrigger', 'status'))
    ])
def _export_pipeline_format_group(item):
    """Build one ordered table row for an export pipeline resource."""
    return OrderedDict([
        ('NAME', _get_value(item, 'name')),
        ('PROVISIONING_STATE', _get_value(item, 'provisioningState')),
        ('STORAGE_ACCOUNT', _get_value(item, 'target', 'uri'))
    ])
def _pipeline_run_format_group(item):
    """Build one ordered table row for a pipeline run.

    Runs of import pipelines get an extra SOURCE_TRIGGER column.
    """
    if "importPipelines" in _get_value(item, 'request', 'pipelineResourceId'):
        d = OrderedDict([
            ('NAME', _get_value(item, 'name')),
            # last segment of the pipeline resource id is the pipeline name
            ('PIPELINE', _get_value(item, 'request', 'pipelineResourceId').split('/', maxsplit=-1)[-1]),
            # drop the fractional-seconds suffix from the timestamp
            ('START_TIME', _get_value(item, 'response', 'startTime').split('.', maxsplit=1)[0]),
            ('DURATION', _get_duration(_get_value(item, 'response', 'startTime'), _get_value(item, 'response', 'finishTime'))),
            # NOTE(review): infers "source-triggered" from an underscore in
            # the run name — confirm this naming convention still holds.
            ('SOURCE_TRIGGER', str('_' in _get_value(item, 'name'))),
            ('STATUS', _get_value(item, 'response', 'status')),
            ('ERROR_MESSAGE', _get_value(item, 'response', 'pipelineRunErrorMessage'))
        ])
    else:
        d = OrderedDict([
            ('NAME', _get_value(item, 'name')),
            ('PIPELINE', _get_value(item, 'request', 'pipelineResourceId').split('/', maxsplit=-1)[-1]),
            ('START_TIME', _get_value(item, 'response', 'startTime').split('.', maxsplit=1)[0]),
            ('DURATION', _get_duration(_get_value(item, 'response', 'startTime'), _get_value(item, 'response', 'finishTime'))),
            ('STATUS', _get_value(item, 'response', 'status')),
            ('ERROR_MESSAGE', _get_value(item, 'response', 'pipelineRunErrorMessage'))
        ])
    return d
def _output_format(result, format_group):
if 'value' in result and isinstance(result['value'], list):
result = result['value']
obj_list = result if isinstance(result, list) else [result]
return [format_group(item) for item in obj_list]
def _get_value(item, *args):
'''Get a nested value from a dict.
:param dict item: The dict object
'''
try:
for arg in args:
item = item[arg]
return str(item) if item or item == 0 else ' '
except (KeyError, TypeError, IndexError):
return ' '
def _get_duration(start_time, finish_time):
'''Takes datetime strings and returns duration'''
from dateutil.parser import parse
try:
duration = parse(finish_time) - parse(start_time)
hours = f'{((24 * duration.days) + (duration.seconds // 3600)):02d}'
minutes = f'{((duration.seconds % 3600) // 60):02d}'
seconds = f'{(duration.seconds % 60):02d}'
return f'{hours}:{minutes}:{seconds}'
except (ValueError, TypeError):
logger.debug('Unable to get duration with start_time %s and finish_time %s', start_time, finish_time)
return ' '
|
import asyncio
import random
import discord
from discord.ext import commands
class Rolldice(commands.Cog):
    """Cog providing a single six-sided dice-roll command."""
    def __init__(self, habbot):
        # keep a reference to the bot instance
        self.habbot = habbot
    #The command (invocable as !rolldice or the alias !roll)
    @commands.command(pass_context=True, aliases=['roll'])
    async def rolldice(self, ctx):
        # random.randint is inclusive on both ends: results are 1-6
        await ctx.send(f':game_die: | You have rolled a **{random.randint(1,6)}!**')
def setup(habbot):
    # discord.py extension entry point: registers the cog on load
    habbot.add_cog(Rolldice(habbot))
"""
Black Pearl: For twisted little pirates
"""
|
#!/usr/bin/env python
#
# slackClient.py
# --------------
#
# Mal Minhas <mal@kano.me>
# Copyright (c) 2018 Kano Computing. All Rights Reserved.
# Licence: GPLv3
#
# Installation:
# --------------
# You will need to create a Slack App first.
# You will need a one time OAuth flow to get hold of your Slack token:
# https://slackapi.github.io/python-slackclient/auth.html#handling-tokens-and-other-sensitive-data
# You can retrieve the token for your app from OAuth&Permissions link
# https://api.slack.com/apps/ACW8VFRB3/oauth?'
# Incoming webhook:
# https://hooks.slack.com/services/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
#
import os
import requests
SLACK_TOKEN_FILE = '.slacktoken'
class SlackClient(object):
    """Minimal Slack Web API client for posting text and uploading images."""
    def __init__(self):
        # Token is cached in a local dotfile after the first prompt.
        self.token = self.getToken(SLACK_TOKEN_FILE,'slack')
    def getToken(self, tokenFile, credType):
        """Return the API token, reading it from tokenFile or prompting once
        and caching the answer to tokenFile."""
        if os.path.exists(tokenFile):
            with open(tokenFile,'r') as f:
                token = f.read()
            assert(len(token))
        else:
            # BUGFIX: getpass was used without being imported, so this
            # branch raised NameError on first run.
            import getpass
            token = getpass.getpass('{} token:'.format(credType))
            with open(tokenFile,'w') as f:
                f.write(token)
        return token
    def post_text(self, text, channels):
        """Post `text` to each channel; returns the raw response bodies."""
        assert(self.token)
        responses = []
        for channel in channels:
            response = requests.post(url='https://slack.com/api/chat.postMessage', data=
                    {'token': self.token, 'channel': channel, 'text': text},
                    headers={'Accept': 'application/json'})
            responses.append(response.text)
        return responses
    def post_image(self, filename, channels):
        """Upload an image file to each channel; returns raw response bodies."""
        assert(self.token)
        responses = []
        # BUGFIX: open the file in a with-block so the handle is closed
        # (the original leaked it).
        with open(filename, 'rb') as image:
            f = {'file': (filename, image, 'image/png', {'Expires':'0'})}
            for channel in channels:
                response = requests.post(url='https://slack.com/api/files.upload', data=
                        {'token': self.token, 'channels': channel, 'media': f},
                        headers={'Accept': 'application/json'}, files=f)
                responses.append(response.text)
        return responses
|
import os
import sqlite3
class SQLite():
    """Thin convenience wrapper around a single sqlite3 connection.

    Holds a file path, a database name (stored but unused here), and the
    table targeted by insert(); optionally deletes a pre-existing db file.
    """
    def __init__(self, sqlite_path , sqlite_ddbbname, sqlite_tablename, delete=False):
        self.__path = sqlite_path
        self.__ddbbname = sqlite_ddbbname
        self.__tablename = sqlite_tablename
        self.__conn = None
        if delete and os.path.isfile(sqlite_path):
            os.remove(sqlite_path)
        self.connect()
    def connect(self):
        """Open (or re-open) the connection to the configured path."""
        self.__conn = sqlite3.connect(self.__path)
    def create(self, sqlite_query):
        """Execute a DDL statement (e.g. CREATE TABLE) and commit."""
        dbcursor = self.__conn.cursor()
        dbcursor.execute(sqlite_query)
        self.__conn.commit()
    def insert(self, data):
        """Insert one row given a dict of column -> value.

        Values are bound as named parameters, never interpolated into SQL.
        """
        # data = {'column1': 'value1', 'column2': 'value2', 'column3': 'value3'}
        columns = data.keys()
        # 'column1, column3, column2'
        placeholder_columns = ", ".join(data.keys())
        # ':column1, :column3, :column2'
        placeholder_values = ", ".join([":{0}".format(col) for col in columns])
        # BUGFIX: the table name was hard-coded to 'twitter'; use the table
        # this instance was configured with instead.
        sql = "INSERT INTO {table_name} ({placeholder_columns}) VALUES ({placeholder_values})".format(
            table_name=self.__tablename,
            placeholder_columns=placeholder_columns,
            placeholder_values=placeholder_values
        )
        dbcursor = self.__conn.cursor()
        dbcursor.execute(sql, data)
        self.__conn.commit()
    def select(self, strSQL):
        """Run a SELECT and return rows as a list of column->value dicts."""
        dbcursor = self.__conn.cursor()
        dbcursor.execute(strSQL)
        allrows = dbcursor.fetchall()
        data = [dict((dbcursor.description[i][0], value) \
                for i, value in enumerate(row)) for row in allrows]
        return data
    def close(self):
        """Close the underlying connection."""
        self.__conn.close()
__all__ = ['SQLite']
|
# test equality
# Interpreter conformance test: exercises `==` and `!=` across None, bools,
# empty containers, ints, strings and a builtin function, including
# cross-type comparisons (which are all unequal without raising).
# The printed output is the expected result — do not alter any line.
print(None == None)
print(False == None)
print(False == False)
print(False == True)
print(() == ())
print(() == [])
print([] == [])
print(() == {})
print({} == ())
print(() == None)
print(() == False)
print(() == print)
print([] == None)
print([] == False)
print([] == print)
print({} == None)
print({} == False)
print({} == print)
print(1 == 1)
print(1 == 2)
print(1 == ())
print(1 == [])
print(1 == {})
print(1 == 'a')
print('a' == 'a')
print('a' == 'ab')
print('a' == 1)
print('a' == ())
# same as above but with !=
print(None != None)
print(False != None)
print(False != False)
print(False != True)
print(() != ())
print(() != [])
print([] != [])
print(() != {})
print({} != ())
print(() != None)
print(() != False)
print(() != print)
print([] != None)
print([] != False)
print([] != print)
print({} != None)
print({} != False)
print({} != print)
print(1 != 1)
print(1 != 2)
print(1 != ())
print(1 != [])
print(1 != {})
print(1 != 'a')
print('a' != 'a')
print('a' != 'ab')
print('a' != 1)
print('a' != ())
print("PASS")
import cv2
# Read the original image
img = cv2.imread(r'C:\Users\harrizazham98\Desktop\OpenCVForPython\resources\Day 3\geometric-shape-names-types-definitions.png')
# Convert to grayscale before edge detection (the original comment said
# "blur", but this line is a BGR-to-gray conversion).
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Gradient x is calculated:
# the depth of the output is set to CV_16S to avoid overflow
# CV_16S = one channel of 2-byte signed integers (16-bit signed integers)
gradient_x = cv2.Sobel(gray_image, cv2.CV_16S, 1, 0, 3)
gradient_y = cv2.Sobel(gray_image, cv2.CV_16S, 0, 1, 3)
# Conversion to an unsigned 8-bit type:
abs_gradient_x = cv2.convertScaleAbs(gradient_x)
abs_gradient_y = cv2.convertScaleAbs(gradient_y)
# Combine the two images using the same weight:
sobel_image = cv2.addWeighted(abs_gradient_x, 0.5, abs_gradient_y, 0.5, 0)
# Display Sobel Edge Detection Images
cv2.imshow('Sobel X', abs_gradient_x)
cv2.imshow('Sobel Y', abs_gradient_y)
cv2.imshow('Sobel X Y using Sobel() function', sobel_image)
# Block until a key is pressed so the windows stay open.
cv2.waitKey(0)
class Config(object):
    """Base configuration shared by all environments."""
    TESTING = False
class ProductionConfig(Config):
    """Production settings."""
    DATABASE_URI = 'mysql://user@localhost/foo'
    # BUGFIX: the original line read `API_KEY =` with no value — a syntax
    # error. Default to None; supply the real key via deployment config.
    API_KEY = None
class DevelopmentConfig(Config):
    """Local development settings."""
    DATABASE_URI = "sqlite:////tmp/foo.db"
class TestingConfig(Config):
    """Settings for the test suite (in-memory database)."""
    DATABASE_URI = 'sqlite:///:memory:'
    TESTING = True
|
import os
def create_cell(first, second):
    """Pairwise-concatenate the symbols of two CYK table cells.

    Returns the empty set when either input cell is empty.
    """
    if not first or not second:
        return set()
    return {a + b for a in first for b in second}
def read_grammar(filename="./grammar.txt"):
    """Parse a CNF grammar file into (variable_rules, terminal_rules).

    Each line has the form ``A -> BC | a``.  Alternatives that are entirely
    lowercase become terminal rules; the rest become variable rules.
    """
    path = os.path.join(os.curdir, filename)
    v_rules = []
    t_rules = []
    with open(path) as grammar:
        for rule in grammar.readlines():
            left, right = rule.split(" -> ")
            # right[:-1] drops the trailing newline (assumes each line has one)
            for alternative in right[:-1].split(" | "):
                bucket = t_rules if str.islower(alternative) else v_rules
                bucket.append([left, alternative])
    return v_rules, t_rules
def read_input(filename="./input.txt"):
    """Read candidate strings, one per line.

    The final character of every line is stripped (assumes a trailing
    newline on each line, matching the grammar reader's convention).
    """
    path = os.path.join(os.curdir, filename)
    with open(path) as inp:
        return [line[:-1] for line in inp.readlines()]
def cyk_alg(varies, terms, inp):
    """Run the CYK parsing algorithm.

    varies: variable rules [[lhs, rhs], ...] where rhs is a pair of variables.
    terms: terminal rules [[lhs, terminal], ...].
    inp: the input string.
    Returns the triangular CYK table (row r holds sets for spans of length r+1).
    """
    n = len(inp)
    lhs_names = [rule[0] for rule in varies]
    rhs_pairs = [rule[1] for rule in varies]
    table = [[set() for _ in range(n - row)] for row in range(n)]
    # Base row: variables that produce each single terminal.
    for col in range(n):
        for lhs, terminal in terms:
            if inp[col] == terminal:
                table[0][col].add(lhs)
    # Fill longer spans from all ways of splitting them into two shorter ones.
    for row in range(1, n):
        for col in range(n - row):
            for split in range(row):
                left_cell = table[split][col]
                right_cell = table[row - split - 1][col + split + 1]
                # pairwise concatenation of the two sub-span symbol sets
                for candidate in {a + b for a in left_cell for b in right_cell}:
                    if candidate in rhs_pairs:
                        table[row][col].add(lhs_names[rhs_pairs.index(candidate)])
    return table
def show_result(tab, inp):
    """Pretty-print the CYK table and report grammar membership."""
    # header: the input characters
    for c in inp:
        print("\t{}".format(c), end="\t")
    print()
    for i in range(len(inp)):
        print(i+1, end="")
        # row i has len(inp)-i cells, so printed rows shorten going down
        for c in tab[i]:
            if c == set():
                print("\t{}".format("_"), end="\t")
            else:
                print("\t{}".format(c), end=" ")
        print()
    # the start symbol 'S' in the full-span cell means the input is derivable
    if 'S' in tab[len(inp)-1][0]:
        print("The input belongs to this context free grammar!(T)")
    else:
        print("The input does not belong to this context free grammar!(F)")
if __name__ == '__main__':
    # Parse the grammar, check only the first input line, print the table.
    v, t = read_grammar()
    r = read_input()[0]
    ta = cyk_alg(v, t, r)
    show_result(ta, r)
|
#!/usr/bin/env python2
"""
The GitHub user interface shows me a list of notifications, but it's full of
things I don't care about. This tries to automatically mark as read the
categories of notifications that I don't want to get.
Create `ghcreds.py` containing:
token="asdfjkl"
where token comes from https://github.com/settings/tokens and has the
"notifications" scope.
"""
from github import Github, GithubException
import ghcreds
import requests
import json
# Authenticated client plus counters for the summary printed at the end.
g = Github(ghcreds.token)
fixed = 0
looked = 0
def is_merged(url):
    # Return True when the issue/PR at `url` reports a closed state.
    # (Python 2 code — print statements below.)
    r = requests.get(url)
    if (r.ok):
        repo = json.loads(r.text or r.content)
        # substring check against the "state" field (e.g. "closed")
        if "closed" in repo["state"]:
            return True
        else:
            print "\t" + str(repo["state"])
    # unauthenticated requests are rate-limited; bail out when exhausted
    if r.headers['X-RateLimit-Remaining'] == '0':
        exit("Rate limit exceeded. Try again later.")
    return False
def mark_read(n):
    # Mark notification thread `n` as read and bump the global counter.
    global fixed
    u = 'https://api.github.com/notifications/threads/'
    u += n.id
    u += '?access_token='
    u += ghcreds.token
    # NOTE(review): token-in-query-string auth has been deprecated by
    # GitHub; an Authorization header would be preferred.
    resp = requests.patch(u)
    fixed += 1
user = g.get_user() # Calling this with any args returns a nameduser
notifications = user.get_notifications()
for n in notifications:
    # NOTE(review): `global` at module level is a no-op; `looked` is
    # already a module-level name here.
    global looked
    looked += 1
    # I don't want to be notified of merged changes.
    if is_merged(n.subject.url):
        mark_read(n)
    # I especially don't want to be notified of Homu's merges.
    elif "Auto merge of" in n.subject.title:
        mark_read(n)
print "Examined " + str(looked) + " notifications. "
print "Cleared " + str(fixed) + " spurious notifications!"
|
#!/usr/bin/env python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# [START privateca_revoke_certificate]
import google.cloud.security.privateca_v1 as privateca_v1
def revoke_certificate(
    project_id: str, location: str, ca_pool_name: str, certificate_name: str,
) -> None:
    """
    Revoke an issued certificate. Once revoked, the certificate will become invalid and will expire post its lifetime.
    Args:
        project_id: project ID or project number of the Cloud project you want to use.
        location: location you want to use. For a list of locations, see: https://cloud.google.com/certificate-authority-service/docs/locations.
        ca_pool_name: name for the CA pool which contains the certificate.
        certificate_name: name of the certificate to be revoked.
    """
    client = privateca_v1.CertificateAuthorityServiceClient()
    # Build the fully-qualified resource path of the certificate.
    full_name = client.certificate_path(
        project_id, location, ca_pool_name, certificate_name
    )
    # Request revocation with an explicit revocation reason.
    revoke_request = privateca_v1.RevokeCertificateRequest(
        name=full_name,
        reason=privateca_v1.RevocationReason.PRIVILEGE_WITHDRAWN,
    )
    print("Certificate revoke result:", client.revoke_certificate(request=revoke_request))
# [END privateca_revoke_certificate]
if __name__ == "__main__":
    # CLI usage: revoke_certificate.py PROJECT LOCATION CA_POOL CERT_NAME
    revoke_certificate(
        project_id=sys.argv[1],
        location=sys.argv[2],
        ca_pool_name=sys.argv[3],
        certificate_name=sys.argv[4],
    )
|
import keras
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Activation, Dropout, Flatten, MaxPooling2D
from keras.optimizers import Adam
def build_model():
    """Build and compile the channels-first CNN classifier (40 softmax outputs)."""
    # Declare the full stack once, then feed it layer by layer into Sequential.
    stack = [
        Conv2D(32, (3, 3), activation="relu", input_shape=(1, 112, 92), data_format="channels_first"),
        Conv2D(32, (3, 2), activation="relu", data_format="channels_first"),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Conv2D(32, (3, 3), activation="relu", data_format="channels_first"),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(256, activation="relu"),
        Dropout(0.10),
        Dense(40, activation="softmax"),
    ]
    model = Sequential()
    for layer in stack:
        model.add(layer)
    model.summary()
    model.compile(loss="categorical_crossentropy", optimizer=Adam(), metrics=["accuracy"])
    return model
|
import sys
import os
import csv
import music21
from indexer import client, indexers
from proto import smr_pb2, smr_pb2_grpc
# Absolute path of the directory containing this script.
CURDIR = os.path.abspath(os.path.dirname(__file__))
# Repository root, one level above this script.
PROJROOT = os.path.join(CURDIR, os.pardir)
# Destination directory for the Helsinki ttwi CSV test fixtures.
HELSINKI_TESTS = os.path.join(PROJROOT, "smr", "helsinki-ttwi", "tests")
# Bundled test-data tree and its subdirectories.
TESTDATA = os.path.join(CURDIR, "testdata")
PALESTRINA = os.path.join(TESTDATA, "palestrina_masses")
LEMSTROM = os.path.join(TESTDATA, "lemstrom2011")
QUERIES = os.path.join(TESTDATA, "queries")
def helsinki_ttwi():
    """Generate intra-vector CSV fixtures for the Helsinki ttwi test suite.

    Writes one CSV per Lemstrom leiermann/query file, one per configured
    Palestrina mass excerpt, and one for a synthetic double-leading-tone
    query built on the fly with music21.
    """
    for filename in ("leiermann.xml", "query_a.mid", "query_b.mid", "query_c.mid", "query_d.mid", "query_e.mid", "query_f.mid"):
        filename_without_extension = os.path.splitext(os.path.basename(filename))[0]
        with open(os.path.join(LEMSTROM, filename), "rb") as f:
            data = f.read()
        notes_df = indexers.notes(data)
        vectors_df = indexers.intra_vectors(data, len(notes_df))
        vectors_csv = indexers.intra_vectors_to_csv(vectors_df)
        with open(os.path.join(HELSINKI_TESTS, filename_without_extension + ".csv"), "w", newline="") as csvfile:
            csvfile.write(vectors_csv)
    # (mass path, first note index, last note index) per excerpt.
    palestrina_occs = (
        #("000000000011344_Missa-Primi-toni_Credo_Palestrina-Giovanni-Pierluigi-da_file2.mid", 1238, 1266),)
        ("/Users/davidgarfinkle/elvis-project/patternfinder/music_files/corpus/Palestrina/Primi_toni_Credo_4.mid", 1238, 1266),)
    for mass, start, end in palestrina_occs:
        filename_without_extension = os.path.splitext(os.path.basename(mass))[0]
        #with open(os.path.join(PALESTRINA, "mid", mass), "rb") as f:
        with open(mass, "rb") as f:
            data = f.read()
        # BUG FIX: index the notes of *this* mass; the original reused the
        # notes_df left over from the last iteration of the Lemstrom loop.
        notes_df = indexers.notes(data)
        vectors_df = indexers.intra_vectors(data, len(notes_df))
        # BUG FIX: filter with the per-occurrence bounds instead of the
        # hard-coded 1238/1266 literals (same values today, but now new
        # entries in palestrina_occs actually take effect).
        vectors_df = vectors_df[(vectors_df['startIndex'] >= start) & (vectors_df['endIndex'] <= end)]
        vectors_csv = indexers.intra_vectors_to_csv(vectors_df)
        print(vectors_csv)
        with open(os.path.join(HELSINKI_TESTS, filename_without_extension + ".csv"), "w", newline="") as csvfile:
            csvfile.write(vectors_csv)
    # Build a two-chord double-leading-tone query and export it to MusicXML.
    query = music21.stream.Score()
    query.insert(0, music21.chord.Chord(['C4', 'E4', 'A4', 'C5']))
    query.insert(1, music21.chord.Chord(['B-3', 'F4', 'B-4', 'D5']))
    outpath = query.write('xml')
    with open(outpath, "r") as f:
        data = f.read()
    double_leading_tone_query_notes = indexers.notes(data)
    double_leading_tone_query_vectors = indexers.intra_vectors(data, len(double_leading_tone_query_notes))
    with open(os.path.join(HELSINKI_TESTS, "double_leading_tone_query.csv"), "w", newline="") as csvfile:
        csvfile.write(indexers.intra_vectors_to_csv(double_leading_tone_query_vectors))
def palestrina():
    """Index every Palestrina MIDI file and write protobuf note responses."""
    source_dir = os.path.join(PALESTRINA, "mid")
    target_dir = os.path.join(PALESTRINA, "pb_notes")
    for name in os.listdir(source_dir):
        client.index_notes_write_response(os.path.join(source_dir, name), target_dir)
def lemstrom():
    """Index the Lemstrom leiermann score and its six queries into LEMSTROM.

    Cleanup: the original captured the per-query return value in an unused
    local (`response`); it is now discarded, matching the leiermann call.
    """
    client.index_notes_write_response(os.path.join(LEMSTROM, "leiermann.xml"), LEMSTROM)
    for q in ("a", "b", "c", "d", "e", "f"):
        query = f"query_{q}"
        midi_file_path = os.path.join(LEMSTROM, query + ".mid")
        client.index_notes_write_response(midi_file_path, LEMSTROM)
def other():
    """Index the two Josquin 'Castigans castigavit' test files into TESTDATA."""
    josquin_files = (
        "000000000000457_Castigans-castigavit_Josquin-Des-Prez_file3.xml",
        "000000000000458_Castigans-castigavit_Josquin-Des-Prez_file4.midi",
    )
    for name in josquin_files:
        client.index_notes_write_response(os.path.join(TESTDATA, name), TESTDATA)
def queries():
CG_E = smr_pb2.IndexResponse(notes = (
smr_pb2.Note(onset=0, offset=1, pitch=162, piece_idx=0), # C G
smr_pb2.Note(onset=0, offset=1, pitch=185, piece_idx=1),
smr_pb2.Note(onset=1, offset=2, pitch=174, piece_idx=2)) # E
)
with open(os.path.join(QUERIES, "CG_E.idxresp_notes"), "wb") as f:
f.write(CG_E.SerializeToString())
double_leading_tone = smr_pb2.IndexResponse(notes = (
smr_pb2.Note(onset=0, offset=1, pitch=162, piece_idx=0), # C E A C
smr_pb2.Note(onset=0, offset=1, pitch=174, piece_idx=1),
smr_pb2.Note(onset=0, offset=1, pitch=191, piece_idx=2),
smr_pb2.Note(onset=0, offset=1, pitch=202, piece_idx=3),
smr_pb2.Note(onset=1, offset=2, pitch=156, piece_idx=4), # Bb D F Bb
smr_pb2.Note(onset=1, offset=2, pitch=168, piece_idx=5),
smr_pb2.Note(onset=1, offset=2, pitch=179, piece_idx=6),
smr_pb2.Note(onset=1, offset=2, pitch=196, piece_idx=7))
)
with open(os.path.join(QUERIES, "double_leading_tone.idxresp_notes"), "wb") as f:
f.write(double_leading_tone.SerializeToString())
if __name__ == "__main__":
    # Regenerate every fixture set, in the original order.
    for task in (helsinki_ttwi, lemstrom, queries, other, palestrina):
        task()
|
# Generated by Django 3.0.4 on 2020-04-20 09:17
import YourJobAidApi.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: point Post.img and UserProfile.profile_pic
    at the shared placeholder default and adjust their upload destinations."""

    dependencies = [
        ('YourJobAidApi', '0026_auto_20200420_0611'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='img',
            # NOTE(review): upload_to uses strftime tokens 'jobs/%Y/%M/%D' —
            # %M is minutes and %D is non-standard; 'jobs/%Y/%m/%d' was likely
            # intended. Do not edit this applied migration; if confirmed, fix
            # the model field and generate a follow-up migration.
            field=models.ImageField(default='default_pro_pic/dp.png', upload_to='jobs/%Y/%M/%D'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='profile_pic',
            # Upload path is delegated to a per-user callable defined in models.
            field=models.ImageField(default='default_pro_pic/dp.png', upload_to=YourJobAidApi.models.user_profile_picture_directory_path),
        ),
    ]
|
# Code strings from the Galileo SIS ICD for the E1-B code
e1b_strings = {
1: "F5D710130573541B9DBD4FD9E9B20A0D59D144C54BC7935539D2E75810FB51E494093A0A19DD79C70C5A98E5657AA578097777E86BCC4651CC72F2F974DC766E07AEA3D0B557EF42FF57E6A58E805358CE9257669133B18F80FDBDFB38C5524C7FB1DE079842482990DF58F72321D9201F8979EAB159B2679C9E95AA6D53456C0DF75C2B4316D1E2309216882854253A1FA60CA2C94ECE013E2A8C943341E7D9E5A8464B3AD407E0AE465C3E3DD1BE60A8C3D50F831536401E776BE02A6042FC4A27AF653F0CFC4D4D013F115310788D68CAEAD3ECCCC5330587EB3C22A1459FC8E6FCCE9CDE849A5205E70C6D66D125814D698DD0EEBFEAE52CC65C5C84EEDF207379000E169D318426516AC5D1C31F2E18A65E07AE6E33FDD724B13098B3A444688389EFBBB5EEAB588742BB083B679D42FB26FF77919EAB21DE0389D9997498F967AE05AF0F4C7E177416E18C4D5E6987ED3590690AD127D872F14A8F4903A12329732A9768F82F295BEE391879293E3A97D51435A7F03ED7FBE275F102A83202DC3DE94AF4C712E9D006D182693E9632933E6EB773880CF147B922E74539E4582F79E39723B4C80E42EDCE4C08A8D02221BAE6D17734817D5B531C0D3C1AE723911F3FFF6AAC02E97FEA69E376AF4761E6451CA61FDB2F9187642EFCD63A09AAB680770C1593EEDD4FF4293BFFD6DD2C3367E85B14A654C834B6699421A",
2: "96B856A629F581D1344FEF597835FE60434625D077ECF0D95FBE1155EA0431979E5AFF544AF591A332FDAEF98AB1EDD847A73F3AF15AAEE7E9A05C9D82C59EC325EF4CF264B8ADF2A8E8BA459354CB4B415CC50BF239ADBC31B3A9C87B0843CF3B9E6D646BA43F866276B053826F3A2334CC5E2EFB9F8F195B382E75EEA63F58A06B3F82A3B5C77C1800FD9498F803E524435B321210BB84690BED0BBBE16D363B3A90656A73720E27008852FB7DACC8284411B177728D9527C560859084A395A6F11A96AD9DB6B43E00642B000ED12BFD967868EAB1108552CD4FC89FBC408ACE7678C381EC91DD000319124EB5D5EF52C4CAC9AADEE2FA045C16CE492D7F43743CA77924C78696FCBF2F9F7F36D8E623752200C6FCBBD71ABBB6877F3C5D6E6740AB0389458A6B66440858B2D383244E853646FE2714211DEA9E6196252815BB704A20BFE556AC474F8998944E0CABBBE21A6400B87BFDCF937D12B2821D59298AF4AD378F0F42BD8C41693B8D993CF37C8B478F3BB5D33AD2A9FA24AD7B8FA895FDBC04964192F7BA3FF74E0E3A435B5DFE042E3115CACF29624C0645E9C917534A2EBC1F5665E4E1B1BC56208DBCD8A27CCB6474D5D0E20CA4072C960E5ACE41BDA3770DF3B681F2B318F6F8E1CB17C2857350FB6009AED665E13B2780D79217F73FAC7A8A48048DB0FB8A8A5007CDDC9A7B2DA8257C99F1CB605A18204",
3: "E57DE19A3E4A8C122FCB1DD6584B3D2DAE364D800F9C5A9E957B38F624CBD3ACC58FA3ED070B5E44857CCB813FBC0BB83B5D157C6C562422E5963CC4DD753C45B0264F8E136A0F1774D77A543E44D51EF8C6B9408B6E3B5CEE1347A94F13ECDC94DC764976E5A50B4CB0AE7557553B47EDFE03EC2CD32EA8D125A341E1EDFC77E75330D6E7B23DC838EBCE7E5567F5B8C80C3D15E7404B4E10F0BEB0C69626A814AF91334199864FC77E0FF548DC2A6FA6A71C3C0561F2B085CC05E8512E27B9DBA60B93D114B87935776C8E9A67905C429D48BF3AB1B0A56FAFBFD5D9C8D8C8A9E5918BFF273CF5E8664FF2B90314BDBFDAD5AB8C22A0E45C104ECE75EA43FE9BDCE306A5A28AE464628163D249D8056005F1A900951808CC8620F81768153436F741667A8E271DD986C7A1E5046FCC74C7CEBBF9A1296D6CF0B2FF85BE412D87214BB368DFF462AD649D7324A117252311C664D33E4DAFBD830FBCEB6EFBDD7391D4BADA7A775FD1949D981F619655DB3C22BAC34E5AE41222905C0C7E80D6EA28471EC0468756531C09A471EDBE200472E78F1701FEE96E5769A9893C0F11E7906B064442E06E21ED8B0D70AF288690C532A2D03B373E1E0085F62F7AAA658B569C5184E3DDC40ECAA88B887118601691892F9F55E2DE79E49DFF11D434C2BA3AA6447522A7C99DC215CAD2ED0114ED62CBDAE9D315E48AE14D2014B7F8E",
4: "C0FC4C72A12023BA7093C86775DF3D2F42C7CEDE616876340BE4301361B9DC9DFF4F1DEC6A62E165927BDE4F809E969AAD085437496BB95904719820F4CA8ABBA0B84C34B06DD7E268BA10E386FA7DB9FCFCDAF2B6AFBA46A8A299153B4E11582FBA7F28F0A0F9DE41830AB33335062C57D81DC361EDFE491939100FC827F36273760043D1C35B74E36C6C4DBE1D307847D55AC07D8B212C2DBA632A86AB15BD0FAFFA43070644C7E50623195A3796AA8E8D6E4E964FA0E4488A500B9063FBBFB1204A0E33C6CF2879AC2BA7C86CAB57E3E8A497836194E65C5C39B950F1AFC3B58E850A5EC39F4190D55351D16529CD52B36DF4A2DC68EE202BB758CF19C54B0E1461D547B5D06C2F9DC09C2B15458C3140860E4C6F3FE4F417FDFCEDE00F71212EE137E6669E569A7845470CA564F85CB4772808D65D2B48D409B709BD7AC5F7E28AA804CE9DAC3ABB5A5B768C6A184B5A974E933F2C1772FF64AB26BA2D5A165744E314EFB2238AC4858A8B82723DAE8865478EAA261F35DD4D98A9C07ACB0B822AFF1AD3E739CB214CE737196FEF2DD0B0D45BAC423935670BCF71C2EC04CCB98943786173C309E75A02BB78A788A5E6F8A8F407E57B8403841A9E1FCB3A7AB80D1F6529770E52C173E2C47EDED4400D5E665E325ED845C9E8D0E66FDA16B17D61EDBB336F22688C3F0FB040A55F33B65FA9F3D45F5B22C445CBF9DEB22",
5: "EA9596357B343DFC31D5875CC0E94117A33651472E476D3892D8112EB6CB6E0151D409C5A514DCDA38A773C58F18B590EF9017B6EDF0192AB7EB29DD6E1E7E7390C13E9B10209D5775F3B066F7B2DBB7307FB44F726DD2F368A5FDBE75BA7248762E1EC7E4589DF1A353A16D6B3CAC1C9ACDB89890ED2C4F44AFEFC763DB51D102230C37E1ED0943CD6F4176B2F5C19119588911ACF81A7A29320AD579C1BFAED1A70DEE1B87037138ADE411E0BB92F5B3148DFA11F2F84CA6C019124B922837503AA9823A97E443A66378D5CB3130A7EC9B05670E85D095D5E6F603092C632E51FD9013FE7FB9F08448FD09F1219A4744CDAF82BF9C60039C8185C7E9559FCE301C6D3F46A2E514AAD44D3889C8CB4ED7439BF47019194F2644363770F8BBD0AE92B6F5F43CBBB503A885239DA636903D4C264B3FF09AB77E3FDBA7EFC63E0792B6D5183759E57D8A694CDB133B4A9E301CEEEB978050AD9A9E410091AD29E389829E2F24BE1E3B24F4540C4A6533EBA72E8AD540BAAE43A0CB82F971F3A51DD77FE9E1956E2EE7553E050A1D10B99552DDD5B68F2E2859712835BD2AD6B08881753B4833FB04740E3364D2CD4921B939393E7EA91B854FA1E5A8EE79FF0A83F111F78435481D462E0E1CBC0C921D190A435A1BA755E4B7021244FC5E3F0630F2A1F439C02AE619393E5624834B05ED7DEDE5F0AFC7A40899424E75D4EE792",
6: "90E92279CD4F60D98F6E8FCB3E9263DB60FAB146A835AAC2E96B3BE3FF07119032DEE0521C731117E90C2943B389DD6B65C5E21C34F86F5A7ADE04072DFD1479EA36528D340736B0FED4F6207BE9F6CFC971D5EA11781AC2DA25DBEEB6B903EF8BB0AC0CD2E29F94B8CB67874A7B7441045758E09EA061181A50E0AB7BCCF801554E0644780BC137436E3FB7784C182856A790D6943BB53DB40D13D6A2F7B83A5C521073883B90FB8DB1C0F954D132943C09156A09984B822079FB8FD09BC07C1D6336C7CEAE8CC3162760B9838CA6A38FD0044FDF099E416D57BF9F33A551043F34EBF9BAA90901E62D2D981065F977852072F692535DDE24EE8946387B4E5B0FEFEBD75552C1FC325A608A78079A9AC864F2F30010A3304CB16A26AF98D9BFD3B8D128541190B2BBEE275A6F53B9BC5108306985ECBB983B56E34F18B48A12AEAB88271F4F780CFDFA83E05E35C12464F4350597CCAE9B4498F5A5454DCC3218D3336763674934ADCBCB5EA52891EB240C362248226DE64899BE30735F6495E94AA61ABEF62B803C57FDD045B724ED1966B6E7DFDFCA5B36F7B0FACEDAC62DE8E10B12DFC84B1A9CEB407BDE63CDB5208ABBE5E066AAF262187E94502B1701B2CC8681CB616773DA2B7AF49443CFF528F45DD7F25959836771908C2519171CAED2BCDCFCEA46301E7D99A5AF7199155772E92BAD85F35EDB656F0999EE828",
7: "A91F570102961D62CA6CB55144AFCCEAF3910F3336DCB029CDCBA164ADA72732771B6ECD1C58E49F468A2BFD23E1B996DABABBAF5AB3A4C74926187B5833006F8BEF7F9CD0F05A2A0B9BD9073C4C3976E8660CE7BF81634CF0B31C3DDD806A6A0C15BC552B83A86789CC675A6D137BE27BC86DF68FEC5D268119EB9E965260FE1F5C56AEF60A8622CDA8C42F24CBA7F5B07A7416917277323314AFD3ECD10F74BEE7B22DC760EFA7F935FC9963411353782547FAEED32E69A4FB5756C1A73CCDFFEDE50F4B2D9B5D2ED5C59C9A52D80CD27B989B8DAA14C569E763C08FD42358CD064B2DE0526607C9536D75E1617EC80615EF5EE2314FAC29907B61B61F8696CB80B14B3A0148EEBC825C91150A08A23FC7B38B5982AA02A18BF6E91B3A1F2EEF360F682A34AB36CAFCAD556841073F219910F7BC2F07CE45E98F77F50475DF9EDFE2DC9E3D7280193D61AB5076A14887E9D9193C3B83C5773BDECA067CA1BC3D4561C3A8B4E30072A6269B529760CA1B5FE9D3DB2B5D1202CE8B18E9E2E80FAFF47108168D3C7EB3C940B1A35A1D1B968A5A9DC0686DD8336E498C240F20871600FF995B9E33169DCFCFCB58E75C94D82F843C60A7118F0D7B40064A8A4176C5158E86AF0BE4C1D5D73D1C051132A85CC0628486AFD660502A515D6353B674B1D4E61750C13E8A3AD48FE1F89F201C288A8F443867C2BAC23C706EE7A2D2C",
8: "C6E00978E351164532EEA256ECBE0D4F8FCE02A276BD19666DE93936F7A242FC4C7E879791314B043ABF1D5F9B0036ED22AA92028C800C4D62BD6640431170EA77311865074D670AF2847AA47CB94584A793FA82F51574BD7C62BF14386F14A3D7DBD129FDE64EAD67EB35D5E13FF214D7D163B770D4A77A62D02D88C0FCF3FA5EC306EB7F85539105FA2CE5F53D182E58FBBC1C57CFBCD2D2F7FC8A067D6FA0BC834DAB8F370B0971BF6D068CD4D3A32C11C6598DEBBAEA046528C5EF762828CC84D003847069FA18743A809A004431E83924B8FDF0AC78699B905ACCFF82E83FDAFEC8648DF64042FC9438B261B73F0541498ACAD67D702AB631BECEF8680D33CE8F4F0CE29B95132591A350DD68B36734B97D4B3E84A76497F702312F2A8370DCF26A7C3C8EB91DD8699C48F551750712683E0397083714A6CAC3457C0FA70BB3A036C6E0BEF24E6B20BA5565B351C2EFD56BD9455FF7728BE07A097208E73DE4CD0CB4E215B4642365123CDEA419B28459D50E864B762554E7C1D7CAF73DA7D40EDEF5D824A2FE1A6CA473B07370932A8A5D441DEE3C9A60DB68E27A9D3E9C8229B44E5B434C6D18A8CADB6D17BC4614DEBEAD670C73132CE2F999C8716D1098C69277E8ECAC546EF8002E5182E25F31A354DF112E97F8733DD20893B430CD7130E69ED4A0FE4D6C2E4FA479001E42EBC9F36E5DFD3E0BE35A64B89745E",
9: "821BBB3FB91E50253A9E71AC379ED57AEF394C2CC59587B2D0337CE74002EEAD17AB5D504BCA68BDAE9061C3DBAE2985EBE292B9BEC9D3542015225F44ED3C2C3FFB036A515BF33DA1690F3438FD225A5034106C5F4BCC43301EEC2245D73F63038E2A7D9B8CF95A9FD813FFA071FFDE423E0CE737969578BEB909764A8D6DAA9E15A4FA0867831652C0F6E9AAA39A63F0AEEF62A433476CC7380460ECFB8B7F3B2FE8C4C42A3EF1CDB808FC9747FB4F044B3B47A4EDFCC9463ABB72C55399B2F79EE5FEDA270D6358B27F8466969DE4A5F2E6A5F2C4CF0813C09F468DC97FC0E5DD057A8A0355767B698F8A79BF0350C4200413A15E6591DE70A1B502E19FF515C3DF36935974A4764895B9E3CA2626BD39B7ADB780AAF7E2E914E804CA923089A51F3876649C73CA3C2623A8C95D11EF4B3F941E9772EBA1F47212C666F03F01509FF699F74EDE27182B6E98AF49D1BAACB41A328A8C34D6E8AA3553DA3962B27B041495F269328B6BFB4A385CBB118953F3F009920EC4C8590003290DD60AC89177BB8C4BF753CE723AECA392B8D9E5E9E4113DD062F294A77B6EA9A0477E697C04C787CE78A92C704409D37D37B6B392128698D0D8D4CA101EB38B92F467F0D86EFD8759A14162CAB55F8C457E82392790A5BDDC8DD2663944F880C95EC02FE5363B064623994EE5D4396C0E44DE2A3D225830BA6160270BCD110A942B0",
10: "92A0DEABA9875D4AFAF99A24C1D5F10EBBE6DEF9CAE5B0C85B2A0417C1CC5D1A5F71CD8F8A4B013C3F012C0A19EE4A23106CAB8662C5A2A93A971D0B6E487FC05BAF5C355A9520C9148584CFED3EDD0F38696E161E64378C831C586D9178A0CE289A67F33AE68C02A3CD138FA09DF1CAD01EFADFC8BF6F5407B79B18D09C82804736752D08A1FE09EB35F544E9F797EA36DB493BA947AA82513EB1615A356B5AA4308B0B4183E070EB494D628159D2D4BC3CB110AB0CCB2E9E73B5B7EB567187621E72D99F1FB78565917B28464A5F29DD8D6F98B6ED703040A44B0ACD97F15049E009E8533FDB0B6DB2F2582E6BBF81D7B0EADC8F402508F6B8531AD13FD1C55978A8A70DF4E053DD475132D348AE27581370EC14A3E0F96E0D70DA4946DEEC0760011404FDC5B436CA7419D05895F5E0EAEEBC88C74947733BE9919F18CE702887A6C4DF7C19279B82FB646090822DA9CD9C7653F6B931A337A28F7A4A01DE0CC0744F22961045F8EF8D4B30B07E5EDF5FA944EDCFB9841A9088AE82444FCB6E90B0E9C567A80E8C42EC713D78132F37AD1D2592C31C93D2EAEFF38AD94E5C0D94F949F47B88B03BC1EA4E5EC9C7D9DF19ED208B8E44FFDEB0B625F633C7DB1C826AA9E1C1309E5B14A0DDDB79714DFDCB52221CEAD7E8A140DF7806F127156478AFBEE922B8ECF322D66B48BEC434299BBB36B3BD9030467B7F2EBBDF358",
11: "AFA7FBAC93326D0C36A388831B99DF4D527BCE7C9070F7B46B5FFCDEB07384801AE5F86A89934DE23DFE2C1AD117797D4FA1BBA6175823B41166DBE9D126F17B3761E2C352AB396A5A9CCEA42A5E9EA1BE3497C0A5BA9121DB97F64159AAC78E62D7DEFF3BF4CF73F8CFBE045C9D39E41D5D208DCC4B47CA27E900C3CD8FD1408DC5E0F5114F2FE65817D37CD1452C4967ACAA2119FB8D60E5E2FD8A820D0AADD88B94D40435C095568AE6394D3B97C835BA868A83083316C49C75D36EFDD85165BE74A4F2B2D21295EBCE085D9C4A4758FDD9CF71B97FDF34B7B63A5E9691DBDAB834D87D5B52CA9A53032FFE821398616EA92625C2DB633E37911987083A3B49A86FC562FB1264A75643A5FB6E97162E16ACCE353227FE61A859E094C2359BC4645946AD12AE5C39C70F59EA7B597A9B3372C23AA578146781A61163C92816627DD9C4BF1788087821F9F5D41B75A0F251B06BBD3E29ABD41E72A1D48323D24E2AD6F11C2D49678CC04FCF6B0EFD33BE6DDCD444F5CA02FE158112631F782CA7B0C5F3607ED807495BF8E82C5EA51A922FE28C8168D9844859E7A3EE3038C5D1D4BB4B13406C340894DF46406836739E31D01082BC84489592DA0E985630CEC40702A36DDC301B3AE1E8101786FEDBF752F9E175287C239C18FC25795BCB479DEF59C58C373313C02A1BC5F16355E2B50EFB58855670868728B902653ED80",
12: "943CAEB680AA3E630755DF32F406F403D7AF5E48A710274D3887A7AAC8EA6744B889F2E0CD2033DEC0B434A9591254A0AA68C5C9BF11D35765E86B437497D84E5DCBBC0C0C580CE9BC50EC6382AD74DB02C2C233B7BB07517D48056226C505ABF2DD244F6BBAA23313D570558B065E42327768078EFDB53DC465DA038E3B216D990EE951B3E13D3C1CD559998F77BCDCD2B9522B6F1DC5E12C912EAEF574AFD69C251F9B2532501AB9F4B3B2223D0F8920BD562B0D358A14AB0D196DF6337D1C96CDB47AFEC6F81DED4B5773864DA32FCCD06B9AC53C122B2C6327E6E5EFE227DE4893FF15BBB2257FAEA836E99676EE32BF6FC14D4F56EA191B8A3870374A0867C49EB0015D1C6D07B87A36BFDD1DCEF20EA7B80D997CBE2D83EB5630F2EE6F73B0D50700C89E4F32438F5541360683DF11DA6E7A3C1E7DB2A87800D9245BF04278C990A8DC9CD86DEF39CBC6D4BC00FF13BBE132F9D86681A8913BE787CFC69C35304824788716D52DC74CEA399E06DE6241780447C74DA8E947134D8B2FAA9648D6D5F34C9D60AE5973B5BB0187796D589C8FDDD7675671F28C04AC1038D09251980683CB712F694D7C5B0D5B1DE86CD10EAC4EA04A55BA8803D78249BEF516D38067890105A23212E72879FA267A8B4F0455A81F17CFD3E5DDC55E5D4FE00F83E18626C676DAF00E6AAFCC23D209DEE0B0FC6C2AE4DE161D13017ADB5D8",
13: "E5E70E7837D094416558C044D758383EDF5755C80921218ABE76E51FB93249E211A38FE6D07A7DFD2263E6E3D8DA0F921A06A606B804DE7AC3FD097E5F96EFCC0F544D623FD6F43FB88CEA7C341E901CD47A7E24AB141E998FE41CA87CD6CE8C1870D9ABB6503BF7E8B659084BAF2237DFC94F35C9884C7F44B87120BFCB298696E613C1656AC4899781A94869EC603B4D38665337CA8593AAC83AD8BECE10302E4B4694237E96CCD3AD9CD5F8EC039A1D1A4210716371404C5C3FF375CB3A33559B1C1A239F2E442C8EB033501BB290434BE73489F716965393989422CF4D57E5B4F3C76AF3C5E8999E61805134B9D7C40BFB59D0D0FD30F98567E66D6148D6AA64F74A22C50AE49D6B1ECC6BB5A002ABF38FF2E2436766B86BDDE7D95DD6E02AB0FF06E7BC22CEC98D55AA2BC4D7B91C36B2FF9F525A74423498D548318509320FCCBCA582A6C2996AF6538422FF0DF060C0BC7356B0850A139AC3914338127B786F4BC58CEB6064DA881376A147DFF53C6700BD13316A5874A75D7B9713DF54FBB393BAFAAD7F7B0710C049A0B6A8B76A9956BF6185BA39D9C347D179FBB97D4FED68F47DB5AC8E0D40122EA51C4A1F88D23153DF651A180C2AD456ABD7F851B65B220A72BA48FAD0436332E4EE7EDC554B7D75481EE05C3D3453D760E9099DD27B324DD84C0C0C4DEC4C674D25284B16410F959FBD09D9DF09CE875601E",
14: "BFDBC82ACB4FBCD5A90C5967EB2FED597A02607F426002128AF4B38942C85AF4472B3CBF3B183F240E049B251713740A31117F108936631FD0F11C5F79325BD6677A2C2B242965AEFC147D93358730AA782491209CBE600976F56030753CC979C240A196647CD9EAB1DD0380E59BC7905EF740C3411AD9DD72027D0D3DD6DEB0F5F3C18F6D6F7BC59B758E7E262937B4599B38567C147ED2689BA2CF23736CAF55B6925827E2B70E47D3813C94C85298BD6B49C97B5D0221BE9E3164B6FA3D95AECF53AF170966090F19A69E75F188BD2556B4E8FA7DC4AC6C34F54297C06C2A96DD1C45B42E6175B5E8784568F7FEF0B6C124C5019CB577B374941E8515CCFC21F46D188BDD2C2284C688879A5BEC50CCB97FAEE1F75580577498D509D3DE161BE216C873B29E178CE17DCACC5E9E2224D05ECC842FBEAB82A75AAA20769FD81131CFB69D5E35409273CA106FFB27F63FF997CB500F161F6DD3A8BFA5719F004EC17860152D3290951678A131E4F3D3AB34CFFCAB2967ED9D8F1BB987950306BD28751D2AEAB05F071B08574EFCA01E5386E04F727BF413A8279E9392EFB64D9AEE00877C76C81EBC861E2B484A2D35E592A131726CAE61BC010B954721A82C968CC6F384D9BBB99B4E87846D10B94EE31F64846A5834DF73A67A267B894B1C06242D750F15F3E1E850A11CB2E2B16155008F91493AB3BC77CF9BE56F9DB20",
15: "D64F3D1CB54CDB9143D9E701BD313779C09DA064D9A85674CCB53B0C5B4446C122098961D5EFFD6A85537486D5EB26B5E18FFBFB8E6EF16C2DD2C02EC7C07DB15CE33015A636E225F744C963BF0653A89A48F1AF04819E273A3AE1F5538AD574D553C5A0DEF47B552957037BCA921970C76DDEF74BA083ED55363760A6780612C075964B083B4F674EA0012FD1DF09F0445CE75A698852098206868AD8241E3B319FA8D2D86DE6E7631DF1AEB571F9676323E0627307F6D8F569536A758DE5EDAAEDF80F4335E3AFCAD07F70AAD5CD08CCA1E71B84D4D97931F924AC0010C0811972ACAA414B89FFF7917E653BB31E9CDFC72595066C662CDB9BBC96152D46BF4E8C15A8D34809C4B9D79871BDF0B63FA294F2D667624F6E0210CD40C92F1C033C3D8BF089EF85C4F571CA727C71B23128A9B0FFD70CEA93C316FC4D69D79B089107F292E03425B2552AF5AA18FDB9AF86EA1972B66B1276B09119437E4DFB8F8E3972D91A93816EBD7D8D715CB47EFA742938B0B49FA27A291B0DEA1DF0B8F878332103F45A99936896181E51FF65C6995F57C2C54B8002DEFF54B0EB3131EE7D61030C33B5502C49CF398FEC4B7615D16FCEA3E8EA12BFB311D426331A06606CA5A066707C4AF8D1048F1CA6065FBE506D06C6C00D5D250E227265551867A6816F05155FCBDE24D4AD115BDA98AFE08B12A1F32E7C2ADA801FFB78BA05726",
16: "9D6AD9889EA02FC9A58949290975DB0F512EB37C8156CC9F1242B9E45F22CC1D6ED1CBCB6CB245811CE729261641FDF7A8F389BAFD7311B8BD689E02409F6E8C5202F466349EA466E5398B29C8CB126D9600D89697A07A6900FE8D95951903DAA3419839C2D9E35E9F4EABC04C9006EA585F544C7163A33D7E78DE28256B7B8978FE018CB529F7F79BBF66DC4F0DECE80AE3C2CD479D78C4480E4DE2F06C70E5FEBDFB4ECAEDC2E7BD891AD6C91A7C2446F1B13B340B7160782F6CC5B45F9787CF1B0985202DDF02EC552A6DC41325FD8D31A4316C13C56F7157134F66E1D103CC3AA7EB951C92094EB4409E6E7BC494434FAD80999D46D824A5A57390599052025F7DA4838F7D16A8DACDAFA06D175546FADD1E3F7975265230F6C01B9C1FB1B7AB1F2FDD43A5778E3C88FBEA70575CA26D94D249670E4D9FF28EC67D15829776D7BC6754D2A2BB01554E5FF0C3FAD8A1CB546E8AD5E5314103D086D14ABD30EA95DDC591C13D96C1CC3F60FD18D216B67181B6324AC09A97C0C45E50EE8380ED42F6E0430639373E7760C708248EE7D74830E9594114879748883F247D056B2BA94A0FC54CECF6F5C6AB4DCB7CFC8C224F40D886427504233DDBEDCE160DEFDFFD69EE2B75746D9CF71676DC453FD01C315ACA96373ED387B040BDEBA7FF3CE00D915F90AE6E1796971F8052160154E8986913AD7BA291188EC49A60BE27C",
17: "B5184F7D580935ACFF18201CE8B5D54CD0A1CACF102FBC8AADF391C4CA5807BAEEF4E5E47F7459E74485E48E0C42D27CADE6970714FD97C08F9592FDD387C859FC12C1CCCFC3EBF510D66FBD8C448C25A322CC5887F94A55D48ECA362C690F24833C3B032A047D12BDA2ADC6824A1F6EA9320BED27968E9CFBDEC60D041EF538F1740C0519003FAA89CD4224293167E05344998FD396EEF618E8F547990BC06A8B76D0FD6FAC13284601AB7191CEB813C46C45CE7B3FC09EDF08DAFE136BFBDD63E6CE7E4BCBB16C5DA68AC71A1298FD27363349A261C2F2CA8CB799E8604ADF70092BDBD6A04CB80568776A537AD1711891B251C74E42FCB095B23EEF70F167E8B4856BB7F92E3A43C79FF4437262DD70BAF9B16CBF5F10D1AD7559AB0F8CEE1B9FAD058E84FCC342D9F0D9FBE4207D40E281416506242CA1B8DAB28DE88D2D00BA21AA7FDDC25940CB29F02811F8DC6850A6A87D72CA9F3476A73649FB4A254B1204CC1261E7D512BFE7B0D0091AD5CB0FBBB765FB5AFDFAB0D701941DA54832FE8253BC0CF61924BCA2CA231A196C7C32A350AC9A5FA2884D8571FEEEDB7D29632E71898BB62B5E4E0104F73AA6A9C6B8CDA816872805D75ECA64F961641077B259C9D39E2F3CCD9FCFB1E6B6E2692EA34336A967E587F32E49B961B91311198A204D11874B4BEBC6C04DDB5B82D5B741D3CEDC03A56A2017B3D2C4FBBD4",
18: "CFDD6B78AEB21CDCD6AF8C349F6DF8FF8B96BC8246A672A16E45B5D0AB7D992570EC45A534B77F204039FE200D4C5E7C78FE24941F578097B216177D8AD4E1844B2E52D843256D0BE8504CF2D5B639E2CD501A6FE39B8AA7DB7DEA924B38692E43195DB7E5F25E25152DF0FB7E0D4EF63F99CD95F699E16576702B651C29583645070011B2A1F88C947BAE7C94D48EB07A132DB38D4FE2B77EEAFB31AFB442710BD0AE4E6102DA69A454517B6F148D97DBFBAC7305979B5D74D7D7568A0CA56CA89F23D8330261025CC741F9D7A4BDB356B544C68C89CCC2C125F5C71E18C4EA102343AE4A44F6FC695810E6F28C86BF53F4C8B8AAE46DF6006B1679EBEA790266D4D02A2095074ADA634EE60C7070285C316E1F191BC5A88B80D673F144D65B870A65FC93D8B4BB29B80FD58F9FE95F5994878308CAC5394781E4D5A3F5EA2A8ED834EE5BD31D2058C843F22EB778C4C25144193DAA65F9B57AEC4A344713E9EDF913F3CD29196B42E71BB182AC3B1A60AFDBF1112A86A20BFC1D28D3E0DBBABF38E8F12651C207C951654FE8C4CECB6C6F93EC46456DAFFD7320DEC8D08F2F712CEB4D82407D61CC47B333F69310C06EE1FB5ED84F83945F05D4A87CF5A68D78B5536880DE3443E804040E599BC5837E22150C93CC1E5E711F9B889C78C6FF882D80857EF41ABC5F12E99105E6C894EC0B796E0A645780341CBD039E8C6EE",
19: "ABA759AE16B9D8778FAC203FADF48015331D6499B8CD74BD71ABEBD3E53ED90625E3057EA47BE587600F308D38743A686EF6FA189A4D86E4A35EB798FD2307345FBD10FA701265F6417603365FCC4CE7635924428167115BA372294C27A23CE6C27C506603C5A6618A2B3344BAC50AB7FDC29D36BCBDFCE0D48D088EFD8EA1DE492C543093C30AB7694627C01B334CE3368AEB4BB3267EBB1096450BDFC2571977D7EF78D6E288FCE0388A041838EC2031248F5FD659C70180634A1DC7196C8D9111C75B51C50F854CEC63DEBF9FFE1AB9406735EC3187276DE7CA2FAD4287027956C93B8E84B7C0C3A9C3F7E82B3DB35EB6D2CEBDFE0708FEDD764C839954F2CC9044B652D0A01D28BD6B9D3DD9740CAE39AA52597FFC1227FAD8B78EAFFC31BE94A632A1AA7A60AA5A9E090DA2B62F6DBDFDC50DF6EBE1D9949619FE9B2302248D6C801DD2D6C01FF8206A93C0AD22C6990C4EECA7D4BDF36C3246A5D2D2B3982C608E6AD6BDD85C92682EBDC9E4117F8B7F841239C2A5AD7977E11E4E9CA73A55859EADF7C9C2F1B28A6B4AC7202019230063331FC5586756CEA1F8478173A0A4964D00C1AC099590152125A4D01592C54DC2555E1BA34C7AC039394D1979AEA2BF7B2B2A8CB9D62E89132CE9E3B325F023AC6E8117CE57AD4B271EFB0C172FBFF8FA6A17A490B67CA7B15F865A8AEEF37651A622390E82AFD418C7AFD48",
20: "CEA29601B96AD3A831646922000BBFF02C014A9136D9A151A0E61A51F9FC2EC0C3A8F4C83E64BDE569A33B4CD653C1345B7CBEA3B3AC0411B6145727B1DBF6066ABCE9DAA8B0DE58ADC2510C02C2619A542A139FA3EF7A03AD3467345D9573C107A13E7FCD43C0D51DB5EC1A09D409DA75462F9C71F0C9E36C2742C279C910F07CFC5CF7F98AD48D67232A2DF29A66B78209557357A4BC91922D4195DA9533CD3501F388AF6EE2BB3AD08BC7D53015059988F5B9BF7824D066DCBDC61CA588DCCF0EBDE4A96632DBA22CA0D770C61A1DD66EDA882D02C5FA284798E12296E89C45906D315EFDBA816FD869DF869A65DD8BA4E0B13C441EEB052EF3D0FD436E4AC68EFC749E0CF4C7E15599D5514E136ABD134BA638A02E9EC1FE66CC9ACBCE5082C8734196BADC21F4DA7621D9FA725362C411127836A26CB44CB3851D53C599B94A5E67862665D7092C43D9B4AD3FE20B8AFACCEDE920F440F3BF5552CFAFAD04A7D7E0A9CEA18D497282D44778FB7D5072832C0B77C4C51F4DCFD7AC07DC7A9863DB8A38F1C003CB852F6119BE801AD12B8BC7393B00640F125C734447DB2FD8B02F7F7FC7A23B84FB80F9CC08E3EF888634FFB6F51ECEE9B20A89941FBF2B49314DBDD67CB7A1B5BD8D629FA327AF2CBB47B5419A0A8CB807D30152FA560690DBAC49D6B043D5BC9D51E82C3B1CF4ED69E997050C65197F3D93E21CBE91E",
21: "D358BFC8C6AD1DC94E71D1F5D05589424275875AF8CDA2ABCC6404D6FCB7A2E0A74C68024827E02621C10CD5FB149FBA373AE32DFFF275CF386C3D7A04E3FE10B6F1A6F4782B4823242F29672E847CCE760BA005D6852A3459E7576A254B10A9A78A9F8112BEA39BA65898CFED1179D68211D98E6950ED06399E39433ACD898E2F6C87F5FB9D99518EF36429D447B0EF0C5B7D834ACFA388578BDF60D4B1FB5A0CEE7D1D613BB9B99E36DC9636E70A543BA6BF0B3A448DBDF80469494239D4B7C4979D82E80C08EF36EA67560C86665D458040CE31BA009BCDC30CCBAC50259E4485E570F190613CB010563F6BD24C2F1CF73F6A6844AB8350D23BBC3D1361E73DCE94AF83697BB817BA366C9855A754EFC2F007D99A964125682E6F5CF7FBBF687D221B5A0FD844477A2F87D5370F4469F76073A93AEF7812275FD4F70B2040C12A83ADE5E5D862684D119DCA0F75AE2B56C794968A68566291B731579A1055A84F083B3072B7BD5AC9D520F64F0829B592875613BDD81C11622B331289C98501B01EE1D813C0E97CF36878260F80BF88071D258B9DE02F3F90B4C12BB56CBC731550B5EFDE6D97A1283EEFE61CD6E5DF312D0F0153A32DD65B143EC6A3F2B64E2B8FFB47EAE46BD92A6EB9ACBDD11A2D730D027A3EDEADBA5965198FD59BBC8574B680B96AD48586E5B17625251BF4374E28C6AB956C6818183FDC119499E",
22: "FE69433233B6067B0EACF1F47BD3AAD9783FA30F684110D1152459233896479D08A976B853E4B7B52A34511239961048B7C1B9009095327C86F2EA291FAC1734ED2596EF19D04528F3D8F2A3430A0C19DA6A70A37DB6DC034BA0053B57ACB9E7C00ED9BD6AC11339EA169D9D54E6739B051AF40EE79A1034D6294261E1AFFCD61B9CA5016C56B2D1172D9B2A7283E4EE0A06C8149E5A2DAA263A5D2429C2B1FCE75C41887DD02E056EF8724645FEC6FE7FC1EF180529B1E894773CF3E2E1D938EFE9CD824D91454116797F5A84746537FED5F0EBF0583C8508EA0745B4989954EBC4F215BE3D515687BCDD5DFDAB9814358B07038E0CB869A8C34F916FC67773191679C60A15A0A399E224D0B0168439386C0AEE8F5EF77185AC847A66D934CB0ED6A3467C3B386BA7F115877F36B49E111DE49E409468F343A98974F4EF1EEEDD282F73013EC2727518DB46C6751A58AE3E0D5F9D2B966D4465BC5595BC31B2712AE1E1BF9915CC0E02CA7240EBB9A045F959E77DFCDADAB6248D58B47BBEF3C775DEFD629A2EED15201A21ADCA470B1AD3084924FABCDAB6B12FA6201E2A239AE8F1BCD7CC39FEC62587E58C84AAC15935D45261E3AFEB60016AFA0902DB98DCFE586513FF70EF4E3F47773635D475754A158FACC9C470921FB0186BD6EEDEFCBEE9C803118851F82CACBF8C0A544B0562E2E27286CEA5FBAF83AA5C1F97A",
23: "C7386F9FF39FDDBFEB223AD8B856EA2E7F3AFEDE197A61F183FF7DF2FD6DE208E71E6E1063FB3774B696913524F7488EFC2CA54E8B653EF5BCB7A8F4994E312DCEE99A316C2ABF3FDF85B8FA9BBD4366ABBD7B3D3D433C14710A95EBB3D0FCDA2D37A443D62A8361DA78ACA781CEC04542D01DE7B6C6D14CDD4EA709264251D46C42AAF404094286DA5BFF8E81FA2F8C54B172821054F4CED82287F29EA3D3AA798C9CF5C5A909B9FBA641A8D9E310248B0F9A1375CE4DAA98EB62286B4EF4DFC58B877A73D017B17AFD7F1F58D3D2CAD3B7AF2F06699B08B88FB4EB70D2511190158BB4928ED1735C94400980144EF9ED06E06074E2F29325C1AA316A46E8E617B3CE916CFCF05A389052DE120498341EE26A27A3D757AAE763046B8CBC841350292F06AFF97C9707CE5561F5C119E2FF6C137094F62573EB80DC13862797C3319158DDD465FBC033CAD81BFBBBB54D9467599D751B9980A9AE8BFC6715C5EA74859E6A10DB369D5DF83A92655A9A5908228B33B36F55DE563005B886EB324CEC4160F0D18938E9FE41D39234C29E13B814DDCD13CA6450774800924B0848735C5DE076F66EDC973FC83B13938811CD9887371470AC5DD985481185F1191EA8C1D3A7DC65E1E82E2318D0FF0C9AF65EA1515DDC536C5A8BD0AF481789838DA54A39BA56D014E12242600AC78D28ADAC3FFD3600E896445868064D1D2ACF22E",
24: "BF5202D3599D2DDAAE5F526B6B6AC469D4BA0D0BA5D79B1DB89173320F0EB68F5D9DA495AA0981F8022426F68519B548B19B5F8CF068A6CA1442AF77C83B7D8649DC281BF438F9576F7A719A902A860B9ECE9AE9C14B98859B282010A5DC90DCE612AFEFD44E0E9E7666A461AE50C2656BC036648B826CA9C3C7C53B30976335B097C19390716A41FD437A2098BCFA2B2975F1EAE5BDBB8192024C20136D2542FD89FB8F2F94C08F765109279BC4E511787496233F15F52D7C3BC3E98A6DC39AFA1818B9533EDE72FDAF021E2C9B7D6C74E49B849F372B1A131F4C532DBE3B63635E0E1334C87DDB6F3D73883D2B43E87CF19E40D6B404E581E807E6EC1A94F5261C7F7EFD4CF043C90A1A7E97465022ABAA1DC21588FD285E7158FD9B67EC5FE7C9E84029E961E045EB5227E4726154F4F057FA337BB20DDA25D11632A7995B810764084EBDE01AF07372EA82FBAFE0434401FCFE05CE8FE3C20C01ACF4E9B8EAF4D50C73D5C42A95526CDC8313DBCA6ECEACB457D9673565A1CC0AAE23FD6261A8943E8FB84CCEC676601A4B302A9CACDEC8998EDC847A53B3CB0E12C8B4A7897D5680CB14A3D11BDBF4826C3938EBEEFA0075B6494CC714D3C0DDA2F5F783CF23AD2D2545C899867C1115BF4A4F559F63E68098955550BFA1EF7771598EF86A08C0C634B291674BB77615121BF0838DA96D6E7C53BFE6A58A382FD9721CC",
25: "BF8903A3918B3FDC06CAB4EF675F7BE3962CD7E3C6ED643386EE533C3B24A3D94D2EA2CFB83F0A346FF2875DB07BA647492D47A807E7FD9717CF12BC97B3C1BE1361E598850B39D50CF7BE700507863BC4BBF26620FAC11D97128049BD96C5E09DC8FF3F62655D660FE66D31AB0B0F6D4F8420E3D2E633C571D7FE2AF1CB4E3BEE95E092B00EFD2796A3DEF376F75B7EFCBB141337D81AE52939D87956C41B1E42C1CCA4317D31AB4F53DC9502A3DC774E05E1ED5008CD931DDDB98DFA69960A6ACD45B60895C4FBA2BDAE8BC7DB8C821697558B1E0A3111F156738409FD180C5A4A33B24C5EE4991B84133CE9AC089724D62DA9D9827A2A04FC103652F216A0895E78A96086270814C2699F475CEFD6359428D8C505BBE8C1A96D2793802219144CA6B3EDB455929B39A3E9F3AB74D685608CE3F301FE38202ADFEF529CCFF46AF36DC24956A7CD07CEBA55AA4C89F7913A8A4B844FD8F152C8A823CB9888E3BFEA97D7E4AAFA07125DA4F51D974A5DAFF0045BCE5B868177A91BD932963451EE2673A85AA8B7D493BDF25BCC2F64AEC3150D8C40C835AB4F5D0B7F259DF099BD6FA9F5CB198B61018B1448035CCD34E7E7A2138F437490026050BBE3CE2D4CF4F4F095CB97548E5731A338CB3903519D6B13A029727F047A7D00904A556C883745410360FC878F77707A716D549ACD6A70A18F9EE0AA8A6EE2080608E10AC",
26: "F58CDE0EFE2356F429B0F2F9A7869A4142A6173188DD75B570F1D1ECD282E4AFBAD11370C5B4CCF3C98535D27D73C0111F11A84711F732441EAECAB684F2F0D7FD4FC4070749574922A906E84B3350CDE5957DC388FDA23BF45F05951A393DA253EAF691940897B57ACE655E9630F09856E76958D6BF7B830E0CB8182AE226F39D48036C867BEFA7E7ADBAD17C1AB45297C757DA4AFFBAE677B05677D60DE1D975A4F3D7EB3461B424B67B61025AAC257A69FF720CB9DAC007C50C69A7ACDBBCE210BAD4DC2E629A039D98E7EA037A5C344B5CAEDCDA035F28677A41D55A0E3E6E480CCB12B8F17062A983F4E651B4F7CB217FD06BE46747CD5418C0C81916465A4F5660152B3E4781DA8040D4246F9BC47366BF663CF9DA3BB247D9238873CCDC6FC62D1D8F669EFBA42527112FF4072262F7E65AEAC328871DDF47588A0A0DD13A4139F4145822A5917F624B881BFC354F37B6D59C566823F629A21C973324F7167BC39FBD2C121D2A849308D13DA1A28948EB59F7DE97E364223E17A30119BBC7F43E21E7DC3093F7505055ADAB4654194A77C1CCB61898840125455A275A8F071273D8C13934915D379CC603657D99CE4075C1F1DCAB60B6BD62ABA1A10B5402A59706798002EF30ADED2F354E38CE0B57900FDAD31E7F684E53D097B4313DB552EA66F6D337F29594470D3DC0BC6CD361831251004DD3C5357BC0BECFE",
27: "D9086F7C272AA317C64C00AF43C924DB5DAC97F8EE3ED2296252FC4756FCE6928BB009D4488B9BAB757411BBA52BA6F61AF1181CC7BBA94257593FA1BD26D52AD5014C3F1A1832FC4F7445C8BBB77C8FD31C88F0C5D4736D49DCDFBEEF2B8301E31185793BFF87CFD9E6F7E084D343AB98BA3518A87A5F915BC0D76B01AF7DC1CE45F1C5280BD39D3E3D94D0A0286F8BD9FA942849664E08F2BE0B93C6E3B89061193FADA0FA9485F62CA87F3E68E204186EF1187642D651162E4D8E7DA049F462362D8C94539CAAD09AE4768C96ED6C2CAB8025EBB6901CBB26865E1F19FA1B193D47ECE390B881233578950175C85B928582D5B439EEF2F56A8C7EA09278E47741051223AC182456C4FA04D025BDB33FA10C48C70EC91BC709E3CB0FA3E01DCE5FE5ECB9018130A8DE5D0583EDD68EA2EF227A612748B2F785A30A01014BD479DEC6256C8AD88470F79DE0E1432CAE448DD7049E5B7D4DF3C978F65E708CA3759AAB9D329C11FAD71204E1E92322E3EA1BBDD9D034E2A23ACAFA21CF490AA5E2E419197DBE990667BCF277ED61B264632F694392EF52F0A27C38E478257AEC8D2542938BF0713EBE60779C95A0EEC8F32A5202A849CEE8CE0F99702F595AEA839531D4CFB5F5A6166B06EB64387552A1F9BC6BB97B9B99D19C3D2E1E8E9B305D525E7413496E40FF50CF77D4D4E2D41B1D5929848FB2F1FDDA5A39DEA0546",
28: "AE4E3B30560A50DA55AB3E59FFF512844A2700D2D763D85D5C3FD8CFEFACD4D023BD926D3EF2E55EB1B3831F2276EB07E5C07B44FD7D79333699BED0804B678915FE0F092DA9A62F69CB020DA21932F9FDF9AF332E1B400C6B7E7880508E840D62FBA07E827A23A2575AE68E15AC444A1CE35DF3C3F7CA49DEF2966DF3BA89C8E90ED5E2421A6407F2EC51A3E92A3608FCBD6AD9FF9E5C7817E79A0C09FE9014F7AC291448263E4346CBC4BAA6EABFB59B4526B654070084F52B864F9769181DC6EA91B576956397CE55CCDDBE41F94E5DC366E775C86ADB1C807B66D08696A2BEE45B90E8736469A371F05929D9D9FD34980DE08E00BDE2CD0EAB6AF2165D76519F8F2D894AC70740D2372B37407BDA4D943EDF1CBD35CCE4D81340CC97751C568731C009DF65571F28B7F58106AE67279E83C3A0C130DE0C5B6C99117099548661D290C4CAF3BC60EF719E2F7B210FCD4381C33904AFDF96DC3A6557B42B6EE895B4D604F5F8985F454C51E32B2C874E90926CBC58D044D483D6D2A7C26C7AC4D190531F79993D07B2E830FEB99BFDB00AE8C008DB1B762F3F4A81D41295FDDA37F3056B1110D4F0CF385F9FCC7E14C34F6752A2FB17F5CD3FC4AF0D51E4A0AF7D28DB0D4D651156189209480054F8287266B1CB26C9E8CACAA0BE5A69C696300025D160F9DA29F9EC79838941459B7B8164AAD95577A0C532EC2EDB35250",
29: "9CF0CC00B5788DD743A5F33D87E8FA5733B72EDBCD61AA4B8D0B81213DB52E7EF17AE90934F5EC0711ADD19E881CC330F696179C1BA464FFE6D7B04EEC383A4106BE5892C5DD1BD719AB3739A909A384FACA455E6AF96600AC6FF809788700DD2AB93DD228483759BD903EC002D4C1278808B764F018E3B740EFD821A61F5BEA2948A653041FB31F6D5D0DE0A045DA366E44112C820FD7FA966B2CCFD5A6816AF84DC0A3EEB8F9D2F0A912586F91D50B1AE3D930A680A8FB7435B6875ED2E599B87598A7C20245296C4965E2E0CF372B6ED1219BA68CB646D3E73D52665AAF2E3D1C4DE8D264578299B166FA0E148281C877FA9B14818759CBF7FF575307E80B73933599D94EAD2FB1C08A30006330BF0AC1F1C0A4EE6B07F9F3381AD7E2E469E8DA9C2D22CFC0A208B58924D2F994AFC0268EFE206E0A9EB79BB51CA26FB49013B9A17017E0C08F9FFC6C319BB1B5AE41771443BC670EEB91D7769F9890A9B80F52CB0167EAAF850FAF2A52B74ABB1792E7CEFF68C0D38B01F244AC0CC0EF0731E3BDDCDAB89DF376973A7ED5D4264EE82C334671FCD39ECD6E2CF869493914F332767BBE461707166A9164776D29F5EC9291F505AF291254D7319AA594B5F397D5BDF00BB840C4DDCB425F4325ED8AB77E57BECA3441B89414616671692EA88A89D2690A4B5FE958F990BD84A3884A60FADD5DA57EDF01865F85829195460",
30: "B85B6E754CC8F6805A8A19DA104418D9C134C8B0DBCFD5DAAF5A71BC047A73BEDBC192A453674BC624959BB76E44C5B34244D4736ED3F0F3C9658FEC0DA5437E01E128795EDD7593D636CD73FC1780B37A381502633CCF2EFDA0BBB494C1D0FC7F602DF8C282F55E3828E81A92458EB16B74835040D8A9C8F2DDF180A617B0592344B4373E1B526C9706B843B0CED4D25D7324C6FDD0F33133C00443638E6249061C56A116CEC7822F4512AFAEE52CE8F94D8547F72612EA8C7D160C65FA3BCC92BE01493706EC4E5F203F0BF85C52F417BAF8AF490E50133505685CE63AC5B173E07D8DABB2D439C6DC18B41B9CF37D02C92AB5C2F27EC83AB6B2DDCB7ABCEA30A95BBC39E9FD0CBB28118823F7D0342F1EB7B45FA6BB3A50223D0D7B14E975E7658352BC9288B48AF1346955F4551F2ECA47D423EFC63D20681057E5EF234D061A5E6E234ED01F3DF223A0E8B4DEDDC552C7DC3ECF663D5011FC907EB4A7CF746AB9E07C2929B7427DFE9E00B0A1308881912635A72EA99927F343EBAD32436A9B8EB1934AC29E79BB80AB3ED9F5CE39D1E43C251564654365DA43FB8A0FBA27F2328D82445A1EAAED67B92716147E859064AC326A42DC7880DE82FA782AFFF9C59FBDCE088746F8CEDBA288BC8C2C4B458782CC9BE63A86168B671BE99A09F2217B7BB2A7BC88651C1BCE8A0B89316ABFE72B22722273AF570974D8EDEE4",
31: "DD40DD438251E401FC926CC6968393415D52D521A5BB34D4272D6BC7B5431062B35112CA709C0680CBB18EEE053AAD62B2391C9E9D580562541A453ED936CE8E88DFA61A88CA3BEE66CFFF801785CCE863ED9C36A04D2DC8742A81CA55127B44314AB4E687ED921B4881CB363AFB3CCE7EB774E3205D4591939ED7D3C0C508A31786421F49669E120F01D35D467B40F85F2454F13F591F3B830937421B5C8A6C20EA878971AEC941FD99CEA92FEE00E5DC2264987DBC549EFF3E4A26AF0CAD7421C4256D107A3E8908F67450960E4E41FD7E2E84F754BAC81C8F5F1D6F650DEB3E6EFF6059836643209E3880D7BDA701869208D8E4BC8D0614066414DB3F93D6EA187950285F55BB7A1B026EA4BFCAB4671B07704828D5CBF9730EFC99E68E91F1FE9664DFA73297F2D6BD9497DE04982C9FF3730BB6FC3EA2053B3F45DC7FB587BA19B3C6B7E780EA5F25B45BB727174D4CD3B401FE1906360BF0B15DB13B62752F82EC62226AABC83C1C26376F8366BB849DDB65958AD969B25654DEF1841518993033AF47EABEE3CAAA936F19E28A205F3CDDB5CAC649DB6A90483ACB63A24EA46D397508EEB5DA94E9C883EB0451D036E28CC303D52B1BB31FFF582605F340D449508959ED1FE2FF0BD22FDF77F9680D6B5647D59E7E6A003AF0C6A95092F0DE43D1252EA6DE00F288BCCE3ED9CE273DCB4F3BA7E8D17353B8ECA24F03A",
32: "FE38B1ACA366B4C15F3FDD4DF0E0274FBEFDA0042BB203A4F6627ED9E29F405379B2F2DDC0F3B02A0CA70A9499F3CE82B87603FAA347B7052CB5D13D9DE84C114EF3B8F62418FB1F3E374B997127667FD6BCA2E2F9DBC04ECA9D908CD37C62F08EEA6F44B3FDC149465AA8037D65A6C8B9B8B3D5E9A40578E5EA3AE1209BA49E5E2AC615C59A2D71AC1605B98E39A5E66A890754C7D1C07E06DE78632587BADAF7FAAB0A529AB791095DB0A708B691E9D81F2CEA8F07B05495528B9FD56F77A4C8209DB972FAADD9791BA59F47C06F241F50C0619FC04F8456339E0AF331310FA4DCCBEA0E5DC2795CA6B3ADD0174AE4B30AC0428320ACEAFF68F73ED11DC1BC9F0237BDC75F7F48BE518EB3305CF2BB898B329716FC9ECF7E99B510B3309808735FD0A77B15731C233998F9ECEF46E2CAA6E6EDC8D05B943ABD17027A80D636E535038FAE44D60AAEC5406A372D62479192FA84D844520C6774CC589FEE16A3A5549495D968AABAABFE4DB94F5AE0C54E603D6DA5C3056769A064890533EA8EA1E5D1CD410CC8DD4B1D7E0F5F787232439AA4B3911C5DC792ECB873E8105A1AA61C627BE57E809C6863073E1E19AD8B987DE97D88A817FB43ADBB7751E36D1F0E7B70B3759D6EA8F2350D10AF38C331E22703B2B5103C908E1D35A8E814E45BAE81DCA0530FC3525CD640548245C259738E749E195B006081A18C45475F906",
33: "961408BD52EA1C6A9F340D9109B2388CC358BBA2D35BB6AB672A9C16F3AF968BE4613BA6B13D115B896BE71CFEC4A4AFC0BF5D2BB1B5DC19405F40E60FDF361E6CC362CDC28B75B8C30B468D3BBB77F3FC62869FBB635A3F7AEA63420CF1B80A4B3813B2240B83ECE999808E1394DA2881DE2DE62C870EC163AD8D81495DBE2C7383B78E19AC506AED9F3BD1280A77F2D9C55600BFA258E76761391145D45F74253C6E14BAE16179884F0F0EEF8150A445BE1B5AA4FEDC2198CC39763A3DB473C1CC4263CE2DD587447DDA6BD0A496E8DF60859CFCDAFED2EEC5B1E77E68F449ECF129AA17395BA39392EB610DF45134571BBCBB26162C83FFE77D188160EBCF598EF6F092881612A04BA9F4215D429BB521E737F6C3048B5D95B20AC37F79AA99A12CD0469268228463C317A1F31E63E4754890F20B4516D179342A76201402BC0DB2AD091A70B4232473343FE9E0002E59044C5F3B6E3D7368DB7E7F9F42E7A4A1942B1161552C5C84390197A54079F570B57E8EA8A30659FF5A61048142E4368D5542B968966E54DBF91D3970B9A3A278B951D6F914246FA5CCCC20DE53713D4830AA86D758ADA7A8747FCD455B2C320FF9E5E7FE1CCCFD6F7928884FC0B35F34118B4E7E6F6A5D6E3FB4E3DF90CEEBCEEF9D7D595A4C456C373C5356EF0DB0E6F8D61E413F80E4C32532BAA39170AAE2FC606B7206C379C4155031AF918",
34: "E03A069F63220109A77232F2BAF29D345737AAFED2E5E8D5C846B937277DC88392DB2D55073CE063F5FFC5717BB89B481B8C86EC01808A85ADD78517CE12DF776CD3F02D948BA795215599CFCCD9B4FE56DD681A59C71D24946225171DC18E47BFA9068302BA428929790EE62306C5FC8C10E71F6F372105C9421A563A4DA704E01FFD71E46B28C6EE1B7CDF95BCA6A794E8CFCB6BCA6AFBB67DED5C1267A12A4752176C3C1E6C2F665194C7F11C1CE6CC8481A5A966719B57124CA33D8EC9862AE5485788106562A0BEBD3980AFC4469BB1FD1653678192B3022E26CE8788C68F202D506DF098D49EBAFF7741CA96A02BD68BFC4FDBE24D34770FBBF8EAD9647C911B2E6AA705F0236301162CD2B41B88BDCFC2A79EC54698276126D6EC0213931609985224BF515AD3221FB5E8A1C4CECD51DB4AFF612157ABA6640866DC6D3602C446B8C6EAEF359BC8703D5EF9FE8EC7F5F2685C53924C6F1F71519E0AB589EEFB393A12C06B906402FE0A2FBD793272BC101D3B57F10A52C172E505B74F2A00BD5D2F7C7EA2883908434C125969A62F1B09F42E578BE22C08E88B11767D97C4C0CBD2BBD98508AFD591EB0B0C846A95FB72159E561F9D87DB446E9A19AF378B6DB4DCCB49FF8DB547B3040FFD0171B87245CF73A0747287B6FA1F1E4AE99518E8C53DBEEA942AFEF75B69E4790D75CC8A5181C609BEFAB641E28C07082",
35: "D8D7DD9242E54C6DCDB2A717A6F33226A94D5794FA0BDC401F4ED842A5CFA4AAB462F703239F684DB9B95E5101A5DC6067C7062AED9259CDB2067BC815C157A7E150F8557C3A54DDDAE94E5C569A1E09C383A062B601F920EF4D957F4BCA8E329123BD6FDC3B731361864CC139EBE3C68FED0F40FB127D9D1DC071DFA1552ABC6703494A632AA1314984A2D4B7A8BB32C555B79DEE013A66745AE15E8E5E4E129E44A119203425F2ABF1F03CE9CB33C0BDC3285ADFAAF4D7B51EC31F02D1E654BB10F0CD97EC3E389CAA34398166B4D5C9FCBC6E957FFEC9CEE4071F90EE2926FACC8FDE884CC6FF1F6EF1D3CC681FB44E45C5CF681AAD13226DFE19E22CE81265F3088D193EDB098988B2640EBD8D9D66708E1E9880DC41A72FD3D64792B14AA18C13E371CA17FB46B21DA9C59FFA2FD075852D42E2A578744792EC02F9A35F869912BBB44DCC648CE6075DBDF457A9AA891488A5450FC719778739AF323E87F9633E621B404F70614F77C65697E71F281675C843C523740AB66756E4DA784F555B5B4D797A06A0AFD35A69DEEA6E948B9B3C2A62D7B1D56832D9CCF56F2680A5A0A3037F4E0252413FB86520F2815C8824975634C0889A486963A2C874638559E14F780A7F3E2318B88B2C2010174150AFD4BECE2C5FF2D37BC2FF791F4A3136E19C373FCF03E471DFCCED19471182A93E91ED3EA68C402234AB2B00ECF62",
36: "87F0D84B811E4635AE8023B74306DE789ADDF1CABAF5F47885CF7F9A33F2C533093A339EB0D5E05C0763A215459CFF0D31CA92573EF2074CE2B6A11FDD6BAA3C6FA100D23A9AA413BAAFDA22F746CF74562F9DB0EF2F7CDF266142F177681CF2EF388E2E9AA012459ECCA332B779E48CA44E536082D59C3951ECA42F66B600D2621BF5F3584B59DC0DB76EC66ED7D00E943BFDE703E7D5050A8F263366948253B3576311CE88B463791DFA6F401ABDCFD7FA44B158AA97EF1CFD7A8802662A633EED958052DEDB12A6FB353BF2224ECC2322937DE3D39DCF82650B18FC0BC2BCB8CC456141C9F3FB09A0906AC1EB77E50E8D260041E4B3FF4BF3A53F7EB62E0FC503E8E38F185AF795F67FCE17665AEE29BD0D5521024A70B61446CA4CD2B3DC274FBE72A9AE29EF67A2217D6CC81BD8831B5160E4E81238B379B657FFB49023B040B6D504B287F684A0A0C07304E6BFA21E8D0A7629BA32F3F2BC9A33DEE2EAC1A2C22462EA0D24162543945B78FB6E26B86E12621588735B32A4F9A50157F20BDB7A4B6D151B3F28B40A03CFEB3CCA635261ADA2295B74947F1B1D676F6C014AB362C1F5AEBB439DB137D034D00591235B6F8D11C735A0C1964B29D3002D5243374628FB488A04EF245E64F598EB6ACBDE8B87F2FC5D1ACD105460C26BC6E1C7DCEC2E92E33F722E0A613A86356343EB111D0B166AFEC5C7A44B81A607A24",
37: "DDEC47E0FE3E2F4205206CD673EC66D7435E8BD4A523A8681ED77F51453B904E4468C70C2224C5F1D01A1C5ACA89BCD72673F82955FACFDDC4DA499EFD8D18A2BCC5035B0C69D095AE0EF1AFDC389B6253BD6FC83D3C37E809EE732A87D065207684332CBB9BB0519CFF51B553B3689246EC1EA42E236773A5AC4A7BCD37381615F78A41E5B181AF5C502E22FE79436D6EEC07F7FFAA739356CD9544C7DDBC42C56BB1C965441903962703986C93C6F8135A8EC42A89DCB46E754EF5B5250E1171398FD31793594831B0775F2A39E63FAD99929A0F257ADF332C078B0B7E209229ADA46D69512FF2DE930F962B8F81EEFCFDD358FEF8E4DA0D49F39C43AE9D99D8052FCD60305FF4D9FD4CAAA3FA6BF258B867E3F266F1BCC77390D02132E370AC79B34FB37F12D102985950FA5FD53D3D4783DA5B284494EBFAB51DE6CE259E27712A0EB4D78EFD6573A03D629F29166B902795FC8DF59050C9FA48584F0EB69293C7FACEA8972098E1367D183C0B91C3550852AEE6721A84341612E820CA4F53C71A51774134923FA00FAFD84811C07620917C1966F6A26A032B125D76BFA149FD66EBD18F7536285CF94A8A750680D6BE0F4A4E7F2433018796A358CE465E7507F186752EA2FFB01941902129936F7B18321902780E2C6A5EA7D3607227D6315C570815BD0E808EAE242B554824ABAF30614CC72B74E647D5B5A6306632E",
38: "DB29CD5101A3FB7A651BAB94E0CEF2BF737BBEBF0F755AA42F1C0B2E6A2E00422458CD8E2244576F139E811B2469E1B8F10FD95A988FCF872607A9B5CF81EB5875C427515D8D6D4FC58F3C69A92AB4932ED3D1CE6191E648276AD746F12A0D7B1ACE96B80CD2C3B99BD1094ED29F7020121076A6E6D5D750C39633B00F3DCB4793A27B4838C492EFCFDF94D955ED33814C954FFE8B8D29A81C3C62BB2E95ECB393F06EBAB479494628027CD02E59F1AA32F78C53142137965E662DF4B3EEB0B92A6FEBDC91F8B31D41E2EF69ACDE1CFF2AB49E2A4B12C275A20439E30C690D14D4F661C81308F11EFA3D014009D80D794D9F2C8B51E2E6C83686DBB07D1790F56D7C8EE495657BDC081A63B1353F4C4CA74CC0D02CFD7BE60E063A33D1A4D9050A9100F0E181704799357532DE152CA54FFC089E8DF80F29DFA14C19C9D6B7855482622504F95A8948A5DD640EE88B87CA98F8248DF148AE4F992BC7D1FEB3BE07E70F22CC052D92FB263337528C41ABA642C6B1C6F8883B21ACDC69196733FFB684451D188107C1DD219AB8B0AE5F49628A1CE32FDBA4B8CF02F38513E37542D020F4BAFE03EAFD7DCCAAEE9E28258322E43DD47DA3AD625DC2795ADBBBB9FE061917573936E31DE6355023F8D74500D2D032B7A53630F37FFA80567BA36F771C4ACD71D76DE528FF47E8281ABCD325E4C4620D0B73B2BE2787A7F6F485492",
39: "E8F24D8DFB54C5BF909CA2CC5496AEA76E583D2D865259C356E64E76793290BB00E7029A146E1E90DC0C45356E13EF59D60F20F080A0FEA743FC1C222AAEFA3E55876F2C9E6F7BD29C09AE32E80B15DBA0E6B594E951EAA3BFD166DFCC17AFD2621EAE6539C74FD776BB998C24C30EDB3A8B6814DE088E7E6B7CE9A64EEE9ED8C9D987A21A3BF35A9D59171DD456D8D0D7D1BEB3F37D4A31812BE00077F0F0064DA56EF9C4D36D1D3CAB4D1D4C024665BBE1DC227DD29E1CF814EA65B64DE60ECA4AB9B19F937DF15914F3CA577EB3A6A0A2C8ADDA53DE536721FFFD519FC5E7CD9A3BF4F030B1EB638D6A0F8C4E24085012D758414211585EF6E0DB8C670064885B67256BB6AD7E12E380C5E25BF58EB1731E935899C1FD2B20008CF87529AA3E714BFFA86D8B66AA94811A43ABD868A7711C4FFB339C01D72D4974ED53E7DDACCD36B5F459DDC05E9D475D3E2AE383F6FFC9C2A0D1791B4675DA843303C96A98ECA88B54735293C4E1906AF30221EE71AB58F3E38271608DA6AC0A488C0850DD86F8B6588C91589632EB781637C14D9D24B57ABFC301790B3C11C1B2938845F08F7280388E0B9D5A9682A6FD40374542634590F336E42D8FDD92F3F6C82BE4D3B953CCDAC984F6C94D8F2FF70BDFBA63C922060C322FE34188FF70A37648C362BB68B06D74A2012050FC007F276A54882A8A04DCE014655AE43E448639F56",
40: "FE35174BF6B56B67D39271D92AE0DC2B64FE31AAE8D1135BD8FD308D7E281F3ECE84784423A48C1362B5852719023F8861E861AD8B22219259E357EB9CD8505A66EF7CCEB53636B47D38A2AF5506E1FD72D3E30A29EF897C5C44A271EE562D67B279806E8A5DCD78DE538D8121CAF4C217F8A969AEA50D6FAC68066277242B1979F1A6B3051CE5B9949A11719F556EBEF844C808E5C1AFCE5E312C53AA9DDBFAF7280A7FB8502D2C7D1B91614157AE0C6C962F868D05D0463131DC841169946B732F8000E686467BD5D8086CC249693FF9794374266BD6A5C8AFEC65C66A834012365D60179450C58FAFE724B8B9E99C33900A86649B04CD54351D6C4CC7068B28F070417CC9C4430390493BEA50799FCBCD7A13BF75947C597B3D7AD486E3AF99CDD743B6230BC473DA35E6D05FDD17F7CB8D04A2B00EFBAA30946EDA96BED467A45EBA54578D9001637702F1DBAFAD16D2608C475B8DC7994DAB93FB34DF2237E4A13D0C04A6CACD42FA9463674AF8ABA97CF511A82E8E61F8330004E165D753323F4AFFE598E4D108DD8EA8EFA45693A2F9EC8335C756393585E052FE5D150A58F058CF8C3C720F37112DCBC6324877E87541F06C968C46FF846CB512EFE65CC401174BBD1C977694ED7BD1702EBD2D1EF23BB56D0D0C1EC2D90A27CC63F0A09D83F8B0A5ECC181D5D4265911BEE77EB1DCB4780B53CE74DC7A42252F3FE",
41: "8FC54C96AC76BECE7284BB32C63FCCABED194E82FBBCFBDFDC6AD09AF95DCE6A5D6E13CC06B247E38253D359A6C0EEE00A660BC3BE6FB217B9B554C90186711B1E85117DF0CA17463E132333B8469ADF72C3BC6F218A96697E172CE2E6D6F4E70EEC2A0CB48F0A2E7991B1B90A85088CD2C59E3C9AE7FD939592C14AFE9A13829DD97E345D1AF92AB46BF196DF906BBCAE16F5A58A4D99C7D586869C81E182352C210E3B1EE822F4A95DD0BB3B285632978B18FC1CC29BD58C2152FD3BE73482DE1A6A79C3929DDA1D20B6E99E25A666AB20CD371A85DD20BF0A76C5A81041EC943A89A94AA64C5207B3166441FFFBDF4AA28A2A88173F280F3A2838A98667775F597D0368DC0BC53C1AB8B7670EE23E0ECA08CB09FCA68FE2153F01FCB7061CA1B6AEDCD815D611FF71868F50855054085AA1B7CA3309581532D658C7D2D6069C3B44E0E3CDE45222DD9EB40C369762CDDAC6D9A6923FE0EA7DFA1DF73BFF8B60EFDDB8863538A38528803F3EC27E09C87A2D0E160C2480F7D2003DA331FD5C7FE05B582DC9AFB114D2AD2F822922067F1FDB3261078F33879C497035003171165FB139F79BC508AB9D3250D1B53508410A01C35B53179076D9F46C5BE1A26DEAA2F9F71F442FD7D19D34979F6ADF96BECF1124551D4806E7136551291352748DD2D58443978C3DEA5ED0544E6596769A760B476B9CE7BB09543991EADFAD8",
42: "BA63AF12FA5F7D03F714CA9AFBF19375D2A7E31EEF4A9E10C7C2F8650552A6CD22FDE0E012D46CEBC773C87729C7E746FA4C0361099D4876701C0B2A16EA2B5A6B750CDCEB573DC711F95CEF06B8DF516CA2F9BE6387550F22502E53A772436F324569B25BBACCC781D30DE25B806D369AD1EB1D1137EFACD0BE765DF4D06E177D3E3F13E9A3165F269244FE8B130E9066B23474418A5191ED481BC8974074336E71BEB02B1BC34CC620BDBC24CDA8BCEBE068416E5F5B5A263A51CB3F6589F77D4E607939F7A0FA637858B96189A014B5688A9DA32FE2ABFC31FB00401DE475BA07265B3FD1890ED0FDA487D7C616E099B4EF4E47C9F9114E6BF8CC757C92C02C46546130ECF7CD44160F55A72831692A5E69146086C43A5F043BD1184965E2A6B154BD7B8BFB3B4B28C081B0F349900492C703913885428A82A8D2EF1240414CC0B6D56D969BA1A121D9D61584C6A880D533AB58B94C85F07732B5EBEE7FC87FF3AA279ED703858957A1C1501D71DC5420C24BB2D570F7589F0AC5B6EAA87AF68442FBB38EF693D2200E73DC73F58CFCBBE43FFCBF76DB4241FC4A7B131F29C8BC0F77C95843D2FD2DE39E3D4D44237DB39994380C5F1A1A3AFD927F6B736D585112920296B90CE31D58BCAAF8CCBCBF15BB36199B48F00F308F7E8264D039D6DAA6E848CECDD1544323C9654232F45689076B7C4B1123AE0FF152124E930",
43: "F2CE3D09E701F60716A413AF06E14E6FB7BBC29DCCD8273A083B00D429B57EBE9FD5270EC0C299D243A9FF0FF73DE19977EC16C2772C6B7FF88F83481924C921F49EEE41121DC79B7C23295EFA50292B11C7D45D66BCFADC3C3A390E03E13CCCD51309BD4061782273CE5D0E1BD3E11271A3C52654D2B846B5600D3E68D2C234807BBC32F1A350839500DEB6D387AECBFF5344606CD04BE3614262F068FBD256B2855EABD5A7C0AB5C28D5719C844F2A6FFC500628CDBCE869D2F090E588B57B796029A84E2319FB6E59960553B62BD30DE99AA7ACF508168EEED8DAB9996041BDD78BAA2A1CAAD2EF366F4A3244DCAA3F06EC9A24572CC80C3CF44B557CDCF6279165414066D6B700DAEA8D361034D9DE455A55E55CBE39898B65651FE709506D1A1FF67585D0D3E1B2C5EB599704B3925CEFDA45C33D92C9ED9F0C45B7A80706E6629594F66A0F74A4A767493B73B23AF323D519B0D05EEC62C6123A0829CF612DEDFB7F275A55F1629DC64D2F77125A1BDFD7B9C213C51DD9FFD83DEF42E87AD4E0F9E5ABBAA6B120E131F9E5A3097F7EC766539C733522FEB0F02604613CA1190024CC1C3297E7CDC3C514F8C3787943EE8CE457B516677437B2212A19629A0A04B0DAB8258A17B28F52BCE915A680152225304301FD2BB1BA2A557B3E3CFE6553CABBF79A47FEC31DA590308D156C3537B97116224F3E21EA3841A505E",
44: "AB2F844287488AB6B0F47218F2A4C54E6BE79A80F1209CD747FD88A575EBB11F2F0756E2C263E753D9388876E159EF3BBF99448D0865173572F4B6A03FB72244334E4E861E0495AAEF15276379F8DD88AF9313096805D5596446B48EF1F1BF2ABEDDABCB1FFC98ED408846D85732F807196C9CC9B283EA4E0D78681C73CB6AF89E5C361476F84E979814F30C0AB03E97156015A493E091EC5D854E8B08328581D80091564C25D6A714407D6B591F17D376953F18308B8BC12EAB5B5C9FC11AFEF293109785CCE29C978955CAE2601A1DBAA274B72CC18CF27FE077A0123258A1B879E1ABF84458BB10652CF7310278C7FA11BDC5D00DB65E5D6A13D77FBF9D02DD0574DC7AE3849C7C47820126FC99B9766920516AD1D63E7EAA366005714D6C1695E731E88D43A38B615407A99B32A101C4D417D5E36B886C26EA1CBA9FAB0C040468781897145489121EE47BB2FBD6A064CB325AF5CECE5D5A4BD590C70C7A9B068D318247D8FE16A3609C94A8D431E9872E26A3CDB19DD971971AE1FBEB2FB76F7A71AFC5815CF7871BB5EFE3BD0C49BAC441ACD9CE1DA4AFBDB972659D13775AE77F843259EED57A62CEA93FC449EEE1EFFD9AA2D36D8FDBC06A95A6AC9B067468F1D813F2DB95D3456063D5B23395824385D5C68D3F869C4F639962D599AEB26FE2D5148A51579EF385D31E744450E11307F0C803C70ECCA93F831EBFC",
45: "9B1066F98BA2299266D8C1351E6945D7EAA658BCEA46D5AB353C2CE48D01E915E740DC90E9CC487126AF9FA722A335FA1A8D11C2719F07CCA23B1DC5E5B9AB198077177BAE0B5F35C14666FF32926AE0239F21811921C77FFB56F7B218A353F8EA67448F61ECA318196B1186514D27416EE2F61764940A70300B69EBD02719191ACAD969B1075EDD09EAE26F883AC99DED1AE9355C2F30CC656934C0175E824E76947F83D791CE7A2960FEF547CD3BA94CA064659D1F68C0DA8C13AFACC07F3D5A653F259DD141E2ECE9B6D25F67CD38AF3F802CCE332049DCDAE1504061C2437C7D3BF4E15D88185D4114E96BFF7EC673A7AF8AFD53C979C89FEFACDDF873686D892DEF5FB67877734CD981C16684320AF392A464C7216FC5C8B22BC29C8430151FACC94AA1513A08E1FF2F6A965F68E368245510B975595EFDA4E80B7FAE432FCE9737962974905F367FF637121237B2A404C42A5A9318DC5ED9CFBBC2EA9C17A6CA37BBD98F8771CFA7EF58B1EEE40BA2D6C2031EBDE4AF0590AAE8FD10B2BC02A254C97726521E4D4D4D9FFB74FA5D06F8AA0BD43631FAAFFCBF01FCA87C9360C5E6A62A4B025B894E2C30F9003D29B642335DB0A3F2A3436A917B61776F96292A06A6B4DACE4F6F0EE0232E1C206427A0386075BF50D6074006D45E3A3CAB085431032AC20D2690F0435D257D3E2C93FAE49D75FF0F320CB4D348C6288",
46: "8A7F8C99EA79E1B2118E3B203F72FD8921BAA5C336B006ED66AC7181575D66ADE21D00FC8C7DCCA7DC9E430D1D086F2922487B5B025AD38750B4F60993368D829CC361B600ABA990E570370AFFF9BF171610F2A0B0B93A0A3AC54244073A0E816DD691BF1B0041BDC165125A14C621E01C8F069C3E05F48FB77E66A998C27A87FEAF07B5912B303A98AE5796F1B5D4E2EF52F38E68F0EC5786C19ED93612D7152BCF0CA1A3044898F9347FCA8370CE6E4A0510750DF6A42C1C35FDBD91BD2A26A60FB229CC35FC45525D12A092505D901A4F9E1E8D42D25C41B8FDBA13AFED8BC5566D4F3BA13A779D7BAAC1E25B6710D7718B73A641F23E1D22CBDED3B7BC3AF7D92B2DA1CF874D908CD8590C80F1D5055CB8228AD964DE099A4D037202C65D197A35D8A268D8A3BFFFAB39B93615DA295A09AB979AF925D895CB60B5DC5580055BC4347F0DDCB1090DAEF46C8ADD1009A5126362B0B4F1FFDDAB4A00AD8290EB84F76AA345DD73FFF7733035026E3CC1D99428CA68ADEB8CBE98E4A630F99F4F33E8AB66895AE7435D2E84EAC95CF19E9B440373EC0B4D4B2CFAD672C7FE1FE8CE5F34F55B016F8B115FAD6CC7B53DA7555E67672FBF6BDE324AA09FA18F13D9FF1041A12B04F30304774B07659F397554E66CA589D9D9F1F262CF659F718CA7F443B8331BDEA8C3980045562D909EA44E917FF5D47812A0390139B2A87D0",
47: "80B35D641CF6EEF705D51DAACE1EA5EE92057FD497B0D937C7CE9A546BB32DE580F8E434D644F191798A518785BFB9471AAA5D03700CB0B7B2635A6A14750F03DA4FCCB1B363C254A5294DD8E7943E4CDDA318C4B92B57B14EB0F8EB250686CE129BBB2B18EBE7FF53C9DA9C0B664C527A3C69D905CC6FAAE8BEBA7D83294C1656DA4B8308968EE49DDDC2D0C71A17B02053027D7DB8F4E77E3AD1C80FFDBD37938876B671D80E99F5F1C7BAEF50B7E05CC0CAB8979A3A2A852A7018673292CDBCECAABC74B839FD3C084682357A5414E431C1F25E34850FBC779285854FC6AFDCFB7B7749E0DFFF5F93A8AC146C873B407F2CD6CCB461312AE35DEE8D6D51347B0824156DDD60762807A5C132C0667FBBCA7489058C47A156B2A50CA5C24B894C1EE7C44B87179176905B7657A8E95AF7F2EA6C6D2A12384CAC9E6D14253DC5C31BB8FDD2462581C109D2DAF72238E4B63F436DFD7DD5571548D2206BCB8B837D8CE8C9C3B3066E46A1655E3D84AF42DE1051DDDEF438821E0F0C1EDBFE148356D707036B269C19C4CEF4C4BB4048364E2A3886E38B42EFA15F22CC8F92D802EAF3FFAF9BF45247DB76C03E99F662884DC2A29EE488023BF0EBD46539DAC307410846B25280349106CBAADFD658A066C3664E35C4C696726140275AF7611D2369ADB8258EBD2279DD24DBFE002377B9A3C1B120890A6FDF20597417F88F15C",
48: "ED6621A6BAAF64578BD988D08FA8D3D2873C87C38AD7EB38C0922FF3F7E59F0A8DE00744698939D42D459E6105ADD3C77E9FC4911F572B3856C9AF6C15ACE704F026C6B2FC8BDCD9D84022DC10C47E11E934BEB7E1DCE8B6A2BAECD384D810BC3AE587E7EDE57CDEA908DDB020885624BE042DE0ACCDE1511CE38AFB6C9DD812FEC339B8137D88108F07035A3869CDCFCB9402CF96B9E331297B644DB13DFA88F60605E067B9F35607D2D75573E0913F8080EF603AC4B7133D836B84F32F48534237CC559ACABA53A96885D297CBB572BAB1C3275F7CC7A3AAD10F29E727B28BC29B038F4003F8C93FEDC88C63D72609A5330F36F4986CD9536812676A89305C6CE58D4C49C088B5E273F2AF77A8D31D1B8574B9DF02CAD4930D7A7F76C067E7160ECD1A845FC6A3B508708C279A2C94F29108FCE34A63E5D5554FB530522394BCA00508DDAE6039ED9F47609781CC60F6E211BDF29E6F44ECC873DD24F1D37E49D7BB7D6279FA7B9D08B5FA8F9364EF6D4C8D129A5A97A8BBF17DBD5E64C4A31426881687BB79285B09290DE61AE40C295CB1BF3C8AF756E88068A777ACCE3C4B6E78C62AC1E8EAEBEED0AF3153983214D7459AA8E254633B52E5C0ABF4647B906AC50A62543710EE92B335A7162B540AC70F2B2F49FEA892BD72DBFC5F7A35C3AE56636AF2887BE680FA63768C27040E888202F700007DE648482F5307C56",
49: "914BDB196CD56E3B7D7D3F1D7A5E4B0A1389578F111449DC2DF643E6E29F688227C3C07033C2A3818342B229F63C229FAC11EE1AB6F0FCE8608E03B46DC983318DF15FD8DBF2970EB342BE2E534BB0455BE58290A48FC60973553E94C4CB53566CE0250D9FCF055936523A8ABFC9287DB9DDEC54710859DF62829D2B6A100358EB64E6219451868D6BBC2AE4DCEA0C0E338B26B748D4A1A34AC16233046CB7D346D0D79A3CCDD4CDCB435B9B3075AEBEDB4C0F18C5DC006F5C208D882308510C75E729D08C779CA99D5A685E78D5628094AD137BAA635B7FC0F492C48A9CDBE63209C8231455012EB3E830B5B2A79ACD8FEA8016243EBC85BF5D6F46A48FE013D2B3B789BC5F743200BCDE03995BB2B6A640CFB099788E380B4E01D75409A8D8B3887DF2B1CD34960091653EEA6C52EDD745B9363BFFF666891D9C8BF511C3C07D38F49DA2892DCCEC81E1722F6EACB3214E3335C93E6141AB94E5EC31BABF8108F6BEBC3E60B1BFE37579B4D5DC8B77A347940CC1F6BFB5B46097B1EEEC4C354159BB3475E05FAB6BDE5672014D9489CB70DDF537F7209BB9EBF1FC6B8B94564AAAD5ADDD83CE6E51EFCF73DC6080D738C4FF1CBC87ED420A0B92FA459AD7BE58789F0A191D149F88173184A22874DF6D39DC1BCD4413648B178ECB03F8358547A68DE7B672BE9BA1FFC8BA392F8A58ED2806155C00F86B7669BEE4220D420",
50: "97051FC67ACA30E8AEE73D3A8CF38BB13524D4E0EBD9BE68398C7C16227CABB1D0B0A0ABE7B6384ABA02905BA0C3C7363599D059C7B4C99DB165CD14FA12FA7912449CA7DD5E346D8010C85A757382270DAD15BA3CE36A76EF55F81A1E80BF366B37FE3A88EC722028C25E234E624040450A99CD808F942568AA7133981D72E7F2928894670AD5399482DF1B90E7E64062F830B736C79C30F36281495C76699CD48404673FA334F042F9E0E67DD7F3853BF71ABEAF6A9A5546855E840CE42B224D8F6490C6CE5FC02EBAF4FFC390107058F54CD635D4A7F2878099C1EF495750E6921BE2F39AD808C4210F287319F811A254CEF8CF153FC50AB2F3D694A530949E5F578D075DB96DDCF2BB90ED3DE09D9CA8E08662FD8982741DE1CE0A6B64C3D3D5004B5C04B2B0DFD976A20FACC94D1762D41EE03B40D2CF367612812EF4CC41D1BFE9CEB51AE3A22AF1BE7B85A057D3048D0E73FA0FDAF1119EFD76F0A41BE63128B22D64A5553E9549D411483BBCA1483EF30CF6A6D317AD2C7973EFA6D4C1121F703D2F48FCDA3177AD450D75D2A28D2C244AEA13F0E60AEED8ACBAB444D400DF5E280DB799B2D9A984DF1E2567D39D1DE58EF78CA6B4D8BC172B07DCB02D156CA96EEFAC69E556CFCE0AAB617C7FBB8C34871C1D35E74B7BD307D3F2E424C7A9AD676A1A69E0FE735EA50887A1DFAE6CA2FE4460FC7EF323ADE493020"
}
|
import numpy as np
import pandas as pd
from prettytable import PrettyTable
from recourse.helper_functions import parse_classifier_args
from scipy.stats import gaussian_kde as kde
from scipy.interpolate import interp1d
# todo: replace percentiles with scikit-learn API
# todo: get_feasible_values/get_flip_actions should include an option to also include all observed values
#### Internal Classes ####
class _BoundElement(object):
    """
    Immutable container for the lower/upper bound of a single feature.

    A bound can be 'absolute' (lb/ub are raw values) or 'percentile'
    (lb/ub are given as percentiles of `values` and converted to raw
    values at construction time). The object is kept immutable and a new
    one is built whenever a bound changes, so it never needs to retain
    the observed values.
    """
    # data types a bounded variable may have
    _valid_variable_types = {int, float}
    # accepted bound-type names and their single-letter shorthands
    _valid_bound_types = {'absolute', 'percentile'}
    _valid_bound_codes = {'a': 'absolute', 'p': 'percentile'}
    def __init__(self, bound_type = 'absolute', lb = None, ub = None, values = None, variable_type = None):
        """
        :param bound_type: `absolute` / `a` (default) or `percentile` / `p`
        :param lb: value of lower bound (numeric);
                   set as min(values) by default;
                   must be within [0.0, 100.0] if bound_type is `percentile`
        :param ub: value of upper bound (numeric);
                   set as max(values) by default;
                   must be within [0.0, 100.0] if bound_type is `percentile`
        :param values: observed values for variable;
                       required if `bound_type` is `percentile`;
                       used to validate bounds if `bound_type` = `absolute`
        :param variable_type: the data type of the dimension this bound is
                              being used for. Must be in {int, float};
                              inferred from `values` when None.
        """
        # set bound type (expand one-letter codes to full names)
        assert isinstance(bound_type, str)
        if bound_type in self._valid_bound_codes:
            bound_type = self._valid_bound_codes[bound_type]
        else:
            assert bound_type in self._valid_bound_types
        self._bound_type = str(bound_type)
        # set variable type (infer from values when not given)
        if variable_type is None:
            assert values is not None
            variable_type = _determine_variable_type(values)
        else:
            assert variable_type in self._valid_variable_types
        self._variable_type = variable_type
        # percentile bounds: remember the percentiles (_qlb/_qub), then
        # convert lb/ub to raw values so downstream code sees raw bounds
        if bound_type == 'percentile':
            assert values is not None
            values = np.array(values).flatten()
            assert isinstance(lb, (float, int, bool, np.ndarray))
            assert isinstance(ub, (float, int, bool, np.ndarray))
            assert 0.0 <= lb <= 100.0
            assert 0.0 <= ub <= 100.0
            self._qlb = lb
            self._qub = ub
            lb = np.percentile(values, lb)
            ub = np.percentile(values, ub)
        # absolute bounds: default missing bounds to the observed range and
        # check explicit bounds actually enclose the observed values
        if bound_type == 'absolute':
            if lb is None:
                assert values is not None
                lb = np.min(values)
            else:
                assert isinstance(lb, (float, int, bool)) or (isinstance(lb, np.ndarray) and len(lb) == 1)
            if ub is None:
                assert values is not None
                ub = np.max(values)
            else:
                assert isinstance(ub, (float, int, bool)) or (isinstance(ub, np.ndarray) and len(ub) == 1)
            # absolute bounds span the full percentile range by definition
            self._qlb = 0.0
            self._qub = 100.0
            if values is not None:
                assert np.less_equal(lb, np.min(values))
                assert np.greater_equal(ub, np.max(values))
        # integer variables get integral bounds (widened outward)
        if variable_type == int:
            lb = np.floor(lb)
            ub = np.ceil(ub)
        # set lower bound and upper bound
        assert np.less_equal(lb, ub)
        self._lb = float(lb)
        self._ub = float(ub)
    @property
    def bound_type(self):
        return str(self._bound_type)
    @property
    def lb(self):
        """ value of the lower bound """
        return float(self._lb)
    @property
    def ub(self):
        """ value of the upper bound """
        return float(self._ub)
    @property
    def qlb(self):
        """ value of the lower bound (as a percentile) """
        return float(self._qlb)
    @property
    def qub(self):
        """ value of the upper bound (as a percentile) """
        return float(self._qub)
    def __repr__(self):
        return "(%r, %r, %r)" % (self._lb, self._ub, self._bound_type)
class _ActionElement(object):
"""
Internal class to store the properties of actions in each dimension. Used by ActionSet.
"""
_default_check_flag = False
_valid_step_types = {'relative', 'absolute'}
_valid_variable_types = {int, float}
    def __init__(self, name, values, bounds = None, variable_type = None, mutable = True, step_type = 'relative', step_direction = 0, step_size = 0.01):
        """
        Represent and manipulate feasible actions for a single feature
        :param name: name of the variable (at least 1 character)
        :param values: values of the variable (must be non-empty, non-nan, finite)
        :param bounds: bounds (must be a tuple of the form (lb, ub) or (lb, ub, bound_type) where bound_type is a valid bound type)
        :param variable_type: 'int' / 'float' / set to None (default) to determine automatically from values
        :param mutable: boolean to represent whether or not the variable can be changed (True by default)
        :param step_type: 'relative' (default) or 'absolute' — how step_size is interpreted
        :param step_direction: +1 or -1 if variable can only increase or decrease; 0 for either
        :param step_size: size of each step in the action grid (0.01 by default)
        """
        # set name (immutable)
        assert isinstance(name, str), 'name must be string (or unicode)'
        assert len(name) >= 1, 'name must have at least 1 character'
        self._name = str(name) # store defensive copy
        # set values (immutable); stored sorted so grid construction is monotone
        values = np.sort(np.copy(values).flatten())
        assert len(values) >= 1, 'must have at least 1 value'
        assert np.all(np.isfinite(values)), 'values must be finite'
        self._values = values
        # set variable type (must precede bounds, which may infer type from values)
        self.variable_type = variable_type
        # flip direction (unknown until the element is aligned with a classifier)
        self._flip_direction = float('nan')
        self.mutable = mutable
        # set bounds
        self.bounds = bounds
        # step related properties (step_type before step_size: the size check depends on the type)
        self.step_type = step_type
        self.step_size = step_size
        self.step_direction = step_direction
        self._grid = np.array([])
        self.update_grid()
        # interpolation
        self._interpolator = None
        assert self._check_rep()
def _check_rep(self, check_flag = True):
"""
:return: True if all representation invariants are true
"""
if check_flag:
assert self.lb <= self.ub, 'lb must be <= ub'
g = self._grid
assert len(g) == len(np.unique(g)), 'grid is not unique'
assert np.all(np.isfinite(g)), 'grid contains elements that are nan or inf'
assert np.all(g[:-1] <= g[1:]), 'grid is not sorted'
return True
def __len__(self):
return len(self._grid)
def __repr__(self):
return '%r: (%r, %r)' % (self._name, self._bounds.lb, self._bounds.ub)
#### core properties ####
@property
def name(self):
""":return: name of the variable"""
return self._name
@property
def values(self):
""":return: array containing observed values for this variable."""
return np.copy(self._values)
@property
def mutable(self):
""":return: True iff variable can be changed."""
return self._mutable
@mutable.setter
def mutable(self, flag):
assert np.isin(flag, (False, True)), 'actionable must be boolean'
self._mutable = bool(flag)
@property
def actionable(self):
if not self.aligned:
return self.mutable
if not self.mutable:
return False
# if mutable, then check that directions OK
sd = np.sign(self._step_direction)
fd = np.sign(self._flip_direction)
conflict = (fd == 0) or (fd * sd == -1)
return not conflict
    @property
    def variable_type(self):
        """:return: data type of the variable (int or float)"""
        return self._variable_type
    @variable_type.setter
    def variable_type(self, variable_type):
        """Set the data type; when None, infer it from the stored values."""
        if variable_type is None:
            self._variable_type = _determine_variable_type(self._values, self._name)
        else:
            assert variable_type in self._valid_variable_types
            self._variable_type = variable_type
@property
def size(self):
""":return: # of points in action grid """
# defined in addition to __len__ so that we can access len using ActionSet.__getattr__
return len(self._grid)
#### bounds ####
@property
def bounds(self):
return self._bounds
@bounds.setter
def bounds(self, b):
if isinstance(b, (list, tuple)):
if len(b) == 2:
b = _BoundElement(values = self._values, lb = b[0], ub = b[1])
elif len(b) == 3:
b = _BoundElement(values = self._values, lb = b[0], ub = b[1], bound_type = b[2])
elif b is None:
b = _BoundElement(values = self._values)
assert isinstance(b, _BoundElement), 'bounds must be a list/tuple of the form (lb, ub) or (lb, ub, bound_type)'
self._bounds = b
    @property
    def lb(self):
        """:return: current (resolved) lower bound value."""
        return self._bounds.lb
    @lb.setter
    def lb(self, value):
        """Set the lower bound, keeping the current bound type and upper bound."""
        b = self._bounds
        if b.bound_type == 'percentile':
            # percentile bounds keep the raw percentile of the upper bound in qub
            b_new = _BoundElement(bound_type = 'percentile', lb = value, ub = b.qub, values = self._values)
        else:
            b_new = _BoundElement(bound_type = b.bound_type, lb = value, ub = b.ub, values = self._values)
        self._bounds = b_new
    @property
    def ub(self):
        """:return: current (resolved) upper bound value."""
        return self._bounds.ub
    @ub.setter
    def ub(self, value):
        """Set the upper bound, keeping the current bound type and lower bound."""
        b = self._bounds
        if b.bound_type == 'percentile':
            # percentile bounds keep the raw percentile of the lower bound in qlb
            b_new = _BoundElement(bound_type = 'percentile', lb = b.qlb, ub = value, values = self._values)
        else:
            b_new = _BoundElement(bound_type = b.bound_type, lb = b.lb, ub = value, values = self._values)
        self._bounds = b_new
    @property
    def bound_type(self):
        """:return: bound type of the current bounds (e.g. 'percentile' or 'absolute')."""
        return self._bounds.bound_type
@bound_type.setter
def bound_type(self):
b = self._bounds
if b.bound_type == 'percentile':
b_new = _BoundElement(bound_type = 'percentile', lb = b.qlb, ub = b.qub, values = self._values)
else:
b_new = _BoundElement(bound_type = b.bound_type, lb = b.lb, ub = b.ub, values = self._values)
self._bounds = b_new
#### grid directions ####
    @property
    def step_type(self):
        """:return: grid step type (one of self._valid_step_types, e.g. 'relative' or 'absolute')."""
        return self._step_type
    @step_type.setter
    def step_type(self, step_type):
        """Set the grid step type; must be one of self._valid_step_types."""
        assert isinstance(step_type, str), '`step_type` must be str'
        assert step_type in self._valid_step_types, '`step_type` is %r (must be %r)' % (step_type, self._valid_step_types)
        self._step_type = str(step_type)
    @property
    def step_direction(self):
        """:return: allowed step direction: -1 (only decrease), 0 (both), or +1 (only increase)."""
        return self._step_direction
    @step_direction.setter
    def step_direction(self, step_direction):
        """Set the allowed step direction; stored as the sign of the given value."""
        assert np.isfinite(step_direction), "step_direction must be finite"
        self._step_direction = np.sign(step_direction)
    @property
    def step_size(self):
        """:return: grid step size (interpreted per step_type: fraction of the
        range when 'relative', raw units otherwise)."""
        return self._step_size
    @step_size.setter
    def step_size(self, s):
        """Set the step size; must be > 0, and <= 1 for relative steps."""
        # NOTE(review): bool/np.ndarray are accepted by the assert but then cast
        # with float(s), so only scalar-like inputs actually work -- confirm intent
        assert isinstance(s, (float, int, bool, np.ndarray))
        assert np.greater(s, 0.0)
        if self._step_type == 'relative':
            assert np.less_equal(s, 1.0)
        self._step_size = float(s)
    @property
    def grid(self):
        """:return: copy of the current grid of feasible values (np.ndarray)."""
        return np.array(self._grid)
def update_grid(self):
"""Generate grid of feasible values"""
# end points
start = self.lb
stop = self.ub
step = self.step_size
if self._variable_type == int:
start = np.floor(self.lb)
stop = np.ceil(self.ub)
if self.step_type == 'relative':
step = np.multiply(step, stop - start)
if self._variable_type == int:
step = np.ceil(step)
# generate grid
try:
grid = np.arange(start, stop + step, step)
except Exception:
ipsh()
# cast grid
if self._variable_type == int:
grid = grid.astype('int')
self._grid = grid
#### kde and percentile computation ###
    @property
    def interpolator(self):
        """:return: lazily-built CDF interpolator used for percentile queries."""
        if self._interpolator is None:
            self.update_interpolator()
        return self._interpolator
    def update_interpolator(self, left_buffer = 1e-6, right_buffer = 1e-6):
        """Fit a smoothed empirical CDF over the grid and store a linear
        interpolator for percentile queries.
        :param left_buffer: probability mass reserved below the grid (0 <= b < 1)
        :param right_buffer: probability mass reserved above the grid (0 <= b < 1)
        """
        # check buffers
        left_buffer = float(left_buffer)
        right_buffer = float(right_buffer)
        assert 0.0 <= left_buffer < 1.0
        assert 0.0 <= right_buffer < 1.0
        assert left_buffer + right_buffer < 1.0
        # build kde estimator using observed values
        # NOTE(review): `kde` and `interp1d` are imported outside this view --
        # presumably scipy.stats.gaussian_kde / scipy.interpolate.interp1d; confirm
        kde_estimator = kde(self._values)
        # build the CDF over the grid
        pdf = kde_estimator(self._grid)
        cdf_raw = np.cumsum(pdf)
        # normalize so the CDF ranges over (left_buffer, 1 - right_buffer)
        total = cdf_raw[-1] + left_buffer + right_buffer
        cdf = (left_buffer + cdf_raw) / total
        self._interpolator = interp1d(x = self._grid, y = cdf, copy = False, fill_value = (left_buffer, 1.0 - right_buffer), bounds_error = False, assume_sorted = True)
    def percentile(self, x):
        """:return: estimated percentile(s) of x under the smoothed empirical CDF."""
        return self.interpolator(x)
#### coefficient-related direction ####
    @property
    def aligned(self):
        """:return: True iff a flip direction has been set (i.e. the element was
        aligned with classifier coefficients); unaligned state is stored as NaN."""
        return not np.isnan(self._flip_direction)
    @property
    def flip_direction(self):
        """:return: direction (+1/-1/0) that flips the prediction, or NaN if unaligned."""
        if self.aligned:
            return int(self._flip_direction)
        else:
            return float('nan')
    @flip_direction.setter
    def flip_direction(self, flip_direction):
        """Set the flip direction; stored as the sign of the given value."""
        # NOTE(review): the finiteness assert means an element cannot be reset to
        # the unaligned (NaN) state through this setter -- confirm that is intended
        assert np.isfinite(flip_direction), "flip_direction must be finite"
        self._flip_direction = int(np.sign(flip_direction))
#### methods ####
def feasible_values(self, x, return_actions = True, return_percentiles = False):
"""
returns an array of feasible values or actions for this feature from a specific point x
array of feasible values will always include x (or an action = 0.0)
:param x: point
:param return_actions: if False,the array of values will contain new feasible points x_new
if True, the array of values will contain the changes between x to x_new (a = x_new - x);
:param return_percentiles: if True, then percentiles of all new points will also be included
:return:
"""
assert np.isfinite(x), 'x must be finite.'
assert return_actions is False or self.aligned, 'cannot determine feasible_actions before ActionSet is aligned'
if self.mutable:
x_new = self.grid
if self._step_direction > 0:
x_new = np.extract(np.greater_equal(x_new, x), x_new)
elif self._step_direction < 0:
x_new = np.extract(np.less_equal(x_new, x), x_new)
# by default step_direction = 0 so
if not x in x_new: # include current point
x_new = np.insert(x_new, np.searchsorted(x_new, x), x)
else:
x_new = np.array([x])
if return_actions:
if self.actionable:
# flip-direction must be 1 or -1
if self._flip_direction > 0:
x_new = np.extract(np.greater_equal(x_new, x), x_new)
else:
x_new = np.extract(np.less_equal(x_new, x), x_new)
vals = x_new - x
else:
vals = x_new
if return_percentiles:
return vals, self.percentile(x_new)
else:
return vals
class _ActionSlice(object):
    """
    Internal class to set ActionElement properties from slices of an ActionSet.
    Using this class we can support commands like:
        a = ActionSet(...)
        a[1:2].ub = 2
    Attribute reads return a list of the attribute across the sliced elements;
    attribute writes broadcast a value (or list of values) onto them.
    """
    def __init__(self, action_elements):
        self._indices = {e.name: j for j, e in enumerate(action_elements)}
        self._elements = {e.name: e for e in action_elements}
    def __getattr__(self, name):
        # fix: the internal-name branch called object.__getattr__, which does not
        # exist (object only defines __getattribute__), and dropped the return value
        if name in ('_indices', '_elements'):
            return object.__getattribute__(self, name)
        return [getattr(self._elements[n], name) for n, j in self._indices.items()]
    def __setattr__(self, name, value):
        if name in ('_indices', '_elements'):
            object.__setattr__(self, name, value)
        else:
            # only attributes that exist on _ActionElement may be broadcast
            assert hasattr(_ActionElement, name)
            attr_values = _expand_values(value, len(self._indices))
            for n, j in self._indices.items():
                setattr(self._elements[n], name, attr_values[j])
#### Wrapper Class #####
class ActionSet(object):
    """
    Container of _ActionElement objects, one per variable in a dataset.
    Supports broadcast attribute access (e.g. `a.lb` returns lower bounds of all
    variables) via the __getattribute__/__setattr__ overrides below.
    """
    _default_print_flag = True
    _default_check_flag = True
    _default_bounds = (1, 99, 'percentile')
    _default_step_type = 'relative'
    def __init__(self, X, names = None, **kwargs):
        """
        Container of ActionElement for each variable in a dataset.
        Requirements:
        :param X: pandas.DataFrame or numpy matrix containing features as columns
                  and samples as rows (must contain at least 1 row and 1 column)
        :param names: list of strings containing variable names when X is array-like
        # optional keyword arguments
        :param custom_bounds: dictionary of custom bounds
        :param default_bounds: tuple containing information for default bounds
            - (lb, ub, type) where type = 'percentile' or 'absolute';
            - (lb, ub) if type is omitted, it is assumed to be 'absolute'
        :param default_step_type:
        :param print_flag: set to True to print a table with the ActionSet as _repr_
        :param check_flag: set to True to check for internal errors
        """
        assert isinstance(X, (pd.DataFrame, np.ndarray)), '`X` must be pandas.DataFrame or numpy.ndarray'
        if isinstance(X, pd.DataFrame):
            names = X.columns.tolist()
            X = X.values
        # validate names
        assert isinstance(names, list), '`names` must be a list'
        assert all([isinstance(n, str) for n in names]), '`names` must be a list of strings'
        assert len(names) >= 1, '`names` must contain at least 1 element'
        assert all([len(n) > 0 for n in names]), 'elements of `names` must have at least 1 character'
        assert len(names) == len(set(names)), 'elements of `names` must be distinct'
        # validate X
        xdim = X.shape
        assert len(xdim) == 2, '`values` must be a matrix'
        assert xdim[0] >= 1, '`values` must have at least 1 row'
        assert xdim[1] == len(names), '`values` must contain len(`names`) = %d columns' % len(names)
        assert np.array_equal(X, X + 0.0), 'values must be numeric'
        # parse key word arguments
        custom_bounds = kwargs.get('custom_bounds', {})
        default_bounds = kwargs.get('default_bounds', self._default_bounds)
        default_step_type = kwargs.get('default_step_type', self._default_step_type)
        self.print_flag = kwargs.get('print_flag', self._default_print_flag)
        self.check_flag = kwargs.get('check_flag', self._default_check_flag)
        # build action elements
        indices = {}
        elements = {}
        for j, n in enumerate(names):
            elements[n] = _ActionElement(name = n,
                                         values = X[:, j],
                                         step_type = default_step_type,
                                         bounds = custom_bounds.get(n, default_bounds))
            indices[n] = j
        self._names = [str(n) for n in names]
        self._indices = indices
        self._elements = elements
        assert self._check_rep()
    ### built_ins ###
    def __len__(self):
        return len(self._names)
    def __iter__(self):
        return (self._elements[n] for n in self._names)
    def _index_iterator(self):
        """Iterate (name, column index) pairs for all variables."""
        return self._indices.items()
    def _index_iterator_actionable(self):
        """Iterate (name, column index) pairs for actionable variables only."""
        return ((n, j) for n, j in self._indices.items() if self._elements[n].actionable)
    def __getitem__(self, index):
        """Index by name, position, list of either, boolean/index array, or slice."""
        if isinstance(index, str):
            return self._elements[index]
        elif isinstance(index, (int, np.int_)):
            return self._elements[self._names[index]]
        elif isinstance(index, list):
            names = [self._names[j] if isinstance(j, (int, np.int_)) else j for j in index]
            return _ActionSlice([self._elements[n] for n in names])
        elif isinstance(index, np.ndarray):
            names = np.array(self._names)[index].tolist()
            return _ActionSlice([self._elements[n] for n in names])
        elif isinstance(index, slice):
            return _ActionSlice([self._elements[n] for n in self._names[index]])
        else:
            raise IndexError('index must be str, int, a list of strings/int or a slice')
    def __setitem__(self, name, e):
        assert isinstance(e, _ActionElement), 'ActionSet can only contain ActionElements'
        # fix: error message previously had an unfilled %s placeholder
        assert name in self._names, 'no variable with name %s in ActionSet' % name
        self._elements.update({name: e})
    def __getattribute__(self, name):
        # broadcast reads of _ActionElement attributes across all elements;
        # private names and 'aligned' resolve on the ActionSet itself
        if name[0] == '_' or name == 'aligned' or not hasattr(_ActionElement, name):
            return object.__getattribute__(self, name)
        else:
            return [getattr(self._elements[n], name) for n, j in self._indices.items()]
    def __setattr__(self, name, value):
        # broadcast writes of _ActionElement attributes once elements exist;
        # during __init__ (before _elements is set) fall back to normal behavior
        if hasattr(self, '_elements'):
            assert hasattr(_ActionElement, name)
            attr_values = _expand_values(value, len(self))
            for n, j in self._indices.items():
                self._elements[n].__setattr__(name, attr_values[j])
        else:
            object.__setattr__(self, name, value)
    ### validation ###
    @property
    def check_flag(self):
        """:return: True iff representation checks run in _check_rep."""
        return bool(self._check_flag)
    @check_flag.setter
    def check_flag(self, flag):
        assert isinstance(flag, bool)
        self._check_flag = bool(flag)
    def _check_rep(self):
        """:return: True if representation invariants are true."""
        if self._check_flag:
            elements = self._elements.values()
            aligned = [e.aligned for e in elements]
            assert all([isinstance(e, _ActionElement) for e in elements])
            # alignment is all-or-nothing across elements
            assert all(aligned) or (not any(aligned))
        return True
    ### printing ###
    @property
    def print_flag(self):
        """:return: True iff repr() renders the full table."""
        return bool(self._print_flag)
    @print_flag.setter
    def print_flag(self, flag):
        if flag is None:
            self._print_flag = bool(ActionSet._default_print_flag)
        elif isinstance(flag, bool):
            self._print_flag = bool(flag)
        else:
            raise AttributeError('print_flag must be boolean or None')
    def __str__(self):
        return self.tabulate()
    def __repr__(self):
        # fix: previously returned None when print_flag was off, which makes
        # repr() raise a TypeError; fall back to the default object repr
        if self._print_flag:
            return self.tabulate()
        return object.__repr__(self)
    def tabulate(self):
        """:return: pretty-printed table summarizing all action elements."""
        t = PrettyTable()
        t.add_column("name", self.name, align = "r")
        t.add_column("variable type", self.variable_type, align = "r")
        t.add_column("mutable", self.mutable, align = "r")
        t.add_column("actionable", self.actionable, align = "r")
        t.add_column("step direction", self.step_direction, align = "r")
        t.add_column("flip direction", self.flip_direction, align = "r")
        t.add_column("grid size", self.size, align = "r")
        t.add_column("step type", self.step_type, align = "r")
        t.add_column("step size", self.step_size, align = "r")
        t.add_column("lb", self.lb, align = "r")
        t.add_column("ub", self.ub, align = "r")
        return str(t)
    @property
    def df(self):
        """
        :return: data frame containing key action set parameters
        """
        df = pd.DataFrame({'name': self.name,
                           'variable_type': self.variable_type,
                           'lb': self.lb,
                           'ub': self.ub,
                           'grid_size': self.size,
                           'step_size': self.step_size,
                           'mutable': self.mutable,
                           'actionable': self.actionable,
                           'step_direction': self.step_direction,
                           'flip_direction': self.flip_direction})
        return df
    def to_latex(self):
        """
        :return: formatted latex table summarizing the action set for publications
        """
        # raw strings: these contain literal backslashes for LaTeX
        tex_binary_str = r'$\{0,1\}$'
        tex_integer_str = r'$\mathbb{Z}$'
        tex_real_str = r'$\mathbb{R}$'
        df = self.df
        df = df.drop(['actionable', 'flip_direction'], axis = 1)
        new_types = [tex_real_str] * len(df)
        new_ub = ['%1.1f' % v for v in df['ub'].values]
        new_lb = ['%1.1f' % v for v in df['lb'].values]
        for i, t in enumerate(df['variable_type']):
            ub, lb = df['ub'][i], df['lb'][i]
            if t == 'int':
                new_ub[i] = '%d' % int(ub)
                new_lb[i] = '%d' % int(lb)
                new_types[i] = tex_binary_str if lb == 0 and ub == 1 else tex_integer_str
        df['variable_type'] = new_types
        df['ub'] = new_ub
        df['lb'] = new_lb
        df['actionable'] = df['mutable'].map({False: 'no', True: 'yes'})
        # fix: `&` binds tighter than `==`, so `a & b == 1` parsed as
        # `(a & b) == 1`; parenthesize the comparisons
        up_idx = df['mutable'] & (df['step_direction'] == 1)
        dn_idx = df['mutable'] & (df['step_direction'] == -1)
        df.loc[up_idx, 'actionable'] = 'only increases'
        df.loc[dn_idx, 'actionable'] = 'only decreases'
        df = df.drop(['mutable', 'step_direction'], axis = 1)
        df = df.rename(columns = {
            'name': 'Name',
            'grid_size': r'\# Actions',
            'variable_type': 'Type',
            'actionable': 'Mutability',
            'lb': 'LB',
            'ub': 'UB',
            })
        table = df.to_latex(index = False, escape = False)
        return table
    #### alignment ####
    def align(self, *args, **kwargs):
        """
        adjusts direction of recourse for each element in action set
        :param clf: scikit-learn classifier or vector of coefficients
        :return:
        """
        coefs, _ = parse_classifier_args(*args, **kwargs)
        assert len(coefs) == len(self)
        flips = np.sign(coefs)
        for n, j in self._indices.items():
            self._elements[n].flip_direction = flips[j]
    @property
    def aligned(self):
        """
        :return: True if action set has been aligned with coefficients of linear classifier
        """
        return all([e.aligned for e in self._elements.values()])
    #### grid generation ####
    def feasible_grid(self, x, return_actions = True, return_percentiles = True, return_immutable = False):
        """
        returns feasible features when features are x
        :param x: list or np.array containing vector of feature values (must have same length as ActionSet)
        :param return_actions: set to True for returned grid to reflect changes to x
        :param return_percentiles: set to True to include percentiles in return values
        :param return_immutable: set to True to include non-actionable features in
                                 the output (default returns actionable features only)
        :return: dictionary of the form {name: feasible_values}
        """
        assert isinstance(x, (list, np.ndarray)), 'feature values should be list or np.ndarray'
        assert len(x) == len(self), 'dimension mismatch x should have len %d' % len(self)
        assert np.all(np.isfinite(x)), 'feature values should be finite'
        if return_immutable:
            output = {n: self._elements[n].feasible_values(x[j], return_actions, return_percentiles) for n, j in self._indices.items()}
        else:
            output = {n: self._elements[n].feasible_values(x[j], return_actions, return_percentiles) for n, j in self._indices.items() if self._elements[n].actionable}
        if return_percentiles:
            return {n: v[0] for n, v in output.items()}, {n: v[1] for n, v in output.items()}
        return output
### Helper Functions
def _determine_variable_type(values, name=None):
    """Infer the variable type (int or float) from observed values.
    Raises ValueError when any element is a string."""
    for entry in values:
        if isinstance(entry, str):
            raise ValueError(">=1 elements %s are of type str" % ("in '%s'" % name if name else ''))
    # int iff every value is a whole number
    return int if np.equal(np.mod(values, 1), 0).all() else float
def _expand_values(value, m):
    """Broadcast `value` to a length-m list/array.
    :param value: scalar, str, bool, length-1 or length-m list/np.ndarray
    :param m: target length
    :return: list (or np.ndarray) of m values
    :raises ValueError: on length mismatch or unsupported type
    """
    if isinstance(value, np.ndarray):
        if len(value) == m:
            value_array = value
        elif value.size == 1:
            value_array = np.repeat(value, m)
        else:
            raise ValueError("length mismatch; need either 1 or %d values" % m)
    elif isinstance(value, list):
        if len(value) == m:
            value_array = value
        elif len(value) == 1:
            # fix: was `[value] * m`, which built a list of m one-element lists
            value_array = value * m
        else:
            raise ValueError("length mismatch; need either 1 or %d values" % m)
    elif isinstance(value, str):
        value_array = [str(value)] * m
    elif isinstance(value, bool):
        # bool is checked before int because bool is a subclass of int
        value_array = [bool(value)] * m
    elif isinstance(value, int):
        value_array = [int(value)] * m
    elif isinstance(value, float):
        value_array = [float(value)] * m
    else:
        # fix: the %s placeholder was never filled in
        raise ValueError("unknown variable type %s" % type(value).__name__)
    return value_array
|
import requests as re
import colorama
from colorama import Fore, Back, Style
import json
colorama.init()
print(f"""{Fore.LIGHTGREEN_EX}
█ █░ ██▓███ ██████ ▄████▄ ▄▄▄ ███▄ █ ███▄ █ ▓█████ ██▀███
▓█░ █ ░█░▓██░ ██▒ ▒██ ▒ ▒██▀ ▀█ ▒████▄ ██ ▀█ █ ██ ▀█ █ ▓█ ▀ ▓██ ▒ ██▒
▒█░ █ ░█ ▓██░ ██▓▒ ░ ▓██▄ ▒▓█ ▄ ▒██ ▀█▄ ▓██ ▀█ ██▒▓██ ▀█ ██▒▒███ ▓██ ░▄█ ▒
░█░ █ ░█ ▒██▄█▓▒ ▒ ▒ ██▒▒▓▓▄ ▄██▒░██▄▄▄▄██ ▓██▒ ▐▌██▒▓██▒ ▐▌██▒▒▓█ ▄ ▒██▀▀█▄
░░██▒██▓ ▒██▒ ░ ░ ▒██████▒▒▒ ▓███▀ ░ ▓█ ▓██▒▒██░ ▓██░▒██░ ▓██░░▒████▒░██▓ ▒██▒
░ ▓░▒ ▒ ▒▓▒░ ░ ░ ▒ ▒▓▒ ▒ ░░ ░▒ ▒ ░ ▒▒ ▓▒█░░ ▒░ ▒ ▒ ░ ▒░ ▒ ▒ ░░ ▒░ ░░ ▒▓ ░▒▓░
▒ ░ ░ ░▒ ░ ░ ░▒ ░ ░ ░ ▒ ▒ ▒▒ ░░ ░░ ░ ▒░░ ░░ ░ ▒░ ░ ░ ░ ░▒ ░ ▒░
░ ░ ░░ ░ ░ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ░ ░░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
░
{Fore.LIGHTMAGENTA_EX}by @nb1b3k""")
print(f"{Fore.LIGHTCYAN_EX}Input a wordpress site below: ")
userInput = input("Target: ")
# normalize the target: strip any scheme and trailing slash, then force https://
website = userInput.replace("https://", "").replace("http://", "").rstrip("/")
website = "https://" + website
# paths probed on the target; the elif chain below dispatches on substrings of
# the url, so list order matters (e.g. '.gitignore' must be checked before '.git')
checks = ['xmlrpc.php', 'wp-cron.php', 'wp-config.php', 'wp-includes/', 'wp-content', 'wp-json', 'robots.txt', 'sitemap.xml', '.htaccess', '.gitignore', '.git', '.log', 'readme.html']
ua = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'}
# probe each path and report what was found
# NOTE: `re` is the requests module (aliased at import time), not the regex module
for path in checks:
    url = f"{website}/{path}"
    try:
        getData = re.get(url, headers=ua)
        status_code = getData.status_code
        responseSource = getData.text
        # XML-RPC check
        if "xmlrpc.php" in url and "XML-RPC server accepts POST requests only" in responseSource:
            print(f"{Fore.GREEN} \n[+] XML-RPC is enabled :)\n[+] URL: {Fore.BLUE} {url}")
        elif "xmlrpc.php" in url:
            print(f"{Fore.RED} \n[!] XML-RPC is disabled :(")
        # check for wp-config.php
        # fix: the original tested `"" in responseSource`, which is always True, so
        # wp-config.php and wp-cron.php were always reported as not accessible;
        # use the HTTP status code instead
        elif "wp-config.php" in url and status_code != 200:
            print(f"{Fore.RED} \n[!] wp-config.php is not accessible :(")
        elif "wp-config.php" in url:
            print(f"{Fore.GREEN} \n[+] wp-config.php is accessible :)\nURL: {Fore.BLUE} {url}")
        # check for wp-cron.php
        elif "wp-cron.php" in url and status_code != 200:
            print(f"{Fore.RED} \n[!] wp-cron.php is not accessible :(")
        elif "wp-cron.php" in url:
            print(f"{Fore.GREEN} \n[+] wp-cron.php is accessible :)\nURL: {Fore.BLUE} {url}")
        # check for wp-includes directory listing
        elif "wp-includes" in url and "403 Forbidden" in responseSource:
            print(f"{Fore.RED} \n[!] Directory listing is disabled in /wp-includes/ :(")
        elif "wp-includes" in url and "Index of" in responseSource:
            print(f"{Fore.GREEN} \n[+] Directory listing is enabled in /wp-includes/ :)\nURL: {Fore.BLUE} {url}")
        # check for wp-content directory listing
        elif "wp-content" in url and "403 Forbidden" in responseSource:
            print(f"{Fore.RED} \n[!] Directory listing is disabled in /wp-content/ :(")
        elif "wp-content" in url and "Index of" in responseSource:
            print(f"{Fore.GREEN} \n[+] Directory listing is enabled in /wp-content/ :)\nURL: {Fore.BLUE} {url}")
        # check for wp-json REST API
        elif "wp-json" in url and ("rest_login_required" in responseSource or "rest_cannot_access" in responseSource):
            print(f"{Fore.RED} \n[!] wp-json is disabled :(")
        elif "wp-json" in url and ("description" in responseSource or "endpoints" in responseSource):
            print(f"{Fore.GREEN} \n[+] wp-json is enabled! :)\nURL: {Fore.BLUE} {url}")
            print(f"{Fore.GREEN}\nTrying to enumerate users.....")
            url2 = f'{url}/wp/v2/users'
            print(f'URL: {Fore.BLUE} {url2}\n')
            dataReceived = re.get(url2, headers=ua).json()
            # report up to the first 10 exposed users
            for nums in range(10):
                try:
                    data = dataReceived[nums]
                    print(f'{Fore.GREEN}AdminUsername(s): {Fore.BLUE} {data["slug"]}')
                except IndexError:
                    pass
        # check for robots.txt file
        elif "robots.txt" in url and "User-agent" not in responseSource:
            print(f"{Fore.RED} \n[!] robots.txt file is not found on the target :(")
        elif "robots.txt" in url:
            print(f"{Fore.GREEN} \n[+] robots.txt file found! :)\nURL: {Fore.BLUE} {url}\n")
            print(f"{Fore.GREEN} \n[+] Printing its content now...\n")
            print(f"{Fore.BLUE}{responseSource}")
        # check for sitemap.xml
        elif "sitemap.xml" in url and status_code == 404:
            print(f"{Fore.RED} \n[!] Sitemap not found :(")
        elif "sitemap.xml" in url and status_code == 200:
            print(f"{Fore.GREEN} \n[+] Sitemap found! :)\nURL: {Fore.BLUE} {url}")
        elif "sitemap.xml" in url and status_code == 302:
            print(f"{Fore.GREEN} \n[+] Sitemap found! :)\nURL: {Fore.BLUE} {website}/wp-sitemap.xml")
        # check for .htaccess
        elif ".htaccess" in url and status_code == 404:
            print(f"{Fore.RED} \n[!] .htaccess not found :(")
        elif ".htaccess" in url and status_code == 200:
            print(f"{Fore.GREEN} \n[+] .htaccess found! :)\nURL: {Fore.BLUE} {url}")
        elif ".htaccess" in url and status_code == 403:
            print(f"{Fore.RED} \n[!] .htaccess found but is forbidden to access :(")
        # check for .gitignore
        elif ".gitignore" in url and status_code == 404:
            print(f"{Fore.RED} \n[!] .gitignore not found :(")
        elif ".gitignore" in url and status_code == 200:
            print(f"{Fore.GREEN} \n[+] .gitignore found! :)\nURL: {Fore.BLUE} {url}")
        elif ".gitignore" in url and status_code == 403:
            print(f"{Fore.RED} \n[!] .gitignore found but is forbidden to access :(")
        # check for .git
        elif ".git" in url and status_code == 404:
            print(f"{Fore.RED} \n[!] .git not found :(")
        elif ".git" in url and status_code == 200:
            print(f"{Fore.GREEN} \n[+] .git found! :)\nURL: {Fore.BLUE} {url}")
        elif ".git" in url and status_code == 403:
            print(f"{Fore.RED} \n[!] .git found but is forbidden to access :(")
        # check for .log
        elif ".log" in url and status_code == 404:
            print(f"{Fore.RED} \n[!] .log not found :(")
        elif ".log" in url and status_code == 200:
            print(f"{Fore.GREEN} \n[+] .log found! :)\nURL: {Fore.BLUE} {url}")
        elif ".log" in url and status_code == 403:
            print(f"{Fore.RED} \n[!] .log found but is forbidden to access :(")
        # check for readme.html
        elif "readme.html" in url and status_code == 404:
            print(f"{Fore.RED} \n[!] readme.html not found :(")
        elif "readme.html" in url and status_code == 200:
            print(f"{Fore.GREEN} \n[+] readme.html found! :)\nURL: {Fore.BLUE} {url}")
        elif "readme.html" in url and status_code == 403:
            print(f"{Fore.RED} \n[!] readme.html found but is forbidden to access :(")
    # fix: was a bare `except:`, which also swallowed programming errors and
    # KeyboardInterrupt; catch Exception and keep the original user message
    except Exception:
        print(f"{Fore.RED}Input a valid URL. It should have a domainName and domain extension.")
|
#!/usr/bin/python
# coding: UTF-8, break: linux, indent: 4 spaces, lang: python/eng
'''
Convert common image formats to G-code.
Program is optimized for a LulzBot Mini with a 1W engraving laser and tested
with KiCad .svg and .png plots as input. Usage with anything else than that
may prove difficult.
See: {__url__}
ToDo:
- Think about changing engraving speed with pixel value as well
- Do python variant of 'inkscape --verb=FitCanvasToDrawing --verb=FileSave --verb=FileClose *.svg' before hand (maybe https://github.com/skagedal/svgclip)
- Clean up code
- Fix bug in setup.py that causes img2ngrv.py to empty
Usage:
{__package__} --help | --version | --test
{__package__} [options] [-v...] INFILE [OUTFILE]
Options:
-v --verbose Specify (multiply) to increase output
messages (and plot a preview)
  --test                         Test components of this program and exit
-i --invert Invert pixel value of input image
-m --mirror Flip input image left to right
  -a --alternate-mode            Fix rare issue with svg transparency
-b --black-and-white Set every pixel non-zero pixel to maximum
intensity
-r --target-resolution=<float> Target resolution (dpi or diameter)
[default: {tdpi}dpi]
-c --clip=<int> Threshold pixel value to be interpreted
as engraver full on [default: {clp}]
-1 --on-command=<str> Command to turn the engraver on
[default: {lon}]
-0 --off-command=<str> Command to turn the engraver off
[default: {loff}]
  -e --light-speed=<float>       Speed for light engraving
                                 [default: {lghtspd}]
-l --low-speed=<float> Speed for full engraving
[default: {lowspd}]
  -s --move-speed=<float>        Speed when moving without engraving
                                 [default: {mvspd}]
-t --engraver-threshold=<int> Threshold driving value for the engraver
[default: {lint}]
-M --engraver-max=<int> Maximal driving value for the engraver
[default: {fint}]
-x --x-offset=<float> Offset from zero position in x-direction
[default: {xfst}mm]
-y --y-offset=<float> Offset from zero position in y-direction
[default: {yfst}mm]
-p --preamble=<filename> Textfile containing the to be preamble of
the output G-Code
-f --footer=<filename> Textfile containing the to be footer of
the output G-Code
'''
#=======================================================================
from __future__ import division, print_function, unicode_literals
from logging import info, debug, error, warning as warn
import sys, os, re, logging, time
from io import StringIO
import numpy as np
import pint
import matplotlib.pyplot as plt
from docopt import docopt
from PIL import Image
__version__ = '0.4.dev2'
__author__ = 'con-f-use'
__author_email__ = 'con-f-use@gmx.net'
__url__ = 'https://github.com/con-f-use/img2ngrv'
# package name is derived from this script's own filename (used in the docopt usage text)
__package__ = os.path.splitext(os.path.basename( __file__ ))[0]
# NOTE(review): this renders as 'v<package> <version>' -- the 'v' prefix probably
# belongs in front of the version number instead; confirm the intended format
__vstring__ = 'v{} {}\nWritten by {}'.format( __package__, __version__,
    __author__ )
#=======================================================================
loff = 'M107'                 # G-code: engraver (fan output) off
lon = 'M106'                  # G-code: engraver (fan output) on
fint = 255                    # maximal driving value for the engraver
lint = 90                     # threshold driving value (laser barely visible)
lfon = lon +' S'+ str(fint)   # full-intensity on command
lson = lon +' S'+ str(lint)   # low-intensity on command (used for positioning)
verb = 0                      # logging verbosity (set from --verbose in main)
tdpi = 508                    # target resolution in dpi
lghtspd = 500                 # feed rate for light engraving
lowspd = 70                   # feed rate for full engraving
mvspd = 2000                  # feed rate when moving without engraving
xfst = 20.0                   # x offset from machine zero (mm)
yfst = 20.0                   # y offset from machine zero (mm)
clp = 1                       # pixel values <= clp are treated as background
nvrt = False                  # invert pixel values of input image
bw = False                    # force black-and-white (non-zero pixels to max)
flplr = False                 # mirror input image left-to-right
altm = False                  # alternate svg transparency handling ('P' mode)
tm = time.strftime('%c')      # creation timestamp for the G-code header
pre = ''';This Gcode has been generated specifically for the LulzBot Mini
;It assumes the engraver (laser) is controlled by fan1 output
;Creation Date: {tm}
;----------------------------------------------------------------------------
G26 ; clear potential 'probe fail' condition
G21 ; metric values
G90 ; absolute positioning
M82 ; set extruder to absolute mode
{loff} ; start with the fan off
M104 S0 ; hotend off
M140 S0 ; heated bed heater off (if you have it)
G92 E0 ; set extruder position to 0
G28 ; home all
G1 Z25 F{mvspd} ; CRITICAL: set Z
G28 X0 Y0 ; home x and y
M204 S300 ; Set probing acceleration
G29 ; Probe
M204 S2000 ; Restore standard acceleration
G1 X5 Y15 Z25 F5000 ; get out the way
G4 S1 ; pause
M400 ; clear buffer
{lson} ; Turn on laser just enough to see it
G4 S100 ; dwell to allow for laser focusing
; Bounding box for placement
G1 X{x0} Y{y0} ; Start (lower left corner)
{lfon}
G1 X{x1} Y{y0} F{lghtspd} ; Lower right
G1 X{x1} Y{y1} F{lghtspd} ; Upper right
G1 X{x0} Y{y1} F{lghtspd} ; Upper left
G1 X{x0} Y{y0} F{lghtspd} ; Lower left
{loff}
G4 S100 ; Dwell for positioning
{lson}
G1 X{x0} Y{y1} F{lghtspd} ; Warning movement
G1 X{x0} Y{y0} F{lghtspd} ; Warning movement
G4 S5 ; Dwell to let warning sink in
; Start Engraving
'''
post = '''\
; End Engraving
; Cleanup
{lon} S0 ; laser off
{loff} ; laser really off
M104 S0 ; hotend off
M140 S0 ; heated bed off
M84 ; steppers off
G90 ; absolute positioning
'''
ureg = pint.UnitRegistry()
ureg.define('dotsperinch = 1/25.4/mm = dpi')
def write_gcode( dat, lon=lon, loff=loff, lowspd=lowspd, mvspd=mvspd, fl=sys.stdout ):
    r'''Traverses `dat` in zic-sac to create a gcode raster of it.
    Example:
    >>> dat = np.zeros((3,4),dtype='uint8')
    >>> dat[:,1] = 40*np.arange(3); dat[:,2] = dat[:,1]+10
    >>> buff = StringIO()
    >>> write_gcode( dat, fl=buff )
    >>> _ = buff.seek(0)
    >>> ''.join(buff.readlines())
    u'M107\nG1 X20.1 Y20.0 F2000\nM106 S96\nG1 X20.15 Y20.0 F70\n\nM107\nG1 X20.15 Y20.05 F2000\nM106 S122\nG1 X20.1 Y20.05 F70\nM106 S115\nG1 X20.05 Y20.05 F70\n\nM107\nG1 X20.05 Y20.1 F2000\nM106 S141\nG1 X20.1 Y20.1 F70\nM106 S148\nG1 X20.15 Y20.1 F70\n\n'
    '''
    # rvsd: zig-zag direction flag (also reused as 0/1 offset for x below)
    # lst: pixel value of the previous cell; a command is only emitted on change
    rvsd = lst = force = 0
    xrng = range(dat.shape[1])
    for y in range(dat.shape[0]):
        force = lst>0 # prevent line skipping
        for x in reversed(xrng) if rvsd else xrng:
            val = dat[y,x]
            if val != lst or force:
                if lst<1:
                    # previous run was blank: laser off, rapid move to here
                    fl.write(
                        loff +"\n"+
                        'G1 X'+ trfx(x+rvsd) +' Y'+ trfy(y) +' F'+ str(mvspd) +"\n"
                        )
                else:
                    # previous run was lit: set intensity, engrave up to here
                    fl.write(
                        lon +' S'+ trfv(lst) +"\n"+
                        'G1 X'+ trfx(x+rvsd) +' Y'+ trfy(y) +' F'+ str(lowspd) +"\n"
                        )
                lst = val
                force = False
        # reverse traversal direction for the next row (zig-zag)
        rvsd = not rvsd
        fl.write("\n")
# Coordinate transformations for `write_gcode(...)`.
def trfx(x):
    """Map a pixel column index to an absolute machine X coordinate string (mm)."""
    mm = x*(1.0/tdpi*25.4) + xfst
    return str(mm)
def trfy(y):
    """Map a pixel row index to an absolute machine Y coordinate string (mm)."""
    mm = y*(1.0/tdpi*25.4) + yfst
    return str(mm)
def trfv(v):
    """Map a pixel value (0..255) to an engraver driving value string in [lint, fint]."""
    drive = int(lint + v/255*(fint - lint))
    return str(drive)
def crop(dat, clp=127, nvrt=False, flplr=False, flpud=False):
    '''Crops zero-edges of an array; values <= clp are zeroed first.
    Example:
    >>> crop( np.array(
    ...    [[0,0,0,0,0,0,0,0],
    ...     [0,1,0,2,9,0,0,0],
    ...     [0,0,0,0,0,0,0,0],
    ...     [0,7,4,1,0,0,0,0]]
    ... ), clp=0)
    array([[1, 0, 2, 9],
           [0, 0, 0, 0],
           [7, 4, 1, 0]])
    '''
    if clp:
        dat[dat <= clp] = 0
    if bw:  # module-level black-and-white flag: saturate all lit pixels
        dat[dat > clp] = 255
    if nvrt:
        dat = 255 - dat
    # bounding box of all non-zero entries
    # NOTE(review): raises ValueError on an all-zero array -- confirm callers
    # never pass fully blank images
    true_points = np.argwhere(dat)
    top_left = true_points.min(axis=0)
    bottom_right = true_points.max(axis=0)
    dat = dat[ top_left[0]:bottom_right[0]+1,
               top_left[1]:bottom_right[1]+1 ]
    # fix: np.fliplr/np.flipud return new arrays; the results were previously
    # discarded, so the flplr/flpud options silently did nothing
    if flplr: dat = np.fliplr(dat)
    if flpud: dat = np.flipud(dat)
    return dat
def prevw_ngrv(infl, dat):
    '''Preview data: plot the engraving mask, save it to a temp png and show it.'''
    import tempfile
    # delete=False so the saved preview survives after the handle is closed
    nm = tempfile.NamedTemporaryFile(suffix='.png', delete=False).name
    mrrd = ' (mirrored)' if flplr else ''
    a = plt.imshow( dat, interpolation='none', cmap='Greys_r' ) # Greys_r'
    clrcode = ''
    # annotate how the colormap maps pixel value to material removal
    if a.cmap.name == 'Greys':   clrcode = '\nblack=substract; white=leave'
    if a.cmap.name == 'Greys_r': clrcode = '\nblack=leave; white=substract'
    plt.title('Engraving mask from '+ infl + mrrd + clrcode)
    plt.savefig(nm)
    plt.show()
    print( "Saved to '{}'".format(nm) )
def load_img( fn, tdpi, clp, dx, dy, w, h ):
    '''Load raster image, rescale it to the target dpi and prepare it for
    `write_gcode(...)`.
    :param fn: filename or file-like object
    :param tdpi: target resolution in dpi
    :param clp: background clipping threshold (passed to crop)
    :param dx, dy: source resolution in dpi (None: read from image metadata)
    :param w, h: source size in pixels (None: read from image)
    :return: cropped uint8 numpy array of pixel values
    '''
    img = Image.open( fn )
    # fix: removed the dead `bgr` composite -- its result was never used, and
    # `img.split()[3]` crashed on any image without an alpha channel.
    # NOTE(review): the original may have intended to flatten transparency onto
    # a black background before conversion; confirm whether that is needed
    mode = 'P' if altm else 'L'
    img = img.convert(mode) # greyscale
    if not w: w = img.width
    if not h: h = img.height
    if not dx: dx = img.info.get('dpi', [tdpi])[0]
    if not dy: dy = img.info.get('dpi', [None,tdpi])[1]
    debug('RSTR - w: %s, h: %s, pngdpi: %s', w, h, img.info.get('dpi', [None,None]) )
    # scale factors from source resolution to target resolution
    sclx, scly = tdpi/dx, tdpi/dy
    nw, nh = int(sclx*w), int(scly*h)
    debug( 'RSTR - sclx: %s (%s px - %s dpi), scly: %s (%s px - %s dpi)',
        sclx, w, dx, scly, h, dy )
    img = img.resize( (nw,nh), Image.BICUBIC ) # NEAREST, BICUBIC, BILINEAR, LANCZOS
    img.info['dpi'] = (tdpi, tdpi)
    dat = np.asarray( img, dtype='uint8' )
    dat.setflags(write=True)
    dat = crop(dat, clp=clp, nvrt=nvrt)
    debug('RSTR - Shape [0]: %s(y), [1]: %s(x)', dat.shape[0], dat.shape[1])
    return dat
def svg_get_phys_size( fn ):
    '''Get physical dimensions from the metadata of an svg image.
    Example:
    >>> svg = StringIO()
    >>> _ = svg.write('<svg width="13.97cm" height="7.68in"></svg>')
    >>> _ = svg.seek(0)
    >>> svg_get_phys_size(svg)
    (139.7, 195.072)
    '''
    # fix: previously rebound the name `xml` to the parsed root, shadowing the
    # stdlib module it was just imported as; use a distinct name
    import xml.etree.ElementTree as ET
    root = ET.parse(fn).getroot()
    wd, ht = root.attrib.get('width', None), root.attrib.get('height', None)
    # parse unit-suffixed strings like '13.97cm' / '7.68in' via pint
    wd, ht = ureg.parse_expression(wd), ureg.parse_expression(ht)
    # fix: debug labels were swapped (said 'height' for the width value)
    debug( 'XML - width: %s, height: %s', wd.to('mm'), ht.to('mm') )
    return round(wd.to('mm').magnitude,4), round(ht.to('mm').magnitude,4)
def load_svg( fn, dpi, clp, w, h ):
    '''Load svg image and prepare it for `write_gcode(...)`.
    Example:
    >>> load_svg('https://www.w3.org/Icons/SVG/svg-logo.svg', 0, 1, 1, 1)
    array([[71]], dtype=uint8)
    '''
    # NOTE(review): the doctest above fetches a remote URL -- it will fail
    # without network access; consider a local fixture
    import cairosvg
    from io import BytesIO
    if not dpi: dpi = 96   # SVG default user-unit resolution
    debug( 'SVG - %s, dpi: %s', fn, dpi )
    # rasterize the svg to an in-memory png, then reuse the raster loader
    buff = BytesIO()
    cairosvg.svg2png(url=fn, write_to=buff, dpi=dpi) #url=fn #file_obj=fn
    buff.seek(0)
    return load_img(buff, dpi, clp, dpi, dpi, w, h)
def write_ngrv_file(infl, outfl):
    '''Make a gcode raster for engraving from an input image.

    Tries to load `infl` as an svg first and falls back to the raster
    loader; writes the resulting gcode to `outfl`, or to stdout when no
    output file is given. Reads the module-level settings configured in
    main() (tdpi, clp, verb, pre, post, lon, loff, lowspd, mvspd, ...).
    '''
    try:
        dat = load_svg(infl, tdpi, clp, None, None)
    except Exception:
        # Not an svg (or the svg toolchain failed): try the raster loader
        # and let its exception propagate if that also fails.
        # (The original used bare excepts; Exception avoids swallowing
        # KeyboardInterrupt/SystemExit.)
        dat = load_img(infl, tdpi, clp, None, None, None, None)
    # Preview if verbose
    if verb <= logging.WARNING:
        prevw_ngrv(infl, dat)
    # Write file — close the handle only if we opened it ourselves
    # (BUG FIX: the original never closed the output file).
    fl = open( outfl, 'w' ) if outfl else sys.stdout
    try:
        x0, y0 = trfx(0), trfy(0)
        x1, y1 = trfx(dat.shape[1]), trfy(dat.shape[0])
        # The preamble template may reference both globals and these locals.
        allvars = dict(globals(), **locals())
        fl.write( pre.format(**allvars) )
        write_gcode( dat, lon, loff, lowspd, mvspd, fl )
        fl.write( post.format( **globals() ) )
    finally:
        if outfl:
            fl.close()
def run_tests():
    '''Run doctests for every function in this module and exit with the
    number of failures as the process exit code.'''
    import doctest

    class _Py23Checker(doctest.OutputChecker):
        # On Python 3, strip the u''/u"" literal prefixes from expected
        # output so Python-2 style doctests still compare equal.
        def check_output(self, want, got, optionflags):
            if sys.version_info[0] > 2:
                want = re.sub("u'(.*?)'", "'\\1'", want)
                want = re.sub('u"(.*?)"', '"\\1"', want)
            return want == got

    # Patch the checker before running the test suite.
    doctest.OutputChecker = _Py23Checker
    failures, _ = doctest.testmod(m=sys.modules.get('img2ngrv'), verbose=True)
    sys.exit(failures)
def main():
    '''Parse command line options, publish them as module-level settings and
    write the gcode file.'''
    # All settings live at module scope because the worker functions
    # (write_ngrv_file, load_img, ...) read them as globals.
    # BUG FIX: verb, pre and post are read as globals by write_ngrv_file()
    # but were assigned locally here — they must be declared global too.
    # (The declaration must precede the first assignment of any listed name.)
    global lon, loff, nvrt, tdpi, xfst, yfst, lghtspd, lowspd, mvspd, \
        lson, lfon, fint, lint, bw, flplr, altm, clp, verb, pre, post
    args = docopt(__doc__.format(**globals()), version=__vstring__)
    if args['--test']: run_tests()
    # Each -v lowers the log threshold by one level (10) from ERROR.
    verb = logging.ERROR - int(args['--verbose'])*10
    logging.basicConfig(
        level = verb,
        format = '[%(levelname)-7.7s] (%(asctime)s '
                 '%(filename)s:%(lineno)s) %(message)s',
        datefmt = '%y%m%d %H:%M'
    )
    lon = args['--on-command']
    loff = args['--off-command']
    nvrt = args['--invert']
    bw = args['--black-and-white']
    altm = args['--alternate-mode']
    flplr = args['--mirror']
    clp = int( args['--clip'] )
    lint = int( args['--engraver-threshold'] )
    fint = int( args['--engraver-max'] )
    lghtspd = int( args['--light-speed'] )
    lowspd = int( args['--low-speed'] )
    mvspd = int( args['--move-speed'] )
    tdpi = ureg.parse_expression(args['--target-resolution'])
    xfst = ureg.parse_expression(args['--x-offset'])
    yfst = ureg.parse_expression(args['--y-offset'])
    # A length (e.g. mm per dot) is the reciprocal of a resolution.
    if tdpi.dimensionality == '[length]': tdpi = 1.0/tdpi
    tdpi = int( tdpi.to('dpi').magnitude )
    xfst = float( xfst.to('mm').magnitude )
    yfst = float( yfst.to('mm').magnitude )
    # on-commands with threshold and maximum engraver power baked in
    lson = lon +' S'+ str(lint)
    lfon = lon +' S'+ str(fint)
    # Optional template overrides for the gcode preamble/footer.
    if args['--preamble']:
        with open(args['--preamble']) as fh:
            pre = ''.join(fh.readlines())
    if args['--footer']:
        with open(args['--footer'] ) as fh:
            post = ''.join(fh.readlines())
    write_ngrv_file( args['INFILE'], args['OUTFILE'] )
# Script entry point.
if __name__ == '__main__':
    main()
|
"""Functions to retrieve data from the Telraam API."""
import requests
import datetime as dt
from typing import Dict, Optional
from . import log
from tqdm.auto import tqdm
import os
TELRAAM_API_URL = "https://telraam-api.net/v1"
ENVVAR_TELRAAM_API_TOKEN = os.environ.get("TELRAAM_API_TOKEN")
def _response_is_healthy(response: requests.Response) -> bool:
    """Return True when the API answered with HTTP status 200 (OK)."""
    status = response.status_code
    return status == 200
def check_response_health(response: requests.Response) -> None:
    """Raise IOError when the API response is not a successful one."""
    if _response_is_healthy(response):
        return
    raise IOError(f"Query failed: {response.status_code} {response.reason}")
def query_active_segments(api_token: Optional[str] = None) -> Dict:
    """Returns information about all active segments.

    Parameters
    ----------
    api_token: str
        Your personal Telraam API token.
        Defaults to the environment variable TELRAAM_API_TOKEN.

    Returns
    -------
    response_json : Dict
        A dictionary containing the database's response.
    """
    url = f"{TELRAAM_API_URL}/reports/traffic_snapshot"
    # Fall back to the token from the environment when none is given —
    # the default is now None for consistency with the other query functions
    # (behavior is unchanged: both paths resolve to the env token).
    headers = {'X-Api-Key': ENVVAR_TELRAAM_API_TOKEN if api_token is None else api_token}
    # NOTE(review): the payload is the Python repr of a dict (str(...)), not
    # strict JSON; the Telraam API appears to accept it — confirm before changing.
    payload = str({
        "time": "live",
        "contents": "minimal",
        "area": "full"
    })
    log.debug(f"Querying all active segments from {url}.")
    response = requests.post(url, headers=headers, data=payload)
    check_response_health(response)
    return response.json()
def query_active_segments_in_radius(
        lon: float,
        lat: float,
        radius: float = 10,
        api_token: Optional[str] = None
) -> Dict:
    """Returns information about all active segments in a circular region.

    Parameters
    ----------
    lon : float
        Longitude in degrees.
    lat : float
        Latitude in degrees.
    radius : float
        Search radius in kilometer.
    api_token: str
        Your personal Telraam API token.
        Defaults to the environment variable TELRAAM_API_TOKEN.

    Returns
    -------
    response_json : Dict
        A dictionary containing the database's response.
    """
    url = f"{TELRAAM_API_URL}/reports/traffic_snapshot"
    headers = {'X-Api-Key': ENVVAR_TELRAAM_API_TOKEN if api_token is None else api_token}
    # NOTE(review): payload is a dict repr (str(...)), not strict JSON — the
    # API appears to accept it; kept as-is for consistency with the siblings.
    payload = str({
        "time": "live",
        "contents": "minimal",
        "area": f"{lon},{lat},{radius}"
    })
    # BUG FIX: the original f-string was missing the space before "from",
    # producing "...longitude 4.35°from https://...".
    log.debug(f"Querying all active segments in a {radius}km radius at latitude {lat}° and longitude {lon}° "
              f"from {url}.")
    response = requests.post(url, headers=headers, data=payload)
    check_response_health(response)
    return response.json()
def query_one_segment(
        segment_id: str,
        start_date: dt.date,
        end_date: dt.date,
        api_token: Optional[str] = None
) -> Dict:
    """Returns traffic information for one segment.

    Parameters
    ----------
    segment_id : str
        Unique segment identifier (e.g. "1003073114").
    start_date : datetime.date
        Start date of the desired data.
    end_date : datetime.date
        End date of the desired data (inclusive).
    api_token: str
        Your personal Telraam API token.
        Defaults to the environment variable TELRAAM_API_TOKEN.

    Returns
    -------
    response_json : Dict
        A dictionary containing the database's response, or None when no
        interval could be queried successfully.
    """
    url = f"{TELRAAM_API_URL}/reports/traffic"
    headers = {'X-Api-Key': ENVVAR_TELRAAM_API_TOKEN if api_token is None else api_token}
    # A query can cover 92 days at most, so split the requested range into
    # chunks of at most 90 days.
    time_step = dt.timedelta(days=90)
    boundaries = [start_date]
    while boundaries[-1] < end_date:
        # BUG FIX: clamp each boundary to end_date so we never request data
        # beyond the range the caller asked for.
        boundaries.append(min(boundaries[-1] + time_step, end_date))
    if len(boundaries) == 1:
        # BUG FIX: a same-day request (start_date == end_date) still needs
        # one chunk; the original returned None without querying.
        boundaries.append(end_date)
    # Query all data intervals
    responses = []
    for i in tqdm(range(len(boundaries) - 1), desc=f"Downloading data from {start_date} to {end_date}"):
        # BUG FIX: use chunk-local names instead of shadowing the parameters.
        chunk_start = boundaries[i]
        chunk_end = boundaries[i + 1]
        # BUG FIX: end every non-final chunk one day early — the next chunk
        # starts on boundaries[i + 1] at 00:00, so the original queried the
        # boundary day twice and duplicated its rows in the report.
        if i < len(boundaries) - 2:
            chunk_end = chunk_end - dt.timedelta(days=1)
        payload = str({
            "time_start": chunk_start.strftime("%Y-%m-%d 00:00:00Z"),
            "time_end": chunk_end.strftime("%Y-%m-%d 23:59:59Z"),
            "level": "segments",
            "format": "per-hour",
            "id": segment_id
        })
        log.debug(f"Querying {url} with data: {payload}")
        response = requests.post(url, headers=headers, data=payload)
        if _response_is_healthy(response):
            responses.append(response)
        else:
            log.info(f"No data found for time interval {chunk_start} to {chunk_end}")
    if len(responses) == 0:
        log.info("Data could not be queried.")
        return None
    # Assemble the per-chunk reports into one response dictionary.
    json = {'status_code': responses[0].json()['status_code'], 'message': responses[0].json()['message'], 'report': []}
    for response in responses:
        json['report'].extend(response.json()['report'])
    return json
|
from MagniPy.paths import *
from MagniPy.Analysis.Statistics.singledensity import SingleDensity
import sys
from copy import deepcopy, copy
import numpy as np
from MagniPy.util import *
import pandas
def read_run_partition(fname):
    """Parse a simulation_info file and return (Ncores, cores_per_lens, Nlens).

    Line 2 of the file holds the total number of cores and line 5 the number
    of cores used per lens; the lens count is their integer ratio.
    """
    with open(fname, 'r') as info_file:
        content = info_file.readlines()
    n_cores = int(content[1])
    per_lens = int(content[4])
    return n_cores, per_lens, int(n_cores * per_lens ** -1)
def read_R_index(fname, lens_index):
    """Return the (config, R_index) pair stored on line ``lens_index``.

    Each line of the file must hold exactly two space-separated fields:
    a configuration label and an integer index.
    """
    with open(fname, 'r') as index_file:
        rows = index_file.readlines()
    config_label, r_value = rows[lens_index].split(' ')
    return config_label, int(r_value)
def stack_chain(chain_name='', which_lens = None, parameters=None, fluxes_obs=None,
                fluxes=None, header=str, counter_start = int):
    """Write the model fluxes, observed fluxes and parameter samples of one
    lens into processed_chains/<chain_name>/lens<idx>/.

    NOTE(review): the defaults ``header=str`` and ``counter_start=int`` are
    type objects used as placeholder sentinels; callers are expected to pass
    real values — kept for interface compatibility.
    """
    fluxes, fluxes_obs = np.squeeze(fluxes), np.squeeze(fluxes_obs)
    lens_idx = which_lens + counter_start
    chain_file_path = chainpath_out + 'processed_chains/' + chain_name + '/lens' + str(lens_idx) + '/'
    # BUG FIX: the original tested "~os.path.exists(...)"; bitwise NOT of a
    # bool yields -1 or -2 (both truthy), so the directory was "created"
    # unconditionally on every call. Use boolean "not" instead.
    if not os.path.exists(chain_file_path):
        create_directory(chain_file_path)
    np.savetxt(chain_file_path + 'modelfluxes' + '.txt', fluxes, fmt='%.6f')
    np.savetxt(chain_file_path + 'observedfluxes' + '.txt', fluxes_obs, fmt='%.6f')
    np.savetxt(chain_file_path + 'samples.txt', parameters, fmt='%.5f', header=header)
def add_flux_perturbations(name, which_lens, parameters, fluxes_obs, fluxes, errors = None, N_pert = 1,
                           keep_n=15000):
    """For each flux error level, generate N_pert noisy realizations of the
    model/observed fluxes, rank the models by a flux-ratio summary statistic
    and write the best ``keep_n`` statistics and parameter samples to
    chain_stats/<name>/lens<which_lens>/.

    :param errors: scalar or list of fractional flux uncertainties (0 = no noise)
    :param N_pert: number of noise realizations per error level
    :param keep_n: number of best-ranked samples kept per output file
    """
    if errors is None:
        errors = []
    # Allow a single scalar error level as a convenience.
    if isinstance(errors,int) or isinstance(errors,float):
        errors = [errors]
    for error in errors:
        for k in range(1, N_pert + 1):
            perturbed_path = chainpath_out + 'chain_stats/' + name + '/lens' + str(which_lens) + '/'
            if not os.path.exists(chainpath_out + 'chain_stats/' + name):
                create_directory(chainpath_out + 'chain_stats/' + name)
            if not os.path.exists(perturbed_path):
                create_directory(chainpath_out + 'chain_stats/' + name + '/lens' + str(which_lens) + '/')
            if error == 0:
                # No noise: use the raw fluxes, dropping the first (reference) image.
                perturbed_ratios_obs = fluxes_obs[1:]
                perturbed_ratios = fluxes[:,1:]
            else:
                # Gaussian noise with sigma proportional to each flux.
                flux_perturbations_obs = np.random.normal(0, float(error)*fluxes_obs)
                flux_perturbations = np.random.normal(0, float(error)*fluxes)
                perturbed_fluxes_obs = fluxes_obs + flux_perturbations_obs
                perturbed_fluxes = fluxes + flux_perturbations
                # Normalize to the first image to form flux ratios.
                perturbed_ratios_obs = perturbed_fluxes_obs[1:]*perturbed_fluxes_obs[0]**-1
                norm = deepcopy(perturbed_fluxes[:,0])
                # assumes four images per lens (columns 0..3) — TODO confirm
                for col in range(0,4):
                    perturbed_fluxes[:,col] *= norm ** -1
                perturbed_ratios = perturbed_fluxes[:,1:]
            #perturbed_ratios = perturbed_fluxes
            # Euclidean distance between model and observed flux ratios.
            diff = np.array((perturbed_ratios - perturbed_ratios_obs)**2)
            summary_statistic = np.sqrt(np.sum(diff, 1))
            #print('warning: not sorting summary statistics')
            #ordered_inds = np.arange(0,keep_n)
            # Keep the keep_n best-matching samples.
            ordered_inds = np.argsort(summary_statistic)[0:keep_n]
            print('lens # ', which_lens)
            print('N < 0.01: ', np.sum(summary_statistic < 0.01))
            print('N < 0.02: ', np.sum(summary_statistic < 0.02))
            print('N < 0.03: ', np.sum(summary_statistic < 0.03))
            np.savetxt(perturbed_path + 'statistic_' + str(int(error * 100)) + 'error_' + str(k) + '.txt',
                       X=summary_statistic[ordered_inds], fmt=('%.4f'))
            # Output format depends on how many parameters the chain varied.
            if parameters.shape[1] == 5:
                np.savetxt(perturbed_path + 'params_'+str(int(error * 100)) + 'error_' + str(k) + '.txt',X=parameters[ordered_inds,:],
                           fmt=('%.3f', '%.4f', '%.4f', '%.4f', '%.3f'))
            elif parameters.shape[1] == 3:
                np.savetxt(perturbed_path + 'params_' + str(int(error * 100)) + 'error_' + str(k) + '.txt',
                           X=parameters[ordered_inds, :],
                           fmt=('%.5f', '%.5f', '%.5f'))
            elif parameters.shape[1] == 2:
                np.savetxt(perturbed_path + 'params_' + str(int(error * 100)) + 'error_' + str(k) + '.txt',
                           X=parameters[ordered_inds, :],
                           fmt=('%.5f', '%.5f'))
            if error == 0:
                # Zero error is deterministic — one realization is enough.
                break
def extract_chain_fromprocessed(chain_name = '', which_lens = None):
    """Load the already-processed chain output of one lens.

    Reads modelfluxes.txt, observedfluxes.txt and samples.txt from
    processed_chains/<chain_name>/lens<which_lens>/ and returns
    (fluxes, observed_fluxes, parameters, params_header).
    """
    route = chainpath_out + 'processed_chains/' + chain_name + '/lens'+str(which_lens)+'/'
    #lens_config, _ = read_R_index(chainpath_out + chain_name + '/R_index_config.txt', 0)
    #fluxes = np.loadtxt(route+'modelfluxes.txt')
    # pandas.read_csv is used instead of np.loadtxt for speed on large files
    # — presumably; confirm before switching back.
    fluxes = np.squeeze(pandas.read_csv(route+'modelfluxes.txt',header=None,
                                        sep=" ", index_col=None)).values
    # Re-build the parameter-name header from the commented first line.
    with open(route + '/samples.txt', 'r') as f:
        lines = f.read().splitlines()
    head = lines[0].split(' ')
    params_header = ''
    for word in head:
        if word not in ['#', '']:
            params_header += word + ' '
    parameters = np.loadtxt(route + '/samples.txt')
    # Every flux row must correspond to one parameter sample.
    assert np.shape(parameters)[0] == np.shape(fluxes)[0]
    observed_fluxes = np.squeeze(np.loadtxt(route + 'observedfluxes.txt'))
    return fluxes, observed_fluxes, parameters, params_header
def extract_chain(chain_name='', which_lens = None):
    """Assemble fluxes, observed fluxes and parameter samples for one lens
    from the raw per-core chain output under raw_chains_sidm/<chain_name>/.

    :param chain_name: chain folder name
    :param which_lens: 1-based lens index; selects the block of core folders
    :returns: (fluxes, observed_fluxes (1, 4), parameters, params_header)
    """
    chain_info_path = chainpath_out + 'raw_chains_sidm/' + chain_name + '/simulation_info.txt'
    Ncores, cores_per_lens, Nlens = read_run_partition(chain_info_path)
    #lens_config, lens_R_index = read_R_index(chainpath_out+chain_name+'/R_index_config.txt',0)
    chain_file_path = chainpath_out + 'raw_chains_sidm/' + chain_name +'/chain'
    params_header = None
    # NOTE(review): order is never reassigned, so the indexing below inserts a
    # length-1 axis rather than reordering image columns — confirm intended.
    order = None
    # BUG FIX (twice below): the original tested "~os.path.exists(...)";
    # bitwise NOT of a bool is -1/-2 (always truthy), so these branches ran
    # unconditionally. Use boolean "not".
    if not os.path.exists(chainpath_out+'processed_chains/' + chain_name + '/'):
        create_directory(chainpath_out+'processed_chains/' + chain_name + '/')
        copy_directory(chain_info_path,chainpath_out+'processed_chains/' + chain_name + '/')
    if not os.path.exists(chainpath_out + 'processed_chains/' + chain_name + '/lens' + str(which_lens) + '/'):
        create_directory(chainpath_out + 'processed_chains/' + chain_name + '/lens' + str(which_lens) + '/')
    # Each lens owns a contiguous range of per-core output folders.
    start = int((which_lens-1)*cores_per_lens)
    end = int(start + cores_per_lens)
    print(start, end)
    init = True
    for i in range(start+1, end+1):
        folder_name = chain_file_path + str(i)+'/'
        try:
            fluxes = np.loadtxt(folder_name + '/fluxes.txt')
            obs_data = read_data(folder_name + '/lensdata.txt')
            observed_fluxes = obs_data[0].m
            params = np.loadtxt(folder_name + '/parameters.txt', skiprows=1)
            assert fluxes.shape[0] == params.shape[0]
        except Exception:
            # Best effort: skip cores whose output is missing or malformed
            # (narrowed from a bare except so Ctrl-C still works).
            print('didnt find a file... '+str(chain_file_path + str(i)+'/'))
            continue
        if params_header is None:
            # Take parameter names from the first readable header line.
            with open(folder_name + '/parameters.txt', 'r') as f:
                lines = f.read().splitlines()
            head = lines[0].split(' ')
            params_header = ''
            for word in head:
                if word not in ['#', '']:
                    params_header += word + ' '
        if init:
            lens_fluxes = fluxes
            lens_params = params
            init = False
        else:
            lens_fluxes = np.vstack((lens_fluxes,fluxes))
            lens_params = np.vstack((lens_params,params))
    observed_fluxes = observed_fluxes[order]
    return lens_fluxes[:,order],observed_fluxes.reshape(1,4),lens_params,params_header
def resample(name, which_lens, parameter_vals_new, SIE_gamma_mean = 2.08,
             SIE_gamma_sigma = 0.05):
    """Pick, from a processed chain, the sample closest to a set of target
    parameter values and treat its model fluxes as new "observed" fluxes.

    :param parameter_vals_new: dict mapping parameter name -> [target, sigma];
        distance is the sigma-weighted sum of absolute differences
    :returns: (fluxes, fluxes_obs, parameters, header, parameters[index][1])

    NOTE(review): SIE_gamma_mean and SIE_gamma_sigma are never used in this
    body — confirm whether they were meant to enter the distance calculation.
    """
    fluxes, observedfluxes, parameters, header = extract_chain_fromprocessed(name, which_lens)
    params_new = copy(parameter_vals_new)
    parameter_names = list(filter(None, header.split(' ')))
    # Broadcast the target values and sigmas to the shape of the sample array.
    newparams = np.ones_like(parameters)
    sigmas = np.ones_like(parameters)
    for i, pname in enumerate(parameter_names):
        newparams[:,i] = params_new[pname][0]
        sigmas[:,i] = params_new[pname][1]
    # Sigma-weighted L1 distance of every sample to the target point.
    delta = np.sum(np.absolute(newparams - parameters) * sigmas ** -1, axis=1)
    index = np.argmin(delta)
    fluxes_obs = fluxes[index, :]
    print(parameters[index])
    return fluxes, fluxes_obs, parameters, header, parameters[index][1]
def read_chain_info(fname):
    """Parse a chain info file into its three sections.

    The file contains a '# params_varied' section listing parameter names,
    one '<name>:' section of key/value lines per parameter, and a '# truths'
    section of 'name value' lines. Every section ends at the first blank line.

    :returns: (params_varied, truth_dic, varyparams_info)
    """
    with open(fname, 'r') as info_file:
        lines = info_file.read().splitlines()

    # --- '# params_varied' section: one parameter name per line ---
    params_varied = []
    varyparams_info = {}
    capture = False
    for row in lines:
        if row == '# params_varied':
            capture = True
            continue
        if capture:
            if len(row) == 0:
                capture = False
                break
            params_varied.append(row)

    # --- one '<name>:' section per varied parameter ---
    for pname in params_varied:
        args = {}
        for row in lines:
            if row == pname + ':':
                capture = True
                continue
            if capture:
                if len(row) == 0:
                    capture = False
                    break
                key_value = row.split(' ')
                args[key_value[0]] = key_value[1]
        varyparams_info[pname] = args

    # --- '# truths' section: 'name value' pairs ---
    truth_dic = {}
    for row in lines:
        if row == '# truths':
            capture = True
            continue
        if capture:
            if len(row) == 0:
                capture = False
                break
            fields = row.split(' ')
            truth_dic[fields[0]] = float(fields[1])

    return params_varied, truth_dic, varyparams_info
#new_chains_withsigma('WDM_run_7.7_tier2',[1,2],'WDM_run_7.7_sigma')
# NOTE(review): the triple-quoted string below is disabled driver code kept
# for reference. It calls helpers (process_chain_i, resample_chain) that are
# not defined in this module — confirm their location before re-enabling.
"""
if False:
for i in range(1, 11):
process_chain_i('WDM_run_7.7_tier2', which_lens=i, errors=[0])
if False:
fsub = 0.007
logmhm = 4.8
src_size = 0.035
LOS_norm = 1
new_name = 'CDM_run_sigma23_src35'
which_lens_indexes = np.arange(1, 11)
resample_chain('WDM_run_7.7_tier2', new_name, which_lens_indexes=which_lens_indexes, errors=[0],
parameters_new={'fsub': [fsub, 0.002], 'log_m_break': [logmhm, 0.1],
'source_size_kpc': [src_size, 0.005],
'LOS_normalization': [LOS_norm, 0.05]},
SIE_gamma_mean=2.08, SIE_gamma_sigma=0.04, transform_fsub = True)
fsub = 0.007
logmhm = 7.5
src_size = 0.035
LOS_norm = 1
new_name = 'WDM_run_sigma23_7.5_src35'
which_lens_indexes = np.arange(1, 11)
resample_chain('WDM_run_7.7_tier2', new_name, which_lens_indexes=which_lens_indexes, errors= [0],
parameters_new={'fsub': [fsub, 0.001], 'log_m_break': [logmhm, 0.1],
'source_size_kpc': [src_size, 0.006],
'LOS_normalization': [LOS_norm, 0.05]},
SIE_gamma_mean = 2.08, SIE_gamma_sigma = 0.04)
"""
|
import os
from datetime import datetime
import numpy as np
import luigi
import nifty.tools as nt
# we don't need to bother with the file reader
# wrapper here, because paintera needs n5 files anyway.
import z5py
from ..cluster_tasks import WorkflowBase
from ..label_multisets import LabelMultisetWorkflow
from ..downscaling import DownscalingWorkflow
from . import unique_block_labels as unique_tasks
from . import label_block_mapping as labels_to_block_tasks
class WritePainteraMetadata(luigi.Task):
    """Luigi task that writes the n5 attribute metadata paintera expects for
    a converted label dataset (label group, raw group and the lookup groups).
    Appends a line to its log-file target on success.
    """
    tmp_folder = luigi.Parameter()
    # path and keys
    path = luigi.Parameter()
    raw_key = luigi.Parameter()
    label_group = luigi.Parameter()
    # resolutions and n-scales
    raw_resolution = luigi.ListParameter()
    label_resolution = luigi.ListParameter()
    n_scales = luigi.IntParameter()
    offset = luigi.ListParameter()
    max_id = luigi.IntParameter()
    dependency = luigi.TaskParameter()

    def _write_log(self, msg):
        # Append a timestamped message to the log file that doubles as this
        # task's output target.
        log_file = self.output().path
        with open(log_file, 'a') as f:
            f.write('%s: %s\n' % (str(datetime.now()), msg))

    def requires(self):
        return self.dependency

    def _write_downsampling_factors(self, data_group, group):
        # Mirror the per-scale downsamplingFactors attribute from the label
        # data group onto the given sibling group (scale 0 has no factor).
        for scale in range(1, self.n_scales):
            scale_key = 's%i' % scale
            factor = data_group[scale_key].attrs['downsamplingFactors']
            group[scale_key].attrs['downsamplingFactors'] = factor

    def run(self):
        # compute the correct resolutions for raw data and labels
        label_resolution = self.label_resolution
        raw_resolution = self.raw_resolution
        with z5py.File(self.path) as f:
            # write metadata for the top-level label group
            g = f[self.label_group]
            g.attrs['painteraData'] = {'type': 'label'}
            g.attrs['maxId'] = self.max_id
            # add the metadata referencing the label to block lookup
            g.attrs["labelBlockLookup"] = {"type": "n5-filesystem-relative",
                                           "scaleDatasetPattern": "label-to-block-mapping/s%d"}
            # write metadata for the label-data group
            data_group = g['data']
            data_group.attrs['maxId'] = self.max_id
            # we reverse resolution and offset because java n5 uses axis
            # convention XYZ and we use ZYX
            data_group.attrs['offset'] = self.offset[::-1]
            data_group.attrs['resolution'] = label_resolution[::-1]
            # note: we have the downsampling factors for the data group already
            # add metadata for unique labels group
            unique_group = g['unique-labels']
            unique_group.attrs['multiScale'] = True
            self._write_downsampling_factors(data_group, unique_group)
            # add metadata for label to block mapping
            mapping_group = g['label-to-block-mapping']
            mapping_group.attrs['multiScale'] = True
            self._write_downsampling_factors(data_group, mapping_group)
            # add metadata for the raw data
            raw_group = f[self.raw_key]
            raw_group.attrs['resolution'] = raw_resolution[::-1]
        self._write_log('write metadata successfull')

    def output(self):
        return luigi.LocalTarget(os.path.join(self.tmp_folder,
                                              'write_paintera_metadata.log'))
class ConversionWorkflow(WorkflowBase):
    """Workflow converting a raw + label n5 dataset into the paintera format.

    Steps: (1) downsample the labels so their scale pyramid aligns with the
    raw data, (2) compute the unique labels per block, (3) invert that into a
    label-to-block mapping, (4) write the fragment-segment assignment, and
    finally write the paintera metadata.
    """
    path = luigi.Parameter()
    raw_key = luigi.Parameter()
    label_in_key = luigi.Parameter()
    label_out_key = luigi.Parameter()
    label_scale = luigi.IntParameter()
    assignment_path = luigi.Parameter(default='')
    assignment_key = luigi.Parameter(default='')
    use_label_multiset = luigi.BoolParameter(default=False)
    copy_labels = luigi.BoolParameter(default=False)
    offset = luigi.ListParameter(default=[0, 0, 0])
    resolution = luigi.ListParameter(default=[1, 1, 1])
    restrict_sets = luigi.ListParameter(default=[])
    restrict_scales = luigi.IntParameter(default=None)

    ##############################################################
    # Step 1 Implementations: align scales and make label datasets
    ##############################################################

    def _downsample_labels(self, scale_factors, dep):
        # Plain downscaling of the label volume into <label_out_key>/data.
        task = DownscalingWorkflow
        out_key_prefix = os.path.join(self.label_out_key, 'data')
        halos = len(scale_factors) * [[0, 0, 0]]
        dep = task(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
                   config_dir=self.config_dir, target=self.target,
                   input_path=self.path, input_key=self.label_in_key,
                   output_path=self.path, output_key_prefix=out_key_prefix,
                   scale_factors=scale_factors, halos=halos,
                   metadata_format='paintera', force_copy=self.copy_labels,
                   dependency=dep)
        return dep

    def _make_label_multisets(self, scale_factors, dep):
        # Downscaling in paintera's label-multiset format; needs one
        # restrict_sets entry per scale factor.
        task = LabelMultisetWorkflow
        restrict_sets = self.restrict_sets
        assert len(restrict_sets) == len(scale_factors),\
            "Need restrict_sets for label-multisets: %i, %i" % (len(restrict_sets), len(scale_factors))
        out_key_prefix = os.path.join(self.label_out_key, 'data')
        dep = task(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
                   config_dir=self.config_dir, target=self.target,
                   input_path=self.path, input_key=self.label_in_key,
                   output_path=self.path, output_prefix=out_key_prefix,
                   scale_factors=scale_factors, restrict_sets=restrict_sets,
                   dependency=dep)
        return dep

    def _align_scales(self, dep):
        # Derive the label scale pyramid from the raw data's scale pyramid so
        # both agree from `label_scale` downward.
        g_raw = z5py.File(self.path)[self.raw_key]
        ds_labels = z5py.File(self.path)[self.label_in_key]
        # make sure that the shape of the raw data and the
        # labels agree at the specified scale level
        label_scale_prefix = 's%i' % self.label_scale
        assert label_scale_prefix in g_raw, "Cannot find label scale in raw data"
        shape_raw = g_raw[label_scale_prefix].shape
        shape_labels = ds_labels.shape
        assert shape_raw == shape_labels, "%s, %s" % (str(shape_raw), str(shape_labels))
        # get and sort the raw scales
        raw_scales = list(g_raw.keys())
        raw_scales = np.array([int(rscale[1:]) for rscale in raw_scales])
        raw_scales = np.sort(raw_scales)
        # compute the scale factors from the raw datasets
        scale_factors = [[1, 1, 1]]
        effective_scale_factors = [[1, 1, 1]]
        for scale in raw_scales[1:]:
            # we need to reverse the scale factors because paintera has axis order
            # XYZ and we have axis order ZYX
            effective_scale_factor = g_raw['s%i' % scale].attrs['downsamplingFactors'][::-1]
            # find the relative scale factor
            scale_factor = [int(sf_out // sf_in) for sf_out, sf_in
                            in zip(effective_scale_factor, effective_scale_factors[-1])]
            effective_scale_factors.append(effective_scale_factor)
            scale_factors.append(scale_factor)
        # compute the label resolution
        label_scale_factor = effective_scale_factors[self.label_scale]
        label_resolution = [res * eff for res, eff in zip(self.resolution, label_scale_factor)]
        # restrict to the scale factors we need for down-sampling the labels
        scale_factors = scale_factors[(self.label_scale + 1):]
        if self.restrict_scales is not None:
            scale_factors = scale_factors[:self.restrict_scales]
        # create downsampled labels in label-multiset format
        # or by default downsampling
        if self.use_label_multiset:
            dep = self._make_label_multisets(scale_factors, dep)
        else:
            dep = self._downsample_labels(scale_factors, dep)
        # prepend scale factor for scale 0
        scale_factors = [[1, 1, 1]] + scale_factors
        return dep, scale_factors, label_resolution

    ############################################
    # Step 2 Implementations: make block uniques
    ############################################

    def _uniques_in_blocks(self, dep, scale_factors):
        # One UniqueBlockLabels task per scale, tracking the cumulative
        # (effective) scale factor relative to scale 0.
        task = getattr(unique_tasks, self._get_task_name('UniqueBlockLabels'))
        effective_scale = [1, 1, 1]
        for scale, factor in enumerate(scale_factors):
            in_key = os.path.join(self.label_out_key, 'data', 's%i' % scale)
            out_key = os.path.join(self.label_out_key, 'unique-labels', 's%i' % scale)
            effective_scale = [eff * sf for eff, sf in zip(effective_scale, factor)]
            dep = task(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
                       config_dir=self.config_dir,
                       input_path=self.path, output_path=self.path,
                       input_key=in_key, output_key=out_key,
                       effective_scale_factor=effective_scale,
                       dependency=dep, prefix='s%i' % scale)
        return dep

    ##############################################
    # Step 3 Implementations: invert block uniques
    ##############################################

    def _label_block_mapping(self, dep, scale_factors):
        task = getattr(labels_to_block_tasks,
                       self._get_task_name('LabelBlockMapping'))
        # get the fragment max id
        with z5py.File(self.path) as f:
            max_id = f[self.label_in_key].attrs['maxId']
        # compute the label to block mapping for all scales
        effective_scale = [1, 1, 1]
        for scale, factor in enumerate(scale_factors):
            in_key = os.path.join(self.label_out_key, 'unique-labels', 's%i' % scale)
            out_key = os.path.join(self.label_out_key, 'label-to-block-mapping', 's%i' % scale)
            effective_scale = [eff * sf for eff, sf in zip(effective_scale, factor)]
            dep = task(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
                       config_dir=self.config_dir,
                       input_path=self.path, output_path=self.path,
                       input_key=in_key, output_key=out_key,
                       number_of_labels=max_id + 1, dependency=dep,
                       effective_scale_factor=effective_scale,
                       prefix='s%i' % scale)
        return dep

    #####################################################
    # Step 4 Implementations: fragment segment assignment
    #####################################################

    def _fragment_segment_assignment(self, dep):
        if self.assignment_path == '':
            # No assignments given: the fragment max id is also the overall max id.
            with z5py.File(self.path) as f:
                max_id = f[self.label_in_key].attrs['maxId']
            return dep, max_id
        else:
            assert self.assignment_key != ''
            assert os.path.exists(self.assignment_path), self.assignment_path
            # TODO should make this a task
            with z5py.File(self.assignment_path) as f, z5py.File(self.path) as f_out:
                assignments = f[self.assignment_key][:]
                n_fragments = len(assignments)
                # find the fragments which have non-trivial assignment,
                # i.e. whose segment contains more than one fragment
                segment_ids, counts = np.unique(assignments,
                                                return_counts=True)
                seg_ids_to_counts = {seg_id: count
                                     for seg_id, count in zip(segment_ids, counts)}
                fragment_ids_to_counts = nt.takeDict(seg_ids_to_counts, assignments)
                fragment_ids = np.arange(n_fragments, dtype='uint64')
                non_triv_fragments = fragment_ids[fragment_ids_to_counts > 1]
                non_triv_segments = assignments[non_triv_fragments]
                # offset segment ids past the fragment id range to keep them disjoint
                non_triv_segments += n_fragments
                # determine the overall max id
                max_id = int(non_triv_segments.max())
                # TODO do we need to assign a special value to ignore label (0) ?
                frag_to_seg = np.vstack((non_triv_fragments, non_triv_segments))
                # fragment_ids = np.arange(n_fragments, dtype='uint64')
                # assignments += n_fragments
                # frag_to_seg = np.vstack((fragment_ids, assignments))
                # max_id = int(frag_to_seg.max())
                out_key = os.path.join(self.label_out_key, 'fragment-segment-assignment')
                chunks = (1, frag_to_seg.shape[1])
                f_out.require_dataset(out_key, data=frag_to_seg, shape=frag_to_seg.shape,
                                      compression='gzip', chunks=chunks)
            return dep, max_id

    def requires(self):
        # align the scales of labels and raw data and make label datasets
        dep, scale_factors, label_resolution = self._align_scales(self.dependency)
        # # next, compute the mapping of unique labels to blocks
        dep = self._uniques_in_blocks(dep, scale_factors)
        # # next, compute the inverse mapping
        dep = self._label_block_mapping(dep, scale_factors)
        # # next, compute the fragment-segment-assignment
        dep, max_id = self._fragment_segment_assignment(dep)
        # # finally, write metadata
        dep = WritePainteraMetadata(tmp_folder=self.tmp_folder, path=self.path,
                                    raw_key=self.raw_key, label_group=self.label_out_key,
                                    raw_resolution=self.resolution, label_resolution=label_resolution,
                                    n_scales=len(scale_factors), offset=self.offset, max_id=max_id,
                                    dependency=dep)
        return dep

    @staticmethod
    def get_config():
        # Merge the task configs of this workflow with those of the sub-workflows.
        configs = super(ConversionWorkflow, ConversionWorkflow).get_config()
        configs.update({'unique_block_labels': unique_tasks.UniqueBlockLabelsLocal.default_task_config(),
                        'label_block_mapping': labels_to_block_tasks.LabelBlockMappingLocal.default_task_config(),
                        **DownscalingWorkflow.get_config(), **LabelMultisetWorkflow.get_config()})
        return configs
|
from django.apps import AppConfig
class BalancesConfig(AppConfig):
    """Django application configuration for the ``balances`` app."""
    name = 'balances'
|
# Feed 100 noisy samples (mean 10, sigma 1) through a TF1 moving-average graph.
# NOTE(review): relies on names defined elsewhere (tf, curr_value, update_avg,
# prev_avg) — this fragment is not runnable on its own, and no variable
# initializer is visible here; confirm it runs earlier in the script.
raw_data = np.random.normal(10, 1, 100)
with tf.Session() as sess:
    for i in range(len(raw_data)):
        # update_avg presumably computes the new average from curr_value — confirm
        curr_avg = sess.run(update_avg , feed_dict = {curr_value:raw_data[i]})
        sess.run(tf.assign(prev_avg, curr_avg))
""" Functions to create the circuit simulating the Pauli channel """
from qiskit import QuantumCircuit
import numpy as np
def pauli_channel_tanh(q, t, system, pauli_ancillae, eta=1, omega=1):
    """Construct the Pauli channel with rates

        \\gamma_1(t) = \\eta/2
        \\gamma_2(t) = \\eta/2
        \\gamma_3(t) = -\\omega \\tanh (\\omega t) / 2

    Args:
        q: quantum register
        t (real): time
        system (int): the index of the system qubit
        pauli_ancillae (list): the indices of the two ancillae for the Pauli ch.
        eta (real): parameter
        omega (real): parameter such that omega < eta

    Returns:
        a QuantumCircuit
    """
    if np.isclose(t, 0):
        # Zero evolution time: the channel is the identity.
        return QuantumCircuit(q)
    # Promote to complex, or the expressions below won't work for all inputs.
    eta = complex(eta)
    omega = complex(omega)
    p = [1/4 * (1 - np.exp(-2 * t *eta)),
         1/4 * (1 - np.exp(-2 * t *eta)),
         1/4 * (1 + np.exp(-2 * t * eta) - 2 *np.exp(-t *eta) * np.cosh(t *omega))]
    return pauli_channel(q, system, pauli_ancillae, p)
def pauli_channel_tan(q, t, system, pauli_ancillae, eta=1, omega=1):
    """Construct the trigonometric variant of the Pauli channel
    (cos in place of cosh, cf. ``pauli_channel_tanh``) with rates

        \\gamma_1(t) = \\eta/2
        \\gamma_2(t) = \\eta/2
        \\gamma_3(t) = \\omega \\tan (\\omega t) / 2

    NOTE(review): the original docstring repeated the tanh-variant rates; the
    \\gamma_3 formula above is inferred from the cos() in the probabilities —
    confirm against Eq. (18) of the paper.

    Args:
        q: quantum register
        t (real): time
        system (int): the index of the system qubit
        pauli_ancillae (list): the indices of the two ancillae for the Pauli ch.
        eta (real): parameter
        omega (real): parameter such that omega < eta

    Returns:
        a QuantumCircuit
    """
    # We promote eta and omega to complex
    eta = complex(eta)
    omega = complex(omega)
    if np.isclose(t, 0): # Just return an empty circuit
        return QuantumCircuit(q)
    p = np.array([1/4 * (1 - np.exp(-2 * t *eta)),
                  1/4 * (1 - np.exp(-2 * t *eta)),
                  1/4 * (1 + np.exp(-2 * t * eta) - 2 *np.exp(-t *eta) * np.cos(t *omega))], dtype=complex)
    return pauli_channel(q, system, pauli_ancillae, p)
def pauli_channel(q, system, pauli_ancillae, p):
    """
    Apply the Pauli channel to system with probabilities p
    (see Eq. (18) of the paper)

    Args:
        q: quantum register
        system (int): index of the system qubit
        pauli_ancillae (list): indices of the two ancilla qubits
        p: the three Pauli error probabilities

    Returns:
        a QuantumCircuit that entangles the ancillae and applies the
        correlated controlled-X/-Y errors to the system qubit
    """
    # Make sure p is an array of complex numbers or the next formula will not work
    p = np.array(p, dtype=complex)
    # A solution to Eq. (18)
    # NOTE(review): closed-form ancilla amplitudes (machine-generated, likely
    # from a CAS) — do not edit by hand.
    c = [np.sqrt(1 - np.sqrt(-4*p[0]**2 + (1 - 2*p[2])**2 + 8*p[0]*(p[2] + np.sqrt(-(p[2]*(-1 + 2*p[0] + p[2]))))))/np.sqrt(2),
         np.sqrt(8*p[0]**3 - 4*p[0]**2*(-1 - 6*p[2] + np.sqrt(-4*p[0]**2 + (1 - 2*p[2])**2 + 8*p[0]*(p[2] + np.sqrt(-(p[2]*(-1 + 2*p[0] + p[2])))))) +
                 (1 - 2*p[2])**2*(-1 + 2*p[2] + np.sqrt(-4*p[0]**2 + (1 - 2*p[2])**2 + 8*p[0]*(p[2] + np.sqrt(-(p[2]*(-1 + 2*p[0] + p[2])))))) -
                 2*p[0]*(1 + 4*(p[2] - 3*p[2]**2 - p[2]*np.sqrt(-4*p[0]**2 + (1 - 2*p[2])**2 + 8*p[0]*(p[2] + np.sqrt(-(p[2]*(-1 + 2*p[0] + p[2]))))) +
                 np.sqrt(-(p[2]*(-1 + 2*p[0] + p[2])))*np.sqrt(-4*p[0]**2 + (1 - 2*p[2])**2 + 8*p[0]*(p[2] + np.sqrt(-(p[2]*(-1 + 2*p[0] + p[2]))))))))/
         (np.sqrt(2)*np.sqrt((-1 + 2*p[0] + 2*p[2])*(4*p[0]**2 + (1 - 2*p[2])**2 + p[0]*(4 + 8*p[2])))),
         np.sqrt((8*p[0]**3 - 4*p[0]**2*(-1 - 6*p[2] + np.sqrt(-4*p[0]**2 + (1 - 2*p[2])**2 + 8*p[0]*(p[2] + np.sqrt(-(p[2]*(-1 + 2*p[0] + p[2])))))) +
                 (1 - 2*p[2])**2*(-1 + 2*p[2] + np.sqrt(-4*p[0]**2 + (1 - 2*p[2])**2 + 8*p[0]*(p[2] + np.sqrt(-(p[2]*(-1 + 2*p[0] + p[2])))))) -
                 2*p[0]*(1 + 4*(p[2] - 3*p[2]**2 - p[2]*np.sqrt(-4*p[0]**2 + (1 - 2*p[2])**2 + 8*p[0]*(p[2] + np.sqrt(-(p[2]*(-1 + 2*p[0] + p[2]))))) +
                 np.sqrt(-(p[2]*(-1 + 2*p[0] + p[2])))*np.sqrt(-4*p[0]**2 + (1 - 2*p[2])**2 + 8*p[0]*(p[2] + np.sqrt(-(p[2]*(-1 + 2*p[0] + p[2]))))))))/
                 (4*p[0]**2 + (1 - 2*p[2])**2 + p[0]*(4 + 8*p[2])))/np.sqrt(-2 + 4*p[0] + 4*p[2])]
    # Rotation angles preparing the ancillae with amplitudes c.
    theta = 2*np.arccos(np.real(c))
    dc = QuantumCircuit(q)
    # Entangle the two ancillae ...
    dc.ry(theta[0], q[pauli_ancillae[0]])
    dc.cx(q[pauli_ancillae[0]], q[pauli_ancillae[1]])
    dc.ry(theta[1], q[pauli_ancillae[0]])
    dc.ry(theta[2], q[pauli_ancillae[1]])
    # ... then apply the correlated X and Y errors to the system qubit.
    dc.cx(q[pauli_ancillae[0]], q[system])
    dc.cy(q[pauli_ancillae[1]], q[system])
    return dc
import pygame, sys
from pygame.locals import *
pygame.init()
class Brick:
    """A single 40x13 pixel brick filled with a solid color, positioned at (x, y)."""
    def __init__(self, x, y, color):
        surface = pygame.Surface([40, 13])
        surface.fill(color)
        self.image = surface
        # The rect tracks the brick's position for drawing/collision.
        self.rect = surface.get_rect()
        self.rect.x = x
        self.rect.y = y
|
"""Item purchase models."""
# Python
from decimal import Decimal
# Django
from django.db import models
from django.db.models import Sum, F, FloatField, Max
from django.db.models.signals import post_save, pre_save
from django.db import transaction
from django.dispatch import receiver
from django.utils import timezone
# Models
from apps.utils.models import BaseModelWithoutStatus
from apps.inventories.models import Product
from apps.purchases.models import Purchase
# (label, rate) choices for the purchase tax — presumably the Argentine VAT
# ("IVA") brackets of 0%, 21% and 10.5%; TODO confirm.
TAX_CHOICES = [
    ("0 %", 0.0),
    ("21 %", 0.21),
    ("10.5 %", 0.105),
]
class ItemPurchase(BaseModelWithoutStatus):
    """Item purchase class.

    One line of a purchase: a product, its unit price, the quantity bought
    and an optional discount. ``subtotal`` and ``total`` are derived fields
    recomputed on every save.
    """
    price = models.DecimalField(default=0.0, max_digits=10, decimal_places=2, verbose_name="Precio")
    quantity = models.FloatField(default=1, verbose_name='Cantidad')
    discount = models.DecimalField(default=0.0, blank=True, max_digits=8, decimal_places=2, verbose_name="Descuento")
    # Derived: price * quantity
    subtotal = models.DecimalField(default=0.0, blank=True, max_digits=10, decimal_places=2)
    # Derived: subtotal - discount
    total = models.DecimalField(default=0.0, blank=True, max_digits=10, decimal_places=2)
    # SET_NULL keeps the purchase line if the product is deleted
    product = models.ForeignKey(Product, null=True, on_delete=models.SET_NULL, verbose_name="Producto")
    purchase = models.ForeignKey(Purchase, on_delete=models.CASCADE, verbose_name="Compra")

    class Meta:
        ordering = ['id',]
        verbose_name = 'item compra'
        verbose_name_plural = 'item compras'

    def calculate_subtotal(self):
        # quantity is a float field; convert so Decimal arithmetic stays exact
        self.subtotal = self.price * Decimal.from_float(self.quantity)

    def calculate_total(self):
        self.calculate_subtotal()
        self.total = self.subtotal - self.discount

    def save(self, *args, **kwargs):
        # Save first so the row (and pk) exists, then persist the derived
        # total in a second minimal UPDATE. Note each save also fires the
        # post_save/pre_save signal handlers below.
        super(ItemPurchase, self).save(*args, **kwargs)
        self.calculate_total()
        super(ItemPurchase, self).save(update_fields=['total'])

    @transaction.atomic
    def delete(self, *args, **kwargs):
        # Deleting a purchase line rolls its quantity back out of stock and
        # recomputes the parent purchase's total, all in one transaction.
        purchase = self.purchase
        product = self.product
        quantity = self.quantity
        super(ItemPurchase, self).delete(*args, **kwargs)
        purchase.calculate_total()
        stock = product.stock
        stock -= quantity
        product.stock = stock
        product.save(update_fields=['stock',])

    def __str__(self):
        return str(self.total)
@receiver(post_save, sender=ItemPurchase)
def update_total_sales_at_item(sender, instance, **kwargs):
    """After an item is saved, refresh its parent purchase's total."""
    parent_purchase = instance.purchase
    parent_purchase.calculate_total()
@receiver(pre_save, sender=ItemPurchase)
def update_stock_in_article(sender, instance, **kwargs):
    """Before an existing item is re-saved, add the previously purchased
    quantity back to the product's stock so the new quantity can be applied
    cleanly afterwards. New (unsaved) items are ignored.
    """
    try:
        old_instance = ItemPurchase.objects.get(id=instance.id)
    except ItemPurchase.DoesNotExist:
        old_instance = None
    if not old_instance:
        return
    old_stock = old_instance.quantity
    # Bug fix: the previous truthiness check (``if ...stock:``) skipped
    # products whose stock was exactly 0, so their stock was never restored.
    # Only skip when stock is genuinely missing (None).
    if old_instance.product.stock is not None:
        old_instance.product.stock += old_stock
        old_instance.product.save(update_fields=['stock',])
|
# -*- coding: utf-8 -*-
"""
gagepy.utils
~~~~~~~~~~~~
Utility helper functions for gagepy.
:authors: 2016 by Jeremiah Lant, see AUTHORS
:license: United States Geological Survey (USGS), see LICENSE file
"""
import os
import numpy as np
import datetime
import re
def get_file_paths(dirname, file_ext):
    """Return a list of file paths for files with a given extension in a
    directory. Walks through subdirectories.

    :param dirname: Name of directory to start walking
    :type dirname: string
    :param file_ext: File extension to look for
    :type file_ext: string
    :returns: List of file paths
    :rtype: list
    """
    file_paths = []
    # Bug fix: os.walk was called with the undefined name ``directory``,
    # raising NameError on every call; the parameter is ``dirname``.
    for root, directories, files in os.walk(dirname):
        for filename in files:
            filepath = os.path.join(root, filename)
            if file_ext and filepath.endswith(file_ext):
                file_paths.append(filepath)
    return file_paths
def get_file_info(filepath):
    """Split *filepath* into its directory and file name.

    :param filepath: Path to file
    :type filepath: string
    :returns: File directory and file name
    :rtype: tuple
    """
    directory, name = os.path.split(filepath)
    # An empty directory component means the file is in the current directory.
    return (directory or os.getcwd(), name)
def rmchars(value):
    """Strip special characters from an alphanumeric value, keeping only
    ASCII letters, digits, the period (.) and the negative sign (-).

    :param value: Alphanumeric value
    :type value: string
    :returns: Alphanumeric value stripped of any special characters
    :rtype: string

    >>> import utils
    >>> utils.rmchars(value = "*6.5_")
    '6.5'
    >>> utils.rmchars(value = "ICE")
    'ICE'
    >>> utils.rmchars(value = "-4.2")
    '-4.2'
    >>> utils.rmchars(value = "%&!@#8.32&#*;")
    '8.32'
    """
    return re.sub(r"[^A-Za-z0-9.-]+", "", value)
def is_float(value):
    """Return True if a string value can be converted to a float and False otherwise.

    :param value: Value to check
    :rtype: bool

    >>> import utils
    >>> utils.is_float(value = "2.5")
    True
    >>> utils.is_float(value = "hello world")
    False
    >>> utils.is_float(value = "5.5_")
    False
    """
    try:
        float(value)
    except ValueError:
        return False
    return True
def to_float(value):
    """Convert a value to a float type.

    Special characters are stripped first (see :func:`rmchars`).

    :param value: Value to convert to float
    :returns: Value as a float
    :rtype: float
    :raises ValueError: when the cleaned value is still not a valid float
    """
    cleaned = rmchars(value)
    if not is_float(cleaned):
        raise ValueError("Can not convert {} value to a float".format(cleaned))
    return float(cleaned)
def to_nan(value, msg=None):
    """Convert a value to numpy's nan, optionally printing a message first.

    :param value: Value being replaced (not inspected; kept for caller symmetry)
    :param msg: Optional message to print to screen
    :returns: Numpy nan value
    :rtype: float
    """
    if msg:
        print(msg)
    return np.nan
def subset_data(dates, values, start_date, end_date):
    """Return the subset of *dates* and *values* between two boundary dates.

    Boundaries lying outside the range of *dates* are clamped to the first
    and last available dates.

    :param dates: Array of dates as datetime objects
    :type dates: numpy.ndarray
    :param values: Array of numeric values
    :type values: numpy.ndarray
    :param start_date: A datetime object
    :type start_date: datetime.datetime
    :param end_date: A datetime object
    :type end_date: datetime.datetime
    :returns: A subset of dates and values
    :rtype: tuple
    """
    if len(dates) != len(values):
        raise ValueError("Length of dates {} does not equal length of values {}".format(len(dates), len(values)))
    # Clamp out-of-range boundaries to the available span.
    if start_date < dates[0] or start_date > dates[-1]:
        start_date = dates[0]
    if end_date < dates[0] or end_date > dates[-1]:
        end_date = dates[-1]
    # Boundary indices; int() unwraps the single-element index arrays.
    # NOTE(review): assumes both boundaries are elements of ``dates`` — a date
    # strictly between samples would make np.where() come back empty. Confirm
    # against callers.
    start_idx = int(np.where(dates == start_date)[0])
    end_idx = int(np.where(dates == end_date)[0])
    # Slices are inclusive of the end boundary.
    return dates[start_idx:end_idx + 1], values[start_idx:end_idx + 1]
def find_start_end_dates(dates1, dates2):
    """Find the common start and end dates of two date sequences that may not
    have the same length.

    The start date is the later of the two first elements; the end date is the
    earlier of the two last elements.

    :param dates1: List or array of datetime objects
    :type dates1: list or numpy.ndarray
    :param dates2: List or array of datetime objects
    :type dates2: list or numpy.ndarray
    :returns: Tuple of start date and end date
    :rtype: tuple
    :raises: ValueError for non overlapping dates
    """
    # The sequences must share at least one date to overlap.
    if not set(dates1) & set(dates2):
        raise ValueError("No overlapping dates.")
    start_date = max(dates1[0], dates2[0])
    end_date = min(dates1[-1], dates2[-1])
    return start_date, end_date
def add_ending(file, suffix, ext, delimiter="-"):
    """Build a new file name by appending *suffix* and swapping the extension.

    :param file: File or path to file
    :type file: string
    :param suffix: Suffix to add to end of file
    :type suffix: string
    :param ext: File extension; empty keeps the original extension
    :type ext: string
    :param delimiter: Delimiter, default is the dash character
    :type delimiter: string
    :returns: New file
    :rtype: string

    .. note::
        Spaces in filenames are replaced by delimiter to keep with Unix file naming conventions.

    >>> import utils
    >>> utils.add_ending(file="dv.txt", suffix="summary", ext=".txt")
    'dv-summary.txt'
    >>> utils.add_ending(file="dv.rdb", suffix="summary", ext=".rst", delimiter="_")
    'dv_summary.rst'
    >>> utils.add_ending(file="/home/play/dv.rdb", suffix="summary", ext=".rst")
    '/home/play/dv-summary.rst'
    >>> utils.add_ending(file="daily values.rdb", suffix="summary", ext=".rst")
    'daily-values-summary.rst'
    """
    assert isinstance(file, str), "File must be a string."
    assert isinstance(suffix, str), "Suffix must be a string."
    assert isinstance(ext, str), "Extension must be a string."
    assert isinstance(delimiter, str), "Delimiter must be a string."
    directory, fullname = os.path.split(file)
    stem, original_ext = os.path.splitext(fullname)
    # Whitespace splits the stem into delimiter-joined pieces; the suffix
    # becomes one more piece at the end.
    pieces = stem.split()
    if suffix:
        pieces.append(suffix)
    new_name = delimiter.join(pieces) + (ext or original_ext)
    return os.path.join(directory, new_name)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2013 Raphaël Barrois
import sys
if sys.version_info[:2] < (2, 7): # pragma: no cover
import unittest2 as unittest
else: # pragma: no cover
import unittest
if sys.version_info[0] >= 3: # pragma: no cover
import unittest.mock as mock
else: # pragma: no cover
import mock
|
from __future__ import absolute_import, print_function
import errno
import os
import sys
import json
import time
from typing import TYPE_CHECKING
import psutil
MAX_ARGUMENTS_JSON_LENGTH = 65536
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
import functools
import logging
import tempfile
import traceback
# noinspection PyCompatibility
from builtins import map
from future.utils import iteritems
from simpleflow import compat, format
from simpleflow.exceptions import ExecutionError, ExecutionTimeoutError
from simpleflow.utils import json_dumps
if TYPE_CHECKING:
from typing import Any, Iterable # NOQA
import inspect # NOQA
__all__ = ['program', 'python']
class RequiredArgument(object):
    """Sentinel used as a default value to mark keyword arguments that the
    caller must explicitly supply (checked in ``check_keyword_arguments``)."""
    pass
def format_arguments(*args, **kwargs):
    """
    Return the values of *args* and *kwargs* as command line options.

    :param args: that can be converted to strings.
    :type args: tuple.
    :param kwargs: whose keys and values can be converted to strings.
    :type kwargs: dict.
    :returns: keyword options followed by positional arguments.
    :rtype: list.

    The elements of *args* must be convertible to strings and are used as
    positional arguments. The items of *kwargs* become key/value options
    (-c=1): one hyphen for short options (-c), two for long options (--val).

    Examples:

    >>> sorted(format_arguments('a', 'b', c=1, val=2))
    ['--val="2"', '-c="1"', 'a', 'b']

    """
    def dash_prefix(key):
        # single-character keys are short options, longer keys long options
        return ('-' if len(key) == 1 else '--') + str(key)

    options = ['{}="{}"'.format(dash_prefix(key), value)
               for key, value in kwargs.items()]
    positionals = [str(value) for value in args]
    return options + positionals
def zip_arguments_defaults(argspec):
    # type: (inspect.ArgSpec) -> Iterable
    """Pair each defaulted argument name with its default value."""
    defaults = argspec.defaults
    if not defaults:
        return []
    # Defaults align with the *last* len(defaults) argument names.
    return zip(argspec.args[-len(defaults):], defaults)
def check_arguments(argspec, args):
    # type: (inspect.ArgSpec, Any) -> None
    """Validates there is the right number of arguments"""
    if argspec.varargs:
        return  # *args absorbs any number of positionals
    # func() or func(**kwargs) or func(a=1, b=2)
    if not argspec.args and args:
        raise TypeError('command does not take varargs')
    # Calling func(a, b) with func(1, 2, 3)
    if argspec.args and len(args) != len(argspec.args):
        raise TypeError('command takes {} arguments: {} passed'.format(
            len(argspec.args),
            len(args)))
def check_keyword_arguments(argspec, kwargs):
    # type: (inspect.ArgSpec, dict) -> None
    """Validate *kwargs* against *argspec*.

    Raises TypeError when the function accepts no keyword arguments at all,
    or when a RequiredArgument-defaulted keyword is missing from *kwargs*.
    """
    # func() or func(*args) or func(a, b): no **kwargs and no defaults
    if kwargs and not argspec.keywords and not argspec.defaults:
        raise TypeError('command does not take keyword arguments')
    required = set(
        name
        for name, default in zip_arguments_defaults(argspec)
        if default is RequiredArgument
    )
    # Calling func(a=1, b) with func(2) instead of func(a=0, 2)
    missing = required - set(kwargs)
    if missing:
        raise TypeError('argument{} "{}" not found'.format(
            's' if len(missing) > 1 else '',
            ', '.join(missing)))
def format_arguments_json(*args, **kwargs):
    """Serialize positional and keyword arguments into one JSON document."""
    return json_dumps({'args': args, 'kwargs': kwargs})
def get_name(func):
    """
    Returns the dotted name of a callable.

    It handles different types of callable: function, callable object with
    ``__call__`` method and callable objects that provide their name in the
    ``name`` attributes.

    :type func: callable.
    :returns: ``module.name`` path of the callable.
    :rtype: str.
    :raises ValueError: when *func* is not callable.
    """
    # Bug fix: validate callability *before* touching ``__module__`` so a
    # non-callable that lacks that attribute raises the documented ValueError
    # instead of an AttributeError.
    if not callable(func):
        raise ValueError('{} is not callable'.format(
            func))
    prefix = func.__module__
    if hasattr(func, 'name'):
        name = func.name
    elif hasattr(func, '__name__'):
        name = func.__name__
    else:
        # Callable instance without a name: fall back to its class name.
        name = func.__class__.__name__
    return '.'.join([prefix, name])
def wait_subprocess(process, timeout=None, command_info=None):
    """
    Wait for a process, raise if timeout.

    :param process: the process to wait
    :type process: subprocess.Popen
    :param timeout: timeout after 'timeout' seconds; falsy means wait forever
    :type timeout: int | None
    :param command_info: command description attached to ExecutionTimeoutError
    :returns: return code
    :rtype: int.
    :raises ExecutionTimeoutError: when the process is still running after
        *timeout* seconds (SIGTERM is sent first).
    """
    if timeout:
        t_start = time.time()
        rc = process.poll()
        # Poll once a second until the process exits or the timeout elapses.
        while time.time() - t_start < timeout and rc is None:
            time.sleep(1)
            rc = process.poll()
        if rc is None:
            # Timed out: ask the process to stop before raising.
            try:
                process.terminate()  # send SIGTERM
            except OSError as e:
                # Ignore that exception the case the sub-process already terminated after last poll() call.
                if e.errno == errno.ESRCH:
                    return process.poll()
                else:
                    raise
            raise ExecutionTimeoutError(command=command_info, timeout_value=timeout)
        return rc
    # No timeout requested: block until the process exits.
    return process.wait()
def python(interpreter='python', logger_name=__name__, timeout=None, kill_children=False):
    """
    Execute a callable as an external Python program.

    One of the use cases is to use a different interpreter than the current one
    such as pypy.

    Arguments of the decorated callable must be serializable in JSON.

    :param interpreter: Python executable used to run the callable.
    :param logger_name: logger name forwarded to the child process.
    :param timeout: seconds to wait for the child before killing it (None = forever).
    :param kill_children: when True, ask the child to kill its own subprocesses on exit.
    """
    def wrap_callable(func):
        @functools.wraps(func)
        def execute(*args, **kwargs):
            logger = logging.getLogger(logger_name)
            command = 'simpleflow.execute'  # name of a module.
            sys.stdout.flush()
            sys.stderr.flush()
            result_str = None  # useless
            context = kwargs.pop('context', {})
            # Result and error payloads come back from the child through
            # inherited file descriptors rather than stdout/stderr.
            with tempfile.TemporaryFile() as result_fd, tempfile.TemporaryFile() as error_fd:
                dup_result_fd = os.dup(result_fd.fileno())  # remove FD_CLOEXEC
                dup_error_fd = os.dup(error_fd.fileno())  # remove FD_CLOEXEC
                arguments_json = format_arguments_json(*args, **kwargs)
                full_command = [
                    interpreter, '-m', command,  # execute module a script.
                    get_name(func),
                    '--logger-name={}'.format(logger_name),
                    '--result-fd={}'.format(dup_result_fd),
                    '--error-fd={}'.format(dup_error_fd),
                    '--context={}'.format(json_dumps(context)),
                ]
                if len(arguments_json) < MAX_ARGUMENTS_JSON_LENGTH:  # command-line limit on Linux: 128K
                    full_command.append(arguments_json)
                    arg_file = None
                    arg_fd = None
                else:
                    # Arguments too large for the command line: hand them to
                    # the child through an extra inherited temp-file descriptor.
                    arg_file = tempfile.TemporaryFile()
                    arg_file.write(arguments_json.encode('utf-8'))
                    arg_file.flush()
                    arg_file.seek(0)
                    arg_fd = os.dup(arg_file.fileno())
                    full_command.append('--arguments-json-fd={}'.format(arg_fd))
                    full_command.append('foo')  # dummy funcarg
                if kill_children:
                    full_command.append('--kill-children')
                if compat.PY2:  # close_fds doesn't work with python2 (using its C _posixsubprocess helper)
                    close_fds = False
                    pass_fds = []
                else:
                    close_fds = True
                    pass_fds = [dup_result_fd, dup_error_fd]
                    if arg_file:
                        pass_fds.append(arg_fd)
                process = subprocess.Popen(
                    full_command,
                    bufsize=-1,
                    close_fds=close_fds,
                    pass_fds=pass_fds,
                )
                rc = wait_subprocess(process, timeout=timeout, command_info=full_command)
                os.close(dup_result_fd)
                os.close(dup_error_fd)
                if arg_file:
                    arg_file.close()
                if rc:
                    # Non-zero exit: the child wrote serialized error details
                    # on the error FD; surface them as an ExecutionError.
                    error_fd.seek(0)
                    err_output = error_fd.read()
                    if err_output:
                        if not compat.PY2:
                            err_output = err_output.decode('utf-8', errors='replace')
                    raise ExecutionError(err_output)
                result_fd.seek(0)
                result_str = result_fd.read()
            if not result_str:
                return None
            try:
                if not compat.PY2:
                    result_str = result_str.decode('utf-8', errors='replace')
                result = format.decode(result_str)
                return result
            except BaseException as ex:
                # Decoding failures are logged (with the raw payload) rather
                # than propagated; the call then implicitly returns None.
                logger.exception('Exception in python.execute: {} {}'.format(ex.__class__.__name__, ex))
                logger.warning('%r', result_str)
        # Not automatically assigned in python < 3.2.
        execute.__wrapped__ = func
        execute.add_context_in_kwargs = True
        return execute
    return wrap_callable
def program(path=None, argument_format=format_arguments):
    r"""
    Decorate a callable to execute it as an external program.

    :param path: of the program to execute. If it is ``None`` the name of the
                 executable will be the name of the callable.
    :type path: str.
    :param argument_format: takes the arguments of the callable and converts
                            them to command line arguments.
    :type argument_format: callable(*args, **kwargs).
    :returns:
    :rtype: callable(*args, **kwargs).

    Examples
    --------
    >>> @program()
    ... def ls(path):
    ...     pass
    >>> ls('/etc/resolv.conf')
    '/etc/resolv.conf\n'

    It will execute the ``ls`` command and requires a single positional
    argument *path*.
    """
    import inspect
    from collections import namedtuple

    # Bug fix: ``inspect.ArgSpec`` and ``inspect.getargspec`` were removed in
    # Python 3.11, so the old try/except dance crashed there. Build an
    # equivalent record locally; the fallback is kept for Python 2.
    _ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')

    def wrap_callable(func):
        @functools.wraps(func)
        def execute(*args, **kwargs):
            check_arguments(argspec, args)
            check_keyword_arguments(argspec, kwargs)
            command = path or func.__name__
            return subprocess.check_output(
                [command] + argument_format(*args, **kwargs),
                universal_newlines=True)
        try:
            full = inspect.getfullargspec(func)
            argspec = _ArgSpec(full.args, full.varargs, full.varkw, full.defaults)
        except AttributeError:
            # Python 2: getfullargspec does not exist.
            # noinspection PyDeprecation
            argspec = inspect.getargspec(func)
        # Not automatically assigned in python < 3.2.
        execute.__wrapped__ = func
        return execute
    return wrap_callable
def make_callable(funcname):
    """
    Return a callable object from a string.

    This function resolves a name into a callable object. It automatically
    loads the required modules. If there is no module path, it considers the
    callable is a builtin.

    :param funcname: name of the callable.
    :type funcname: str.
    :returns:
    :rtype: callable.

    Examples
    --------
    Loading a function from a library:

    >>> func = make_callable('itertools.chain')
    >>> list(func(range(3), range(4)))
    [0, 1, 2, 0, 1, 2, 3]

    Loading a builtin:

    >>> func = make_callable('map')
    >>> list(func(lambda x: x + 1, range(4)))
    [1, 2, 3, 4]
    """
    if '.' in funcname:
        module_name, object_name = funcname.rsplit('.', 1)
    else:
        # No module path: assume a builtin.
        module_name, object_name = 'builtins', funcname
    module = __import__(module_name, fromlist=['*'])
    try:
        return getattr(module, object_name)
    except AttributeError:
        raise AttributeError('module {} has no attribute {}'.format(
            module.__name__,
            object_name,
        ))
def main():
    """
    When executed as a script, this module expects the name of a callable as
    its first argument and the arguments of the callable encoded in a JSON
    string as its second argument. It then executes the callable with the
    arguments after decoding them into Python objects. It finally encodes the
    value returned by the callable into a JSON string and prints it on stdout.

    the arguments of the callable are stored in a dict with the following
    format: ::

        {'args': [...],
         'kwargs': {
            ...,
         }
        }

    Synopsis
    --------
    ::

        usage: execute.py [-h] funcname funcargs

        positional arguments:
          funcname    name of the callable to execute
          funcargs    callable arguments in JSON

        optional arguments:
          -h, --help  show this help message and exit

    Examples
    --------
    ::

        $ python -m simpleflow.execute "os.path.exists" '{"args": ["/tmp"]}'
        true

    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'funcname',
        help='name of the callable to execute',
    )
    parser.add_argument(
        'funcargs',
        help='callable arguments in JSON',
    )
    parser.add_argument(
        '--context',
        help='Activity Context',
    )
    parser.add_argument(
        '--logger-name',
        help='logger name',
    )
    # Results and errors are written to file descriptors inherited from the
    # parent process (see the ``python`` decorator above); they default to
    # stdout/stderr for standalone use.
    parser.add_argument(
        '--result-fd',
        type=int,
        default=1,
        metavar='N',
        help='result file descriptor',
    )
    parser.add_argument(
        '--error-fd',
        type=int,
        default=2,
        metavar='N',
        help='error file descriptor',
    )
    parser.add_argument(
        '--arguments-json-fd',
        type=int,
        default=None,
        metavar='N',
        help='JSON input file descriptor',
    )
    parser.add_argument(
        '--kill-children',
        action='store_true',
        help='kill child processes on exit',
    )
    cmd_arguments = parser.parse_args()

    def kill_child_processes():
        # Terminate all descendants gracefully, then kill any survivors.
        process = psutil.Process(os.getpid())
        children = process.children(recursive=True)
        for child in children:
            try:
                child.terminate()
            except psutil.NoSuchProcess:
                pass
        _, still_alive = psutil.wait_procs(children, timeout=0.3)
        for child in still_alive:
            try:
                child.kill()
            except psutil.NoSuchProcess:
                pass
    funcname = cmd_arguments.funcname
    # Arguments come either inline on the command line or, when too large,
    # through a dedicated file descriptor.
    if cmd_arguments.arguments_json_fd is None:
        content = cmd_arguments.funcargs
        if content is None:
            parser.error('the following arguments are required: funcargs')
    else:
        with os.fdopen(cmd_arguments.arguments_json_fd) as arguments_json_file:
            content = arguments_json_file.read()
    try:
        arguments = format.decode(content)
    except Exception:
        raise ValueError('cannot load arguments from {}'.format(
            content))
    if cmd_arguments.logger_name:
        logger = logging.getLogger(cmd_arguments.logger_name)
    else:
        logger = logging.getLogger(__name__)
    callable_ = make_callable(funcname)
    # Unwrap decorated callables so the real function is executed directly.
    if hasattr(callable_, '__wrapped__'):
        callable_ = callable_.__wrapped__
    args = arguments.get('args', ())
    kwargs = arguments.get('kwargs', {})
    context = json.loads(cmd_arguments.context) if cmd_arguments.context is not None else None
    try:
        # Class-style callables expose an ``execute`` method; plain callables
        # are invoked directly. The optional context is attached beforehand.
        if hasattr(callable_, 'execute'):
            inst = callable_(*args, **kwargs)
            if context is not None:
                inst.context = context
            result = inst.execute()
            if hasattr(inst, 'post_execute'):
                inst.post_execute()
        else:
            if context is not None:
                callable_.context = context
            result = callable_(*args, **kwargs)
    except Exception as err:
        # Serialize the failure for the parent process, then exit non-zero.
        logger.error('Exception: {}'.format(err))
        exc_type, exc_value, exc_traceback = sys.exc_info()
        tb = traceback.format_tb(exc_traceback)
        details = json_dumps(
            {
                'error': exc_type.__name__,
                'message': str(exc_value),
                'traceback': tb,
            },
            default=repr,
        )
        if cmd_arguments.error_fd == 2:
            sys.stderr.flush()
        if not compat.PY2:
            details = details.encode('utf-8')
        os.write(cmd_arguments.error_fd, details)
        if cmd_arguments.kill_children:
            kill_child_processes()
        sys.exit(1)
    if cmd_arguments.result_fd == 1:  # stdout (legacy)
        sys.stdout.flush()  # may have print's in flight
        os.write(cmd_arguments.result_fd, b'\n')
    result = json_dumps(result)
    if not compat.PY2:
        result = result.encode('utf-8')
    os.write(cmd_arguments.result_fd, result)
    if cmd_arguments.kill_children:
        kill_child_processes()
# Script entry point: run the callable described on the command line.
if __name__ == '__main__':
    main()
|
import sys
def main(fname):
    """Count operation occurrences in a .circ file.

    The last whitespace-separated token of each line is treated as the
    operation name; "IN" and "OUT" markers are skipped. The tally is printed
    and also returned so callers can use it programmatically (previously it
    was only printed).

    :param fname: path of the circuit file to read
    :returns: dict mapping operation name -> occurrence count
    """
    op_counts = {}
    with open(fname, "r") as fileobj:
        for line in fileobj:  # iterate lazily instead of readlines()
            token = line.strip().split(" ")[-1]
            if token not in ("IN", "OUT"):
                op_counts[token] = op_counts.get(token, 0) + 1
    print(op_counts)
    return op_counts
if __name__ == "__main__":
    # Bug fix: the old check indexed split(".")[1], which raised IndexError
    # for names without a dot and wrongly rejected names containing several
    # dots (e.g. "a.b.circ"); endswith() handles both correctly.
    if len(sys.argv) != 2 or not sys.argv[1].endswith(".circ"):
        print("Format: python3 {} [file ending in circ]".format(sys.argv[0]))
        sys.exit()
    main(sys.argv[1])
|
from django.conf import settings
from django.conf.urls.static import static
from django.urls import include, path
from django.contrib import admin
from scaife_viewer.core.views import (
LibraryCollectionVectorView,
LibraryCollectionView,
LibraryInfoView,
LibraryPassageView,
LibraryView,
Reader,
library_text_redirect,
morpheus,
search,
search_json,
)
from .views import about, app, home, profile
# JSON/XML endpoints, registered under the "api" namespace.
api_patterns = (
    [
        path("library/json/", LibraryView.as_view(format="json"), name="library"),
        path("library/json/info", LibraryInfoView.as_view(), name="library_info"),
        path("library/vector/<str:urn>/", LibraryCollectionVectorView.as_view(), name="library_collection_vector"),
        path("library/passage/<str:urn>/json/", LibraryPassageView.as_view(format="json"), name="library_passage"),
        path("library/passage/<str:urn>/text/", LibraryPassageView.as_view(format="text"), name="library_passage_text"),
        path("library/passage/<str:urn>/xml/", LibraryPassageView.as_view(format="xml"), name="library_passage_xml"),
        path("library/<str:urn>/json/", LibraryCollectionView.as_view(format="json"), name="library_collection"),
        path("search/json/", search_json, name="search"),
        path("morpheus/", morpheus, name="morpheus"),
    ],
    "api",
)
# Site-level pages: home, auth, admin, search and third-party apps.
site_patterns = [
    path("", home, name="home"),
    path("about/", about, name="about"),
    path("admin/", admin.site.urls),
    path("account/", include("account.urls")),
    path("profile/", profile, name="profile"),
    path("search/", search, name="search"),
    path("reading/", include("sv_pdl.reading.urls")),
    path("openid/", include("oidc_provider.urls", namespace="oidc_provider")),
    path(".well-known/", include("letsencrypt.urls")),
]
# HTML views from scaife_viewer.core, plus the API patterns above.
scaife_viewer_patterns = [
    path("", include(api_patterns)),
    path("library/", LibraryView.as_view(format="html"), name="library"),
    path("library/<str:urn>/", LibraryCollectionView.as_view(format="html"), name="library_collection"),
    path("library/<str:urn>/redirect/", library_text_redirect, name="library_text_redirect"),
    path("reader/<str:urn>/", Reader.as_view(), name="reader"),
]
# The catch-all "<path:path>/" route must stay last so it does not shadow
# the more specific routes above.
urlpatterns = site_patterns + scaife_viewer_patterns + [
    path(
        "atlas/",
        include("scaife_viewer.atlas.urls")
    ),
    path("<path:path>/", app, name="app"),
]
# Serve uploaded media in development.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
"""
This module contains class Model.
"""
# pylint: disable=bad-continuation
import torch
import torch.nn as nn
class ZSamplePredsMixin:
    """
    This mixin adds a method to get predictions for a set of z-samples.
    """

    # pylint: disable=too-few-public-methods

    def get_z_sample_preds(self, x_pt, z_samples):
        """
        Evaluate the model on every point in x once per sample in z_samples.

        Returns a tensor of shape (z-samples, data points) whose entry [s, i]
        is the model output for input i paired with z-sample s.
        """
        num_samples = z_samples.shape[0]
        num_points = x_pt.shape[0]
        # Targets are computed without gradients.
        with torch.no_grad():
            # Repeat each z-sample once per data point so it lines up with the
            # tiled batch below:
            # [z0, z0, ..., z0, z1, z1, ..., z1, ..., zS, ..., zS]
            # shape: (data points * z-samples, z-sample dimensions)
            tiled_z = torch.repeat_interleave(z_samples, num_points, dim=0).to(
                device=self.device
            )
            # Tile the whole batch once per z-sample:
            # [x0, x1, ..., xn, x0, x1, ..., xn, ..., x0, x1, ..., xn]
            # shape: (data points * z-samples, input dimensions)
            tiled_x = torch.cat(num_samples * [x_pt]).to(device=self.device)
            # Single forward pass over every (x, z) pairing.
            # shape: (data points * z-samples, output dimensions)
            flat_preds = self.forward_z(tiled_x, tiled_z)
            # Reshape to one row per z-sample and one column per data point:
            # [[y(x0,z0), y(x1,z0), ..., y(xn,z0)],
            #  ...,
            #  [y(x0,zS), y(x1,zS), ..., y(xn,zS)]]
            return flat_preds.view(num_samples, num_points)
# Width of the hidden fully-connected layers in Model.
DEFAULT_HIDDEN_SIZE = 1024
# DEFAULT_HIDDEN_SIZE = 256
# Dimensionality of the network output (linear4).
OUT_SIZE = 1
class Model(nn.Module, ZSamplePredsMixin):
    """
    This is the neural network model.

    A four-layer fully connected network with leaky-ReLU activations mapping
    a concatenated (x, z) input to a single output value.
    """

    def __init__(
        self, z_space_size, x_space_size, device, hidden_size=DEFAULT_HIDDEN_SIZE
    ):
        """
        :param z_space_size: dimensionality of the z-sample input
        :param x_space_size: dimensionality of the x input
        :param device: torch device used by the mixin when tiling inputs
        :param hidden_size: width of the hidden layers
        """
        # Perform initialization of the pytorch superclass. (Fix: the previous
        # code invoked super().__init__() twice — once argument-less and once
        # as super(Model, self).__init__() — a single call is sufficient.)
        super().__init__()
        self.device = device
        self.hidden_size = hidden_size
        # Define layer types
        self.linear1 = nn.Linear(x_space_size + z_space_size, self.hidden_size)
        self.linear2 = nn.Linear(self.hidden_size, self.hidden_size)
        self.linear3 = nn.Linear(self.hidden_size, self.hidden_size)
        self.linear4 = nn.Linear(self.hidden_size, OUT_SIZE)

    def forward_z(self, x_pt, z_pt):
        """
        This method runs a forward pass through the model with the provided
        input x and z-samples, flattened and concatenated feature-wise.
        """
        x_pt = torch.cat(
            (x_pt.view(x_pt.size(0), -1), z_pt.view(z_pt.size(0), -1)), dim=1
        )
        return self.forward(x_pt)

    def forward(self, x_pt):
        """
        This method runs a forward pass through the model with the provided input x.
        """
        x_pt = self.linear1(x_pt)
        x_pt = torch.nn.functional.leaky_relu(x_pt, 0.1)
        x_pt = self.linear2(x_pt)
        x_pt = torch.nn.functional.leaky_relu(x_pt, 0.1)
        x_pt = self.linear3(x_pt)
        x_pt = torch.nn.functional.leaky_relu(x_pt, 0.1)
        x_pt = self.linear4(x_pt)
        return x_pt
|
"""Contains base class for ``SqrtGGN{Exact, MC}`` module extensions."""
from __future__ import annotations
from typing import TYPE_CHECKING, Callable, List, Tuple, Union
from torch import Tensor
from torch.nn import Module
from backpack.core.derivatives.basederivatives import BaseDerivatives
from backpack.extensions.mat_to_mat_jac_base import MatToJacMat
if TYPE_CHECKING:
from backpack.extensions.secondorder.sqrt_ggn import SqrtGGNExact, SqrtGGNMC
class SqrtGGNBaseModule(MatToJacMat):
    """Base module extension for ``SqrtGGN{Exact, MC}``."""

    def __init__(self, derivatives: BaseDerivatives, params: List[str] = None):
        """Store parameter names and derivatives.

        Sets up methods that extract the GGN/Fisher matrix square root for the
        passed parameters, unless these methods are overwritten by a child class.

        Args:
            derivatives: derivatives object.
            params: List of parameter names. Defaults to None.
        """
        for name in params or []:
            # Respect extraction methods a subclass already defines.
            if not hasattr(self, name):
                setattr(self, name, self._make_param_function(name))
        super().__init__(derivatives, params=params)

    def _make_param_function(
        self, param_str: str
    ) -> Callable[
        [Union[SqrtGGNExact, SqrtGGNMC], Module, Tuple[Tensor], Tuple[Tensor], Tensor],
        Tensor,
    ]:
        """Create a function that computes the GGN/Fisher square root for a parameter.

        Args:
            param_str: name of parameter

        Returns:
            Function that computes the GGN/Fisher matrix square root.
        """

        def param_function(
            ext: Union[SqrtGGNExact, SqrtGGNMC],
            module: Module,
            g_inp: Tuple[Tensor],
            g_out: Tuple[Tensor],
            backproped: Tensor,
        ) -> Tensor:
            """Compute the GGN/Fisher matrix square root via the derivatives object.

            Args:
                ext: extension that is used
                module: module that performed forward pass
                g_inp: input gradient tensors
                g_out: output gradient tensors
                backproped: Backpropagated quantities from second-order extension.

            Returns:
                GGN/Fisher matrix square root.
            """
            return self.derivatives.param_mjp(
                param_str,
                module,
                g_inp,
                g_out,
                backproped,
                sum_batch=False,
                subsampling=ext.get_subsampling(),
            )

        return param_function
|
"""Program Created By Vivek Sharma http://doyl.in"""
import socket
def Main():
    """Run a one-shot uppercase echo server on 127.0.0.1:7000.

    Accepts a single client, echoes each received message back upper-cased,
    and stops when the client disconnects (empty recv).
    """
    host = '127.0.0.1'
    port = 7000
    # Fix: the listening socket was never closed; ``with`` guarantees release
    # of both sockets even if recv/send raises.
    with socket.socket() as s:  # socket Created
        s.bind((host, port))  # Port Binded
        s.listen(1)  # Waiting
        connection, addr = s.accept()  # accept With Connection And Address
        print('connectn from' + str(addr))
        try:
            while True:
                data = connection.recv(1024).decode()  # Recv Data Over Connection
                if not data:
                    break
                print('From User Recievd' + str(data))
                data = str(data).upper()
                print('Sending: ' + str(data))
                connection.send(data.encode())  # Sending Data
        finally:
            connection.close()
# Run the echo server when executed directly.
if __name__=='__main__':
    Main()
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
import traceback
from celery import task
from django.core.cache import cache
from django.db import transaction
from django.utils import timezone
from backend.apps.organization.models import SyncErrorLog, SyncRecord
from backend.biz.org_sync.department import DBDepartmentSyncExactInfo, DBDepartmentSyncService
from backend.biz.org_sync.department_member import DBDepartmentMemberSyncService
from backend.biz.org_sync.iam_department import IAMBackendDepartmentSyncService
from backend.biz.org_sync.iam_user import IAMBackendUserSyncService
from backend.biz.org_sync.iam_user_department import IAMBackendUserDepartmentSyncService
from backend.biz.org_sync.syncer import Syncer
from backend.biz.org_sync.user import DBUserSyncService
from backend.biz.org_sync.user_leader import DBUserLeaderSyncService
from .constants import SYNC_TASK_DEFAULT_EXECUTOR, SyncTaskLockKey, SyncTaskStatus, SyncType
logger = logging.getLogger("celery")
@task(ignore_result=True)
def sync_organization(executor: str = SYNC_TASK_DEFAULT_EXECUTOR):
    """Full organization sync task.

    Pulls users/departments from the user-management service into the SaaS DB,
    then pushes the DB copy to the IAM backend. Progress and failures are
    recorded via SyncRecord / SyncErrorLog.

    :param executor: who triggered the run; defaults to the system executor.
    """
    try:
        # Distributed lock: avoid several workers starting this task at once.
        with cache.lock(SyncTaskLockKey.Full.value, timeout=10):  # type: ignore[attr-defined]
            # Note: even holding the lock we must verify no run is in progress,
            # because the lock auto-expires after 10 seconds.
            if SyncRecord.objects.filter(type=SyncType.Full.value, status=SyncTaskStatus.Running.value).exists():
                return
            # Record that a sync run has started.
            record = SyncRecord.objects.create(
                executor=executor, type=SyncType.Full.value, status=SyncTaskStatus.Running.value
            )
            record_id = record.id
    except Exception:  # pylint: disable=broad-except
        traceback_msg = traceback.format_exc()
        exception_msg = "sync_organization cache lock error"
        logger.exception(exception_msg)
        # Acquiring the distributed lock failed: persist a failed record for visibility.
        record = SyncRecord.objects.create(
            executor=executor, type=SyncType.Full.value, status=SyncTaskStatus.Failed.value
        )
        SyncErrorLog.objects.create_error_log(record.id, exception_msg, traceback_msg)
        return
    try:
        # 1. SaaS: sync the organization structure from the user-management service.
        # Users
        user_sync_service = DBUserSyncService()
        # Departments
        department_sync_service = DBDepartmentSyncService()
        # Department-member relations
        department_member_sync_service = DBDepartmentMemberSyncService()
        # User-leader relations
        user_leader_service = DBUserLeaderSyncService()
        # Apply all DB changes atomically so a partial sync cannot be observed.
        with transaction.atomic():
            services = [
                user_sync_service,
                department_sync_service,
                department_member_sync_service,
                user_leader_service,
            ]
            # Apply DB changes in dependency order.
            for service in services:
                service.sync_to_db()
            # Compute and store the departments' derived (denormalized) data.
            DBDepartmentSyncExactInfo().sync_to_db()
        # 2. SaaS: push the organization structure stored in the DB to the IAM backend.
        iam_backend_user_sync_service = IAMBackendUserSyncService()
        iam_backend_department_sync_service = IAMBackendDepartmentSyncService()
        iam_backend_user_department_sync_service = IAMBackendUserDepartmentSyncService()
        iam_services = [
            iam_backend_user_sync_service,
            iam_backend_department_sync_service,
            iam_backend_user_department_sync_service,
        ]
        for iam_service in iam_services:
            iam_service.sync_to_iam_backend()
        sync_status, exception_msg, traceback_msg = SyncTaskStatus.Succeed.value, "", ""
    except Exception:  # pylint: disable=broad-except
        sync_status = SyncTaskStatus.Failed.value
        exception_msg = "sync_organization error"
        traceback_msg = traceback.format_exc()
        logger.exception(exception_msg)
    # Always update the run record; attach the error log only on failure.
    SyncRecord.objects.filter(id=record_id).update(status=sync_status, updated_time=timezone.now())
    if sync_status == SyncTaskStatus.Failed.value:
        SyncErrorLog.objects.create_error_log(record_id, exception_msg, traceback_msg)
@task(ignore_result=True)
def sync_new_users():
    """Periodic task: sync newly added users.

    Skipped entirely while a full organization sync is running, since the full
    sync already covers new users.
    """
    full_sync_running = SyncRecord.objects.filter(
        type=SyncType.Full.value, status=SyncTaskStatus.Running.value
    ).exists()
    if full_sync_running:
        return
    try:
        Syncer().sync_new_users()
    except Exception:  # pylint: disable=broad-except
        logger.exception("sync_new_users error")
|
"""
HTTP Resource for displaying connection in OpenNSA.
Currently rather simple. No CSS, just raw html tables.
Author: Henrik Thostrup Jensen <htj@nordu.net>
Copyright: NORDUnet (2012)
"""
from twisted.web import resource, server
from opennsa import database
HTML_HEADER = """<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>%(title)s</title>
</head>
<body>
"""
HTML_FOOTER = """ </body>
</html>
"""
class ConnectionListResource(resource.Resource):
    """Renders an HTML table of all service connections (async via Deferred)."""

    def __init__(self):
        # The original empty __init__ skipped the parent initializer, leaving
        # twisted.web's child-resource bookkeeping (putChild etc.) unset.
        resource.Resource.__init__(self)

    def render_GET(self, request):
        # Fetch connections asynchronously; the page is written in renderPage.
        d = database.ServiceConnection.find()
        d.addCallback(self.renderPage, request)
        return server.NOT_DONE_YET

    def renderPage(self, connections, request):
        """Write the connection table to the request and finish it."""
        ib = 4 * ' '  # one indent block for the generated markup
        body = """
<h3>Connections</h3>
<p>
<table style="width:95%" border=1>
<thead>
<tr>
<th>Connection Id</th>
<th>Lifecycle state</th>
<th>Source</th>
<th>Destination</th>
<th>Start time</th>
<th>End time</th>
</tr>
</thead>
<tbody>"""
        for c in connections:
            source = c.source_network + ':' + c.source_port + (':' + c.source_label.labelValue() if c.source_label else '')
            dest = c.dest_network + ':' + c.dest_port + (':' + c.dest_label.labelValue() if c.dest_label else '')
            start_time = c.start_time.replace(microsecond=0) if c.start_time is not None else '-'
            # Bug fix: end_time can be None just like start_time (e.g. open-ended
            # reservations); guard it instead of crashing with AttributeError.
            end_time = c.end_time.replace(microsecond=0) if c.end_time is not None else '-'
            body += """
<tr>
<th><div>%s</div></th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
</tr>
""" % (c.connection_id, c.lifecycle_state, source, dest, start_time, end_time)
        body += 4*ib + '</tbody>'
        body += 3*ib + '</table>'
        body = str(body)
        request.write(HTML_HEADER % {'title': 'OpenNSA Connections'} )
        request.write(body)
        request.write(HTML_FOOTER)
        request.finish()
        return server.NOT_DONE_YET
|
# -*- coding: utf-8 -*-
"""
@Time : 2020/8/10 下午12:31
@Auth : apecode.
@File : utili.py
@Software : PyCharm
@Blog : https://liuyangxiong.cn
"""
import time
def getTenAfter():
    """Return the local time ten minutes from now as "%Y-%m-%d %H:%M:%S"."""
    future = int(time.time() + 600)  # 600 seconds = ten minutes ahead
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(future))
def html_format(date, postContext, url, signStr) -> str:
    """Render the check-in notification as a standalone HTML page.

    :param date: heading line (typically the check-in date/time)
    :param postContext: sub-heading describing the submitted form
    :param url: link to the form details (used as both href and link text)
    :param signStr: the reported sign-in location
    :return: the filled-in HTML document as a string
    """
    template = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Title</title>
<style>
body {
background: rgba(34, 29, 29);
}
h1 {
color: crimson;
text-align: center;
}
h2 {
color: #ef3030;
text-align: center;
}
a {
text-decoration: none;
color: #590b81;
}
span {
color: darkslateblue;
}
</style>
</head>
<body>
<h1>%s</h1>
<h2>%s</h2>
<h2><strong>表单详细: </strong><a href="%s">%s</a></h2>
<h2><strong>签到位置: </strong><span>%s</span></h2>
</body>
</html>
"""
    return template % (date, postContext, url, url, signStr)
# Determine the current check-in period.
def getTimePeriod() -> int:
    """Return which check-in window "now" falls in.

    :return: 1 morning (6:30-9:00], 2 noon (12:00-14:30],
             3 evening (21:30-23:00], 0 outside every window.
    """
    def boundary(clock) -> int:
        # Epoch timestamp of today's date combined with the given clock time.
        stamp = time.strftime("%Y-%m-%d {}".format(clock), time.localtime(int(time.time())))
        return int(time.mktime(time.strptime(stamp, '%Y-%m-%d %H:%M:%S')))

    now = int(time.time())
    windows = (
        (1, "6:30:00", "9:00:00"),    # morning check
        (2, "12:00:00", "14:30:00"),  # noon check
        (3, "21:30:00", "23:00:00"),  # evening check
    )
    for code, start, end in windows:
        if boundary(start) < now <= boundary(end):
            return code
    return 0  # not inside any check-in window yet
# Map a numeric period code to today's [start, end] window strings.
def fromIntGetTimePeriod(num: int):
    """Return [start, end] local-time strings for period code ``num``.

    :param num: 1 morning, 2 noon, 3 evening; anything else yields [].
    :return: list of two "%Y-%m-%d H:M:S" strings, or an empty list.
    """
    windows = {
        1: ("6:30:00", "9:00:00"),
        2: ("12:00:00", "14:30:00"),
        # NOTE(review): the evening window here is 19:30-21:30 while
        # getTimePeriod uses 21:30-23:00 — confirm which range is intended.
        3: ("19:30:00", "21:30:00"),
    }
    if num not in windows:
        return []
    now = time.localtime(int(time.time()))
    return [time.strftime("%Y-%m-%d " + clock, now) for clock in windows[num]]
# Decide whether "now" belongs to the morning, noon, or evening check period.
def GenerateNowTime() -> int:
    """Return the epoch timestamp (int) of the start of the current period.

    Periods: now <= 9:00 -> 06:30; 9:00 < now <= 14:30 -> 12:00;
    now > 14:30 -> 19:30.

    Note: the original annotation said ``-> str`` but the function has always
    returned an int epoch timestamp; the annotation is corrected here.
    """
    def boundary(clock: str) -> int:
        # Epoch timestamp of today's date combined with the given clock time.
        stamp = time.strftime("%Y-%m-%d " + clock, time.localtime(int(time.time())))
        return int(time.mktime(time.strptime(stamp, '%Y-%m-%d %H:%M:%S')))

    now = int(time.time())
    if now <= boundary("9:00:00"):
        return boundary("06:30:00")
    if now <= boundary("14:30:00"):
        return boundary("12:00:00")
    return boundary("19:30:00")
def when_time():
    """Classify "now": 0 morning check (<= 9:00), 1 noon check (<= 14:30),
    2 evening check (after 14:30)."""
    def boundary(clock):
        # Epoch timestamp of today's date combined with the given clock time.
        stamp = time.strftime("%Y-%m-%d " + clock, time.localtime(int(time.time())))
        return int(time.mktime(time.strptime(stamp, '%Y-%m-%d %H:%M:%S')))

    now = int(time.time())
    if now <= boundary("9:00:00"):
        return 0
    if now <= boundary("14:30:00"):
        return 1
    return 2
|
import random
import sqlalchemy as sa
from sqlalchemy.schema import MetaData
from sqlalchemy.ext.declarative import as_declarative
from falcon_helpers.sqla.core import utcnow
from falcon_helpers.sqla.utils import random_data_for_type
from falcon_helpers.sqla.db import session
# Constraint-naming conventions so generated indexes/constraints get
# deterministic, portable names (useful for Alembic migrations).
convention = {
    "ix": 'ix_%(column_0_label)s',
    "uq": "uq_%(table_name)s_%(column_0_name)s",
    "ck": "ck_%(table_name)s_%(constraint_name)s",
    "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
    "pk": "pk_%(table_name)s"
}
# Shared metadata used by the declarative base below.
metadata = MetaData(naming_convention=convention)
class BaseColumns:
    """Mixin providing the surrogate primary key and audit timestamps."""
    # Surrogate primary key.
    id = sa.Column(sa.Integer, primary_key=True, nullable=False)
    # Set by the database when the row is inserted (UTC).
    created_ts = sa.Column(sa.DateTime, server_default=utcnow())
    # Set on insert; refreshed by the database when the row is updated (UTC).
    updated_ts = sa.Column(sa.DateTime, server_default=utcnow(),
                           server_onupdate=utcnow())
class BaseFunctions:
    """Mixin with generic ORM helper utilities."""

    @classmethod
    def orm_column_names(cls):
        """Return the mapped attribute names of this model as a frozenset."""
        mapper = sa.inspect(cls)
        return frozenset(attr.key for attr in mapper.attrs)
class Testable:
    """Mixin that can fabricate an instance with random column data for tests."""
    # When True, nullable columns are randomly left as None by testing_create.
    testing_random_nulls = True

    @classmethod
    def testing_create(cls, _commit=True, **kwargs):
        """Create an object for testing with default data appropriate for the
        field type

        :param _commit: when True (default) the object is added to the session
            and committed; when False the unsaved instance is returned.
        :param _numeric_defaults_range: a tuple of (LOW, HIGH) which controls
            the acceptable defaults of the number types. Both integer and
            numeric (float) fields are controlled by this setting. This is
            helpful when some fields have a constrained value.

        ATTRIBUTION: This was largely copied from the wonderful folks at Level
        12, they write high-class software, check them out: https://level12.io
        """
        NUMERIC_LOW, NUMERIC_HIGH = kwargs.pop('_numeric_defaults_range', (-100, 100))
        insp = sa.inspection.inspect(cls)

        def skippable(column):
            # Columns that must not receive generated data.
            return (
                column.key in kwargs  # skip fields already in kwargs
                or column.foreign_keys  # skip foreign keys
                or column.server_default  # skip fields with server defaults
                or column.default  # skip fields with defaults
                or column.primary_key)  # skip any primary key

        for column in (col for col in insp.columns if not skippable(col)):
            try:
                if column.nullable and cls.testing_random_nulls and random.choice([True, False]):
                    kwargs[column.key] = None
                else:
                    kwargs[column.key] = random_data_for_type(
                        column, NUMERIC_HIGH, NUMERIC_LOW)
            except ValueError:
                # random_data_for_type cannot handle this column type; leave it unset.
                pass
        obj = cls(**kwargs)
        if _commit:
            session.add(obj)
            session.commit()
        return obj
@as_declarative(metadata=metadata)
class ModelBase(Testable, BaseFunctions):
    """Declarative base combining the testing and generic ORM helpers."""
    # Model.query-style access bound to the scoped session.
    query = session.query_property()
|
# coding=utf8
from .data import DataBase
from huskar_api.models.dataware.zookeeper import config_client
class ConfigData(DataBase):
    """Config-type data accessor backed by the ZooKeeper config client."""
    # Client used by the DataBase machinery for all config reads/writes.
    client = config_client
|
"""
Problem 4:
Find the largest palindrome made from the product of two 3-digit numbers.
"""
def is_polindrome(n):
    """Return True if n reads the same forwards and backwards."""
    s = str(n)
    return s == s[::-1]

polindrome = 0
# Upper bound is 1000 so that 999 (a valid 3-digit number) is included;
# the original range(100, 999) silently skipped it.
for i in range(100, 1000):
    # Start j at i: products are symmetric, so each pair is checked once.
    for j in range(i, 1000):
        product = i * j
        # Cheap numeric comparison first; only palindrome-test candidates
        # that could actually improve the answer.
        if product > polindrome and is_polindrome(product):
            polindrome = product
print('answer: %d' % polindrome)
|
import requests
from fake_useragent import UserAgent
from components.headers import get_headers
from components.proxies import getProxies
from components.login import login
from components.download import download
from arguments import wallhaven_argparse
# Shared user-agent source and parsed CLI arguments (module level so tests
# and the __main__ block see the same objects).
ua = UserAgent(use_cache_server=False, path="components/fake_useragent.json")
args = wallhaven_argparse().parse_args()

if __name__ == "__main__":
    # One session so login cookies persist across the subsequent requests.
    session = requests.Session()
    headers = get_headers(ua=ua)
    # Idiom fix: truth-test the flag instead of comparing `== True`.
    if args.use_proxy:
        proxies = getProxies(proxy=args.proxy, http=False, https=True)
    else:
        proxies = None
    # Fetch the login page (for tokens/cookies), authenticate, then download.
    response = session.get(url=args.login_url_get, headers=headers)
    response = login(args=args, response=response, session=session, headers=headers)
    download(
        args=args, response=response, session=session, headers=headers, proxies=proxies
    )
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from string import Template
import optparse
import os
import sys
try:
grit_module_path = os.path.join(
os.path.dirname(__file__), '..', '..', '..', 'tools', 'grit')
sys.path.insert(0, grit_module_path)
from grit.format import data_pack as DataPack
except ImportError, e:
print 'ImportError: ', e
sys.exit(-1)
def is_ascii(s):
  """Return True when every character of s fits in 7-bit ASCII."""
  for ch in s:
    if ord(ch) >= 128:
      return False
  return True
header_template = \
"""// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MOJO_SERVICES_HTML_VIEWER_BLINK_RESOURCE_MAP_H_
#define MOJO_SERVICES_HTML_VIEWER_BLINK_RESOURCE_MAP_H_
#include <map>
namespace html_viewer {
class BlinkResourceMap {
public:
BlinkResourceMap();
const char* GetResource(int id, int* length);
private:
struct ResourceEntry {
const char* data;
int length;
ResourceEntry()
: data(nullptr)
, length(0) {
}
ResourceEntry(const char* data, int length)
: data(data)
, length(length) {
}
};
typedef std::map<int, ResourceEntry> ResourceMap;
ResourceMap resources_;
};
} // namespace html_viewer
#endif // MOJO_SERVICES_HTML_VIEWER_BLINK_RESOURCE_MAP_H_"""
cpp_template = \
"""// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "$header_file_name"
#include "base/macros.h"
namespace html_viewer {
$definitions
BlinkResourceMap::BlinkResourceMap()
{
$map_initializer
}
const char* BlinkResourceMap::GetResource(int id, int* length)
{
ResourceMap::iterator it = resources_.find(id);
if (it == resources_.end()) {
*length = 0;
return nullptr;
}
*length = it->second.length;
return it->second.data;
}
} // namespace html_viewer"""
def main():
  """Extract ASCII resources from a .pak file and generate a C++ header/source
  pair that embeds them as byte arrays (Python 2 script: uses iteritems()).
  """
  parser = optparse.OptionParser(
      usage='Usage: %prog --pak-file PAK_FILE --header HEADER --cpp CPP\n')
  parser.add_option('-i', '--pak-file', action='store', dest='pak_file',
                    help='The .pak file to be extracted.')
  parser.add_option('', '--header', action='store', dest='header_file',
                    help='Header file to be generated.')
  parser.add_option('', '--cpp', action='store', dest='cpp_file',
                    help='C++ file to be generated.')
  (options, _) = parser.parse_args()
  # All three paths are mandatory.
  if (not options.pak_file or not options.header_file or not options.cpp_file):
    parser.print_help()
    sys.exit(-1)
  header_file = open(options.header_file, 'w+')
  cpp_file = open(options.cpp_file, 'w+')
  pak_contents = DataPack.ReadDataPack(options.pak_file)
  resourceIds = []
  header_contents = dict()
  cpp_contents = dict()
  definitions = []
  # Emit one `const char kResource<id>[]` definition per ASCII resource;
  # non-ASCII (binary) resources are skipped entirely.
  for (resId, data) in pak_contents.resources.iteritems():
    if not is_ascii(data):
      continue
    resourceIds.append(resId)
    hex_values = ['0x{0:02x}'.format(ord(char)) for char in data]
    # Chunk the hex byte list into rows of 12 for readable generated code.
    f = lambda A, n=12: [A[i:i+n] for i in range(0, len(A), n)]
    hex_values_string = ',\n '.join(', '.join(x) for x in f(hex_values))
    cpp_definition = \
        'const char kResource%s[%d] = {\n %s \n};' % \
        (str(resId), len(hex_values), hex_values_string)
    definitions.append(cpp_definition)
  # The header template has no placeholders; substitute() returns it verbatim.
  header_file_contents = Template(header_template).substitute(header_contents)
  header_file.write(header_file_contents)
  header_file.close()
  # Build the BlinkResourceMap constructor body: one insert per resource.
  map_initializer = []
  for resId in resourceIds:
    insert_statement = \
        'resources_.insert(std::pair<int, ResourceEntry>(\n' \
        ' %s, ResourceEntry(kResource%s, arraysize(kResource%s))));'
    map_initializer.append( \
        insert_statement % (str(resId), str(resId), str(resId)))
  cpp_contents['definitions']= '\n'.join(definitions)
  cpp_contents['header_file_name'] = os.path.basename(options.header_file)
  cpp_contents['map_initializer'] = '\n '.join(map_initializer)
  cpp_file_contents = Template(cpp_template).substitute(cpp_contents)
  cpp_file.write(cpp_file_contents)
  cpp_file.close()
if __name__ == '__main__':
main()
|
from marshmallow import fields, validate
from .base import BaseSchema
class PageSchema(BaseSchema):
    """Request schema for paginated endpoints."""
    # 1-based page number; required, non-null, must be >= 1.
    page = fields.Integer(
        required=True, validate=validate.Range(min=1), allow_none=False
    )
|
from flask import Flask, request, render_template
from flask_assistant import Assistant, ask, tell
from steem import Steem
from steem.converter import Converter
from steem.blog import Blog
from steem.account import Account
from steem.amount import Amount
from steemconnect.client import Client
from steemconnect.operations import Follow, Unfollow, Mute, ClaimRewardBalance, Comment, CommentOptions, Vote
import requests, json, os, random, string
St_username = ""  # currently active Steem username (set by the welcome/change intents)
Tag = ''  # last tag filter used for trending queries
s = Steem()  # shared read-only Steem RPC client
c = Converter()  # vests <-> SP conversions
app = Flask(__name__)
assist = Assistant(app, route='/api', project_id=os.environ.get('project_id'))
app.config['INTEGRATIONS'] = ['ACTIONS_ON_GOOGLE'] # To enable Rich Messages
posts = s.get_discussions_by_trending({"limit":"8"}) # To cache the top 8 trending posts
sc = Client(client_id=os.environ.get('client_id'), client_secret=os.environ.get('client_secret'))
class Steemian:
    """Snapshot of a Steem account: reputation, vote worth, SP, wallet, prices.

    All fields are computed eagerly in __init__, which performs several
    network calls (Steem RPC and coinmarketcap).
    """
    def __init__(self, St_username):
        self.username = St_username
        self.data = Account(self.username)
        self.reputation = str(self.data.rep)
        self.upvoteworth = self.calculate_voteworth()
        self.steempower = self.calculate_steempower(True)
        self.availablesp = self.calculate_steempower(False) # To get the amount of Steempower that can be delegated
        self.wallet = self.data.balances
        self.accountworth = self.calculate_accountworth()
        self.steemprice = self.cmc_price('1230') # To get the price of Steem from coinmarketcap
        self.sbdprice = self.cmc_price('1312') # To get the price of Steem Dollars from coinmarketcap
        self.bloglink = 'https://steemit.com/@'+St_username # Link to the user's blog
        self.rewards = [self.data["reward_steem_balance"],self.data["reward_sbd_balance"],self.data["reward_vesting_balance"]]
        # The epoch sentinel below is what the API reports when no power-down is scheduled.
        if self.data["next_vesting_withdrawal"] != "1969-12-31T23:59:59": # True if the user is powering down
            self.powerdown = True
        else:
            self.powerdown = False

    def calculate_voteworth(self): # To calculate the vote worth
        """Estimate the dollar value of this account's upvote, e.g. '$1.23'."""
        reward_fund = s.get_reward_fund()
        sbd_median_price = s.get_current_median_history_price()
        # Effective vests = owned + received - delegated away.
        vests = Amount(self.data['vesting_shares'])+Amount(self.data['received_vesting_shares'])-Amount(self.data['delegated_vesting_shares'])
        vestingShares = int(vests * 1e6)
        rshares = 0.02 * vestingShares
        estimated_upvote = rshares / float(reward_fund['recent_claims']) * Amount(reward_fund['reward_balance']).amount * Amount(sbd_median_price['base']).amount
        # Scale by current voting power (10000 == 100%).
        estimated_upvote = estimated_upvote * (float(self.data['voting_power'])/10000)
        return ('$'+str(round(estimated_upvote, 2)))

    def calculate_steempower(self,cd): # To calculate the steem power
        """Return a human-readable SP summary (cd=True) or the delegatable SP
        as a float (cd=False)."""
        def vests2sp(v): # To convert vests into steem power
            sp = c.vests_to_sp(v)
            return str(round(sp, 1))
        total = float(Amount(self.data['vesting_shares'])+Amount(self.data['received_vesting_shares'])-Amount(self.data['delegated_vesting_shares']))
        owned = Amount(self.data['vesting_shares']).amount
        delegated = Amount(self.data['delegated_vesting_shares']).amount
        received = Amount(self.data['received_vesting_shares']).amount
        if cd: # A switch between availablesp and steempower
            return('Your total Steem Power is %s ... You own %s. You are delegating %s and you are receiving %s.'% (vests2sp(total),vests2sp(owned),vests2sp(delegated), vests2sp(received)))
        else:
            return(float(vests2sp(owned - delegated)))

    def cmc_price(self,id): # To get the price of a given currency
        """Return the USD price of the coinmarketcap currency with this id."""
        r = requests.get("https://api.coinmarketcap.com/v2/ticker/"+id)
        data = r.json()
        return data['data']['quotes']['USD']['price']

    def calculate_accountworth(self): # To calculate the account worth
        """Approximate the account's USD worth (liquid STEEM + SP + SBD)."""
        SBD_price = self.cmc_price('1312')
        STEEM_price = self.cmc_price('1230')
        accountworth = (self.wallet['total']['STEEM'] + self.data.sp)*STEEM_price + self.wallet['total']['SBD']*SBD_price
        return round(accountworth,0)
######################## Functions ######################################
def getpostimg(i): # To get a post's thumbnail
    """Return a thumbnail URL for the cached post at index ``i``.

    Order of preference: dtube video snapshot, first image from the post
    metadata, then a generic "no image available" placeholder.
    """
    global posts
    metadata = posts[i]['json_metadata']
    imagedata = json.loads(metadata)
    try : # dtube videos carry their snapshot hash in the metadata
        imglink = 'https://snap1.d.tube/ipfs/'+imagedata['video']['info']['snaphash'] # To get the video's thumbnail form d.tube
        return(imglink)
    except KeyError: # If it's a regular post
        try:
            return imagedata['image'][0]
        except Exception: # narrowed from bare `except:` so KeyboardInterrupt/SystemExit still propagate
            return 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/15/No_image_available_600_x_450.svg/320px-No_image_available_600_x_450.svg.png' # a "no image available" picture from wikimedia
def eligible_delegation(user, num):
    """Check whether ``user`` may delegate ``num`` SP.

    Returns 'eligible' on success, otherwise a human-readable error string.
    """
    steemian = Steemian(user)
    if steemian.powerdown:
        return ('You are powering down')
    if steemian.availablesp < float(num):
        return ('Insufficient Available Steempower')
    # Not powering down and enough spare SP: delegation is allowed.
    return ('eligible')
def resteem(username, author):
    """Label a feed item: the user's own post vs. a resteem of someone else's."""
    return ('A post by %s' % username) if username == author else ('Resteemed')
def randomperm(length):
    """Return a random lowercase string of ``length`` characters (permlink)."""
    alphabet = string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(length))
##############################################################
# Returns a welcome msg and refreshes the access token
@assist.action('Welcome')
def Welcome():
    """Entry intent: reset any stale SteemConnect token and ask for a username."""
    sc.access_token = None
    return ask('Hello, Steem Voice is here! \nCan you provide me with a valid username?')

# Setting a new username and changing it
@assist.action('Change_username')
@assist.action('Welcome_username')
def r_Welcome(username):
    """Store the username for the session and reset the SteemConnect token."""
    global St_username
    St_username = username
    sc.access_token = None
    return ask('Got it. How can I assist?')

# Is used to calculate the reputation of a given user
@assist.action('reputation')
def r_rep():
    """Reply with the stored user's reputation score."""
    user = Steemian(St_username)
    return ask(user.reputation)

# Is used to calculate the upvote worth of a given user
@assist.action('vote_worth')
def r_voteworth():
    """Reply with the dollar value of the stored user's upvote."""
    user = Steemian(St_username)
    return ask(user.upvoteworth)

# Is used to get a user's latest post
@assist.action('last_post')
def r_last_post():
    """Reply with the title of the user's latest post plus a link-out button."""
    b = Blog(St_username)
    user = Steemian(St_username)
    post = b.take(1)
    resp = ask('Your latest post is: \n'+ post[0]['title'])
    postlink = user.bloglink+'/'+post[0]['permlink']
    resp.link_out('Open the post', postlink) # Is used to create a button that takes you to the post
    return resp
@assist.action('wallet')
def r_desire(desire):
    """Answer wallet queries: steem / sbd / savings / accountworth / steempower."""
    user = Steemian(St_username)
    if desire == 'steem': # Is used to get the available steem of a given user
        return ask('%s STEEM' % user.wallet['available']['STEEM'])
    elif desire == 'sbd': # Is used to get the available sbd of a given user
        return ask('%s Steem Dollars' % user.wallet['available']['SBD'])
    elif desire == 'savings': # Is used to get the savings of a given user
        return ask('You have %s Steem Dollars and %s STEEM in your savings' % (user.wallet['savings']['SBD'],user.wallet['savings']['STEEM']))
    elif desire == 'accountworth': # Is used to calculate the account worth
        return ask('Your account is worth approximately $%i according to coinmarketcap\'s latest prices.' % user.accountworth)
    elif desire == 'steempower': # Is used to calculate the steem power of a given user
        return ask(user.steempower)
    else:
        return ask('Error! Please try again')

@assist.action('price')
def r_price(currency):
    """Answer price queries for STEEM or SBD using coinmarketcap data."""
    user = Steemian(St_username)
    if currency == 'steem': # Is used to get the price of STEEM
        return ask('Steem is now worth $'+str(round(user.steemprice,2))+' according to coinmarketcap.')
    elif currency == 'sbd': # Is used to get the price of SBD
        return ask('Steem Dollars is now worth $'+str(round(user.sbdprice,2))+' according to coinmarketcap.')
    else:
        return ask('Error! Please try again')
@assist.action('trending') # Is used to display the top 8 trending posts (a certain tag can be specified)
def r_trendingposts(CTG,Tag):
    """Show a carousel of the top 8 posts for category ``CTG`` and optional ``Tag``."""
    global posts
    if (Tag != '')or (CTG != 'trending'): # If a certain tag is specified or another category -posts- will be reloaded into the top 8 trending posts of that tag
        # Look the query method up by name instead of eval(): same behavior,
        # but no arbitrary-code-execution risk if CTG ever carries user input.
        posts = getattr(s, 'get_discussions_by_' + CTG)({"tag":Tag,"limit":"8"})
    if Tag == '':
        Tag = 'all tags' # To keep a proper resp statment even when no tag is specified
    if CTG == 'created':
        resp = ask(('Here are the newest posts in %s') % (Tag)).build_carousel()
    else:
        resp = ask(('Here are the top %s posts in %s') % (CTG,Tag)).build_carousel() # To make a new carousel
    for i in range(8): # Add each post to the carousel
        try:
            resp.add_item(posts[i]['title'],
                          key=(str(i)), # This key will be used if the user chooses a certain post
                          img_url=getpostimg(i)
                          )
        except IndexError: # If the available posts are less than 8 (mostly promoted ones)
            break
    return resp
@assist.action('userpostsrep')
@assist.action('r_openfeed')
@assist.action('trendingresp') # To show a card of the post chosen
def r_trendingresp(OPTION):
    """Show a card for the post the user picked from the last carousel."""
    global posts, Option
    Option = int(OPTION) # This is the key of the chosen post
    postlink = 'https://steemit.com/@'+posts[Option]['author']+'/'+posts[Option]['permlink']
    resp = ask('Click the button below to open the post')
    date,time = posts[Option]['created'].split('T')
    resp.card(title=posts[Option]['title'],
              text=('A post by %s created on %s at %s' % (posts[Option]['author'],date,time)),
              img_url=getpostimg(Option),
              link=postlink,
              link_title='Open The post'
              )
    return resp.suggest('Upvote the post', 'Write a comment')

@assist.action('openblog') # Returns a button to the user's blog
def r_openblog():
    """Return a link-out button to the stored user's blog."""
    user = Steemian(St_username)
    return ask('Click the button below to open your blog').link_out('Open Blog',user.bloglink)

@assist.action('openfeed') # Returns a list of posts from the user's feed
def r_feed():
    """Show a carousel with the latest 10 posts from the user's feed."""
    global posts
    posts = s.get_discussions_by_feed({"tag":St_username,"limit":"10"})
    resp = ask('Here are the latest posts from your feed').build_carousel()
    for i in range(10):
        try:
            resp.add_item(posts[i]['title'],
                          key=(str(i)), # This key will be used if the user chooses a certain post
                          img_url=getpostimg(i)
                          )
        except IndexError: # If fewer posts than requested are available
            break
    return resp

# Allows the user to connect their account using Steemconnect
@assist.action('login')
def r_login():
    """Return a SteemConnect OAuth login button."""
    login_url = sc.get_login_url(
        str(os.environ.get('server')) + "/login", # This is the callback URL
        "login,custom_json,comment,vote", # The scopes needed (login allows us to verify the user'steem identity while custom_json allows us to Follow, unfollow and mute)
    )
    resp = ask("Please use the button below to login with SteemConnect")
    resp.link_out('Click Here', login_url) # To return the button that takes the user to the login page
    return resp
# To check if the user successfully connected his account
@assist.action('check')
def r_check():
    """Verify that the SteemConnect login completed and greet the user."""
    if sc.access_token is None: # `is None` identity check instead of `== None`
        return ask('Error, Please try to connect your account')
    else:
        return ask('Hello %s ! You can now use commands such as follow, unfollow, mute ...' % sc.me()["name"])
@assist.action('follow')
def r_follow(inst,username):
    """Follow / unfollow / mute ``username`` depending on ``inst``."""
    try:
        if inst == 'follow': # To follow a certain user
            follow = Follow(sc.me()["name"], username)
            sc.broadcast([follow.to_operation_structure()])
            return ask('Done, you are now following %s' % username)
        elif inst == 'unfollow': # To unfollow a certain user
            unfollow = Unfollow(sc.me()["name"], username)
            sc.broadcast([unfollow.to_operation_structure()])
            return ask('Done, you are no longer following %s' % username)
        elif inst == 'mute': # To mute a certain user
            ignore = Mute(sc.me()["name"], username)
            sc.broadcast([ignore.to_operation_structure()])
            return ask('Done, %s is now muted' % username)
        else:
            return ask('Error, Please try again!')
    except ValueError:
        # sc.me() raises when there is no valid access token.
        return ask('Please connect your account before using this command')

# To check if you are following a user
@assist.action('followingcheck')
def r_followingcheck(username):
    """Tell the user whether they follow ``username``."""
    count = s.get_follow_count(St_username)['following_count'] # To get the total number of following
    thousands = int(count/1000)
    other = count%1000
    lastuser = 0
    flist = []
    # s.get_following has a limit of 1000, so fetch the list in pages of 1000.
    for i in range(thousands):
        flist.extend(s.get_following(St_username,lastuser,'blog',1000))
        lastuser = flist[-1]['following']
    flist.extend(s.get_following(St_username,lastuser,'blog',other))
    cond = False # Not following
    for i in range(count):
        if flist[i]['following'] == username.strip(): # To remove the extra space
            cond = True # Following
    if cond:
        return ask('You are following %s' % username)
    else:
        return ask('You are not following %s' % username)

# To check if a user is following you
@assist.action('followcheck')
def r_followcheck(username):
    """Tell the user whether ``username`` follows them."""
    count = s.get_follow_count(St_username)['follower_count'] # To get the total number of followers
    thousands = int(count/1000)
    other = count%1000
    lastuser = 0
    flist = []
    # Page through the followers 1000 at a time (API limit).
    for i in range(thousands):
        flist.extend(s.get_followers(St_username,lastuser,'blog',1000))
        lastuser = flist[-1]['follower']
    flist.extend(s.get_followers(St_username,lastuser,'blog',other))
    cond = False
    for i in range(count):
        if flist[i]['follower'] == username.strip():
            cond = True
    if cond:
        return ask('%s is following you' % username)
    else:
        return ask('%s is not following you' % username)
# Used to delegate SP
@assist.action('delegation')
def r_delegation(number,username):
    """Offer a SteemConnect link to delegate ``number`` SP to ``username``."""
    check = eligible_delegation(St_username,number)
    if check == 'eligible':
        resp = ask('You can use the link below to delegate using Steemconnect')
        link = ("https://steemconnect.com/sign/delegate-vesting-shares?delegator="+St_username+"&delegatee="+username+"&vesting_shares="+str(number)+"%20SP")
        resp.link_out('Click here', link)
        return resp
    else:
        return ask("Error: "+check) # To show the type of error
# To Claim all rewards
@assist.action('claim')
def r_claim():
    """Claim the connected user's pending steem/sbd/vesting rewards."""
    try:
        user = Steemian(sc.me()["name"])
        # NOTE(review): the account argument is the literal string 'account',
        # not the user's name — looks wrong; confirm against the SteemConnect
        # ClaimRewardBalance API before changing.
        claim_reward_balance = ClaimRewardBalance('account', user.rewards[0], user.rewards[1], user.rewards[2])
        sc.broadcast([claim_reward_balance.to_operation_structure()])
        return ask('You have sucessfully claimed %s, %s and %s' % (user.rewards[0],user.rewards[1],user.rewards[2]))
    except Exception: # narrowed from bare `except:`; most likely the account isn't connected
        return ask('Please connect your account before using this command')
@assist.action('openreplies') # Open a link to replies
def r_openreplies():
    """Return a button linking to the user's recent-replies page."""
    user = Steemian(St_username)
    return ask('Click the button below to open your replies').link_out('Open Replies',(user.bloglink+'/recent-replies'))

@assist.action('opencomments') # Open a link to comments
def r_opencomments():
    """Return a button linking to the user's comments page."""
    user = Steemian(St_username)
    return ask('Click the button below to open your comments').link_out('Open Comments',(user.bloglink+'/comments'))

@assist.action('delegations') # To show the list of delegations
def r_delegations():
    """Show a carousel of the user's active outgoing delegations (max 100)."""
    delegations = s.get_vesting_delegations(St_username, '', 100)
    if len(delegations) == 0:
        return ask('No active delegations')
    else:
        resp = ask('Choose one:').build_carousel()
        for i in range(len(delegations)):
            resp.add_item(delegations[i]['delegatee'],
                          key=(str(i)), # This key will be used if the user chooses a certain entry
                          description= str(round(c.vests_to_sp(Amount(delegations[i]['vesting_shares']).amount)))+" SP", # To convert vests to SP
                          img_url='https://steemitimages.com/u/'+delegations[i]['delegatee']+'/avatar' # To get the avatar image of the delegatee
                          )
        return resp

@assist.action('cdelegations') # To cancel a delegation
def r_cdelegations(OPTION):
    """Show a card with a SteemConnect link that cancels the chosen delegation."""
    OPTION = int(OPTION)
    delegations = s.get_vesting_delegations(St_username, '', 100)
    resp = ask('Click the button below to cancel the delegation')
    resp.card(title=delegations[OPTION]['delegatee'],
              text=str(round(c.vests_to_sp(Amount(delegations[OPTION]['vesting_shares']).amount)))+" SP",
              img_url='https://steemitimages.com/u/'+delegations[OPTION]['delegatee']+'/avatar',
              link=("https://steemconnect.com/sign/delegate-vesting-shares?delegator="+St_username+"&delegatee="+delegations[OPTION]['delegatee']+"&vesting_shares=0%20SP"),
              link_title='Cancel Delegation'
              )
    return resp

# Transfer Steem or SBD
@assist.action('transfer')
def r_transfer(number,currency,username):
    """Return a SteemConnect hot-sign link for a STEEM/SBD transfer."""
    url = sc.hot_sign(
        "transfer",
        {
            "to": username,
            "amount": number+' '+currency.upper(),
        },
    )
    return ask('Click the button below to continue with your transfer:').link_out('Click here',url)

# Return a user's 8 latest posts
@assist.action('userposts')
def r_userposts(username):
    """Show a carousel of ``username``'s 8 latest blog posts (resteems labelled)."""
    global posts
    discussion_query = {
        "tag": username.replace(' ', ''),
        "limit": 8,
    }
    posts = s.get_discussions_by_blog(discussion_query)
    resp = ask('There you go:').build_carousel()
    for i in range(len(posts)):
        resp.add_item(posts[i]['title'],
                      key=(str(i)),
                      description=resteem(username,posts[i]['author']),
                      img_url=getpostimg(i) # Post thumbnail
                      )
    return resp

# To save the comment in a variable called comment and ask the user for confirmation
@assist.action('commentconfirmation')
def r_comment(any):
    """Stash the dictated comment text and ask the user to confirm it."""
    global comment
    comment = any
    return ask('Would you like to confirm this comment: %s' % comment).suggest('Yes','No')
# If the user confirms the comment, the app will broadcast the comment
@assist.action('broadcastcomment')
def r_broadcastcomment(yon):
    """Broadcast the stored comment to the selected post if the user confirmed."""
    try:
        global comment, posts, Option
        if yon != 'Yes':  # guard clause instead of trailing else
            return ask('Canceling comment')
        finalcomment = Comment(
            sc.me()["name"],  # author
            randomperm(10),   # random permlink
            comment,          # body
            parent_author=posts[Option]['author'],
            parent_permlink=posts[Option]['permlink'],
        )
        sc.broadcast([finalcomment.to_operation_structure()])
        return ask('broadcasting %s to %s' % (comment, posts[Option]['title']))
    except Exception:  # narrowed from a bare except; typically sc.me() fails when not connected
        return ask('Please connect your account before using this command')
# To save the upvote percentage in a variable called percent and ask the user for confirmation
@assist.action('upvoteconfirmation')
def r_upvote(number, vote):
    """Store the vote weight and type, then ask the user to confirm."""
    weight = int(number)
    if not (0 <= weight <= 100):  # reject anything outside 0..100
        return ask('Please make sure to enter a valid percent')
    global percent, votetype
    votetype = vote
    if votetype == 'upvote':
        percent = weight
        return ask('Would you like to confirm this upvote: %s percent' % percent).suggest('Yes','No')
    percent = -weight  # downvotes are stored as negative weights
    return ask('Would you like to confirm this downvote: %s percent' % (-1*percent)).suggest('Yes','No')
# If the user confirms the upvote, the app will broadcast the upvote
@assist.action('broadcastupvote')
def r_broadcastupvote(yon):
    """Broadcast the stored (down)vote to the selected post.

    NOTE(review): unlike broadcastcomment, `yon` is never checked here, so the
    vote is broadcast even if the user answered 'No' — confirm whether intended.
    """
    try:
        global percent, posts, Option, votetype
        vote = Vote(sc.me()['name'], posts[Option]["author"], posts[Option]["permlink"], int(percent))
        sc.broadcast([vote.to_operation_structure()])
        if votetype == 'upvote':
            return ask('broadcasting upvote to %s' % posts[Option]['title'])
        return ask('broadcasting downvote to %s' % posts[Option]['title'])
    except Exception:  # narrowed from a bare except; typically the user is not connected
        return ask('Please connect your account before using this command')
# Return a card with the user's name, avatar and description
@assist.action('whois')
def r_whois(username):
    """Reply with a card showing an account's profile name, bio and avatar."""
    profile = Steemian(username).data["json_metadata"]["profile"]
    resp = ask('There you go')
    resp.card(title='Name: %s' % profile['name'],
              text='Description: %s' % profile['about'],
              img_url='https://steemitimages.com/u/'+username+'/avatar',
              )
    return resp
# Enables users to reg their usernames into the database
@assist.action('reguser')
def r_reguser():
    """Register the connected account's username in usernames.json."""
    try:
        account_name = sc.me()['name']  # fetch once instead of three separate API calls
        with open('usernames.json') as old:
            database = json.load(old)
        database[account_name] = account_name
        with open('usernames.json', 'w') as new:
            json.dump(database, new)
        return ask('Done %s was successfully added.' % account_name)  # fixed typo "sucessfully"
    except Exception:  # narrowed from a bare except; sc.me() fails when not connected
        return ask('Please connect your account before using this command')
# Allows setting the access token and Shows the page when user successfully authorizes the app
@app.route('/login')
def loginpage():
    """OAuth callback: store the SteemConnect access token and greet the user."""
    # NOTE(review): the token from the query string is stored on the shared `sc`
    # client — this assumes a single-user deployment; confirm.
    sc.access_token = request.args.get("access_token")
    return render_template('success.html', variable = sc.me()["name"])
# run Flask app
if __name__ == '__main__':
    app.run(debug=True)  # NOTE(review): debug=True enables the Werkzeug debugger — disable in production
import pytest
from ...renderer.VideoExporter import RendererVideoExporter
class TestVideoExporter:
    """Tests for RendererVideoExporter construction and the export flow."""
    # region __init__
    def test_initRaisesErrorOnInvalidParameters(self):
        # Wrong argument types must raise TypeError.
        for bad_args in (
            (0.2, 224, 224, 60),
            ('/path/to/a/file', 'not a number', 224, 60),
            ('/path/to/a/file', 224, 'not a number', 60),
            ('/path/to/a/file', 224, 224, 'not a number'),
        ):
            with pytest.raises(TypeError):
                RendererVideoExporter(*bad_args)
        # Negative width/height/framerate must raise ValueError.
        for bad_args in (
            ('/path/to/a/file', -10, 224, 60),
            ('/path/to/a/file', 224, -10, 60),
            ('/path/to/a/file', 224, 224, -10),
        ):
            with pytest.raises(ValueError):
                RendererVideoExporter(*bad_args)
    # endregion
    def test_createsVideoFile(self, basicRenderer, tmpdir):
        # Render a single frame and check the output file appears on disk.
        videoPath = tmpdir.join("test.mp4")
        videoExporter = RendererVideoExporter(str(videoPath), 224, 224, 30)
        videoExporter.onStartRender(basicRenderer, 1)
        videoExporter.onFrame(basicRenderer)
        videoExporter.onStopRender(basicRenderer)
        assert videoPath.check()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from playhouse.migrate import migrate
logger = logging.getLogger('data')
def forward(migrator):
    # This migration may take some time to run.
    # It adds indexes to integer fields that should have been indexed all
    # along, for many models.
    fetch_indexed_tables = (
        'seed', 'query', 'search', 'webpageversion', 'questionsnapshot',
        'githubproject', 'issue', 'issuecomment', 'issueevent',
    )
    compute_indexed_tables = ('postsnippet', 'postnpminstallpackage', 'task')
    operations = [
        migrator.add_index(table, ('fetch_index',), False)
        for table in fetch_indexed_tables
    ]
    operations += [
        migrator.add_index(table, ('compute_index',), False)
        for table in compute_indexed_tables
    ]
    migrate(*operations)
|
#!/usr/bin/env python
"""botty.py: source code of sushi bot"""
import win32api, win32con, time
__author__ = "Asdf"
__copyright__ = "Copyright 2017, Planet Earth"
# global values for screen size on 130% zoom, get top right corner for x_pad and y_pad
# top left (x,y): 317, 292 and bot right (x,y): 1151, 917
x_pad = 317  # screen-space x of the game window's top-left corner (130% zoom)
y_pad = 292  # screen-space y of the game window's top-left corner
def leftClick():
    """Perform a full left click (press, 0.1 s hold, release) at the current cursor position."""
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
    time.sleep(.1)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
    print("Click.") # completely optional. But nice for debugging purposes.
def leftDown():
    """Press and hold the left mouse button (for drag gestures)."""
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
    time.sleep(.1)
    print('left Down')
def leftUp():
    """Release the left mouse button (ends a drag started by leftDown)."""
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
    time.sleep(.1)
    print('left release')
def mousePos(cord):
    """Move the cursor to game-window coordinates `cord` = (x, y), offset by the window origin."""
    win32api.SetCursorPos((x_pad + cord[0], y_pad + cord[1]))
def get_cords():
    """Debug helper: print the cursor position relative to the game-window origin."""
    x, y = win32api.GetCursorPos()
    x = x - x_pad
    y = y - y_pad
    print(x, y)
def startGame():
    """Click through the four menu screens to start a game.

    The original repeated the mousePos/leftClick/sleep triple four times;
    the sequence is now data-driven with identical click order and timing.
    """
    menu_clicks = (
        (420, 270),  # first menu - Play
        (410, 510),  # second menu - Iphone continue
        (760, 595),  # third menu - Skip
        (410, 500),  # fourth menu
    )
    for cord in menu_clicks:
        mousePos(cord)
        leftClick()
        time.sleep(.1)
# Kick off the menu-clicking sequence as soon as the script is run.
startGame()
|
from time import sleep
from pprint import pformat
from instrumentslib import (
get_instrument_cfg,
instrument,
)
# Load the shared instrument configuration for the BK Precision 1686B supply.
cfg = get_instrument_cfg('bk_1686B')
_Parent = cfg['InstrumentParentClass']  # base driver class chosen by the config
resource_kwargs = cfg['resource_kwargs']
@instrument(resource_kwargs, 'bk_1686B')
class bk_1686B(_Parent):
    # Thin wrapper that paces every I/O call with short sleeps; presumably the
    # 1686B needs settle time between commands — TODO confirm against the manual.
    def write(self, *args, **kwargs):
        # Brief delay before each write, then delegate to the parent driver.
        sleep(0.1)
        return _Parent.write(self, *args, **kwargs)
    def read(self, *args, **kwargs):
        sleep(0.1)
        ans = _Parent.read(self, *args, **kwargs)
        sleep(0.1)
        # NOTE(review): this second read discards its result — presumably it
        # drains an echoed/duplicate reply from the device; confirm intent.
        _Parent.read(self, *args, **kwargs)
        return ans
    def init(self):
        # Apply the configured settings and mark the driver as ready.
        self.configure()
        self.initialized = True
|
# Python 2 script (print statements, integer '/'): RSA CTF solver.
# Reads ciphertext, modulus and a hint from the file 'enc', recovers p and q,
# then decrypts.
buka = open('enc').read().split('\n')
enc = int(buka[0].split(': ')[1])
n = int(buka[1].split(': ')[1])
hint = int(buka[2].split(': ')[1])
# z is treated as p + q below; it is recovered by inverting the hint formula.
z = ((hint - n) / 0xdeadbeef) - 0xdeadbeef # c = 0xdeadbeef
dist = z // 4
p = z // 2
q = z - p
print "p = {}".format(p)
print "q = {}".format(q)
print "p*q = {}".format(p*q)
print "n = {}".format(n)
print "="*40
# binary search, starting from p = q
while p*q != n:
    # keep the invariant p + q == z while homing in on p*q == n
    if p*q > n:
        p += dist
        q = z - p
    elif p*q < n:
        p -= dist
        q = z - p
    dist = dist // 2
    print "p = {}".format(p)
    print "q = {}".format(q)
    print "p*q = {}".format(p*q)
    print "n = {}".format(n)
    print "="*40
print "Dapet cuk!!1!1!!".upper()
print "p = {}".format(p)
print "q = {}".format(q)
# Standard RSA decryption with the recovered factors.
from libnum import *
from Crypto.Util.number import *
e = 65537
phi = (p-1) * (q-1)
d = inverse(e, phi)
print n2s(pow(enc, d, n))
# plots of key continuous distributions
# NOTE: also demonstrates the central limit theorem and the law of large
# numbers; requires the third-party fldr/fldrf modules (imported mid-file).
import numpy as np
import matplotlib.pylab as plt
N = 10000000
B = 100
x = np.arange(B)/B
# uniform
t = np.random.random(N)
u = np.histogram(t, bins=B)[0]
u = u / u.sum()
# normal
t = np.random.normal(0, 1, size=N)
n = np.histogram(t, bins=B)[0]
n = n / n.sum()
# gamma
t = np.random.gamma(5.0, size=N)
g = np.histogram(t, bins=B)[0]
g = g / g.sum()
# beta
t = np.random.beta(5,2, size=N)
b = np.histogram(t, bins=B)[0]
b = b / b.sum()
# All four normalized histograms on a common 0..1 axis (bin index / B).
plt.plot(x,u,color='k',linestyle='solid')
plt.plot(x,n,color='k',linestyle='dotted')
plt.plot(x,g,color='k',linestyle='dashed')
plt.plot(x,b,color='k',linestyle='dashdot')
plt.ylabel("Probability")
plt.tight_layout(pad=0, w_pad=0, h_pad=0)
#plt.savefig("continuous.png", dpi=300)
plt.show()
plt.close()
# central limit theorem
# M means of M beta(5,2) samples each — the means are ~normally distributed.
M = 10000
m = np.zeros(M)
for i in range(M):
    t = np.random.beta(5,2,size=M)
    m[i] = t.mean()
print("Mean of the means = %0.7f" % m.mean())
h,x = np.histogram(m, bins=B)
h = h / h.sum()
plt.bar(x[:-1]+0.5*(x[1]-x[0]), h, width=0.8*(x[1]-x[0]))
plt.xlabel("Mean")
plt.ylabel("Probability")
plt.tight_layout(pad=0, w_pad=0, h_pad=0)
#plt.savefig("central_limit.png", dpi=300)
plt.show()
plt.close()
# Same experiment with a discrete FLDR sampler over a 5-outcome pmf.
from fldrf import fldr_preprocess_float_c
from fldr import fldr_sample
z = fldr_preprocess_float_c([0.1,0.6,0.1,0.1,0.1])
m = np.zeros(M)
for i in range(M):
    t = np.array([fldr_sample(z) for i in range(M)])
    m[i] = t.mean()
print("Mean of the means = %0.7f" % m.mean())
h,x = np.histogram(m, bins=B)
h = h / h.sum()
plt.bar(x[:-1]+0.5*(x[1]-x[0]), h, width=0.8*(x[1]-x[0]))
plt.xlabel("Mean")
plt.ylabel("Probability")
plt.tight_layout(pad=0, w_pad=0, h_pad=0)
#plt.savefig("central_limit_fldr.png", dpi=300)
plt.show()
plt.close()
# Empirical pmf of a single batch of FLDR samples.
t = np.array([fldr_sample(z) for i in range(M)])
h = np.bincount(t)
h = h / h.sum()
plt.bar(np.arange(5),h, width=0.8)
plt.xlabel("Value")
plt.ylabel("Probability")
plt.tight_layout(pad=0, w_pad=0, h_pad=0)
#plt.savefig("pmf_fldr.png", dpi=300)
plt.show()
plt.close()
# Law of large numbers
# Single-sample means converge to the true mean (1) as sample size grows 10^1..10^8.
m = []
for n in np.linspace(1,8,30):
    t = np.random.normal(1,1,size=int(10**n))
    m.append(t.mean())
plt.plot(np.linspace(1,8,30), m)
plt.plot([1,8],[1,1], linestyle="--", color='k')
plt.xlabel("Exponent $10^n$")
plt.ylabel("Single sample mean")
plt.tight_layout(pad=0, w_pad=0, h_pad=0)
#plt.savefig("large_numbers.png", dpi=300)
plt.show()
|
from django import forms
class ContatoForm(forms.Form):
    """Form for submitting a YouTube video with a title and a theme."""
    titulo = forms.CharField(label='Title')
    # (value, label) pairs for the theme radio buttons
    CHOICES = [('1', 'music'), ('2', 'entertainment'),('3', 'news'),('4','politics'),('5','games')]
    tema = forms.ChoiceField(widget=forms.RadioSelect, choices=CHOICES)
    # min==max length 43 — presumably the length of a canonical YouTube watch URL; confirm
    url = forms.CharField(label='YouTube URL Video',max_length=43,min_length=43)
|
def int_to_bin(number, width = 32):
    """Return *number* as a big-endian string of `width` "\x00"/"\x01" characters.

    Negative numbers are encoded in two's complement; bits above `width`
    are truncated, exactly as in the original bit-by-bit loop.
    """
    if number < 0:
        number += 1 << width
    mask = (1 << width) - 1  # keep only the low `width` bits
    return "".join("\x01" if digit == "1" else "\x00"
                   for digit in format(number & mask, "0%db" % width))
# Accept both "\x00"/"\x01" (as produced by int_to_bin) and ASCII "0"/"1".
_bit_values = {"\x00" : 0, "\x01" : 1, "0" : 0, "1" : 1}
def bin_to_int(bits, signed = False):
    """Parse a bit string into an int.

    With `signed`, the first character acts as a sign bit and the result is
    the two's-complement value of the remaining bits.
    """
    if signed and _bit_values[bits[0]] == 1:
        payload, bias = bits[1:], 1 << (len(bits) - 1)
    else:
        payload, bias = bits, 0
    value = 0
    for bit in payload:
        value = (value << 1) | _bit_values[bit]
    return value - bias
def swap_bytes(bits, bytesize = 8):
    """Return *bits* with its byte order reversed.

    The string is split into consecutive chunks of `bytesize` characters
    (the last chunk may be shorter) and the chunks are joined in reverse
    order; bit order inside each chunk is preserved.
    """
    # Slice-and-reverse replaces the original's manual index arithmetic,
    # which also over-allocated one empty slot whenever len(bits) divided
    # evenly by `bytesize` (harmless, but sloppy). Output is identical.
    chunks = [bits[pos : pos + bytesize] for pos in range(0, len(bits), bytesize)]
    return "".join(reversed(chunks))
_char_to_bin = {}
_bin_to_char = {}
# Build byte <-> 8-bit-pattern lookup tables for all 256 byte values.
# The original assigned _bin_to_char twice per character (a duplicated line,
# removed here) and shadowed the builtin `bin` with its loop variable.
for _code in range(256):
    _ch = chr(_code)
    _pattern = int_to_bin(_code, 8)
    _char_to_bin[_ch] = _pattern
    _bin_to_char[_pattern] = _ch
def encode_bin(data):
    """Expand each character of *data* into its 8-character "\x00"/"\x01" pattern."""
    return "".join(map(_char_to_bin.__getitem__, data))
def decode_bin(data):
    """Inverse of encode_bin: collapse every 8-character bit pattern back into one character."""
    assert len(data) & 7 == 0, "data length must be a multiple of 8"
    return "".join(_bin_to_char[data[pos : pos + 8]]
                   for pos in range(0, len(data), 8))
|
# NOTE(review): this looks like generated/obfuscated filler. Several literals
# (01793846, 08153, 093647) are invalid: leading-zero decimals are rejected by
# Python 3, and digits 8/9 make them invalid octals even in Python 2 — this
# block cannot be imported as-is; confirm whether it is ever executed.
rioe=4916520
icp=24396850
dunk="fmnsiue"
efjuvs=396
xafnzbm="psed"
ipuvj=01793846
lukdmn=4937
tdke="helvx"
nebqf=843279
ulfhsji=78416530
zrsfb=95
jgkcxls=24810379
cuni="slf"
puorwvm="rbcvzs"
vfespko=08153
vpuomi=6425901
cld="rmk"
sfyblmg=5863
ohyelq="xeitadl"
ckedwg=093647
xdjt="tfyc"
ezskl="vyp"
jeshdu="rteadg"
ecxi="vqpt"
urhawe="dhmar"
rmfyzlh="mqg"
xyhftu=809
kglptxq=17246
lnvxho="xml"
owrap="vcbneyj"
rzqmb="odamjs"
qoywse="vusolga"
toak="lfcts"
lafuho=52
exyt="eloz"
xoweua=21
import gym
# noinspection PyUnresolvedReferences
import gym_multiplexer
from examples.acs2.boolean_multiplexer.utils import reliable_cl_exists
from lcs.agents import EnvironmentAdapter
from lcs.agents.acs2 import ACS2, Configuration
def mpx_metrics(pop, env):
    """Per-trial metrics: classifier population size and reliability flag."""
    metrics = {'population': len(pop)}
    metrics['reliable_cl_exists'] = reliable_cl_exists(env, pop, ctrl_bits=2)
    return metrics
class MultiplexerAdapter(EnvironmentAdapter):
    """Adapter turning a raw multiplexer observation into a list of string genes."""
    @classmethod
    def to_genotype(cls, env_state):
        # Stringify every element of the environment state.
        return list(map(str, env_state))
if __name__ == '__main__':
    # Load desired environment
    mp = gym.make('boolean-multiplexer-6bit-v0')
    # Create agent
    cfg = Configuration(mp.env.observation_space.n, 2,
                        do_ga=False,
                        environment_adapter=MultiplexerAdapter(),
                        metrics_trial_frequency=50,
                        user_metrics_collector_fcn=mpx_metrics)
    agent = ACS2(cfg)
    # Explore the environment
    population, explore_metrics = agent.explore(mp, 1500)
    # Exploit the environment, reusing the population learned during exploration
    agent = ACS2(cfg, population)
    population, exploit_metrics = agent.exploit(mp, 50)
    # See how it went
    # NOTE(review): only the exploration metrics are printed; exploit_metrics
    # is collected but never shown — confirm whether that is intended.
    for metric in explore_metrics:
        print(metric)
|
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Report entry point: return (columns, data) for the BOM operations report."""
    rows = get_data(filters)      # fetched first, matching the original call order
    cols = get_columns(filters)
    return cols, rows
def get_data(filters):
    """Build one report row per BOM operation.

    The first row of each BOM carries the full BOM fields; subsequent rows of
    the same BOM repeat only the operation/workstation/time columns, giving a
    grouped look in the rendered report.
    """
    data = []
    bom_data = []  # BOM names already emitted with their header fields
    for d in frappe.db.sql("""
		SELECT
			bom.name, bom.item, bom.item_name, bom.uom,
			bomps.operation, bomps.workstation, bomps.time_in_mins
		FROM `tabBOM` bom, `tabBOM Operation` bomps
		WHERE
			bom.docstatus = 1 and bom.is_active = 1 and bom.name = bomps.parent
		""", as_dict=1):
        row = get_args()
        if d.name not in bom_data:
            # first operation of this BOM: include all BOM-level fields
            bom_data.append(d.name)
            row.update(d)
        else:
            # follow-up operation: only the operation-level columns
            row.update({
                "operation": d.operation,
                "workstation": d.workstation,
                "time_in_mins": d.time_in_mins
            })
        data.append(row)
    # Annotate each row with how often its BOM is used as a sub-assembly.
    used_as_subassembly_items = get_bom_count(bom_data)
    for d in data:
        d.used_as_subassembly_items = used_as_subassembly_items.get(d.name, 0)
    return data
def get_bom_count(bom_data):
    """Map BOM name -> number of 'BOM Item' rows that reference it as bom_no."""
    counts = frappe.get_all("BOM Item",
        fields=["count(name) as count", "bom_no"],
        filters={"bom_no": ("in", bom_data)}, group_by="bom_no")
    # group_by yields one row per bom_no, so a dict comprehension is safe here.
    return {row.bom_no: row.count for row in counts}
def get_args():
    """Template report row: every BOM-level column, blank by default."""
    return frappe._dict({field: "" for field in ("name", "item", "item_name", "uom")})
def get_columns(filters):
    """Static column definitions for the BOM operations report."""
    def _link(label, fieldname, options, width):
        # Shorthand for the repeated Link-type column dicts.
        return {
            "label": _(label),
            "options": options,
            "fieldname": fieldname,
            "fieldtype": "Link",
            "width": width
        }
    return [
        _link("BOM ID", "name", "BOM", 140),
        _link("BOM Item Code", "item", "Item", 140),
        {
            "label": _("Item Name"),
            "fieldname": "item_name",
            "fieldtype": "Data",
            "width": 110
        },
        _link("UOM", "uom", "UOM", 140),
        _link("Operation", "operation", "Operation", 120),
        _link("Workstation", "workstation", "Workstation", 110),
        {
            "label": _("Time (In Mins)"),
            "fieldname": "time_in_mins",
            "fieldtype": "Int",
            "width": 140
        },
        {
            "label": _("Sub-assembly BOM Count"),
            "fieldname": "used_as_subassembly_items",
            "fieldtype": "Int",
            "width": 180
        },
    ]
|
#!/usr/bin/env python
import ConfigParser
import collections
import json
import logging
import os
import random
import re
import socket
import sys
import time
from datetime import datetime
from optparse import OptionParser
import requests
'''
This script gathers metric data from opentsdb and use http api to send to Insightfinder
'''
def get_parameters():
    """Parse command-line options into a params dict, applying defaults."""
    usage = "Usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option("-w", "--serverUrl",
                      action="store", dest="serverUrl", help="Server Url")
    parser.add_option("-c", "--chunkLines",
                      action="store", dest="chunkLines", help="Timestamps per chunk for historical data.")
    parser.add_option("-m", "--mode",
                      action="store", dest="mode", help="Data sending mode(streaming/historical)")
    parser.add_option("-s", "--startDate",
                      action="store", dest="startDate", help="Historical data start date")
    parser.add_option("-e", "--endDate",
                      action="store", dest="endDate", help="Historical data end date")
    parser.add_option("-l", "--logLevel",
                      action="store", dest="logLevel", help="Change log verbosity(WARNING: 0, INFO: 1, DEBUG: 2)")
    (options, args) = parser.parse_args()
    params = {}
    if options.serverUrl is None:
        params['serverUrl'] = 'https://app.insightfinder.com'
    else:
        params['serverUrl'] = options.serverUrl
    if options.chunkLines is None:
        params['chunkLines'] = 50
    else:
        params['chunkLines'] = int(options.chunkLines)
    # Anything other than an explicit "historical" means streaming.
    if options.mode is None or options.mode != "historical":
        params['mode'] = "streaming"
    else:
        params['mode'] = "historical"
    # Start/end dates only apply to historical mode.
    if options.startDate is None or options.mode == "streaming":
        params['startDate'] = ""
    else:
        params['startDate'] = options.startDate
    if options.endDate is None or options.mode == "streaming":
        params['endDate'] = ""
    else:
        params['endDate'] = options.endDate
    params['logLevel'] = logging.INFO
    if options.logLevel == '0':
        params['logLevel'] = logging.WARNING
    elif options.logLevel == '1':
        params['logLevel'] = logging.INFO
    # NOTE(review): string comparison — any value >= '2' (e.g. '9') enables
    # DEBUG; relies on Python 2 tolerating a None comparison here.
    elif options.logLevel >= '2':
        params['logLevel'] = logging.DEBUG
    return params
def get_agent_config_vars():
    """Read InsightFinder credentials from config.ini; exit(1) on any missing value."""
    if os.path.exists(os.path.abspath(os.path.join(__file__, os.pardir, "config.ini"))):
        config_parser = ConfigParser.SafeConfigParser()
        config_parser.read(os.path.abspath(os.path.join(__file__, os.pardir, "config.ini")))
        try:
            user_name = config_parser.get('insightfinder', 'user_name')
            license_key = config_parser.get('insightfinder', 'license_key')
            project_name = config_parser.get('insightfinder', 'project_name')
        except ConfigParser.NoOptionError:
            logger.error(
                "Agent not correctly configured. Check config file.")
            sys.exit(1)
        # Empty values are as fatal as missing ones.
        if len(user_name) == 0:
            logger.warning(
                "Agent not correctly configured(user_name). Check config file.")
            sys.exit(1)
        if len(license_key) == 0:
            logger.warning(
                "Agent not correctly configured(license_key). Check config file.")
            sys.exit(1)
        if len(project_name) == 0:
            logger.warning(
                "Agent not correctly configured(project_name). Check config file.")
            sys.exit(1)
        config_vars = {
            "userName": user_name,
            "licenseKey": license_key,
            "projectName": project_name
        }
        return config_vars
    else:
        logger.error(
            "Agent not correctly configured. Check config file.")
        sys.exit(1)
def get_reporting_config_vars():
    """Read ../reporting_config.json and normalize the reporting interval to minutes."""
    reporting_config = {}
    with open(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, "reporting_config.json")), 'r') as f:
        config = json.load(f)
    reporting_interval_string = config['reporting_interval']
    if reporting_interval_string[-1:] == 's':
        # A trailing 's' means the value is in seconds — convert to minutes.
        reporting_interval = float(config['reporting_interval'][:-1])
        reporting_config['reporting_interval'] = float(reporting_interval / 60.0)
    else:
        reporting_config['reporting_interval'] = int(config['reporting_interval'])
    # The three assignments below were duplicated verbatim in the original;
    # one copy suffices (the repeats were no-ops).
    reporting_config['keep_file_days'] = int(config['keep_file_days'])
    reporting_config['prev_endtime'] = config['prev_endtime']
    reporting_config['deltaFields'] = config['delta_fields']
    return reporting_config
def get_opentsdb_config():
    """Read and parse Open TSDB config from config.ini"""
    if os.path.exists(os.path.abspath(os.path.join(__file__, os.pardir, "config.ini"))):
        config_parser = ConfigParser.SafeConfigParser()
        config_parser.read(os.path.abspath(os.path.join(__file__, os.pardir, "config.ini")))
        try:
            opentsdb_url = config_parser.get('opentsdb', 'opentsdb_server_url')
            opentsdb_token = config_parser.get('opentsdb', 'token')
            opentsdb_metrics = config_parser.get('opentsdb', 'metrics')
        except ConfigParser.NoOptionError:
            logger.error(
                "Agent not correctly configured. Check config file.")
            sys.exit(1)
        # Missing URL falls back to a local default rather than aborting.
        if len(opentsdb_url) == 0:
            logger.warning(
                "Agent not correctly configured(OPENTSDB_URL). Check config file. Using \"127.0.0.1:4242\" as default.")
            opentsdb_url = "http://127.0.0.1:4242"
        # Metrics are a comma-separated list; empty means "discover them later".
        if len(opentsdb_metrics) != 0:
            opentsdb_metrics = opentsdb_metrics.split(",")
        else:
            opentsdb_metrics = []
        opentsdb_config = {
            "OPENTSDB_URL": opentsdb_url,
            "OPENTSDB_METRICS": opentsdb_metrics,
            "OPENTSDB_TOKEN": opentsdb_token
        }
    else:
        logger.warning("No config file found. Using defaults.")
        opentsdb_config = {
            "OPENTSDB_URL": "http://127.0.0.1:4242",
            "OPENTSDB_METRICS": "",
            "OPENTSDB_TOKEN": ""
        }
    return opentsdb_config
def save_grouping(metric_grouping):
    """
    Saves the grouping data to grouping.json
    Parameters:
    - `metric_grouping` : metric_name-grouping_id dict
    :return: None
    """
    with open('grouping.json', 'w+') as outfile:
        json.dump(metric_grouping, outfile)
def load_grouping():
    """
    Loads the grouping data from grouping.json
    :return: grouping dict (empty when the file is absent or unparsable)
    """
    if not os.path.isfile('grouping.json'):
        return json.loads("{}")
    logger.debug("Grouping file exists. Loading..")
    with open('grouping.json', 'r+') as f:
        try:
            return json.loads(f.read())
        except ValueError:
            logger.debug("Error parsing grouping.json.")
            return json.loads("{}")
def get_grouping_id(metric_key, metric_grouping):
    """
    Get grouping id for a metric key
    Parameters:
    - `metric_key` : metric key str to get group id.
    - `metric_grouping` : metric_key-grouping id map (mutated on a miss)
    """
    # The original wrapped this in a 3-iteration retry loop, but both branches
    # returned on the first pass, so it reduces to a single lookup-or-assign.
    if metric_key in metric_grouping:
        return int(metric_grouping[metric_key])
    grouping_id = random.randint(GROUPING_START, GROUPING_END)
    metric_grouping[metric_key] = grouping_id
    return grouping_id
def get_metric_list(config):
    """Get available metric list from Open TSDB API"""
    suggest_url = config["OPENTSDB_URL"] + "/api/suggest?type=metrics&q="
    reply = requests.get(suggest_url)
    # Non-200 responses leave the list empty.
    metric_list = reply.json() if reply.status_code == 200 else []
    logger.debug("Get metric list from opentsdb: " + str(metric_list))
    return metric_list
def get_metric_data(config, metric_list, metric_grouping, start_time, end_time, collected_data_map):
    """Get metric data from Open TSDB API"""
    def format_data_entry(json_data_entry):
        # Fold one OpenTSDB series into collected_data_map, keyed by epoch ms.
        metric_name = json_data_entry.get('metric')
        host_name = json_data_entry.get('tags', {}).get('host') or 'unknownHost'
        dps = json_data_entry.get('dps', {})
        metric_value = None
        # Column header: <metric>[<host>]:<grouping id>
        header_field = normalize_key(metric_name) + "[" + host_name + "]:" + str(
            get_grouping_id(metric_name, metric_grouping))
        mtime = 0
        # Keep only the most recent datapoint of the series.
        for stime, val in dps.items():
            if int(stime) > mtime:
                metric_value = val
                mtime = int(stime)
        epoch = mtime * 1000
        if epoch in collected_data_map:
            timestamp_value_map = collected_data_map[epoch]
        else:
            timestamp_value_map = {}
        timestamp_value_map[header_field] = str(metric_value)
        collected_data_map[epoch] = timestamp_value_map
    json_data = {
        "token": config['OPENTSDB_TOKEN'],
        "start": start_time,
        "end": end_time,
        "queries": map(lambda m: {
            "aggregator": "avg",
            "downsample": "1m-avg",
            "metric": m.encode('ascii')
        }, metric_list)
    }
    url = config["OPENTSDB_URL"] + "/api/query"
    response = requests.post(url, data=json.dumps(json_data))
    if response.status_code == 200:
        rawdata_list = response.json()
        logger.debug("Get metric data from opentsdb: " + str(len(rawdata_list)))
        # format metric and save to collected_data_map
        # NOTE: relies on Python 2's eager map(); under Python 3 this lazy map
        # would never execute and the side effects would be lost.
        map(lambda d: format_data_entry(d), rawdata_list)
def send_data(chunk_metric_data):
    """POST one chunk of collected metric rows to the InsightFinder ingest endpoint."""
    send_data_time = time.time()
    # prepare data for metric streaming agent
    to_send_data_dict = dict()
    to_send_data_dict["metricData"] = json.dumps(chunk_metric_data)
    to_send_data_dict["licenseKey"] = agent_config_vars['licenseKey']
    to_send_data_dict["projectName"] = agent_config_vars['projectName']
    to_send_data_dict["userName"] = agent_config_vars['userName']
    to_send_data_dict["instanceName"] = socket.gethostname().partition(".")[0]
    to_send_data_dict["samplingInterval"] = str(int(reporting_config_vars['reporting_interval'] * 60))
    to_send_data_dict["agentType"] = "custom"
    to_send_data_json = json.dumps(to_send_data_dict)
    logger.debug("TotalData: " + str(len(bytearray(to_send_data_json))))
    # send the data (form-encoded). Posting the dict directly removes the
    # original's pointless json.dumps -> json.loads round trip; the payload
    # requests serializes is identical.
    post_url = parameters['serverUrl'] + "/customprojectrawdata"
    response = requests.post(post_url, data=to_send_data_dict)
    if response.status_code == 200:
        logger.info(str(len(bytearray(to_send_data_json))) + " bytes of data are reported.")
    else:
        logger.info("Failed to send data.")
    logger.debug("--- Send data time: %s seconds ---" % (time.time() - send_data_time))
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    # xrange: this script targets Python 2 (see ConfigParser import above).
    for index in xrange(0, len(l), n):
        yield l[index:index + n]
def normalize_key(metric_key):
    """
    Take a single metric key string and return the same string with spaces, slashes and
    non-alphanumeric characters subbed out.
    """
    # Same three substitutions, applied innermost-first: spaces -> '_',
    # slashes -> '-', then strip every remaining disallowed character.
    return NON_ALNUM.sub("", SLASHES.sub("-", SPACES.sub("_", metric_key)))
def set_logger_config(level):
    """Set up logging according to the defined log level"""
    # Get the root logger
    logger_obj = logging.getLogger(__name__)
    # Have to set the root logger level, it defaults to logging.WARNING
    logger_obj.setLevel(level)
    # route INFO and DEBUG logging to stdout from stderr
    logging_handler_out = logging.StreamHandler(sys.stdout)
    logging_handler_out.setLevel(logging.DEBUG)
    # create a logging format
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(process)d - %(threadName)s - %(levelname)s - %(message)s')
    logging_handler_out.setFormatter(formatter)
    logger_obj.addHandler(logging_handler_out)
    # WARNING and above additionally go to stderr.
    # NOTE(review): the stdout handler accepts DEBUG+, so warnings/errors are
    # emitted on BOTH streams — confirm this duplication is intended.
    logging_handler_err = logging.StreamHandler(sys.stderr)
    logging_handler_err.setLevel(logging.WARNING)
    logger_obj.addHandler(logging_handler_err)
    return logger_obj
if __name__ == "__main__":
    # Random grouping-id bounds, metric batch size, and key-sanitizing regexes.
    GROUPING_START = 15000
    GROUPING_END = 20000
    METRIC_CHUNKS = 50
    SPACES = re.compile(r"\s+")
    SLASHES = re.compile(r"\/+")
    NON_ALNUM = re.compile(r"[^a-zA-Z_\-0-9\.]")
    parameters = get_parameters()
    log_level = parameters['logLevel']
    logger = set_logger_config(log_level)
    data_dir = 'data'
    agent_config_vars = get_agent_config_vars()
    reporting_config_vars = get_reporting_config_vars()
    grouping_map = load_grouping()
    # get agent configuration details
    agent_config = get_opentsdb_config()
    # Build the list of (start, end) epoch-second windows to fetch.
    time_list = []
    if parameters['mode'] == 'streaming':
        # get data by cron: one window covering the last reporting interval
        data_end_ts = int(time.time())
        interval_in_secs = int(reporting_config_vars['reporting_interval'] * 60)
        data_start_ts = data_end_ts - interval_in_secs
        time_list = [(data_start_ts, data_end_ts)]
    else:
        # get data from history date: one window per minute between the dates
        start_day = parameters['startDate']
        end_day = parameters['endDate']
        start_day_obj = datetime.strptime(start_day, "%Y-%m-%d")
        start_ts = int(time.mktime(start_day_obj.timetuple()))
        end_day_obj = datetime.strptime(end_day, "%Y-%m-%d")
        end_ts = int(time.mktime(end_day_obj.timetuple()))
        if start_ts >= end_ts:
            logger.error(
                "Agent not correctly configured(historical start date and end date). Check parameters")
            sys.exit(1)
        timeInterval = (end_ts - start_ts) / 60
        time_list = [(start_ts + i * 60, start_ts + (i + 1) * 60) for i in range(timeInterval)]
    try:
        raw_data_map = collections.OrderedDict()
        metric_data = []
        chunk_number = 0
        # get metric list: use the configured list, or discover via the API
        all_metrics_list = agent_config['OPENTSDB_METRICS']
        if len(all_metrics_list) == 0:
            all_metrics_list = get_metric_list(agent_config)
        for data_start_ts, data_end_ts in time_list:
            logger.debug("Getting data from OpenTSDB for range: {}-{}".format(data_start_ts, data_end_ts))
            chunked_metric_list = chunks(all_metrics_list, METRIC_CHUNKS)
            for sub_list in chunked_metric_list:
                # get metric data from opentsdb every SAMPLING_INTERVAL
                get_metric_data(agent_config, sub_list, grouping_map, data_start_ts, data_end_ts, raw_data_map)
                if len(raw_data_map) == 0:
                    logger.error("No data for metrics received from OpenTSDB.")
                    sys.exit()
            # Flush a chunk once enough distinct timestamps have accumulated.
            if len(raw_data_map) >= parameters['chunkLines']:
                min_timestamp = sys.maxsize
                max_timestamp = -sys.maxsize
                for timestamp in raw_data_map.keys():
                    value_map = raw_data_map[timestamp]
                    value_map['timestamp'] = str(timestamp)
                    metric_data.append(value_map)
                    min_timestamp = min(min_timestamp, timestamp)
                    max_timestamp = max(max_timestamp, timestamp)
                chunk_number += 1
                logger.debug("Sending Chunk Number: " + str(chunk_number))
                logger.info("Sending from OpenTSDB for range: {}-{}".format(min_timestamp, max_timestamp))
                send_data(metric_data)
                metric_data = []
                raw_data_map = collections.OrderedDict()
        # send final chunk: whatever is left below the chunkLines threshold
        min_timestamp = sys.maxsize
        max_timestamp = -sys.maxsize
        for timestamp in raw_data_map.keys():
            value_map = raw_data_map[timestamp]
            value_map['timestamp'] = str(timestamp)
            metric_data.append(value_map)
            min_timestamp = min(min_timestamp, timestamp)
            max_timestamp = max(max_timestamp, timestamp)
        if len(metric_data) != 0:
            chunk_number += 1
            logger.debug("Sending Final Chunk: " + str(chunk_number))
            logger.info("Sending from OpenTSDB for range: {}-{}".format(min_timestamp, max_timestamp))
            send_data(metric_data)
        # Persist the (possibly extended) metric-grouping map for the next run.
        save_grouping(grouping_map)
    except Exception as e:
        logger.error("Error sending metric data to InsightFinder.")
        logger.error(e)
import streamlit as st
import pandas as pd
import numpy as np
st.title('Sea ice test')
# NOTE(review): load_data lower-cases all column names, so this mixed-case key
# never matches after loading — see load_data below.
DATE_COLUMN = 'Date'
DATA_URL = ('https://www.seaice.de/nh_awi_amsr2_regional_extent_area.csv')
@st.cache
def load_data():
    """Download the sea-ice CSV, lower-case its columns and parse the date column."""
    data = pd.read_csv(DATA_URL)
    lowercase = lambda x: str(x).lower()
    data.rename(lowercase, axis='columns', inplace=True)
    # Bug fix: every column is lower-cased above, so the original lookup
    # data['Date'] always raised KeyError. Use the lower-cased key instead.
    date_col = DATE_COLUMN.lower()
    data[date_col] = pd.to_datetime(data[date_col])
    return data
# Show a loading placeholder while the (cached) download runs.
data_load_state = st.text('Loading data...')
data = load_data()
data_load_state.text("Done! (using st.cache)")
st.subheader('Raw data')
st.write(data)  # render the full dataframe as an interactive table
|
from dashboard.app import run_app
# Start the dashboard; False presumably disables debug mode — TODO confirm run_app's signature.
run_app(False)
|
import turtle
class Player:
    """Draws and drives the player's ship and its single bullet."""
    def __init__(self, dimension):
        # Window dimension (square window assumed by the margin math below)
        self.dimension = dimension
        # --- player ship ---
        ship = turtle.Turtle()
        ship.hideturtle()
        ship.color("blue")
        ship.shape("triangle")
        ship.penup()
        ship.speed(0)
        # Start near the bottom centre, pointing up.
        ship.setposition(0, -(self.dimension/2) + 40)
        ship.setheading(90)
        ship.showturtle()
        self.player = ship
        # --- bullet (hidden until fired) ---
        shot = turtle.Turtle()
        shot.hideturtle()
        shot.color("yellow")
        shot.shape("triangle")
        shot.penup()
        shot.speed(0)
        shot.setheading(90)
        shot.shapesize(0.5, 0.5)
        self.bullet = shot
        # "ready" until fired, then "fire" while in flight
        self.bulletState = "ready"
        # Speeds scale with the window size.
        self.playerSpeed = 15*self.dimension*0.001
        self.bulletSpeed = 20*self.dimension*0.003
    def moveLeft(self):
        """Step left, clamped at the window's left margin."""
        left_limit = -(self.dimension/2) + 40
        self.player.setx(max(self.player.xcor() - self.playerSpeed, left_limit))
    def moveRight(self):
        """Step right, clamped at the window's right margin."""
        right_limit = (self.dimension/2) - 40
        self.player.setx(min(self.player.xcor() + self.playerSpeed, right_limit))
    def fireBullet(self):
        """Launch the bullet from just above the ship, unless one is already in flight."""
        if self.bulletState == "ready":
            self.bulletState = "fire"
            self.bullet.setposition(self.player.xcor(), self.player.ycor() + 10)
            self.bullet.showturtle()
import secrets
from flask import Flask
from flask_caching import Cache
from flask_cors import CORS
app = Flask(__name__)
# Application-wide configuration.
config = {
    'DEBUG': True,  # NOTE(review): disable outside development
    'CACHE_TYPE': 'SimpleCache',  # in-process cache; not shared across workers
    'CACHE_DEFAULT_TIMEOUT': 300,  # seconds
    # NOTE(review): hard-coded secret key — move to an env var; the `secrets`
    # module is imported above but never used, suggesting that was the intent.
    'SECRET_KEY': '9317f2b87044b56c44ca2502d3f945af',
    'CORS_HEADERS': 'Content-Type'
}
app.config.from_mapping(config)
# pylint: disable=wrong-import-position
from app import routes
|
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
from fastmsa.core import Command
@dataclass
class Allocate(Command):
    # Command: allocate `qty` units of `sku` to the order `orderid`.
    orderid: str
    sku: str
    qty: int
@dataclass
class CreateBatch(Command):
    # Command: create a stock batch `ref` of `qty` units of `sku`;
    # `eta` is None for in-warehouse stock.
    ref: str
    sku: str
    qty: int
    eta: Optional[datetime] = None
@dataclass
class ChangeBatchQuantity(Command):
    # Command: set batch `ref`'s available quantity to `qty`.
    ref: str
    qty: int
|
import os
import asyncio
class Display:
    """A class displaying all stuff.

    `printIn` runs forever, reprinting the queued lines whenever `updating`
    is set; `printQuery` appends messages to the queue.
    """
    def __init__(self):
        super().__init__()
        self.query = []        # lines waiting to be displayed
        self.updating = False  # True when the queue has fresh content
    async def printIn(self):
        """Poll once a second; clear the screen and reprint when there is new content."""
        while 1:
            if self.updating is True:
                # Bug fix: the original evaluated `not Exception` (always
                # False, since the Exception class is truthy), so it always
                # ran 'cls'. Choose the clear command by platform instead.
                os.system('cls' if os.name == 'nt' else 'clear')
                print("\n".join(self.query))
                self.updating = False
                await asyncio.sleep(1)
            else:
                await asyncio.sleep(1)
    async def printQuery(self, *args):
        """Join *args* into one message and queue it; a 'Sell' message clears the queue first."""
        query = " ".join(args)
        if 'Sell' in query:
            self.query.clear()
        self.query.append(query)
        self.updating = True
|
# coding: utf-8
import unittest
import warnings
import pronto
class TestDefinition(unittest.TestCase):
    """Tests for pronto.Definition's repr, with warnings promoted to errors."""
    @classmethod
    def setUpClass(cls):
        # Fail the tests on any warning raised by pronto.
        warnings.simplefilter('error')
    @classmethod
    def tearDownClass(cls):
        # NOTE(review): warnings.defaultaction is an internal attribute of the
        # warnings module, not documented public API — confirm it is still
        # available on the targeted Python versions.
        warnings.simplefilter(warnings.defaultaction)
    def test_repr(self):
        d1 = pronto.Definition("something")
        self.assertEqual(repr(d1), "Definition('something')")
        # repr must include cross-references when present.
        d2 = pronto.Definition("something", xrefs={pronto.Xref("Bgee:fbb")})
        self.assertEqual(repr(d2), "Definition('something', xrefs={Xref('Bgee:fbb')})")
|
def _bundle_file_impl(ctx):
    """Generate <name>.go by running bundle_file.py over the `src` file."""
    out = ctx.actions.declare_file(ctx.attr.name + ".go")
    ctx.actions.run(
        outputs = [out],
        inputs = [ctx.file._bundle_file, ctx.file.src],
        # NOTE(review): "python" is resolved from PATH at execution time rather
        # than through a toolchain — confirm this is acceptable for hermetic builds.
        executable = "python",
        arguments = [
            ctx.file._bundle_file.path,  # generator script
            ctx.file.src.path,           # input asset
            out.path,                    # generated .go file
            ctx.attr.package,            # Go package to emit
            ctx.attr.name,               # base name used by the generator
        ],
    )
    return [DefaultInfo(files = depset([out]))]
# Rule: embed a single file as a generated Go source via bundle_file.py.
bundle_file = rule(
    _bundle_file_impl,
    attrs = {
        # The generator script, pinned to the in-repo copy.
        "_bundle_file": attr.label(
            allow_single_file = True,
            default = Label("//cmd/bb_browser/assets:bundle_file.py"),
        ),
        # Go package name for the generated file.
        "package": attr.string(mandatory = True),
        # The asset to embed.
        "src": attr.label(
            mandatory = True,
            allow_single_file = True,
        ),
    },
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.