import unittest
import pandas as pd
from altcoin_max_price_prediction import preprocess_trade_data
class PreprocessTradeDataTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.main_class = preprocess_trade_data.PreprocessTradeData()
cls.main_class.history_size = 3
cls.main_class.future_size = 3
cls.sample_data_usd = [
{'id': 253, 'market_name': 'USDT-BTC', 'high': 5756.0, 'low': 5565.25000001, 'volume': 3142.91636975,
'last': 5709.62, 'base_volume': 17860090.630093, 'time_stamp': '2017-10-16 23:04:02',
'bid': 5709.62, 'ask': 5719.9, 'open_buy_orders': 7744, 'open_sell_orders': 3574, 'prev_day': 5745.0,
'created': '2015-12-11 06:31:40', 'updated_at': '2017-10-17 01:04:08'},
{'id': 515, 'market_name': 'USDT-BTC', 'high': 5756.0, 'low': 5565.25000001, 'volume': 3134.80686624,
'last': 5709.62, 'base_volume': 17813438.7573089, 'time_stamp': '2017-10-16 23:05:02',
'bid': 5705.00000002, 'ask': 5719.9, 'open_buy_orders': 7746, 'open_sell_orders': 3574, 'prev_day': 5748.0,
'created': '2015-12-11 06:31:40', 'updated_at': '2017-10-17 01:05:08'},
{'id': 777, 'market_name': 'USDT-BTC', 'high': 5756.0, 'low': 5565.25000001, 'volume': 3129.40685073,
'last': 5710.0, 'base_volume': 17782400.6018975, 'time_stamp': '2017-10-16 23:06:03',
'bid': 5713.0, 'ask': 5719.9, 'open_buy_orders': 7747, 'open_sell_orders': 3581, 'prev_day': 5722.03723499,
'created': '2015-12-11 06:31:40', 'updated_at': '2017-10-17 01:06:08'},
{'id': 1039, 'market_name': 'USDT-BTC', 'high': 5756.0, 'low': 5565.25000001, 'volume': 3126.59574809,
'last': 5719.0, 'base_volume': 17766282.7100516, 'time_stamp': '2017-10-16 23:07:03',
'bid': 5713.0, 'ask': 5719.0, 'open_buy_orders': 7752, 'open_sell_orders': 3583, 'prev_day': 5725.0,
'created': '2015-12-11 06:31:40', 'updated_at': '2017-10-17 01:07:08'},
{'id': 1301, 'market_name': 'USDT-BTC', 'high': 5756.0, 'low': 5565.25000001, 'volume': 3130.87751397,
'last': 5723.9, 'base_volume': 17790785.9523331, 'time_stamp': '2017-10-16 23:07:56',
'bid': 5713.0, 'ask': 5723.9, 'open_buy_orders': 7746, 'open_sell_orders': 3583, 'prev_day': 5722.00742303,
'created': '2015-12-11 06:31:40', 'updated_at': '2017-10-17 01:08:08'},
{'id': 1563, 'market_name': 'USDT-BTC', 'high': 5756.0, 'low': 5565.25000001, 'volume': 3131.2529282,
'last': 5723.9, 'base_volume': 17792925.8450579, 'time_stamp': '2017-10-16 23:08:56',
'bid': 5710.00000003, 'ask': 5723.9, 'open_buy_orders': 7748, 'open_sell_orders': 3586, 'prev_day': 5740.0,
'created': '2015-12-11 06:31:40', 'updated_at': '2017-10-17 01:09:08'},
{'id': 1825, 'market_name': 'USDT-BTC', 'high': 5756.0, 'low': 5565.25000001, 'volume': 3133.34108944,
'last': 5722.9, 'base_volume': 17804873.9598486, 'time_stamp': '2017-10-16 23:10:04',
'bid': 5715.0, 'ask': 5722.9, 'open_buy_orders': 7750, 'open_sell_orders': 3589, 'prev_day': 5722.00000001,
'created': '2015-12-11 06:31:40', 'updated_at': '2017-10-17 01:10:08'},
{'id': 2087, 'market_name': 'USDT-BTC', 'high': 5756.0, 'low': 5565.25000001, 'volume': 3136.80773744,
'last': 5723.9, 'base_volume': 17824697.2302864, 'time_stamp': '2017-10-16 23:11:05',
'bid': 5722.7, 'ask': 5723.9, 'open_buy_orders': 7750, 'open_sell_orders': 3585, 'prev_day': 5725.01,
'created': '2015-12-11 06:31:40', 'updated_at': '2017-10-17 01:11:08'},
{'id': 2349, 'market_name': 'USDT-BTC', 'high': 5756.0, 'low': 5565.25000001, 'volume': 3144.62472261,
'last': 5716.16200005, 'base_volume': 17869370.704901, 'time_stamp': '2017-10-16 23:12:08',
'bid': 5716.16200005, 'ask': 5725.0, 'open_buy_orders': 7751, 'open_sell_orders': 3573, 'prev_day': 5735.0,
'created': '2015-12-11 06:31:40', 'updated_at': '2017-10-17 01:12:08'},
{'id': 2611, 'market_name': 'USDT-BTC', 'high': 5756.0, 'low': 5565.25000001, 'volume': 3142.805971,
'last': 5735.0, 'base_volume': 17858922.0652518, 'time_stamp': '2017-10-16 23:13:07',
'bid': 5727.00000001, 'ask': 5735.0, 'open_buy_orders': 7753, 'open_sell_orders': 3561, 'prev_day': 5740.0,
'created': '2015-12-11 06:31:40', 'updated_at': '2017-10-17 01:13:08'}]
cls.sample_data = [
{'id': 106, 'market_name': 'BTC-MUSIC', 'high': 3.74e-06, 'low': 3.21e-06, 'volume': 7769724.20671862,
'last': 3.32e-06, 'base_volume': 26.12908177, 'time_stamp': '2017-10-16 23:04:00',
'bid': 3.32e-06, 'ask': 3.33e-06, 'open_buy_orders': 257, 'open_sell_orders': 8848, 'prev_day': 3.74e-06,
'created': '2017-03-27 19:59:13', 'updated_at': '2017-10-17 01:04:08'},
{'id': 368, 'market_name': 'BTC-MUSIC', 'high': 3.71e-06, 'low': 3.21e-06, 'volume': 7767876.50128578,
'last': 3.34e-06, 'base_volume': 26.1216032, 'time_stamp': '2017-10-16 23:04:58',
'bid': 3.32e-06, 'ask': 3.34e-06, 'open_buy_orders': 257, 'open_sell_orders': 8824, 'prev_day': 3.71e-06,
'created': '2017-03-27 19:59:13', 'updated_at': '2017-10-17 01:05:08'},
{'id': 630, 'market_name': 'BTC-MUSIC', 'high': 3.71e-06, 'low': 3.21e-06, 'volume': 7761950.97742083,
'last': 3.38e-06, 'base_volume': 26.0989871, 'time_stamp': '2017-10-16 23:06:08',
'bid': 3.32e-06, 'ask': 3.38e-06, 'open_buy_orders': 258, 'open_sell_orders': 8845, 'prev_day': 3.71e-06,
'created': '2017-03-27 19:59:13', 'updated_at': '2017-10-17 01:06:08'},
{'id': 892, 'market_name': 'BTC-MUSIC', 'high': 3.71e-06, 'low': 3.21e-06, 'volume': 7761950.97742083,
'last': 3.38e-06, 'base_volume': 26.0989871, 'time_stamp': '2017-10-16 23:06:08',
'bid': 3.32e-06, 'ask': 3.38e-06, 'open_buy_orders': 258, 'open_sell_orders': 8845, 'prev_day': 3.71e-06,
'created': '2017-03-27 19:59:13', 'updated_at': '2017-10-17 01:07:08'},
{'id': 1154, 'market_name': 'BTC-MUSIC', 'high': 3.71e-06, 'low': 3.21e-06, 'volume': 7761950.97742083,
'last': 3.38e-06, 'base_volume': 26.0989871, 'time_stamp': '2017-10-16 23:07:47',
'bid': 3.33e-06, 'ask': 3.38e-06, 'open_buy_orders': 258, 'open_sell_orders': 8845, 'prev_day': 3.71e-06,
'created': '2017-03-27 19:59:13', 'updated_at': '2017-10-17 01:08:08'},
{'id': 1416, 'market_name': 'BTC-MUSIC', 'high': 3.71e-06, 'low': 3.21e-06, 'volume': 7761950.97742083,
'last': 3.38e-06, 'base_volume': 26.0989871, 'time_stamp': '2017-10-16 23:07:47',
'bid': 3.33e-06, 'ask': 3.38e-06, 'open_buy_orders': 258, 'open_sell_orders': 8845, 'prev_day': 3.71e-06,
'created': '2017-03-27 19:59:13', 'updated_at': '2017-10-17 01:09:08'},
{'id': 1678, 'market_name': 'BTC-MUSIC', 'high': 3.71e-06, 'low': 3.21e-06, 'volume': 7761950.97742083,
'last': 3.38e-06, 'base_volume': 26.0989871, 'time_stamp': '2017-10-16 23:07:47',
'bid': 3.33e-06, 'ask': 3.38e-06, 'open_buy_orders': 258, 'open_sell_orders': 8845, 'prev_day': 3.71e-06,
'created': '2017-03-27 19:59:13', 'updated_at': '2017-10-17 01:10:08'},
{'id': 1940, 'market_name': 'BTC-MUSIC', 'high': 3.71e-06, 'low': 3.21e-06, 'volume': 7761950.97742083,
'last': 3.38e-06, 'base_volume': 26.0989871, 'time_stamp': '2017-10-16 23:07:47',
'bid': 3.33e-06, 'ask': 3.38e-06, 'open_buy_orders': 258, 'open_sell_orders': 8845, 'prev_day': 3.71e-06,
'created': '2017-03-27 19:59:13', 'updated_at': '2017-10-17 01:11:08'},
{'id': 2202, 'market_name': 'BTC-MUSIC', 'high': 3.69e-06, 'low': 3.21e-06, 'volume': 7746010.66199945,
'last': 3.38e-06, 'base_volume': 26.04065161, 'time_stamp': '2017-10-16 23:11:26',
'bid': 3.33e-06, 'ask': 3.38e-06, 'open_buy_orders': 265, 'open_sell_orders': 8855, 'prev_day': 3.59e-06,
'created': '2017-03-27 19:59:13', 'updated_at': '2017-10-17 01:12:08'},
{'id': 2464, 'market_name': 'BTC-MUSIC', 'high': 3.69e-06, 'low': 3.21e-06, 'volume': 7746180.1648616,
'last': 3.33e-06, 'base_volume': 26.04121605, 'time_stamp': '2017-10-16 23:12:44',
'bid': 3.33e-06, 'ask': 3.37e-06, 'open_buy_orders': 263, 'open_sell_orders': 8856, 'prev_day': 3.59e-06,
'created': '2017-03-27 19:59:13', 'updated_at': '2017-10-17 01:13:08'}]
def test_get_old(self):
sample_df = pd.DataFrame(self.sample_data_usd)
sample_df['usdt_btc_last'] = (1 / sample_df['last']).round(8)
        sample_df_with_old = self.main_class.get_old(sample_df, ['usdt_btc_last'])
self.assertEqual(sample_df_with_old['usdt_btc_last_old_1'].iloc[0], 1)
def test_get_new(self):
sample_df = pd.DataFrame(self.sample_data)
sample_df_with_new = self.main_class.get_new(sample_df)
self.assertEqual(sample_df_with_new['last_max'].iloc[0], 0.01807)
def test_remove_no_calculated_old_and_new_data(self):
sample_df = pd.DataFrame(self.sample_data)
sample_df_with_new = self.main_class.remove_no_calculated_old_and_new_data(sample_df)
self.assertEqual(sample_df_with_new.shape[0], 3)
def test_ticker_merge_with_usd(self):
sample_df_usd = pd.DataFrame(self.sample_data_usd)
sample_df = pd.DataFrame(self.sample_data)
sample_df_merge = self.main_class.ticker_merge_with_usd(sample_df, sample_df_usd)
self.assertEqual(sample_df_merge.shape[0], 10)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
import os
from ..glue.datasets import GlueDataset
from ..common import SentenceExample
__all__ = ["AGNEWSDataset", "TRECDataset", "DBPEDIADataset", "YELPDataset"]
class AGNEWSDataset(GlueDataset):
    def __init__(self, data_dir):
        self.name = "agnews"
        self.data_dir = data_dir
self.trn_egs = self.get_split_examples("train_split")
self.val_egs = self.get_split_examples("dev_split")
self.tst_egs = self.get_split_examples("test_split")
def get_split_examples(self, which_split):
where_ = os.path.join(self.data_dir, "{}.tsv".format(which_split))
print("[INFO] {} is looking for {}".format(self.__class__.__name__, where_))
return self._create_examples(where_, which_split)
def get_labels(self):
return ["1", "2", "3", "4"]
def _create_examples(self, input_file, which_split):
"""parse and convert raw string to SentencePairExample"""
sentence_egs = []
with open(input_file, "r") as f:
for idx, line in enumerate(f):
line = line.strip().split("\t")
label = line[0]
assert int(label) in [1, 2, 3, 4]
text_a = line[1]
uid = "%s-%s" % (which_split, idx)
sentence_egs.append(
SentenceExample(uid=uid, text_a=text_a, label=label)
)
return sentence_egs
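# Note (an assumption inferred from _create_examples above): each "<split>.tsv"
# is expected to hold one example per line, tab-separated as "<label>\t<text>".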
class TRECDataset(GlueDataset):
    def __init__(self, data_dir):
        self.name = "trec"
        self.data_dir = data_dir
self.trn_egs = self.get_split_examples("train_split")
self.val_egs = self.get_split_examples("dev_split")
self.tst_egs = self.get_split_examples("test_split")
def get_split_examples(self, which_split):
where_ = os.path.join(self.data_dir, "{}.tsv".format(which_split))
print("[INFO] {} is looking for {}".format(self.__class__.__name__, where_))
return self._create_examples(where_, which_split)
def get_labels(self):
return ["DESC", "ENTY", "ABBR", "HUM", "NUM", "LOC"]
def _create_examples(self, input_file, which_split):
"""parse and convert raw string to SentencePairExample"""
sentence_egs = []
with open(input_file, "r") as f:
for idx, line in enumerate(f):
line = line.strip().split("\t")
label = line[0]
text_a = line[1]
uid = "%s-%s" % (which_split, idx)
sentence_egs.append(
SentenceExample(uid=uid, text_a=text_a, label=label)
)
return sentence_egs
class DBPEDIADataset(GlueDataset):
    def __init__(self, data_dir):
        self.name = "dbpedia"
        self.data_dir = data_dir
self.trn_egs = self.get_split_examples("train_split")
self.val_egs = self.get_split_examples("dev_split")
self.tst_egs = self.get_split_examples("test_split")
def get_split_examples(self, which_split):
where_ = os.path.join(self.data_dir, "{}.tsv".format(which_split))
print("[INFO] {} is looking for {}".format(self.__class__.__name__, where_))
return self._create_examples(where_, which_split)
def get_labels(self):
return [str(x) for x in range(1, 15)]
def _create_examples(self, input_file, which_split):
"""parse and convert raw string to SentencePairExample"""
sentence_egs = []
with open(input_file, "r") as f:
for idx, line in enumerate(f):
line = line.strip().split("\t")
label = line[0]
text_a = line[1]
uid = "%s-%s" % (which_split, idx)
sentence_egs.append(
SentenceExample(uid=uid, text_a=text_a, label=label)
)
return sentence_egs
class YELPDataset(GlueDataset):
    def __init__(self, data_dir):
        self.name = "yelp2"
        self.data_dir = data_dir
self.trn_egs = self.get_split_examples("train_split")
self.val_egs = self.get_split_examples("dev_split")
# self.tst_egs = self.get_split_examples("test_split")
def get_split_examples(self, which_split):
where_ = os.path.join(self.data_dir, "{}.tsv".format(which_split))
print("[INFO] {} is looking for {}".format(self.__class__.__name__, where_))
return self._create_examples(where_, which_split)
def get_labels(self):
return ["0", "1"]
def _create_examples(self, input_file, which_split):
"""parse and convert raw string to SentencePairExample"""
sentence_egs = []
with open(input_file, "r") as f:
for idx, line in enumerate(f):
line = line.strip().split("\t")
label = line[0]
text_a = line[1]
uid = "%s-%s" % (which_split, idx)
sentence_egs.append(
SentenceExample(uid=uid, text_a=text_a, label=label)
)
return sentence_egs
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input pipe for feeding examples to a Seq2Label model graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from research.seq2species.protos import seq2label_pb2
from research.seq2species import seq2label_utils
DNA_BASES = tuple('ACGT')
NUM_DNA_BASES = len(DNA_BASES)
# Possible FASTA characters/IUPAC ambiguity codes.
# See https://en.wikipedia.org/wiki/Nucleic_acid_notation.
AMBIGUITY_CODES = {
'K': 'GT',
'M': 'AC',
'R': 'AG',
'Y': 'CT',
'S': 'CG',
'W': 'AT',
'B': 'CGT',
'V': 'ACG',
'H': 'ACT',
'D': 'AGT',
'X': 'ACGT',
'N': 'ACGT'
}
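# For example, 'K' ("G or T") is encoded below as the distribution
# (0.0, 0.0, 0.5, 0.5) over (A, C, G, T), and 'N' as the uniform
# (0.25, 0.25, 0.25, 0.25).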
def load_dataset_info(dataset_info_path):
"""Load a `Seq2LabelDatasetInfo` from a serialized text proto file."""
dataset_info = seq2label_pb2.Seq2LabelDatasetInfo()
with tf.gfile.Open(dataset_info_path, 'r') as f:
text_format.Parse(f.read(), dataset_info)
return dataset_info
class _InputEncoding(object):
"""A helper class providing the graph operations needed to encode input.
Instantiation of an _InputEncoding will write on the default TF graph, so it
should only be instantiated inside the `input_fn`.
Attributes:
mode: `tf.estimator.ModeKeys`; the execution mode {TRAIN, EVAL, INFER}.
targets: list of strings; the names of the labels of interest (e.g.
"species").
dna_bases: a tuple of the recognized DNA alphabet.
n_bases: the size of the DNA alphabet.
all_characters: list of recognized alphabet, including ambiguity codes.
label_values: a tuple of strings, the possible label values of the
prediction target.
n_labels: the size of label_values
fixed_read_length: an integer value of the statically-known read length, or
None if the read length is to be determined dynamically.
"""
def __init__(self,
dataset_info,
mode,
targets,
noise_rate=0.0,
fixed_read_length=None):
self.mode = mode
self.targets = targets
self.dna_bases = DNA_BASES
self.n_bases = NUM_DNA_BASES
self.all_characters = list(DNA_BASES) + sorted(AMBIGUITY_CODES.keys())
self.character_encodings = np.concatenate(
[[self._character_to_base_distribution(char)]
for char in self.all_characters],
axis=0)
all_legal_label_values = seq2label_utils.get_all_label_values(dataset_info)
# TF lookup tables.
self.characters_table = tf.contrib.lookup.index_table_from_tensor(
mapping=self.all_characters)
self.label_tables = {
target: tf.contrib.lookup.index_table_from_tensor(
all_legal_label_values[target])
for target in targets
}
self.fixed_read_length = fixed_read_length
self.noise_rate = noise_rate
def _character_to_base_distribution(self, char):
"""Maps the given character to a probability distribution over DNA bases.
Args:
char: character to be encoded as a probability distribution over bases.
Returns:
Array of size (self.n_bases,) representing the identity of the given
character as a distribution over the possible DNA bases, self.dna_bases.
Raises:
ValueError: if the given character is not contained in the recognized
alphabet, self.all_characters.
"""
if char not in self.all_characters:
raise ValueError(
'Base distribution requested for unrecognized character %s.' % char)
possible_bases = AMBIGUITY_CODES[char] if char in AMBIGUITY_CODES else char
base_indices = [self.dna_bases.index(base) for base in possible_bases]
probability_weight = 1.0 / len(possible_bases)
        distribution = np.zeros(self.n_bases)
distribution[base_indices] = probability_weight
return distribution
def encode_read(self, string_seq):
"""Converts the input read sequence to one-hot encoding.
Args:
string_seq: tf.String; input read sequence.
Returns:
Input read sequence as a one-hot encoded Tensor, with depth and ordering
of one-hot encoding determined by the given bases. Ambiguous characters
such as "N" and "S" are encoded as a probability distribution over the
possible bases they represent.
"""
with tf.variable_scope('encode_read'):
read = tf.string_split([string_seq], delimiter='').values
read = self.characters_table.lookup(read)
read = tf.cast(tf.gather(self.character_encodings, read), tf.float32)
if self.fixed_read_length:
read = tf.reshape(read, (self.fixed_read_length, self.n_bases))
return read
def encode_label(self, target, string_label):
"""Converts the label value to an integer encoding.
Args:
target: str; the target name.
string_label: tf.String; value of the label for the current input read.
Returns:
Given label value as an index into the possible_target_values.
"""
with tf.variable_scope('encode_label/{}'.format(target)):
return tf.cast(self.label_tables[target].lookup(string_label), tf.int32)
def _empty_label(self):
return tf.constant((), dtype=tf.int32, shape=())
def parse_single_tfexample(self, serialized_example):
"""Parses a tf.train.Example proto to a one-hot encoded read, label pair.
Injects noise into the incoming tf.train.Example's read sequence
when noise_rate is non-zero.
Args:
serialized_example: string; the serialized tf.train.Example proto
containing the read sequence and label value of interest as
tf.FixedLenFeatures.
Returns:
Tuple (features, labels) of dicts for the input features and prediction
targets.
"""
with tf.variable_scope('parse_single_tfexample'):
features_spec = {'sequence': tf.FixedLenFeature([], tf.string)}
for target in self.targets:
features_spec[target] = tf.FixedLenFeature([], tf.string)
features = tf.parse_single_example(
serialized_example, features=features_spec)
if self.noise_rate > 0.0:
read_sequence = tf.py_func(seq2label_utils.add_read_noise,
[features['sequence'], self.noise_rate],
(tf.string))
else:
read_sequence = features['sequence']
read_sequence = self.encode_read(read_sequence)
read_features = {'sequence': read_sequence}
if self.mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
label = {
target: self.encode_label(target, features[target])
for target in self.targets
}
else:
label = {target: self._empty_label() for target in self.targets}
return read_features, label
class InputDataset(object):
"""A class providing access to input data for the Seq2Label model.
Attributes:
mode: `tf.estimator.ModeKeys`; the execution mode {TRAIN, EVAL, INFER}.
targets: list of strings; the names of the labels of interest (e.g.
"species").
dataset_info: a `Seq2LabelDatasetInfo` message reflecting the dataset
metadata.
initializer: the TF initializer op for the underlying iterator, which
will rewind the iterator.
is_train: Boolean indicating whether or not the execution mode is TRAIN.
"""
def __init__(self,
mode,
targets,
dataset_info,
train_epochs=None,
noise_rate=0.0,
random_seed=None,
input_tfrecord_files=None,
fixed_read_length=None,
ensure_constant_batch_size=False,
num_parallel_calls=32):
"""Constructor for InputDataset.
Args:
mode: `tf.estimator.ModeKeys`; the execution mode {TRAIN, EVAL, INFER}.
targets: list of strings; the names of the labels of interest (e.g.
"species").
dataset_info: a `Seq2LabelDatasetInfo` message reflecting the dataset
metadata.
train_epochs: the number of training epochs to perform, if mode==TRAIN.
noise_rate: float [0.0, 1.0] specifying rate at which to inject
base-flipping noise into the read sequences.
random_seed: seed to be used for shuffling, if mode==TRAIN.
input_tfrecord_files: a list of filenames for TFRecords of TF examples.
fixed_read_length: an integer value of the statically-known read length,
or None if the read length is to be determined dynamically. The read
length must be known statically for TPU execution.
ensure_constant_batch_size: ensure a constant batch size at the expense of
discarding the last "short" batch. This also gives us a statically
constant batch size, which is essential for e.g. the TPU platform.
num_parallel_calls: the number of dataset elements to process in parallel.
If None, elements will be processed sequentially.
"""
self.input_tfrecord_files = input_tfrecord_files
self.mode = mode
self.targets = targets
self.dataset_info = dataset_info
self._train_epochs = train_epochs
self._noise_rate = noise_rate
self._random_seed = random_seed
if random_seed is not None:
np.random.seed(random_seed)
self._fixed_read_length = fixed_read_length
self._ensure_constant_batch_size = ensure_constant_batch_size
self._num_parallel_calls = num_parallel_calls
@staticmethod
def from_tfrecord_files(input_tfrecord_files, *args, **kwargs):
return InputDataset(
*args, input_tfrecord_files=input_tfrecord_files, **kwargs)
@property
def is_train(self):
return self.mode == tf.estimator.ModeKeys.TRAIN
def input_fn(self, params):
"""Supplies input for the model.
This function supplies input to our model as a function of the mode.
Args:
params: a dictionary, containing:
- params['batch_size']: the integer batch size.
Returns:
A tuple of two values as follows:
1) the *features* dict, containing a tensor value for keys as follows:
- "sequence" - the encoded read input sequence.
      2) the *labels* dict, containing a key for `target`, whose value is:
- a string Tensor value (in TRAIN/EVAL mode), or
- a blank Tensor (PREDICT mode).
"""
randomize_input = self.is_train
batch_size = params['batch_size']
encoding = _InputEncoding(
self.dataset_info,
self.mode,
self.targets,
noise_rate=self._noise_rate,
fixed_read_length=self._fixed_read_length)
dataset = tf.data.TFRecordDataset(self.input_tfrecord_files)
dataset = dataset.map(
encoding.parse_single_tfexample,
num_parallel_calls=self._num_parallel_calls)
dataset = dataset.repeat(self._train_epochs if self.is_train else 1)
if randomize_input:
dataset = dataset.shuffle(
buffer_size=max(1000, batch_size), seed=self._random_seed)
if self._ensure_constant_batch_size:
# Only take batches of *exactly* size batch_size; then we get a
# statically knowable batch shape.
dataset = dataset.apply(
tf.contrib.data.batch_and_drop_remainder(batch_size))
else:
dataset = dataset.batch(batch_size)
# Prefetch to allow infeed to be in parallel with model computations.
dataset = dataset.prefetch(2)
# Use initializable iterator to support table lookups.
iterator = dataset.make_initializable_iterator()
self.initializer = iterator.initializer
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
features, labels = iterator.get_next()
return (features, labels)
|
def make_matrix(rows=0, columns=0, list_of_list=[[]]):
'''
(int, int, list of list) -> list of list (i.e. matrix)
    Return a list of lists (i.e. a matrix) from "list_of_list" if given,
    or, if no "list_of_list" parameter is given, prompt the user to type
    in values for each row and return a matrix with dimensions:
    rows x columns.
'''
if list_of_list == [[]]:
matrix = make_matrix_manually(rows, columns)
return matrix
else:
rows = size_of(list_of_list)
columns = size_of(list_of_list[0])
for item in list_of_list:
if size_of(item) != size_of(list_of_list[0]):
print('The number of columns in every row should be equal, but isn\'t!')
return None
matrix = list_of_list
return matrix
def make_matrix_manually(rows=0, columns=0):
'''
(int, int) -> list of list (i.e. matrix)
    Prompt the user to type in values for each row and return a matrix
    with dimensions: rows x columns.
'''
matrix = []
for i in range(rows):
        print('Type in values for ROW', i+1, 'separated by commas: ', end='')
current_row = convert_str_into_list(input())
matrix.append(current_row)
if size_of(current_row) != columns:
            print('Number of values differs from the declared number of columns!')
return None
return matrix
def make_Id_matrix(size=1):
'''
(int) -> list of list (i.e. matrix)
    Return an identity matrix (1's across the diagonal and all other entries 0's)
    with dimensions: size x size.
'''
Id_matrix = []
for i in range(1, size+1):
current_row = convert_str_into_list('0,'*(i-1) + '1,' + '0,'*(size-i))
Id_matrix.append(current_row)
return Id_matrix
def convert_str_into_list(string):
'''
(str)-> list of numbers
Return a list of numbers from a string.
Precondition: the string should consist of numbers separated by commas.
'''
    result = []  # renamed to avoid shadowing the built-in name "list"
# step 1: remove all empty spaces.
i = 0
length = len(string)
while i <=(length-1):
if string[i] == ' ':
string = string[:i] + string[i+1:]
length = len(string)
else:
i += 1 # (a += b) is equivalent to (a = a + b)
    # step 2: extract sections separated by commas, turn them into floats
    # and append them to the result list.
    i = 0
    for j in range(len(string) + 1):
        if j == len(string) or string[j] == ',':
            item = string[i:j]
            i = j + 1
            if item != '':
                result.append(float(item))
    return result
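# Example: convert_str_into_list('1, 2.5, 3') -> [1.0, 2.5, 3.0]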
# *values means that we do not know up front how many
# parameters (arguments) we're going to pass to the function.
def convert_into_list(*values):
'''
(items separated by commas) -> list
Return a list of values.
(Return values in the form a variable of type LIST.)
'''
    result = []
    for value in values:
        result.append(value)
    return result
def size_of(items):
    '''
    (list) -> int
    Return the number of entries (items) in a given list.
    '''
    size = 0
    for item in items:
        size = size + 1
    return size
def add_matrix(matrix1, matrix2):
'''
(list of list, list of list) -> list of list
    Return the result of addition of two matrices: matrix1 and matrix2.
    Precondition: matrix1 and matrix2 have to have the same dimensions.
'''
if size_of(matrix1) != size_of(matrix2):
        print('Error: matrices do not have the same dimensions (size)!')
return None
matrix_sum = []
for i in range(size_of(matrix1)):
if size_of(matrix1[i]) != size_of(matrix2[i]):
            print('Error: matrices do not have the same dimensions (size)!')
return None
matrix_sum.append([])
for j in range(size_of(matrix1[i])):
matrix_sum[i].append(matrix1[i][j] + matrix2[i][j])
return matrix_sum
def neg_matrix(matrix1):
'''
(list of list) -> list of list
Return the result of the operation of negation on matrix1.
'''
matrix_n = []
for i in range(size_of(matrix1)):
matrix_n.append([])
for j in range(size_of(matrix1[i])):
matrix_n[i].append(-matrix1[i][j])
return matrix_n
def subtract_matrix(matrix1, matrix2):
    '''
    (list of list, list of list) -> list of list
    Return the result of subtraction of two matrices: matrix1 and matrix2.
    Precondition: matrix1 and matrix2 have to have the same dimensions.
    '''
'''
sub_matrix = add_matrix(matrix1, neg_matrix(matrix2))
return sub_matrix
def multiply_matrix_by_float(arg1, matrix1):
'''
(number, list of list) -> list of list
Return the result of multiplication of matrix1 by arg1.
'''
matrix_new = []
for i in range(size_of(matrix1)):
matrix_new.append([])
for j in range(size_of(matrix1[i])):
matrix_new[i].append(arg1 * matrix1[i][j])
return matrix_new
def multiply_matrix_by_matrix(matrix1, matrix2):
'''
(list of list, list of list) -> list of list
Return the result of multiplication of matrix1 by matrix2.
'''
matrix_new = []
#
# Checking if matrices can be multiplied.
#
# rows = matrix_and_list_functions.size_of(Matrix_name)
# columns = matrix_and_list_functions.size_of(Matrix_name[0])
#
if size_of(matrix1[0]) == size_of(matrix2):
#
# implementing Matrix multiplication here.
#
for i in range(size_of(matrix1)):
matrix_new.append([])
for j in range(size_of(matrix2[0])):
ABij = 0
for k in range(size_of(matrix1[0])):
ABij = ABij + (matrix1[i][k]*matrix2[k][j])
matrix_new[i].append(ABij)
return matrix_new
else:
print('Error: The number of columns in matrix1 has to be equal to the number of rows in matrix2!')
return []
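# Minimal usage sketch (hypothetical values) exercising the helpers above:
if __name__ == '__main__':
    a = make_matrix(list_of_list=[[1, 2], [3, 4]])
    b = make_Id_matrix(2)
    print(add_matrix(a, b))                 # [[2.0, 2.0], [3.0, 5.0]]
    print(multiply_matrix_by_float(2, a))   # [[2, 4], [6, 8]]
    print(multiply_matrix_by_matrix(a, b))  # [[1.0, 2.0], [3.0, 4.0]]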
|
"""Classes for the configuration of hermes-audio-server."""
import json
from pathlib import Path
from hermes_audio_server.config.mqtt import MQTTConfig
from hermes_audio_server.config.vad import VADConfig
from hermes_audio_server.exceptions import ConfigurationFileNotFoundError
# Default values
DEFAULT_CONFIG = '/etc/hermes-audio-server.json'
DEFAULT_SITE = 'default'
# Keys in the JSON configuration file
SITE = 'site'
MQTT = 'mqtt'
VAD = 'vad'
# TODO: Define __str__() with explicit settings for debugging.
class ServerConfig:
"""This class represents the configuration of a Hermes audio server.
Attributes:
site (str): The site ID of the audio server.
mqtt (:class:`.MQTTConfig`): The MQTT options of the configuration.
vad (:class:`.VADConfig`): The VAD options of the configuration.
"""
def __init__(self, site='default', mqtt=None, vad=None):
"""Initialize a :class:`.ServerConfig` object.
Args:
site (str): The site ID of the Hermes audio server. Defaults
to 'default'.
mqtt (:class:`.MQTTConfig`, optional): The MQTT connection
settings. Defaults to a default :class:`.MQTTConfig` object.
vad (:class:`.VADConfig`, optional): The VAD settings. Defaults
to a default :class:`.VADConfig` object, which disables voice
activity detection.
"""
if mqtt is None:
self.mqtt = MQTTConfig()
else:
self.mqtt = mqtt
if vad is None:
self.vad = VADConfig()
else:
self.vad = vad
self.site = site
@classmethod
def from_json_file(cls, filename=None):
"""Initialize a :class:`.ServerConfig` object with settings
from a JSON file.
Args:
filename (str): The filename of a JSON file with the settings.
                Defaults to '/etc/hermes-audio-server.json'.
Returns:
:class:`.ServerConfig`: An object with the settings
of the Hermes Audio Server.
The :attr:`mqtt` attribute of the :class:`.ServerConfig`
object is initialized with the MQTT connection settings from the
configuration file, or the default values (hostname 'localhost' and
port number 1883) if the settings are not specified.
The :attr:`site` attribute of the :class:`.ServerConfig`
object is initialized with the setting from the configuration file,
        or 'default' if the setting is not specified.
The :attr:`vad` attribute of the :class:`.ServerConfig` object is
initialized with the settings from the configuration file, or not
enabled when not specified.
Raises:
:exc:`ConfigurationFileNotFoundError`: If :attr:`filename` doesn't
exist.
:exc:`PermissionError`: If we have no read permissions for
:attr:`filename`.
:exc:`JSONDecodeError`: If :attr:`filename` doesn't have a valid
JSON syntax.
The JSON file should have the following format:
{
"site": "default",
"mqtt": {
"host": "localhost",
"port": 1883,
"authentication": {
"username": "foobar",
"password": "secretpassword"
},
"tls": {
"ca_certificates": "",
"client_certificate": "",
"client_key": ""
}
},
"vad": {
"mode": 0,
"silence": 2,
"status_messages": true
}
}
"""
if not filename:
filename = DEFAULT_CONFIG
try:
with Path(filename).open('r') as json_file:
configuration = json.load(json_file)
except FileNotFoundError as error:
raise ConfigurationFileNotFoundError(error.filename)
return cls(site=configuration.get(SITE, DEFAULT_SITE),
mqtt=MQTTConfig.from_json(configuration.get(MQTT)),
vad=VADConfig.from_json(configuration.get(VAD)))
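# Minimal usage sketch (assumes a valid JSON configuration at the default path):
# config = ServerConfig.from_json_file()
# print(config.site, config.mqtt, config.vad)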
|
import tensorflow as tf
import numpy as np
import time
import json
import hparams as hp
from model import transformer
from optimizer import get_optimizer
from preprocess import get_vocab
from pathlib import Path
from absl import app, flags
# Training hparams
hp.add("shuffle_buffer", 1, help="Shuffle buffer")
hp.add("max_tokens", 100, help="Max tokens")
hp.add("max_seq_len", 600, help="Max sequence len")
def get_dataset(dataset_path: Path, max_tokens: int, max_seq_len: int, shuffle_buffer: int, skip: int = 0):
def parse_json(json_string_tensor):
encoded = json.loads(json_string_tensor.numpy())["encoded"]
return tf.constant(encoded, dtype=tf.int64, shape=[len(encoded)])
def parse_json_fn(text):
return tf.py_function(parse_json, inp=[text], Tout=tf.int64)
boundaries = [i for i in range(1, max_seq_len + 1) if max_tokens % i == 0]
batch_sizes = [int(max_tokens / i) for i in range(1, max_seq_len + 1) if max_tokens % i == 0] + [1]
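    # Bucketing note: boundaries are the divisors of max_tokens up to max_seq_len,
    # and each bucket's batch size is max_tokens / boundary, so every batch
    # carries roughly max_tokens tokens regardless of sequence length.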
ds = tf.data.TextLineDataset(str(dataset_path))
ds = ds.map(parse_json_fn)
    # element_length_func must return a scalar length, hence tf.shape(x)[0]
    ds = ds.apply(tf.data.experimental.bucket_by_sequence_length(lambda x: tf.shape(x)[0],
                                                                 boundaries,
                                                                 batch_sizes, padded_shapes=[None]))
ds = ds.shuffle(buffer_size=shuffle_buffer, seed=42)
ds = ds.repeat()
ds = ds.skip(skip)
ds = ds.prefetch(100)
return ds
def train_loop(ds, transformer_decoder, global_step, num_examples_processed, ckpt_manager, optimizer, learning_rate,
train_summary_writer, checkpoint_every, summarize_every, continuous=True):
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
train_step_signature = [tf.TensorSpec(shape=(None, None), dtype=tf.int64)]
def calculate_loss(real, pred):
# Masks padded tokens from loss_object
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
return tf.reduce_mean(tf.boolean_mask(loss_, mask))
@tf.function(input_signature=train_step_signature, experimental_relax_shapes=True)
def train_step(batch):
tar_inp = batch[:, :-1]
tar_real = batch[:, 1:]
mask = transformer.create_masks(tar_inp)
with train_summary_writer.as_default():
with tf.summary.record_if(tf.math.equal(tf.math.mod(global_step, summarize_every), 0)):
with tf.GradientTape() as tape:
predictions, _ = transformer_decoder(tar_inp, True, mask)
loss = calculate_loss(tar_real, predictions)
vars = transformer_decoder.trainable_variables
gradients = tape.gradient(loss, vars)
optimizer.apply_gradients(zip(gradients, transformer_decoder.trainable_variables))
for i in range(len(vars)):
tf.summary.scalar("gradient/" + vars[i].name, tf.linalg.norm(gradients[i]))
tf.summary.scalar("loss", loss)
tf.summary.scalar("gradient_norm", tf.linalg.global_norm(gradients))
tf.summary.scalar("learning_rate",
learning_rate if type(learning_rate) is float else learning_rate(global_step))
return loss
steps_start = time.time()
for batch in ds:
global_step.assign_add(1)
num_examples_processed.assign_add(tf.cast(tf.shape(batch)[0], num_examples_processed.dtype))
tf.summary.experimental.set_step(global_step)
# Take a gradient step
loss = train_step(batch)
if global_step.numpy() == 1:
print("Number of trainable parameters: {}".format(
np.sum([np.prod(v.get_shape().as_list()) for v in transformer_decoder.trainable_variables])))
# Print intermediate metrics
if global_step.numpy() % 100 == 0:
print('Step: {}\tLoss: {:.4f}\tNum examples: {}\tTime: {:.3f}s'.format(
global_step.numpy(), loss, num_examples_processed.numpy(), time.time() - steps_start))
steps_start = time.time()
# Checkpoint every X step
if global_step.numpy() % checkpoint_every == 0:
ckpt_save_path = ckpt_manager.save(checkpoint_number=global_step)
print("Saving checkpoint at '{}'".format(ckpt_save_path))
if not continuous:
break
def main(argv):
vocab_size = get_vocab(Path(flags.FLAGS.vocab)).vocab_size
# Model
transformer_decoder = transformer.TransformerOnlyDecoder(vocab_size)
# Optimizer
optimizer, learning_rate = get_optimizer()
# Counters
global_step = tf.Variable(0, name="global_step", trainable=False, dtype=tf.int64)
num_examples_processed = tf.Variable(0, name="num_examples_processed", trainable=False, dtype=tf.int64)
# Checkpointing
checkpoint_path = Path(flags.FLAGS.checkpoint_path)
ckpt = tf.train.Checkpoint(transformer_decoder=transformer_decoder, optimizer=optimizer,
global_step=global_step, num_examples_processed=num_examples_processed)
ckpt_manager = tf.train.CheckpointManager(ckpt, str(checkpoint_path), max_to_keep=5)
if ckpt_manager.latest_checkpoint:
ckpt.restore(ckpt_manager.latest_checkpoint)
print("Restored checkpoint from: {}".format(ckpt_manager.latest_checkpoint))
# Tensorboard events
train_log_dir = str(checkpoint_path / "events")
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
# Training dataset
ds = get_dataset(Path(flags.FLAGS.data), hp.get("max_tokens"), hp.get("max_seq_len"), hp.get("shuffle_buffer"),
skip=global_step.numpy())
try:
train_loop(ds, transformer_decoder, global_step, num_examples_processed, ckpt_manager, optimizer,
learning_rate, train_summary_writer, flags.FLAGS.checkpoint_every, flags.FLAGS.summarize_every,
flags.FLAGS.continuous)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
flags.DEFINE_string("data", None, help="Training data tfrecord file")
flags.DEFINE_string("vocab", None, help="Vocab file")
flags.DEFINE_string("checkpoint_path", None, help="Checkpoint path")
flags.DEFINE_integer("checkpoint_every", 1000, help="Checkpoint every X step")
flags.DEFINE_boolean("continuous", True, help="Whether to continue training after checkpointing")
flags.DEFINE_integer("summarize_every", 50, help="Summarize model stats every X step")
flags.mark_flags_as_required(["data", "vocab", "checkpoint_path"])
app.run(main)
|
"""
Django settings for rl_arena project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
from core.settings import *
INSTALLED_APPS = [
'core.apps.Config',
'publisher.apps.Config',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
]
|
# Copyright (c) 2014 University of California, Davis
#
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
class BasicRelation:
def __init__(self, name, latex, logic):
self.latexSymbol = latex
self.name = name
self.logicSymbol = logic
def __str__(self):
return self.name
class RelationSet:
    def __init__(self, name="", relations=None):
        self.name = name
        self.relations = relations if relations is not None else []  # a list of basic relations
class RCCType:
    def __init__(self, name="", relationSets=None):
        self.name = name  # was hard-coded to "", which discarded the argument
        self.relationSets = relationSets if relationSets is not None else []  # a list of relation sets; RCC5 would be an RCCType
relationDict = {}
relationDict["equals"] = BasicRelation("equals","$x \leftrightarrow $y","=")
relationDict["includes"] = BasicRelation("includes","$y > $x","PPC")
relationDict["is_included_in"] = BasicRelation("is_included_in","$x < $y","PP")
relationDict["disjoint"] = BasicRelation("disjoint","$x ! $y","DC")
relationDict["overlaps"] = BasicRelation("overlaps","$x \otimes $y","PO")
#relationDict["overlaps includes is_included_in disjoint"] = BasicRelation("does_not_equal","$x != $y","-(all x ($x(x) <-> $y(x)))")
#relationDict["equals includes"] = BasicRelation("isa", "$x \rightarrow $y","$y(x) -> $x(x)")
#relationDict["equals is_included_in"] = BasicRelation("asi", "$y \rightarrow $x","$x(x) -> $y(x)")
isa = RelationSet()
isa.name = "is a"
isa.relations = [relationDict["equals"], relationDict["is_included_in"]]
|
def display_menu():
print("The Book Catalog program")
print()
print("COMMAND MENU")
print("show - Show book info")
print("add - Add book")
print("edit - Edit book")
print("del - Delete book")
print("exit - Exit program")
def show_book(book_catalog):
i = 1
for key, value in book_catalog.items():
print(f"{i}. {key}")
i += 1
print()
selected_book = int(input(f"Please select a book (1-{i - 1}): ")) - 1
    selected_book_title = list(book_catalog.keys())[selected_book]
selected_book_author = book_catalog[selected_book_title]["author"]
selected_book_year = book_catalog[selected_book_title]["pubyear"]
print()
# print(f"Book Name: {book_catalog[]}")
print(f"Title: {selected_book_title}")
print(f"Author: {selected_book_author}")
print(f"Publication Year: {selected_book_year}")
def add_edit_book(book_catalog, mode):
if mode == "add":
new_book_info = {}
new_book_title = input("Enter book name: ")
new_book_info["author"] = input("Enter book author: ")
new_book_info["pubyear"] = input("Enter book publication year: ")
book_catalog[new_book_title] = new_book_info
if mode == "edit":
i = 1
for key, value in book_catalog.items():
print(f"{i}. {key}")
i += 1
print()
selected_book = int(input(f"Please select a book (1-{i - 1}): ")) - 1
new_book_info = {}
new_book_title = input("Enter book name: ")
new_book_info["author"] = input("Enter book author: ")
new_book_info["pubyear"] = input("Enter book publication year: ")
        # Assigning into a throwaway list comprehension (as before) never touched
        # the dict, so remove the old entry explicitly before adding the new one.
        old_title = list(book_catalog.keys())[selected_book]
        book_catalog.pop(old_title)
        book_catalog[new_book_title] = new_book_info
def delete_book(book_catalog):
i = 1
for key, value in book_catalog.items():
print(f"{i}. {key}")
i += 1
print()
selected_book = int(input(f"Please select a book (1-{i - 1}): ")) - 1
    # pop() takes a key, not an integer position, so look the title up first
    selected_title = list(book_catalog.keys())[selected_book]
    book_catalog.pop(selected_title)
def main():
book_catalog = {
"Moby Dick":
{"author": "Herman Melville",
"pubyear": "1851"},
"The Hobbit":
{"author": "J. R. R. Tolkien",
"pubyear": "1937"},
"Slaughterhouse Five":
{"author": "Kurt Vonnegut",
"pubyear": "1969"}
}
display_menu()
while True:
print()
command = input("Command: ").lower()
if command == "show":
show_book(book_catalog)
elif command == "add":
add_edit_book(book_catalog, mode="add")
elif command == "edit":
add_edit_book(book_catalog, mode="edit")
elif command == "del":
delete_book(book_catalog)
elif command == "exit":
print("Bye!")
break
else:
print("Unknown command. Please try again.")
if __name__ == "__main__":
main()
|
def foo(x: int):
pass
|
from .GridEmitter import *
from .ForceEmitter import *
from .HardCodeEmitter import *
|
from time import sleep, strftime, time
import matplotlib.pyplot as plt
import numpy as np
import board
import busio
import adafruit_ads1x15.ads1115 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
# Create the I2C bus
i2c = busio.I2C(board.SCL, board.SDA)
#What Data Do We Need?
Print = True#For Debugging and Calibration
Chart = True#Calibration Chart
voltOffset = 0#
relayOffset = 0
ampsOffset = 20991#16750
#voltSlope
#ampSlope
# Create the ADC object using the I2C bus
ads0 = ADS.ADS1115( i2c, address=0x4b )# Make Sure it's the right Address
#ads1 = ADS.ADS1115( i2c, address=0x49 )
# Create single-ended input on each channel 0-3
chan0 = AnalogIn(ads0, ADS.P0)
chan1 = AnalogIn(ads0, ADS.P1)
chan2 = AnalogIn(ads0, ADS.P2)
chan3 = AnalogIn(ads0, ADS.P3)
# chan1 = AnalogIn(ads1, ADS.P1)
#LOG Data to CSV
def write_ADS():
with open("/home/pi/ADS1115/ADS1115_RAWdata.csv", "a") as log:
#with open("/home/pi/ADS1115/ADS1115_VOLTdata.csv", "a") as log:
log.write("{0},{1},{2},{3},{4}\n".format(strftime("%Y-%m-%d %H:%M:%S"),chan0.value, chan1.value, chan2.value, chan3.value))
#log.write("{0},{1},{2},{3},{4}\n".format(strftime("%Y-%m-%d %H:%M:%S"),chan0.voltage, chan1.voltage, chan2.voltage, chan3.voltage))
#log.write("{0},{1}\n".format(strftime("%Y-%m-%d %H:%M:%S"),str(temp3)))
# Create differential input between channel 0 and 1
# chan = AnalogIn(ads, ADS.P0, ADS.P1)
plt.ion()
plt.figure(1)
plt.figure("Relay")
v0 = []#Array for A0
v1 = []
v2 = []
v3 = []
#y = []
x = []
a = []
v = []
def runningMeanFast(z, N):
return np.convolve(z, np.ones((N,))/N)[(N-1):]
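# runningMeanFast computes a trailing N-point mean via convolution; dropping the
# first N-1 samples of the 'full' convolution aligns the output with the input.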
def graph():
#amps = (chan2.voltage - 2.36) * 19.2
#volts = (chan1.voltage) * 186.5 #186.5 24.2v nightime, no solar #184.4#triggered at 23.3
amps = battamps
volts = battvoltage
a.append(amps)#AMPS - 24v
v.append(volts)#VOLTS - 24v
v0.append(chan0.value)#v0.append(chan0.voltage)#
v1.append(chan1.value)#v1.append(chan0.voltage)
v2.append(chan2.value)#v2.append(chan0.voltage)
v3.append(chan3.voltage)#v3.append(chan0.value)#Fix Conditions Depending on Voltage Reading First
x.append(time())
plt.clf()
#glow plug amps is very Stable ~2.5v ~= 24v. MPPT turns on inverter @ 25V State of Charge
#plt.plot(x,v0)
#24v battery voltage
plt.figure(1)
plt.subplot(2,1,1)
if amps > 5:
plt.ylim(5,20)
elif amps < 0.5 && amps > -0.5:
plt.ylim(-5,10)
elif amps < -0.5:
plt.ylim(-5,0)
elif amps < 2:
plt.ylim(0,2)
elif amps < 5:
plt.ylim(0,5)
plt.plot(x,a)
plt.xlabel("Time")
#b = np.smooth(a, window='flat', window_len=11)
#plt.plot(x,b)
#plt.plot(runningMeanFast(a,N))
#plt.plot(np.convolve(np.ones(200), np.ones(50)/50, mode='valid'));
#plt.plot(x,cumsum)
#plt.plot(x, cumsum)
#plt.plot(x, moving_aves)
plt.ylabel("Amps")
    #~2.50v ~= 0.5 Amps Charging, ~2.30v ~= 5A Discharging
plt.subplot(2,1,2)
if volts > 28:
plt.ylim(28,30)
elif volts < 24:
plt.ylim(22,24)
elif volts < 28:
plt.ylim(22,28)
plt.plot(x,v)
plt.plot(x,v3)
plt.ylabel("Battery Voltage")
plt.xlabel("Time")
#plt.subplot(2,1,3)#Didn't like this 3
#Relay Chart
if Chart == True:
plt.figure("Relay")
plt.subplot(2,1,1)
plt.plot(x,v0)
plt.xlabel("Time")
plt.ylabel("Amps-RAW")
plt.subplot(2,1,2)
plt.plot(x,v3)
plt.xlabel("Time")
#ylabel("Volts")
plt.draw()
if Print == True:
print("{:>5}\t{:>5}{:>5}\t{:>5}{:>5}\t{:>5}{:>5}\t{:>5}".format('raw0', 'v0', 'raw1', 'v1', 'raw2', 'v2', 'raw3', 'v3'))
while True:
battvoltage = (chan1.value * .0234945045) - relayOffset #0.0234945045 = good while on Grid, Grid Relay on = 1453raw 0am
#sleep(0.2)
    battamps = ((chan2.value - ampsOffset) * 0.0034638)#0.002986)#0.00272)#Needs to be a slightly larger multiplier 1/11/21 #jan10:offset:16884#attempt~offset:19050, new new offset: 17540, new offset:20103 multiplier/slope:0.00272 #27.46 #13,884=2.35v??? #
#battvoltage = (chan1.value * voltageSlope) - voltageOffset
#battamps = ((chan2.value - ampOffset) * ampSlope)
relayVoltage = chan3.value * 0.0007846528
relayCurrent = (chan0.value - 31163)#IF value is greater than 0, grid is off. else grid on
if relayVoltage > 24:
        relayOffset = 10.1#Make this zero and correct the multiplier slope
#battAmpsCoeff = 0.00272
ampsOffset = 20991#20668-20640->(Guessed with 0.5 inverter load, 0.5 solar for 0)#20991->(Recorded at night MPPT Load Disconnected)
#voltageCurve = 0.0234945045
elif relayVoltage < 20:
relayOffset = 0
voltageOffset = 0.0234945045#Original Slope
ampsOffset = 20654##16800(OldValue)#20480(-0.5A Inverter Only)
#battAmpsCoeff = 0.00349
#voltageCurve = 0.0234945045
#else:
#print("Battery Within Window")
write_ADS()
graph()
    if Print == True:
        #print("{:>5}\t{:>5.3f}{:>5}\t{:>5.3f}{:>5}\t{:>5.3f}{:>5}\t{:>5.3f}".format(chan0.value, chan0.voltage, chan1.value, chan1.voltage, chan2.value, chan2.voltage, chan3.value, chan3.voltage))
        print("{:.2f}".format(battvoltage), "{:.2f}".format(battamps), chan1.value, chan2.value, "Amps:", chan0.value, "Volts:", chan3.value)
        #print("A0=RAW:",chan0.value,"A1=RAW:",chan1.value,"A2=RAW:",chan2.value,"A3=RAW:",chan3.value)
        #print(time())
    # use battamps here: 'amps' only exists inside graph()
    if battamps < 0.25 and battamps > -0.25:
        plt.pause(60)
    else:
        plt.pause(0.3)
#print("{:>5}\t{:>5.3f}".format(chan0.value, chan0.voltage))
#time.sleep(0.5)
#print("{:>5}\t{:>5.3f}".format(chan1.value, chan1.voltage))
#time.sleep(0.5)
#print("{:>5}\t{:>5.3f}".format(chan2.value, chan2.voltage))
#time.sleep(0.5)
#print("{:>5}\t{:>5.3f}".format(chan3.value, chan3.voltage))
#time.sleep(0.5)
#print("{:>5}\t{:>5.3f}".format(chan1.value, chan1.voltage))
|
from .imports import *
class CubeVisualizer(Talker):
'''
A tool for visualizing a squishable cube.
'''
def __init__(self, cube=None):
'''
Initialize a visualizer, and link it to a Squishable or Binned cube.
'''
Talker.__init__(self)
self.cube = cube
@property
def loupe(self):
try:
return self._loupe
except AttributeError:
self._loupe = loupe()
return self._loupe
def explore(self, image=None, key='raw_counts', star=None, vmin=None, vmax=None, **kw):
'''
Visually explore the cube, displaying flux vs. time + wavelength.
image = (ntimepoints x nwavelengths) image to visualize,
by imshowing the 2D array, and showing slices of it along
both the horizontal and vertical
'''
color = self.cube.starcolor(self.cube.target)
if image is None:
# make sure a star is defined
            if star is None:
star = self.cube.target
# do different things for different keys
if key in self.cube.cubekeys:
z = self.cube.cubes[key][star]
vmin, vmax = 0, np.nanpercentile(z, 99)
if key == 'corrected':
z = self.corrected()
vmin, vmax = 0.979, 1.01
if key == 'wavelengthed':
z = self.wavelengthed()
vmin, vmax = 0.72, 1.1
else:
z = image
if vmin is None:
vmin = np.nanpercentile(z, 1)
        if vmax is None:
            vmax = np.nanpercentile(z, 99)  # was assigning vmin, clobbering the 1st-percentile floor
aspect_ratio = 6.0/4.0
tiny = 0.1
width_ratios = [1.0 - tiny, tiny]
height_ratios = [tiny*aspect_ratio, 1.0 - tiny*aspect_ratio]
space = 0.05
hspace = space*aspect_ratio
wspace = space
figsize= np.array([aspect_ratio, 1.0])*8
wavelength = self.cube.spectral['wavelength']
times = self.cube.temporal['bjd']
times -= np.min(times)
try:
ok = self.ok
except AttributeError:
            ok = np.ones_like(z).astype(bool)  # np.bool was removed in newer NumPy
self.loupe.setup(z, ok=ok, yaxis=wavelength, xaxis=times,
aspect='auto',
hspace=hspace, wspace=wspace,
width_ratio=width_ratios,
height_ratios=height_ratios,
figsize=figsize,
labelfontsize=None,
datacolor=color,
crosshaircolor=color,
left=0.1, bottom=0.1,
**kw)
self.loupe.ax['2d'].set_xlabel('Time from Start of Observation (days)')
        self.loupe.ax['2d'].set_ylabel(r'Wavelength ($\AA$)')
self.loupe.ax['slicey'].set_ylim(np.max(wavelength), np.min(wavelength))
# make the light curves into scattered points
for thing in ['slicex', 'slicex_bad']:
self.loupe.plotted[thing].set_marker('.')
self.loupe.plotted[thing].set_linewidth(0)
self.loupe.plotted['slicex_bad'].set_marker('x')
#self.loupe.ax['slicex'].yaxis.tick_right()
#self.loupe.ax['slicex'].yaxis.set_label_position('right')
# make the lines thicker
linewidth=3
for line in ['slicey', 'crossy', 'crossyextend', 'crossx', 'crossxextend']:
self.loupe.plotted[line].set_linewidth(linewidth)
self.loupe.set_limits(vmin, vmax)
plt.draw()
#self.loupe.run()
def corrected(self, key='raw_counts'):
# normalize along the wavelength axis
star = self.cube.target
self.speak('calculating [{}] for [{}], corrected by the mega-calibrator'.format(key, star))
target = self.cube.cubes[key][star]
# KLUDGE!!!
comparison = self.cube.cubes[key][self.cube.comparisons[0]]
z = target/comparison
oned = np.nanmedian(z, 0)
z = z/oned[np.newaxis,:]
return z
def wavelengthed(self, key='raw_counts', star=None):
        if star is None:
star = self.cube.target
# normalize along the wavelength axis
self.speak('calculating [{}] for [{}], normalized by its median spectrum'.format(key, star))
z = self.cube.cubes[key][star]
oned = np.nanmedian(z, 0)
return z/oned[np.newaxis,:]
def overlayQuality(self, color='tomato'):
'''
Plot the cosmic rays on top of the plot.
### KLUDGE -- test this; it's not clear it's working yet!
'''
cmap = craftroom.cmaps.one2another(color, color, alphabottom=1.0, alphatop=0.0)
a = self.loupe.ax['2d']
try:
ok = self.ok
except AttributeError:
ok = self.cube.cubes['ok'][self.cube.target]
self.overlay = a.imshow(ok.T,
cmap=cmap,
extent=self.loupe.extent,
interpolation='nearest',
zorder=5,
origin='lower',
aspect=self.loupe.ax['2d'].get_aspect()
)
"""
def zapCosmics(self, remake=False):
# the
cosmicfilename = os.path.join(self.cube.binneddirectory, 'cosmics.npy')
try:
notcosmics = np.load(cosmicfilename)[()]
assert(remake == False)
except (IOError, AssertionError):
self.speak('trying to zap cosmics')
z = self.corrected()
filtered = scipy.signal.medfilt(z, (5, 15))
cosmics = z - filtered
wavelengthstd = 1.48*np.nanmedian(np.abs(cosmics - np.nanmedian(cosmics, 0)[np.newaxis,:]), 0)
#mad(cosmics, 0)
normalized = cosmics/wavelengthstd[np.newaxis, :]
notcosmics = np.abs(normalized) < 5
np.save(cosmicfilename, notcosmics)
self.ok = notcosmics
for star in self.cube.stars:
self.cube.binned_cubes['ok'][star] *= self.ok
"""
def makeSliceMovies(self, keys=['wavelengthed', 'corrected', 'raw_counts'],
remake=False,
stride=1):
'''
Make movies slicing through wavelength.
'''
wavelength = self.cube.spectral['wavelength']
z = self.cube.cubes['raw_counts'][self.cube.target]
spectrum = np.nanmedian(z, 0)
for key in keys:
if key == 'raw_counts':
                axislabel = r'Photons/$\AA$'
else:
axislabel = 'Relative\nFlux'
self.explore(key=key)
self.loupe.moveCrosshair(y=np.min(wavelength), x=None)
self.loupe.plotted['slicey'].set_data(spectrum, wavelength)
self.loupe.ax['slicey'].set_xlim(0, np.nanpercentile(spectrum, 99)*1.1)
plt.setp(self.loupe.ax['slicey'].get_xticklabels(), visible=False)
self.loupe.ax['slicex'].set_ylabel(axislabel)
plotfilename = os.path.join(self.cube.directory, '{}.pdf'.format(key))
plt.savefig(plotfilename, dpi=1000)
filename = os.path.join(self.cube.directory, '{}.mp4'.format(key))
self.speak('saving movie to {}'.format(filename))
self.loupe.movieSlice(direction='y', filename=filename, remake=remake, stride=stride)
|
import base64
import datetime
import hashlib
import hmac
import json
import requests
# Update the customer ID to your Log Analytics workspace ID
customer_id = ""
# For the shared key, use either the primary or the secondary Connected Sources client authentication key
shared_key = ""
# The log type is the name of the event that is being submitted
#####################
######Functions######
#####################
# Build the API signature
def build_signature(
customer_id, shared_key, date, content_length, method, content_type, resource
):
x_headers = "x-ms-date:" + date
string_to_hash = (
method
+ "\n"
+ str(content_length)
+ "\n"
+ content_type
+ "\n"
+ x_headers
+ "\n"
+ resource
)
bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
decoded_key = base64.b64decode(shared_key)
encoded_hash = base64.b64encode(
hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()
).decode()
authorization = "SharedKey {}:{}".format(customer_id, encoded_hash)
return authorization
# Build and send a request to the POST API
def post_data(customer_id, shared_key, body, log_type):
method = "POST"
content_type = "application/json"
resource = "/api/logs"
rfc1123date = datetime.datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S GMT")
content_length = len(body)
signature = build_signature(
customer_id,
shared_key,
rfc1123date,
content_length,
method,
content_type,
resource,
)
uri = (
"https://"
+ customer_id
+ ".ods.opinsights.azure.com"
+ resource
+ "?api-version=2016-04-01"
)
headers = {
"content-type": content_type,
"Authorization": signature,
"Log-Type": log_type,
"x-ms-date": rfc1123date,
}
response = requests.post(uri, data=body, headers=headers)
if response.status_code >= 200 and response.status_code <= 299:
print("Accepted")
else:
print("Response code: {}".format(response.status_code))
def post_to_log_aggregator(body, log_type):
post_data(customer_id, shared_key, body, log_type)
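# Usage sketch (hypothetical payload and log type; customer_id and shared_key
# above must be set to real workspace credentials before this will succeed):
# post_to_log_aggregator(json.dumps([{"event": "heartbeat", "status": "ok"}]), "MyCustomLog")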
|
class Solution:
def longestPalindrome(self, s: str) -> str:
if s==None or len(s)<1: return ''
start, end = 0, 0
for i in range(len(s)):
len1 = self.expandAroundCenter(s,i,i)
len2 = self.expandAroundCenter(s,i,i+1)
max_len = max(len1, len2)
if max_len > (end-start):
start = i - (max_len-1)//2
end = i + max_len//2
return s[start:end+1]
def expandAroundCenter(self, s: str, left: int, right: int) -> int:
l, r = left, right
while (l>=0 and r<len(s) and s[l]==s[r]):
l -= 1
r += 1
return r - l - 1
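# Minimal usage sketch:
# Solution().longestPalindrome("babad")  # -> "aba" (ties go to the later center)
# Solution().longestPalindrome("cbbd")   # -> "bb"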
|
import os
class BadConfigFile(Exception):
pass
class Config:
''' Configurations class for the server '''
def __init__(self, args, config_file='boring.config'):
self.args = args
self.file = config_file # self.load(config_file)
self._options = {}
def load(self):
path = os.path.abspath(self.file)
if not os.path.exists(path):
raise BadConfigFile('path to config file not found %s' % path)
        try:
            config = open(self.file)
        except PermissionError as e:
            raise OSError('Failed to open config file [%s]' % e)
        with config:
            for line in config:
                if line.strip().startswith('#') or line.isspace():
                    continue
                try:
                    # split on the first '=' only, so values may contain '='
                    key, value = line.split('=', 1)
                except ValueError as e:
                    raise BadConfigFile("Bad config file %s [%s]" % (line, e))
                key = key.strip()
                value = value.strip()
                self._options[key] = value
def __getitem__(self, name):
value = self._options.get(name)
if not value:
if not self.args:
# the server is not started from command line
return ''
return getattr(self.args, name, '')
return value
def __setitem__(self, key, value):
self._options[key] = value
__getattr__ = __getitem__
def __bool__(self):
return True
class DummyConfig:
# pylint: disable=unused-argument
def __init__(self, *args):
pass
def __getitem__(self, name):
return ''
def __setitem__(self, *args):
pass
__getattr__ = __getitem__
def __bool__(self):
return False
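# Minimal usage sketch (assumes a file "boring.config" containing "key = value"
# lines; args may be an argparse.Namespace or None):
#
#     cfg = Config(args=None, config_file='boring.config')
#     cfg.load()
#     host = cfg['host']   # '' when the key is missing and no args were given
#     port = cfg.port      # attribute access goes through __getattr__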
|
#
# Example on the use of the CastImageFilter
#
import itk
from sys import argv
itk.auto_progress(2)
dim = 2
IType = itk.Image[itk.US, dim]
OIType = itk.Image[itk.UC, dim]
reader = itk.ImageFileReader[IType].New( FileName=argv[1] )
filter = itk.CastImageFilter[IType, OIType].New( reader )
writer = itk.ImageFileWriter[OIType].New( filter, FileName=argv[2] )
writer.Update()
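# Invoke from the command line with an input and an output image, e.g.
# (the script name here is hypothetical):
#   python cast_image_filter_example.py input.png output.png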
|
from __future__ import annotations
from .basis import Basis, _gaps2x
from . import grid
from .util import roarray, input_as_list, compute_angles, qhull_interpolation_driver, _piece2bounds
from .attrs import check_vectors_inv, convert_vectors_inv, convert_coordinates, check_coordinates, convert_values,\
check_values
from .triangulation import unique_counts, simplex_volumes, Triangulation
import numpy as np
from numpy import ndarray
from scipy.sparse import csr_matrix
from scipy.spatial import KDTree, Delaunay
from attr import attrs, attrib
from typing import Union
from functools import cached_property
@attrs(frozen=True, eq=False)
class Cell(Basis):
"""Describes a unit cell."""
coordinates = attrib(type=Union[ndarray, list, tuple], converter=convert_coordinates, validator=check_coordinates)
values = attrib(type=Union[ndarray, list, tuple, str], converter=convert_values, validator=check_values)
meta = attrib(type=dict, factory=dict, converter=dict)
_vectors_inv = attrib(type=Union[ndarray, list, tuple], default=None, converter=convert_vectors_inv,
validator=check_vectors_inv)
@classmethod
def from_cartesian(cls, vectors: Union[ndarray, Basis, list, tuple], cartesian: Union[ndarray, list, tuple],
values: Union[ndarray, list, tuple, str], *args, proto: type = None,
vectors_inv: ndarray = None, **kwargs) -> Cell:
"""
Constructs a cell using cartesian coordinates.
Parameters
----------
vectors : ndarray
Cell basis.
cartesian : ndarray
A 2D array with cartesian coordinates.
values : ndarray
An array with values per each coordinate.
args
Other arguments.
proto : class
Class of the returned object.
vectors_inv : ndarray
Basis inverse.
kwargs
Other keyword arguments.
Returns
-------
The resulting Cell.
"""
basis = Basis(vectors, vectors_inv=vectors_inv)
if vectors_inv is None:
vectors_inv = basis.vectors_inv
if proto is None:
proto = cls
return proto(basis, basis.transform_from_cartesian(cartesian), values, *args, vectors_inv=vectors_inv, **kwargs)
@classmethod
def random(cls, density: float, atoms: dict, shape: str = "box") -> Cell:
"""
Prepares a cell with random coordinates.
Parameters
----------
density : float
Atomic density.
atoms : dict
A dictionary with specimen-count pairs.
shape : {"box"}
The shape of the resulting cell.
Returns
-------
result : Cell
The resulting unit cell.
"""
n_atoms = sum(atoms.values())
coords = np.random.rand(n_atoms, 3)
values = sum(([k] * v for k, v in atoms.items()), [])
if shape == "box":
a = (n_atoms / density) ** (1./3)
return cls(np.eye(3) * a, coords, values)
else:
raise ValueError(f"Unknown shape={shape}")
@cached_property
def cartesian(self) -> ndarray:
return roarray(self.transform_to_cartesian(self.coordinates))
@cached_property
def size(self) -> int:
return len(self.coordinates)
@cached_property
def values_uq(self) -> ndarray:
values_uq, values_encoded = np.unique(self.values, return_inverse=True, axis=0)
self.__dict__["values_encoded"] = roarray(values_encoded.astype(np.int32))
return roarray(values_uq)
@cached_property
def values_encoded(self) -> ndarray:
_ = self.values_uq # trigger attribute which sets this as well
return self.values_encoded # not a recursion
@cached_property
def values_lookup(self) -> dict:
return dict(zip(self.values_uq, np.arange(len(self.values_uq))))
def __eq__(self, other):
return super().__eq__(other) and np.array_equal(self.coordinates, other.coordinates) and \
np.array_equal(self.values, other.values)
def normalized(self, left: float = 0, sort: Union[ndarray, str, int] = None) -> Cell:
"""
Puts all points inside box boundaries and returns a copy.
Parameters
----------
left : float
The left edge of the normalized box in cell
coordinates. For example, ``left=-0.3`` stands
for coordinates being placed in a ``[-0.3, 0.7)``
interval.
sort : ndarray
            An optional vector to sort along. Also accepts an integer
            indicating a basis vector, or one of 'xyz' to sort along
            cartesian axes.
Returns
-------
result : Cell
A copy of self with normalized coordinates.
"""
d = self.state_dict(mark_type=False)
d["coordinates"] = new_coordinates = ((self.coordinates - left) % 1) + left
if sort is not None:
if isinstance(sort, int):
sort = self.vectors[sort]
elif sort in ('x', 'y', 'z'):
_sort = np.zeros(3)
_sort['xyz'.index(sort)] = 1
sort = _sort
else:
sort = np.asanyarray(sort)
order = np.argsort(self.transform_to_cartesian(new_coordinates) @ sort)
d["coordinates"] = d["coordinates"][order]
d["values"] = d["values"][order]
return self.copy(**d)
@input_as_list
def distances(self, ids: list, cutoff: float = None, other: Union[Cell, ndarray] = None) -> Union[ndarray, csr_matrix]:
"""
Computes distances between Cell points.
Parameters
----------
ids : ndarray
Specimen IDs to compute distances between.
Several shapes are accepted:
* *empty*: returns a 2D matrix of all possible distances
* nx2 array of ints: returns n distances between each pair
of [i, 0]-[i, 1] species;
* 1D array of ints of length n: returns n-1 distances
between each pair of [i-1]-[i] species;
cutoff : float
Cutoff for obtaining distances. Only if ids is empty.
other : Cell
Other cell to compute distances to.
Returns
-------
The resulting distance matrix in dense or sparse forms.
"""
this = self.cartesian
if other is None:
other = this
elif isinstance(other, Cell):
other = other.cartesian
if len(ids) == 0:
if cutoff is None:
return np.linalg.norm(this[:, None] - other[None, :], axis=-1)
else:
return KDTree(this).sparse_distance_matrix(KDTree(other), max_distance=cutoff)
ids = np.asanyarray(ids, dtype=int)
if ids.ndim == 1:
if ids.shape[0] < 2:
raise ValueError(f"Only {len(ids)} points are found, at least 2 required")
return np.linalg.norm(this[ids[:-1], :] - other[ids[1:], :], axis=1)
elif ids.ndim == 2:
if ids.shape[1] != 2:
raise ValueError(f"ids.shape={ids.shape}, required (n, 2)")
return np.linalg.norm(this[ids[:, 0], :] - other[ids[:, 1], :], axis=1)
else:
raise ValueError(f"ids.ndim={ids.ndim}, required 1 or 2")
def cartesian_delta(self, other: Cell, pbc: bool = True) -> ndarray:
"""
Computes the distance between the corresponding pairs in two cells.
Parameters
----------
other : Cell
Other cell to compute distance to.
pbc : bool
Periodic boundary conditions.
Returns
-------
The resulting distances, one per pair.
"""
assert self.size == other.size
n_dims = len(self.vectors)
if pbc:
this_cartesian = self.normalized(-.5).cartesian
other_replica = other.repeated(*(3,) * n_dims)
other_replica_cartesian = other_replica.normalized(-.5).cartesian
return np.min(np.linalg.norm(
this_cartesian[None, ...] - other_replica_cartesian.reshape((3 ** n_dims,) + other.cartesian.shape),
axis=-1,
), axis=0)
else:
return np.linalg.norm(self.cartesian - other.cartesian, axis=-1)
def cartesian_copy(self, **kwargs) -> Cell:
"""
Same as ``copy`` but accepts cartesian coordinates instead of crystal
        coordinates. Does exactly the same thing as ``copy`` if no arguments
        are provided.
Parameters
----------
kwargs
Arguments to ``self.from_cartesian``.
Returns
-------
The resulting Cell.
"""
state_dict = self.state_dict(mark_type=False)
del state_dict["coordinates"]
state_dict["cartesian"] = self.cartesian
return self.from_cartesian(**{**state_dict, **kwargs})
@input_as_list
def angles(self, ids: ndarray) -> ndarray:
"""
Computes angles between points in this cell.
Parameters
----------
ids : ndarray
Point indexes to compute angles between.
Several shapes are accepted:
* nx3 array: computes n cosines of angles [i, 0]-[i, 1]-[i, 2];
* 1D array of length n: computes n-2 cosines of angles along
the path ...-[i-1]-[i]-[i+1]-...;
Returns
-------
An array with cosines.
Examples
--------
        >>> cell = Cell(Basis.orthorhombic((1, 2, 3)), np.random.rand(4, 3), np.arange(4))
        >>> cell.angles((0, 1, 2))  # angle between vectors connecting {second and first} and {second and third} pts
        >>> cell.angles(0, 1, 2)  # a simplified version of the above
        >>> cell.angles(0, 1, 3, 2)  # two angles along the path: 0-1-3 and 1-3-2
        >>> cell.angles((0, 1, 3, 2))  # same as the above
        >>> cell.angles((0, 1, 3), (1, 3, 2))  # same as the above
"""
v = self.cartesian
ids = np.asanyarray(ids, dtype=int)
if len(ids.shape) == 1:
if ids.shape[0] < 3:
raise ValueError(f"Only {len(ids)} points are found, at least 3 required")
vectors = v[ids[:-1], :] - v[ids[1:], :]
nonzero = np.argwhere((vectors ** 2).sum(axis=1) > 0)[:, 0]
if nonzero.shape[0] == 0:
raise ValueError("All points coincide")
vectors[:nonzero[0]] = vectors[nonzero[0]]
vectors[nonzero[-1] + 1:] = vectors[nonzero[-1]]
vectors_1 = vectors[:-1]
vectors_2 = -vectors[1:]
for i in range(nonzero.shape[0] - 1):
vectors_1[nonzero[i] + 1:nonzero[i + 1]] = vectors_1[nonzero[i]]
vectors_2[nonzero[i]:nonzero[i + 1] - 1] = vectors_2[nonzero[i + 1] - 1]
elif len(ids.shape) == 2:
if ids.shape[1] != 3:
raise ValueError(f"ids.shape={ids.shape}, required (n, 3)")
vectors_1 = v[ids[:, 0], :] - v[ids[:, 1], :]
vectors_2 = v[ids[:, 2], :] - v[ids[:, 1], :]
else:
raise ValueError(f"ids.ndim={ids.ndim}, required 1 or 2")
return compute_angles(vectors_1, vectors_2)
def centered(self) -> Cell:
"""
Generates a new cell where all points are shifted to maximize margins.
Returns
-------
A new cell with centered coordinates.
"""
sorted_coordinates = np.sort(self.coordinates % 1, axis=0)
gaps = sorted_coordinates - np.roll(sorted_coordinates, 1, axis=0)
gaps[0] = 1 - gaps[0]
max_gap = np.argmax(gaps, axis=0)
r = np.arange(self.coordinates.shape[1])
gap_center = (self.coordinates[max_gap, r] + self.coordinates[max_gap - 1, r] + (max_gap == 0)) / 2
return self.copy(coordinates=(self.coordinates - gap_center[None, :]) % 1)
def ws_packed(self) -> Cell:
"""
Generates a new cell where all points are replaced by their periodic images
        closest to the origin (i.e. inside the Wigner-Seitz cell).
Returns
-------
A new cell with packed coordinates.
"""
result = self.normalized()
cartesian = result.cartesian
vertices = result.vertices
d = cartesian[:, None, :] - vertices[None, :, :]
d = (d ** 2).sum(axis=-1)
d = np.argmin(d, axis=-1)
return self.cartesian_copy(cartesian=cartesian - vertices[d, :])
@input_as_list
def isolated(self, gaps: list, units="crystal") -> Cell:
"""
Isolates points from their images in this cell or grid by elongating basis
vectors while keeping distances between the points fixed.
Parameters
----------
gaps : list
The elongation amount in cartesian or in crystal units.
units : str
Units of `gaps`: 'cartesian' or 'crystal'.
Returns
-------
A bigger cell where points are spatially isolated from their images.
"""
gaps = _gaps2x(self, gaps, units)
vectors = self.vectors * gaps[..., None]
coordinates = self.coordinates / gaps[None, ...]
coordinates += (0.5 * (gaps - 1) / gaps)[None, ...]
return self.copy(vectors=vectors, coordinates=coordinates)
def isolated2(self, gap: float) -> Cell:
"""
Isolates points from their images in this cell by constructing a new
larger orthorhombic cell.
Parameters
----------
gap : float
The minimal gap size between the cloud of points and its periodic images.
Returns
-------
An orthorhombic unit cell with the points.
"""
c = self.normalized()
cartesian = c.cartesian + gap
shape = np.amax(c.vertices, axis=0) + 2 * gap
return self.cartesian_copy(vectors=Basis.orthorhombic(shape), cartesian=cartesian)
@input_as_list
def select(self, piece: list) -> ndarray:
"""
Selects points in this cell inside a box defined in the crystal basis.
Images are not included.
Parameters
----------
piece : list
Box dimensions ``[x_from, y_from, ..., z_from, x_to, y_to, ..., z_to]``,
where x, y, z are basis vectors.
Returns
-------
A numpy array with the selection mask.
Examples
--------
        >>> cell = Cell(Basis.orthorhombic((1, 2, 3)), np.random.rand(4, 3), np.arange(4))
>>> cell.select((0,0,0,1,1,1)) # select all species with coordinates within (0,1) range
>>> cell.select(0,0,0,1,1,1) # a simplified version of above
>>> cell.select(0,0,0,0.5,1,1) # select the 'left' part
>>> cell.select(0.5,0,0,1,1,1) # select the 'right' part
"""
p1, p2 = _piece2bounds(piece, len(self.vectors))
return np.all(self.coordinates < p2[None, :], axis=1) & np.all(self.coordinates >= p1[None, :], axis=1)
@input_as_list
def apply(self, selection: list) -> Cell:
"""
Applies a mask to this cell to keep a subset of points.
Parameters
----------
selection
A bool mask with selected species.
Returns
-------
The resulting cell.
Examples
--------
        >>> cell = Cell(Basis.orthorhombic((1, 2, 3)), np.random.rand(4, 3), np.arange(4))
>>> selection = cell.select((0,0,0,0.5,1,1)) # Selects species in the 'left' part of the unit cell.
>>> result = cell.apply(selection) # Applies selection. Species outside the 'left' part are discarded.
"""
selection = np.asanyarray(selection)
return self.copy(coordinates=self.coordinates[selection, :], values=self.values[selection, ...])
@input_as_list
def discard(self, selection: list) -> Cell:
"""
Discards points from this cell according to the mask specified.
Complements ``self.apply``.
Parameters
----------
selection
Points to discard.
Returns
-------
The resulting cell.
Examples
--------
        >>> cell = Cell(Basis.orthorhombic((1, 2, 3)), np.random.rand(4, 3), np.arange(4))
>>> selection = cell.select((0,0,0,0.5,1,1)) # Selects species in the 'left' part of the unit cell.
>>> result = cell.discard(selection) # Discards selection. Species inside the 'left' part are removed.
"""
return self.apply(~np.asanyarray(selection))
@input_as_list
def cut(self, piece: list, selection: Union[ndarray, list, tuple] = None) -> Cell:
"""
Selects a box inside this cell grid and returns it in a smaller cell.
Basis vectors of the resulting instance are collinear to those of `self`.
Parameters
----------
piece
Box dimensions ``[x_from, y_from, ..., z_from, x_to, y_to, ..., z_to]``,
where x, y, z are basis vectors.
selection
A custom selection mask or None if all points in the selected box
have to be included.
Returns
-------
A smaller instance with a subset of points.
"""
if selection is None:
selection = self.select(piece)
p1, p2 = _piece2bounds(piece, len(self.vectors))
vectors = self.vectors * (p2 - p1)[:, None]
return self.cartesian_copy(vectors=vectors, cartesian=self.cartesian - p1 @ self.vectors).apply(selection)
@input_as_list
def merge(self, cells: list) -> Cell:
"""
Merges points from several unit cells with the same basis.
Parameters
----------
cells : list
Cells to be merged.
Returns
-------
A new unit cell with all points merged.
"""
c = [self.coordinates]
v = [self.values]
for cell in cells:
if not np.all(cell.vectors == self.vectors):
raise ValueError(f'basis mismatch: {self.vectors} != {cell.vectors}')
c.append(cell.coordinates)
v.append(cell.values)
return self.copy(coordinates=np.concatenate(c, axis=0), values=np.concatenate(v, axis=0))
def stack(self, *cells: list, vector: int, **kwargs) -> Cell:
"""
Stack multiple cells along the provided vector.
Parameters
----------
cells : list
Cells and bases to stack.
vector : int
Basis vector to stack along.
kwargs
Other arguments to ``Basis.stack``.
Returns
-------
The resulting cells stacked.
"""
cells = (self, *cells)
other_vectors = list(range(self.vectors.shape[0]))
del other_vectors[vector]
dims = self.vectors.shape[0]
basis = Basis.stack(*cells, vector=vector, **kwargs)
values = np.concatenate(tuple(cell.values for cell in cells if isinstance(cell, Cell)), axis=0)
cartesian = []
shift = np.zeros(dims, dtype=float)
for c in cells:
if isinstance(c, Cell):
# Fix for not-exactly-the-same vectors
hvecs = c.vectors.copy()
hvecs[other_vectors] = self.vectors[other_vectors]
cartesian.append(Basis(hvecs).transform_to_cartesian(c.coordinates) + shift[None, :])
shift += c.vectors[vector, :]
cartesian = np.concatenate(cartesian, axis=0)
return self.cartesian_copy(vectors=basis, cartesian=cartesian, values=values)
@input_as_list
def supercell(self, vec: list) -> Cell:
"""
Produces a supercell from this cell.
Parameters
----------
vec : ndarray
New vectors expressed in this basis.
Returns
-------
A supercell.
Examples
--------
        >>> cell = Cell(Basis.orthorhombic((1, 2, 3)), np.random.rand(4, 3), np.arange(4))
        >>> s_cell = cell.supercell(np.eye(3))  # returns a copy
        >>> r_cell = cell.supercell(np.diag((1, 2, 3)))  # same as cell.repeated(1, 2, 3)
"""
vec = np.array(vec, dtype=int) # integer-valued supercell vectors
vec_inv = np.linalg.inv(vec)
vec_det = int(abs(np.linalg.det(vec)))
cofactor = (vec_inv * vec_det).astype(int) # integer-valued vector cofactor matrix
gcd = np.array(list(
np.gcd.reduce(i)
for i in cofactor
)) # greatest common divisor of cofactor vectors
axes_steps = vec_det // gcd # diag(axes_steps) are (minimal) supercell "diagonal" vectors
# compose the volume out of divisors of axes_steps
volume = vec_det
recipe = np.ones_like(axes_steps)
for i, step in enumerate(axes_steps):
g = np.gcd(volume, step)
volume //= g
recipe[i] = g
if volume == 1:
break
else:
raise RuntimeError("Failed to compose the equivalent diagonal supercell")
return self.repeated(recipe).cartesian_copy(vectors=vec @ self.vectors).normalized()
def species(self) -> dict:
"""
Counts atomic species.
Returns
-------
A dictionary with unique point values as keys and numbers of their occurrences as values.
"""
answer = {}
for s in self.values:
try:
answer[s] += 1
except KeyError:
answer[s] = 1
return answer
@input_as_list
def transpose_vectors(self, new: list) -> Cell:
"""
Reorders basis vectors without changing cartesian coordinates.
Parameters
----------
new
The new order as a list of integers.
Returns
-------
A new unit cell with reordered vectors.
"""
return self.copy(vectors=super().transpose_vectors(new), coordinates=self.coordinates[:, new])
def rounded(self, decimals: int = 8) -> Cell:
"""
Rounds this cell down to the provided number of decimals.
Parameters
----------
decimals
Decimals.
Returns
-------
A new Cell with rounded vectors.
"""
return self.copy(vectors=super().rounded(decimals), coordinates=np.around(self.coordinates, decimals=decimals))
def joggled(self, joggle_eps: float = 1e-5, vectors=True):
"""
Breaks possible symmetries in this cell by joggling coordinates and
vectors.
Parameters
----------
joggle_eps
The amplitude of random displacements for breaking possible
coordinate symmetries.
vectors
If True, adds a random displacement to vectors as well.
Returns
-------
result
The resulting cell.
"""
joggle_c = joggle_eps * (np.random.rand(*self.coordinates.shape) - 0.5)
if vectors:
joggle_v = self.vectors_len[:, None] * (np.random.rand(*self.vectors.shape) - 0.5) * joggle_eps
else:
joggle_v = 0
return self.copy(
vectors=self.vectors + joggle_v,
coordinates=self.coordinates + joggle_c,
)
def as_grid(self, fill: float = np.nan) -> grid.Grid:
"""
Converts this unit cell into a grid.
Parameters
----------
fill
The value to use for missing grid points.
Returns
-------
A grid with the data from this cell.
"""
# Convert coordinates
coordinates = list(
np.sort(
np.unique(self.coordinates[:, i])
) for i in range(self.coordinates.shape[1])
)
# A coordinates lookup table
coord2index = list(
dict(zip(a, range(a.size))) for a in coordinates
)
# Convert values
data = fill * np.ones(tuple(a.size for a in coordinates) + self.values.shape[1:], dtype=self.values.dtype)
for c, v in zip(self.coordinates, self.values):
indexes = tuple(coord2index[i][cc] for i, cc in enumerate(c))
data[indexes] = v
return grid.Grid(self, coordinates, data)
@input_as_list
def interpolate(self, points: list, driver=None, periodic: bool = True, **kwargs) -> Cell:
"""
Interpolates values between points in this cell and returns the interpolated Cell.
Parameters
----------
points : list
Interpolation points in this basis.
driver : Callable
Interpolation driver.
periodic : bool
If True, interpolates data in periodic boundary conditions.
kwargs
Driver arguments.
Returns
-------
A new unit cell with the interpolated data.
"""
points = np.asanyarray(points, dtype=float)
if driver is None:
driver = qhull_interpolation_driver
if periodic:
# Avoid edge problems by creating copies of this cell
supercell = self.repeated((3,) * self.vectors.shape[0]).normalized()
data_points = supercell.cartesian
data_values = supercell.values
# Shift points to the central unit cell
points_i = self.transform_to_cartesian(points % 1) + self.vectors.sum(axis=0)[None, :]
else:
data_points = self.cartesian
data_values = self.values
points_i = self.transform_to_cartesian(points)
# Interpolate
return self.copy(coordinates=points, values=driver(data_points, data_values, points_i, **kwargs))
def compute_embedding(self, size: int = 1) -> Cell:
"""
Computes embedding of this cell.
Values are replaced by an array of indices enumerating cell points.
`values[:, 0]` points to entries in this cell and `values[:, 1]` enumerates
cell images with 0 being the middle cell embedded.
Parameters
----------
size
Embedding size in unit cells count.
Returns
-------
result
The resulting cell.
"""
sc_size = 2 * size + 1
sc_offset = (sc_size ** self.ndim - 1) // 2
result = self.copy(values=np.empty((self.size, 0))).repeated([sc_size] * self.ndim)
values = np.arange(result.size, dtype=np.int32)
values_hi = values // self.size - sc_offset
values_lo = values % self.size
values = np.concatenate([values_lo[:, None], values_hi[:, None]], axis=1)
return result.copy(values=values)
def compute_triangulation(self, joggle_eps: float = 1e-5):
"""
Computes Delaunay triangulation.
Parameters
----------
joggle_eps
The amplitude of random displacements for breaking possible
coordinate symmetries.
Returns
-------
result
The resulting triangulation embedded in images of this cell.
"""
embedding = self.compute_embedding()
embedding_j = self.joggled(joggle_eps, vectors=True).compute_embedding()
ix_lo = embedding.values[:, 0]
ix_hi = embedding.values[:, 1]
tri = Delaunay(embedding_j.cartesian).simplices
# Take only inside/boundary simplices
tri_hi = ix_hi[tri]
tri_relevant = np.any(tri_hi == 0, axis=1)
tri = tri[tri_relevant, :]
tri_hi = tri_hi[tri_relevant, :]
weights = simplex_volumes(embedding.cartesian[tri, :]) / unique_counts(tri_hi) / self.volume
return Triangulation(
points=embedding.cartesian,
points_i=ix_lo,
simplices=tri,
weights=weights,
)
def tetrahedron_density(self, points: ndarray, resolved: bool = False, weights: ndarray = None,
joggle_eps: float = 1e-5) -> Union[ndarray, tuple]:
"""
Computes the density of points' values (states).
        Modified tetrahedron method from PRB 49, 16223 by P. E. Blöchl et al.
3D only.
Parameters
----------
points
Values to calculate density at.
resolved
If True, returns a higher-dimensional tensor with spatially-
and index-resolved density. The dimensions of the returned
array are `self.values.shape + points.shape`.
weights
Assigns weights to points before computing the density.
Only for `resolved=False`.
joggle_eps
The amplitude of random displacements for breaking coordinate
symmetries.
Returns
-------
density
A 1D ``[n_points]`` or a 2D ``[n_tri, n_points]`` density array.
triangulation
For ``resolved=True`` returns triangulation.
"""
assert self.ndim == 3
tri = self.compute_triangulation(joggle_eps=joggle_eps)
points = np.asanyarray(points, dtype=np.float64)
values = self.values.reshape(self.size, -1)
result = tri.compute_band_density(values, points, weights=weights, resolve_bands=False)
if resolved:
return tri, result
else:
return result.sum(axis=0)
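if __name__ == "__main__":
    # Illustrative smoke test only: build a random cubic cell of 4 carbon atoms
    # at the requested density, normalize coordinates into [0, 1) and print the
    # dense matrix of pairwise distances.
    example = Cell.random(0.1, {"C": 4})
    example = example.normalized()
    print(example.species())    # counts per species, here 4 carbons
    print(example.distances())  # dense 4x4 matrix of pairwise distances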
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
This script allows controlling a P9813 light strip connected to the Raspberry device
It listens to the STDIN stream and expects an array of values to apply [R, G, B, R, G, B ....]
Note 1: You MUST enable SPI communication in your Raspberry, for example by executing the 'raspi-config' utility
Note 2: You can easily adapt it to any other light strip
'''
from __future__ import division
from signal import *
# Import other modules
import time
import sys
import math
import json
import atexit
import sys
# Import the P9813 module.
import RPi.GPIO as GPIO
import P9813
# LED strip configuration:
LED_COUNT = int(sys.argv[1])
CLOCK_PIN = int(sys.argv[4])
DATA_PIN = int(sys.argv[5])
# Init GPIO pins
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(CLOCK_PIN, GPIO.OUT)
GPIO.setup(DATA_PIN, GPIO.OUT)
strip = P9813.P9813(CLOCK_PIN, DATA_PIN, LED_COUNT)
def listen():
global strip
try:
while True:
for line in iter(sys.stdin.readline, ''):
colors = json.loads(line)
for j in range(LED_COUNT):
strip[j] = (int(colors[j][0]), int(colors[j][2]), int(colors[j][1]))
strip.write()
time.sleep(0.002)
except KeyboardInterrupt:
pass
@atexit.register
def clear(*_args):
    # Also registered as a signal handler below, which passes (signum, frame);
    # accept and ignore any arguments.
    global strip
    strip.fill((255,0,0))
    strip.write()
if __name__ == '__main__':
for sig in (SIGABRT, SIGINT, SIGTERM):
signal(sig, clear)
try:
clear()
listen()
    except Exception:
        pass
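# Example invocation (hypothetical file name; arguments are: LED count, two
# slots unused by this script, then clock pin and data pin in BCM numbering):
#   python p9813_listener.py 30 _ _ 17 18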
|
#!/usr/bin/python
import sys
if len(sys.argv)>=4:
    filetype = sys.argv[1]
    sub_filename = sys.argv[2]
    output_filename = sys.argv[3]
else:
    print("Remove overlapping/complementary tails from long reads, keep a single full pass of the insert sequence")
    print("usage: ./RemoveBothTails.py filetype sub_file output_sub_filename")
    print("or python RemoveBothTails.py filetype sub_file output_sub_filename")
    sys.exit(1)
################################################################################
def processlrs(d):
    result_d = {}
    i=0
    for rangex in d:
        ls = rangex.split('_')
        L = int(ls[1]) - int(ls[0])
        if i==0:
            ref_L = L
            ref_seq = d[rangex]
            ref_range = rangex
            i+=1
            continue
        if L >= ref_L:
            result_d[rangex]=d[rangex]
        elif L < ref_L:
            result_d[ref_range]=ref_seq
            ref_L = L
            ref_seq = d[rangex]
            ref_range = rangex
    return result_d
################################################################################
if (filetype == "fa"):
    start_char = ">"
else:
    start_char = "@"
sub_file = open(sub_filename,'r')
output = open(output_filename,'w')
intact_MS = open(output_filename+"_intact_MS",'w')
TF = 0
sub_dt = {}
while (True):
    line = sub_file.readline()
    if (line == ""):
        break
    if (line[0] == start_char):
        ls = (">" + line[1:]).strip().split('/')
        if ls[-1] == "ccs":
            output.write(">" + line[1:])
            TF = 1
        else:
            sub_name = '/'.join(ls[0:-1]) + '/'
            if not sub_dt.has_key(sub_name):
                sub_dt[sub_name] = {}
            sub_dt[sub_name][ls[-1]] = ""
            TF = 0
        continue
    if TF:
        output.write(line)
    else:
        sub_dt[sub_name][ls[-1]] = sub_dt[sub_name][ls[-1]] + line.strip()
    if (filetype == "fq"):
        line = sub_file.readline() # skip quality lines
        if (line[0] != "+"):
            print "Err in LR fastq file format"
            exit(1)
        line = sub_file.readline()
sub_file.close()
for sub_name in sub_dt:
    if len(sub_dt[sub_name])>2:
        intact_MS.write(sub_name[1:] + '\t' + str(len(sub_dt[sub_name])) + '\n')
    if len(sub_dt[sub_name])==2:
        sub_dt[sub_name] = processlrs(sub_dt[sub_name])
    elif len(sub_dt[sub_name])>2:
        sub_dt[sub_name] = processlrs(sub_dt[sub_name])
        sub_dt[sub_name] = processlrs(sub_dt[sub_name])
    for rangex in sub_dt[sub_name]:
        output.write(sub_name+rangex+'\n')
        output.write(sub_dt[sub_name][rangex]+'\n')
output.close()
intact_MS.close()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pokemongo_bot.constants import Constants
from pokemongo_bot.step_walker import StepWalker
from pokemongo_bot.worker_result import WorkerResult
from pokemongo_bot.base_task import BaseTask
from utils import distance, format_dist, fort_details
class MoveToFort(BaseTask):
SUPPORTED_TASK_API_VERSION = 1
def initialize(self):
self.lure_distance = 0
self.lure_attraction = self.config.get("lure_attraction", True)
self.lure_max_distance = self.config.get("lure_max_distance", 2000)
self.ignore_item_count = self.config.get("ignore_item_count", False)
def should_run(self):
has_space_for_loot = self.bot.has_space_for_loot()
if not has_space_for_loot:
self.emit_event(
'inventory_full',
formatted="Inventory is full. You might want to change your config to recycle more items if this message appears consistently."
)
return has_space_for_loot or self.ignore_item_count or self.bot.softban
def is_attracted(self):
return (self.lure_distance > 0)
def work(self):
if not self.should_run():
return WorkerResult.SUCCESS
nearest_fort = self.get_nearest_fort()
if nearest_fort is None:
return WorkerResult.SUCCESS
lat = nearest_fort['latitude']
lng = nearest_fort['longitude']
fortID = nearest_fort['id']
details = fort_details(self.bot, fortID, lat, lng)
fort_name = details.get('name', 'Unknown')
unit = self.bot.config.distance_unit # Unit to use when printing formatted distance
dist = distance(
self.bot.position[0],
self.bot.position[1],
lat,
lng
)
if dist > Constants.MAX_DISTANCE_FORT_IS_REACHABLE:
fort_event_data = {
'fort_name': u"{}".format(fort_name),
'distance': format_dist(dist, unit),
}
            if self.is_attracted():
fort_event_data.update(lure_distance=format_dist(self.lure_distance, unit))
self.emit_event(
'moving_to_lured_fort',
formatted="Moving towards pokestop {fort_name} - {distance} (attraction of lure {lure_distance})",
data=fort_event_data
)
else:
self.emit_event(
'moving_to_fort',
formatted="Moving towards pokestop {fort_name} - {distance}",
data=fort_event_data
)
step_walker = StepWalker(
self.bot,
self.bot.config.walk,
lat,
lng
)
if not step_walker.step():
return WorkerResult.RUNNING
self.emit_event(
'arrived_at_fort',
formatted='Arrived at fort.'
)
return WorkerResult.SUCCESS
def _get_nearest_fort_on_lure_way(self, forts):
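        # Heuristic implemented below: take the closest lured fort; if it is within
        # lure_max_distance, prefer a fort that lies on the way to the lure (closer
        # to the lure than we are, while still closer to us than the lure itself).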
if not self.lure_attraction:
return None, 0
        lures = [x for x in forts if x.get('lure_info') is not None]
        if lures:
dist_lure_me = distance(self.bot.position[0], self.bot.position[1],
lures[0]['latitude'],lures[0]['longitude'])
else:
dist_lure_me = 0
if dist_lure_me > 0 and dist_lure_me < self.lure_max_distance:
self.lure_distance = dist_lure_me
for fort in forts:
dist_lure_fort = distance(
fort['latitude'],
fort['longitude'],
lures[0]['latitude'],
lures[0]['longitude'])
dist_fort_me = distance(
fort['latitude'],
fort['longitude'],
self.bot.position[0],
self.bot.position[1])
if dist_lure_fort < dist_lure_me and dist_lure_me > dist_fort_me:
return fort, dist_lure_me
if dist_fort_me > dist_lure_me:
break
return lures[0], dist_lure_me
else:
return None, 0
def get_nearest_fort(self):
forts = self.bot.get_forts(order_by_distance=True)
# Remove stops that are still on timeout
        forts = [x for x in forts if x["id"] not in self.bot.fort_timeouts]
next_attracted_pts, lure_distance = self._get_nearest_fort_on_lure_way(forts)
# Remove all forts which were spun in the last ticks to avoid circles if set
if self.bot.config.forts_avoid_circles:
            forts = [x for x in forts if x["id"] not in self.bot.recent_forts]
self.lure_distance = lure_distance
if (lure_distance > 0):
return next_attracted_pts
if len(forts) > 0:
return forts[0]
else:
return None
|
from pathlib import Path
import sys
import tokens
def bytes_debug_explicit(b) :
# variable passed was a single int : convert it to byte
if isinstance(b, int):
b = b.to_bytes(1, 'big')
result = ""
count = 0
for b_element in b:
count+=1
result += format(b_element,'08b') + " "
if (count % 8 == 0): result += "\n"
return result.rstrip("\n")
def bytes_debug_hex(b, groupby=1) :
# variable passed was a single int : convert it to byte
if isinstance(b, int):
b = b.to_bytes(1, 'big')
result = ""
count = 0
for b_element in b:
count+=1
result += '{:02x}'.format(b_element)
if (count % groupby == 0):
result += " "
return result.rstrip("\n")
def main() :
if(len(sys.argv) < 2):
print("Syntax: TI83toTXT.py (filename)")
exit(1)
print("Parsing", sys.argv[1])
data = Path(sys.argv[1]).read_bytes()
checkFixedHeader(data)
def checkFixedHeader(data):
    # First 11 bytes are the signature: "**TI83F*" followed by 0x1A 0x0A 0x00
spec_b = data[0:11]
if (spec_b == bytes.fromhex("2a2a54493833462a1a0a00")) :
print("Header is **TI83F* 0x1a 0x0A 0x00")
    # Next 42 bytes are the comment field
    comment_b = data[11:53]
# print(bytes_debug_explicit(comment_b))
# print(bytes_debug_hex(comment_b, groupby=2))
print("Comment is \"", str(comment_b.decode('utf-8')), "\"", sep="")
if __name__ == "__main__": main()
|
from django.db import models
from djgeojson.fields import PointField
# Create your models here.
class Generalenquiries(models.Model):
contact_person= models.CharField(max_length=200, null=True, blank=True)
contact_email = models.EmailField(max_length=200, null=True, blank=True)
contact_telephone = models.CharField(max_length=200, null=True, blank=True)
contact_fax = models.CharField(max_length=200, null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name_plural = "General Enquiries"
    def __str__(self):
        return ' | '.join(str(f) for f in (self.contact_person, self.contact_email,
                                           self.contact_telephone, self.contact_fax))
class Physicaladdress(models.Model):
physical_address= models.TextField(max_length=2000, null=True, blank=True)
class Meta:
verbose_name_plural = "Physical Address"
    def __str__(self):
        return str(self.physical_address)
class Postaladdress(models.Model):
postal_address= models.TextField(max_length=2000, null=True, blank=True)
class Meta:
verbose_name_plural = "Postal Address"
    def __str__(self):
        return str(self.postal_address)
class Place(models.Model): # model for sacema locations
name = models.CharField(max_length=200)
location = PointField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name_plural = "Sacema Locations"
def __str__(self):
return self.name
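# Illustrative usage only (djgeojson's PointField stores GeoJSON geometry
# dicts); run inside a Django shell:
#
#   Place.objects.create(name="SACEMA",
#                        location={'type': 'Point', 'coordinates': [18.867, -33.932]})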
|
import sys, re, urllib.request, xmltv, xml.etree.ElementTree as ET
from datetime import datetime, date, time, timedelta
BASE_URL = "https://appletv.redbull.tv/products/tv"
request = urllib.request.Request(BASE_URL, headers={"Accept" : "application/xml"})
response = urllib.request.urlopen(request)
xml = ET.parse(response)
items = xml.findall('.//twoLineMenuItem')
w = xmltv.Writer()
w.addChannel({'display-name': [(u'Red Bull TV', u'en')],'id': u'hls.redbulltv'})
for i, element in enumerate(items):
programme = {"channel":'hls.redbulltv', "title":[], "sub-title":[], "desc":[], "start":'', "stop":''}
label = u''
if element.find('.//label') is not None:
label = element.find('.//label').text
label2 = ''
if element.find('.//label2') is not None:
label2 = element.find('.//label2').text
start = ''
if element.find('.//rightLabel') is not None:
start = element.find('.//rightLabel').text
if start is not None:
start = datetime.utcfromtimestamp(float(start))
else:
start = datetime.utcnow()#.strftime('%Y%m%d%H%M%S%z')
    # Determine the end time: the next item's start, or start plus duration for the last item
end = ''
if i < (len(items) - 1):
#End this program at the start of the next program
end = items[i + 1].find('.//rightLabel').text
if end is not None:
end = datetime.utcfromtimestamp(float(end))
else:
end = datetime.utcnow()
else:
        #This is the last program, so use duration to calculate end
if element.find('.//footnote') is not None:
end = element.find('.//footnote').text
if end is not None:
duration = end.replace('Duration: ', '')
duration = duration.split(',')
totalDuration = start
            for part in duration:
                if 'hour' in part:
                    totalDuration = totalDuration + timedelta(hours=int(re.sub(r' hours?', "", part)))
                elif 'minute' in part:
                    totalDuration = totalDuration + timedelta(minutes=int(re.sub(r' minutes?', "", part)))
                elif 'second' in part:
                    totalDuration = totalDuration + timedelta(seconds=int(re.sub(r' seconds?', "", part)))
end = totalDuration
summary = ''
if element.find('.//summary') is not None:
summary = element.find('.//summary').text
programme["title"] = [(label, u'')]
programme["sub-title"] = [(label2, u'')]
programme["desc"] = [(summary, u'')]
programme["start"] = start.strftime('%Y%m%d%H%M%S%z')
programme["stop"] = end.strftime('%Y%m%d%H%M%S%z')
# print(programme)
w.addProgramme(programme)
w.write('redbull.xml', pretty_print=True)
|
import pstats
p=pstats.Stats("profile.stats")
p.sort_stats("cumulative")
#p.print_stats()
#p.print_callers()
p.print_callees()
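# Other views pstats supports (uncomment as needed):
#p.strip_dirs().sort_stats("tottime").print_stats(10)  # top 10 by own time, with shortened paths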
|
#!/usr/bin/env python3
"""
Bitmine Courier
"""
import requests
import os
import random
import json
import time
url = os.environ.get('URL')
mine_count = int(os.environ.get('MINE_COUNT'))
def main():
for i in range(mine_count):
headers = {"Ce-Id": f"{i}",
"Ce-Specversion": "1.0",
"Ce-Type": f"{random_type()}",
"Ce-Source": "bitmine",
"Content-Type": "application/json",
}
req = requests.post(url, data=bitmine_data_json(), headers=headers)
print(req.text)
time.sleep(1)
def random_type():
return random.choice(["quarkers", "noders"])
def random_bitmine_type():
return random.choice(["Audamandium",
"Aethez",
"Karbonadium",
"Uruu",
"Vybranium",
"Transformyum",
"Dhiotimoline",
"Cryptonite",
"Katschin",
"Enerqon"])
def random_bitmine_weight():
return round(random.uniform(0, 10), 2)
def bitmine_data_json():
data = {}
data["type"] = random_bitmine_type()
data["weight"] = random_bitmine_weight()
return json.dumps(data)
if __name__ == "__main__":
main()
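# Example invocation (hypothetical file name and endpoint; URL and MINE_COUNT
# are read from the environment above):
#   URL=http://broker.example/ MINE_COUNT=3 python bitmine_courier.py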
|
'''conditional logit and nested conditional logit
nested conditional logit is supposed to be the random utility version
(RU2 and maybe RU1)
References:
-----------
currently based on:
Greene, Econometric Analysis, 5th edition and draft (?)
Hess, Florian, 2002, Structural Choice analysis with nested logit models,
The Stats Journal 2(3) pp 227-252
not yet used:
Silberhorn Nadja, Yasemin Boztug, Lutz Hildebrandt, 2008, Estimation with the
nested logit model: specifications and software particularities,
OR Spectrum
Koppelman, Frank S., and Chandra Bhat with technical support from Vaneet Sethi,
Sriram Subramanian, Vincent Bernardin and Jian Zhang, 2006,
A Self Instructing Course in Mode Choice Modeling: Multinomial and
Nested Logit Models
Author: josef-pktd
License: BSD (simplified)
'''
from __future__ import print_function
from statsmodels.compat.python import zip
import numpy as np
import numpy.lib.recfunctions as recf
from scipy import optimize
class TryCLogit(object):
'''
Conditional Logit, data handling test
Parameters
----------
endog : array (nobs,nchoices)
dummy encoding of realized choices
exog_bychoices : list of arrays
explanatory variables, one array of exog for each choice. Variables
with common coefficients have to be first in each array
ncommon : int
number of explanatory variables with common coefficients
Notes
-----
Utility for choice j is given by
$V_j = X_j * beta + Z * gamma_j$
where X_j contains generic variables (terminology Hess) that have the same
coefficient across choices, and Z are variables, like individual-specific
variables, that have different coefficients across choices.
If there are choice specific constants, then they should be contained in Z.
For identification, the constant of one choice should be dropped.
'''
def __init__(self, endog, exog_bychoices, ncommon):
self.endog = endog
self.exog_bychoices = exog_bychoices
self.ncommon = ncommon
self.nobs, self.nchoices = endog.shape
self.nchoices = len(exog_bychoices)
#TODO: rename beta to params and include inclusive values for nested CL
        betaind = [exog_bychoices[ii].shape[1] - ncommon for ii in range(self.nchoices)]
        zi = np.r_[[ncommon], ncommon + np.array(betaind).cumsum()]
        z = np.arange(zi[-1])  # one index per parameter; avoids relying on a module-level z
        beta_indices = [np.r_[np.array([0, 1]), z[zi[ii]:zi[ii+1]]]
                        for ii in range(len(zi)-1)]
self.beta_indices = beta_indices
#for testing only
beta = np.arange(7)
betaidx_bychoices = [beta[idx] for idx in beta_indices]
def xbetas(self, params):
'''these are the V_i
'''
res = np.empty((self.nobs, self.nchoices))
for choiceind in range(self.nchoices):
res[:,choiceind] = np.dot(self.exog_bychoices[choiceind],
params[self.beta_indices[choiceind]])
return res
def loglike(self, params):
#normalization ?
xb = self.xbetas(params)
expxb = np.exp(xb)
sumexpxb = expxb.sum(1)#[:,None]
probs = expxb/expxb.sum(1)[:,None] #we don't really need this for all
loglike = (self.endog * np.log(probs)).sum(1)
#is this the same: YES
#self.logliketest = (self.endog * xb).sum(1) - np.log(sumexpxb)
#if self.endog where index then xb[self.endog]
return -loglike.sum() #return sum for now not for each observation
def fit(self, start_params=None):
if start_params is None:
            start_params = np.zeros(6)  # TODO: find better starting values
return optimize.fmin(self.loglike, start_params, maxfun=10000)
class TryNCLogit(object):
'''
Nested Conditional Logit (RUNMNL), data handling test
unfinished, doesn't do anything yet
'''
def __init__(self, endog, exog_bychoices, ncommon):
self.endog = endog
self.exog_bychoices = exog_bychoices
self.ncommon = ncommon
self.nobs, self.nchoices = endog.shape
self.nchoices = len(exog_bychoices)
#TODO rename beta to params and include inclusive values for nested CL
        betaind = [exog_bychoices[ii].shape[1] - ncommon for ii in range(self.nchoices)]
        zi = np.r_[[ncommon], ncommon + np.array(betaind).cumsum()]
        z = np.arange(zi[-1])  # one index per parameter; avoids relying on a module-level z
        beta_indices = [np.r_[np.array([0, 1]), z[zi[ii]:zi[ii+1]]]
                        for ii in range(len(zi)-1)]
self.beta_indices = beta_indices
#for testing only
beta = np.arange(7)
betaidx_bychoices = [beta[idx] for idx in beta_indices]
def xbetas(self, params):
'''these are the V_i
'''
res = np.empty((self.nobs, self.nchoices))
for choiceind in range(self.nchoices):
res[:,choiceind] = np.dot(self.exog_bychoices[choiceind],
params[self.beta_indices[choiceind]])
return res
def loglike_leafbranch(self, params, tau):
#normalization ?
#check/change naming for tau
xb = self.xbetas(params)
expxb = np.exp(xb/tau)
sumexpxb = expxb.sum(1)#[:,None]
logsumexpxb = np.log(sumexpxb)
#loglike = (self.endog * xb).sum(1) - logsumexpxb
probs = expxb/sumexpxb[:,None]
        return probs, logsumexpxb
#if self.endog where index then xb[self.endog]
#return -loglike.sum() #return sum for now not for each observation
def loglike_branch(self, params, tau):
#not yet sure how to keep track of branches during walking of tree
ivs = []
for b in branches:
probs, iv = self.loglike_leafbranch(params, tau)
ivs.append(iv)
#ivs = np.array(ivs) #note ivs is (nobs,nbranchchoices)
ivs = np.column_stack(ivs) # this way ?
exptiv = np.exp(tau*ivs)
sumexptiv = exptiv.sum(1)
        logsumexpxb = np.log(sumexptiv)
probs = exptiv/sumexptiv[:,None]
####### obsolete version to try out attaching data,
####### new in treewalkerclass.py, copy new version to replace this
####### problem with bzr I will disconnect history when copying
testxb = 0 #global to class
class RU2NMNL(object):
'''Nested Multinomial Logit with Random Utility 2 parameterization
'''
def __init__(self, endog, exog, tree, paramsind):
self.endog = endog
self.datadict = exog
self.tree = tree
self.paramsind = paramsind
self.branchsum = ''
self.probs = {}
def calc_prob(self, tree, keys=None):
'''walking a tree bottom-up based on dictionary
'''
endog = self.endog
datadict = self.datadict
paramsind = self.paramsind
branchsum = self.branchsum
if isinstance(tree, tuple): #assumes leaves are int for choice index
name, subtree = tree
print(name, datadict[name])
print('subtree', subtree)
keys = []
if testxb:
branchsum = datadict[name]
else:
branchsum = name #0
for b in subtree:
print(b)
#branchsum += branch2(b)
branchsum = branchsum + self.calc_prob(b, keys)
print('branchsum', branchsum, keys)
for k in keys:
self.probs[k] = self.probs[k] + ['*' + name + '-prob']
else:
keys.append(tree)
self.probs[tree] = [tree + '-prob' +
'(%s)' % ', '.join(self.paramsind[tree])]
if testxb:
leavessum = sum((datadict[bi] for bi in tree))
print('final branch with', tree, ''.join(tree), leavessum) #sum(tree)
return leavessum #sum(xb[tree])
else:
return ''.join(tree) #sum(tree)
print('working on branch', tree, branchsum)
return branchsum
#Trying out ways to handle data
#------------------------------
#travel data from Greene
dta = np.genfromtxt('TableF23-2.txt', skip_header=1,
names='Mode Ttme Invc Invt GC Hinc PSize'.split())
endog = dta['Mode'].reshape(-1,4).copy() #I don't want a view
nobs, nchoices = endog.shape
datafloat = dta.view(float).reshape(-1,7)
exog = datafloat[:,1:].reshape(-1,6*nchoices).copy() #I don't want a view
print(endog.sum(0))
varnames = dta.dtype.names
print(varnames[1:])
modes = ['Air', 'Train', 'Bus', 'Car']
print(exog.mean(0).reshape(nchoices, -1)) # Greene Table 23.23
#try dummy encoding for individual-specific variables
exog_choice_names = ['GC', 'Ttme']
exog_choice = np.column_stack([dta[name] for name in exog_choice_names])
exog_choice = exog_choice.reshape(-1,len(exog_choice_names)*nchoices)
exog_choice = np.c_[endog, exog_choice] # add constant dummy
exog_individual = dta['Hinc'][:,None]
#exog2 = np.c_[exog_choice, exog_individual*endog]
# we can also overwrite and select in original datafloat
# e.g. Hinc*endog{choice)
choice_index = np.arange(dta.shape[0]) % nchoices
hinca = dta['Hinc']*(choice_index==0)
dta2=recf.append_fields(dta, ['Hinca'],[hinca], usemask=False)
#another version
xi = []
for ii in range(4):
xi.append(datafloat[choice_index==ii])
#one more
dta1 = recf.append_fields(dta, ['Const'],[np.ones(dta.shape[0])], usemask=False)
xivar = [['GC', 'Ttme', 'Const', 'Hinc'],
['GC', 'Ttme', 'Const'],
['GC', 'Ttme', 'Const'],
['GC', 'Ttme']] #need to drop one constant
xi = []
for ii in range(4):
xi.append(dta1[xivar[ii]][choice_index==ii])
#this doesn't change sequence of columns, bug report by Skipper I think
ncommon = 2
betaind = [len(xi[ii].dtype.names)-ncommon for ii in range(4)]
zi=np.r_[[ncommon], ncommon+np.array(betaind).cumsum()]
z = np.arange(7)  # one index per parameter (7 parameters in this example)
betaindices = [np.r_[np.array([0, 1]),z[zi[ii]:zi[ii+1]]]
for ii in range(len(zi)-1)]
beta = np.arange(7)
betai = [beta[idx] for idx in betaindices]
#examples for TryCLogit
#----------------------
#get exogs as float
xifloat = [xx.view(float).reshape(nobs,-1) for xx in xi]
clogit = TryCLogit(endog, xifloat, 2)
debug = 0
if debug:
res = optimize.fmin(clogit.loglike, np.ones(6))
#estimated parameters from Greene:
tab2324 = [-0.15501, -0.09612, 0.01329, 5.2074, 3.8690, 3.1632]
if debug:
res2 = optimize.fmin(clogit.loglike, tab2324)
res3 = optimize.fmin(clogit.loglike, np.zeros(6),maxfun=10000)
#this has same numbers as Greene table 23.24, but different sequence
#coefficient on GC is exactly 10% of Greene's
#TODO: get better starting values
'''
Optimization terminated successfully.
Current function value: 199.128369
Iterations: 957
Function evaluations: 1456
array([-0.0961246 , -0.0155019 , 0.01328757, 5.20741244, 3.86905293,
3.16319074])
'''
res3corr = res3[[1, 0, 2, 3, 4, 5]]
res3corr[0] *= 10
print(res3corr - tab2324) # diff 1e-5 to 1e-6
    #199.128369 - 199.1284  # llf agrees up to the printed precision of Greene
print(clogit.fit())
tree0 = ('top',
[('Fly',['Air']),
('Ground', ['Train', 'Car', 'Bus'])
])
datadict = dict(zip(['Air', 'Train', 'Bus', 'Car'],
[xifloat[i]for i in range(4)]))
#for testing only (mock that returns its own name)
datadict = dict(zip(['Air', 'Train', 'Bus', 'Car'],
['Airdata', 'Traindata', 'Busdata', 'Cardata']))
datadict.update({'top' : [],
'Fly' : [],
'Ground': []})
paramsind = {'top' : [],
'Fly' : [],
'Ground': [],
'Air' : ['GC', 'Ttme', 'ConstA', 'Hinc'],
'Train' : ['GC', 'Ttme', 'ConstT'],
'Bus' : ['GC', 'Ttme', 'ConstB'],
'Car' : ['GC', 'Ttme']
}
modru = RU2NMNL(endog, datadict, tree0, paramsind)
print(modru.calc_prob(modru.tree))
print('\nmodru.probs')
print(modru.probs)
|
# -*- coding: utf-8 -*-
"""
@file
@brief Module *code_beatrix*.
.. faqref::
    :title: Why Python?

    `Python <https://www.python.org/>`_
    is a very widespread programming language today.
    It was chosen at the `ENSAE <http://www.ensae.fr/ensae/fr/>`_ in 2005
    to replace `C++ <https://fr.wikipedia.org/wiki/C%2B%2B>`_.
    From the very first year, it was clear that the new language let
    students turn their ideas into working form faster.
    Opinions about programming then began to change.
    It is now very rare for a student to leave an engineering school
    without having programmed.

    The language was chosen for three reasons. The first is its syntax:
    it forces developers to align their instructions,
    which makes programs more readable.
    The second is that its `grammar <https://docs.python.org/3/reference/grammar.html>`_
    is one of the shortest (see also
    `The Python Language Reference <https://docs.python.org/3/reference/>`_).
    Finally, many existing libraries written in C++ were already
    available at the time. Ten years later, the language is nearly
    unavoidable as soon as data processing is involved.
"""
import os
__version__ = "0.6.674"
__author__ = "Xavier Dupré"
__github__ = "https://github.com/sdpython/code_beatrix"
__url__ = "http://www.xavierdupre.fr/app/code_beatrix/helpsphinx/"
__license__ = "MIT License"
__blog__ = os.path.abspath(
os.path.join(os.path.dirname(__file__), "rss_blog_list.xml"))
def _setup_hook(add_print=False, unit_test=False):
"""
if this function is added to the module,
the help automation and unit tests call it first before
anything goes on as an initialization step.
It should be run in a separate process.
@param add_print print *Success: _setup_hook*
@param unit_test used only for unit testing purpose
"""
# we can check many things, needed module
# any others things before unit tests are started
if add_print:
print("Success: _setup_hook")
def check(log=False, kind=None, fLOG=None):
"""
Checks the library is working.
It raises an exception.
    @param      log     if True, displays information, otherwise stays quiet
@param kind None or ``'scratch'`` or ``'video'``
@param fLOG logging function
@return 0 or exception
"""
r = True
if kind is None or kind == "scratch":
from .scratchs import check as check_sc
r &= check_sc()
if kind is None or kind == "video":
from .art.video import check as check_vid
r &= check_vid(fLOG=fLOG)
return r
def load_ipython_extension(ip):
"""
to allow the call ``%load_ext code_beatrix``
@param ip from ``get_ipython()``
"""
from .ipythonhelper.magic_scratch import register_scratch_magics
register_scratch_magics(ip)
|
#
# Copyright 2018 National Renewable Energy Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .iohandler import Output
from .mbdyn_types import Vec3
from .mbdyn_types import StructuralNode
from .mbdyn_types import Body
from .mbdyn_types import Beam3
import numpy as np
class BaseComponent():
"""
BaseComponent is the super class from which all non-rotor components are created.
It handles parsing the component dictionary and instantiating all subcomponents
like Beam and StructuralNode. Finally, export routines for MBDyn input files
are included here.
"""
def __init__(self, component_name, model_dict, interpolator, mbdyn_ref_index, primary_axis):
"""
inputs:
component_name: str - a unique identifier for this component
model_dict: dict - a description of this component as parsed from the input file;
it should contain the following key-values:
{
"keypoint": Vec3,
"element_end_nodes": [Vec3],
"twist": [float],
"component": [str],
"point_mass": [float],
"stiffness_matrix": [[float]],
"mass_distribution": [[float]],
"cm_offsets": [[float]],
"inertias": [[float]]
}
interpolator: func - the function to use when interpolating values between node locations;
this allows subclasses to determine on which axis to do interpolations
mbdyn_ref_index: int - a value to use when identifying this component in mbdyn
primary_axis: str - string representation of this component's primary axis; [x1 | x2 | x3]
outputs:
self: BaseComponent - an instantiated BaseComponent object
"""
# inputs
self.component_name = component_name
self.mbdyn_ref_index = mbdyn_ref_index # starting index for nodes, beams
self.primary_axis = primary_axis
# unpack the component dictionary
self.mip = model_dict["keypoint"]
self.coordinate_list = []
for n in model_dict["element_end_nodes"]:
if self.primary_axis == "x1":
node = Vec3(n, 0.0, 0.0)
elif self.primary_axis == "x2":
node = Vec3(0.0, n, 0.0)
elif self.primary_axis == "x3":
node = Vec3(0.0, 0.0, n)
node += self.mip
self.coordinate_list.append(node)
self.twist = model_dict["twist"]
self.component = model_dict["component"]
self.point_mass = model_dict["point_mass"]
self.stiffness_constant = model_dict["stiffness_constant"]
self.stiffness_matrix = model_dict["stiffness_matrix"]
self.mass_distribution = model_dict["mass_distribution"]
self.cm_offset = model_dict["cm_offset"]
self.inertias = model_dict["inertias"]
self.interpolator = interpolator
# sort coordinates and associated quantities in ascending order
zipped = list(zip(self.coordinate_list,
self.twist,
self.component,
self.point_mass,
self.stiffness_matrix,
self.mass_distribution,
self.cm_offset,
self.inertias
)
)
if self.primary_axis == "x1":
zipped.sort(key=lambda element: element[0].x1)
elif self.primary_axis == "x2":
zipped.sort(key=lambda element: element[0].x2)
elif self.primary_axis == "x3":
zipped.sort(key=lambda element: element[0].x3)
unzipped = list(zip(*zipped))
# self.coordinate_list,
# self.twist,
# self.component,
# self.point_mass,
# self.stiffness_matrix,
# self.mass_distribution,
# self.cm_offset,
# self.inertias = unzipped
self.coordinate_list = unzipped[0]
self.twist = unzipped[1]
self.component = unzipped[2]
self.point_mass = unzipped[3]
self.stiffness_matrix = unzipped[4]
self.mass_distribution = unzipped[5]
self.cm_offset = unzipped[6]
self.inertias = unzipped[7]
self._preprocess()
self._postprocess()
# Store a local orientation with respect to the global reference frame
# so that the beam can be defined appropriately
# TODO: Make this permanent
if self.primary_axis == "x1":
self.local_orientation = "1, 1.0, 0.0, 0.0, 3, 0.0, 1.0, 0.0"
elif self.primary_axis == "x2":
self.local_orientation = "1, 0.0, 1.0, 0.0, 3, 0.0, 0.0, 1.0"
elif self.primary_axis == "x3":
self.local_orientation = "1, 0.0, 0.0, 1.0, 2, -1.0, 0.0, 0.0"
def _preprocess(self):
self.nodes, self.node_count = self._preprocess_nodes()
self.bodies, self.body_count, self.beams, self.beam_count = self._preprocess_bodies_beams()
def _postprocess(self):
"""
Calculates the mass and center of mass.
component_mass: the total mass of this physical component only
added_mass: the total mass of any additional point masses
total_mass: the sum of component and added masses
The component's mip is subtracted from the center of mass
so the resulting CG is relative to the component.
"""
# sum the point masses to get total mass
self.component_mass = sum([body.mass for body in self.bodies])
self.added_mass = sum([body.added_mass for body in self.bodies])
self.total_mass = self.component_mass + self.added_mass
# place the point masses on the nodes and give the added nodes 0 point mass
self.nodal_point_masses = [self.point_mass[0]]
for i in range(1, len(self.point_mass)):
self.nodal_point_masses.append(0.0)
self.nodal_point_masses.append(self.point_mass[i])
# calculate the total center of mass
cg = Vec3(0.0, 0.0, 0.0)
for i, beam in enumerate(self.beams):
body_index = i * 4
body1 = self.bodies[body_index]
body2_1 = self.bodies[body_index + 1]
body2_2 = self.bodies[body_index + 2]
body3 = self.bodies[body_index + 3]
cg += Vec3(
body1.mass * beam.node_first.position.x1 + (body2_1.mass + body2_2.mass) * beam.node_mid.position.x1 + body3.mass * beam.node_last.position.x1,
body1.mass * beam.node_first.position.x2 + (body2_1.mass + body2_2.mass) * beam.node_mid.position.x2 + body3.mass * beam.node_last.position.x2,
body1.mass * beam.node_first.position.x3 + (body2_1.mass + body2_2.mass) * beam.node_mid.position.x3 + body3.mass * beam.node_last.position.x3
)
cg /= self.component_mass
self.center_of_gravity = cg - self.mip
def _preprocess_nodes(self):
"""
Builds the StructuralNodes for this component.
The central nodes are added here by linearly interpolating the position.
output:
nodes: [StructuralNode]
node_count: len(nodes)
"""
# add the midpoint nodes for the elements
total_coords = [self.coordinate_list[0]]
for i in range(1, len(self.coordinate_list)):
prev = self.coordinate_list[i - 1]
this = self.coordinate_list[i]
total_coords.append((this + prev) / 2)
total_coords.append(this)
nodes = np.array([])
for i, c in enumerate(total_coords):
nodes = np.append(
nodes,
StructuralNode(
parent_component=self.component_name,
root_offset_index=i,
position=c
)
)
return nodes, nodes.shape[0]
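    # Illustrative sketch (assumed example values): for input coordinates
    # [c0, c1, c2] the midpoint interpolation above yields
    #     [c0, (c0 + c1) / 2, c1, (c1 + c2) / 2, c2]
    # i.e. 2 * n - 1 structural nodes for n input coordinates.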
def _preprocess_bodies_beams(self):
"""
Builds the Body and Beam3 for this component.
The central node properties are computed here by linearly interpolating
all properties.
"""
# add mid points for the masses
total_masses = [self.mass_distribution[0]]
for i in range(1, len(self.mass_distribution)):
prev = self.mass_distribution[i - 1]
this = self.mass_distribution[i]
mid = (this + prev) / 2
total_masses.append(mid)
total_masses.append(this)
        # populate the added mass array; no interpolation is required,
        # but it must match the length of the interpolated arrays
total_added_mass = [self.point_mass[0]]
for i in range(1, len(self.point_mass)):
total_added_mass.append(0)
total_added_mass.append(self.point_mass[i])
# add mid points for the cm_offsets
total_cm_offsets = [self.cm_offset[0]]
for i in range(1, len(self.cm_offset)):
prev = self.cm_offset[i - 1]
this = self.cm_offset[i]
mid = [
(this[0] + prev[0]) / 2,
(this[1] + prev[1]) / 2
]
total_cm_offsets.append(mid)
total_cm_offsets.append(this)
# add mid points for the inertias
total_inertias = [self.inertias[0]]
for i in range(1, len(self.inertias)):
prev = self.inertias[i - 1]
this = self.inertias[i]
mid = [sum(x) / 2 for x in zip(prev, this)]
total_inertias.append(mid)
total_inertias.append(this)
beams = []
beam_count = self.node_count // 2
bodies = []
body_count = beam_count * 4
for i in range(beam_count):
# these are the physical nodes
node1 = self.nodes[i * 2]
node2 = self.nodes[i * 2 + 1]
node3 = self.nodes[i * 2 + 2]
mass1 = total_masses[i * 2]
mass2 = total_masses[i * 2 + 1]
mass3 = total_masses[i * 2 + 2]
addedmass1 = total_added_mass[i * 2]
addedmass2 = total_added_mass[i * 2 + 1]
addedmass3 = total_added_mass[i * 2 + 2]
inertia1 = total_inertias[i * 2]
inertia2 = total_inertias[i * 2 + 1]
inertia3 = total_inertias[i * 2 + 2]
cm_offset1 = total_cm_offsets[i * 2]
cm_offset2 = total_cm_offsets[i * 2 + 1]
cm_offset3 = total_cm_offsets[i * 2 + 2]
            # twist and stiffness are given at the original stations
            # and are interpolated to the Gauss points below
twist1 = self.twist[i]
twist2 = self.twist[i + 1]
stiff1 = self.stiffness_matrix[i]
stiff2 = self.stiffness_matrix[i + 1]
beam_length = np.sqrt(
np.power(node1.position.x1 - node3.position.x1, 2)
+ np.power(node1.position.x2 - node3.position.x2, 2)
+ np.power(node1.position.x3 - node3.position.x3, 2)
)
unit_vector = Vec3(
node3.position.x1 - node1.position.x1,
node3.position.x2 - node1.position.x2,
node3.position.x3 - node1.position.x3) / beam_length
            # Gauss point locations along the element axis
            gauss_first_displacement = (beam_length / 2.0) * (1 - 1 / np.sqrt(3))
            gauss_last_displacement = (beam_length / 2.0) * (1 + 1 / np.sqrt(3))
            gauss_first_point = node1.position + unit_vector * gauss_first_displacement
            gauss_last_point = node1.position + unit_vector * gauss_last_displacement
            # Gauss point twist
gauss_first_twist = self.interpolator(
node1.position,
node3.position,
twist1,
twist2,
gauss_first_point
)
gauss_last_twist = self.interpolator(
node1.position,
node3.position,
twist1,
twist2,
gauss_last_point
)
            # Gauss point stiffness
gauss_first_stiffness, gauss_last_stiffness = [], []
for j in range(21):
gauss_first_stiffness.append(
self.interpolator(
node1.position,
node3.position,
stiff1[j],
stiff2[j],
gauss_first_point
)
)
gauss_last_stiffness.append(
self.interpolator(
node1.position,
node3.position,
stiff1[j],
stiff2[j],
gauss_last_point
)
)
# lumped mass and inertia calculation
if self.primary_axis == "x1":
# global - local beam
# x (x1) = x (x1)
# y (x2) = y (x2)
# z (x3) = z (x3)
m1, cmx1, cmy1, cmz1, \
ixx1, iyy1, izz1, ixy1, ixz1, iyz1, \
m2_1, cmx2_1, cmy2_1, cmz2_1, \
ixx2_1, iyy2_1, izz2_1, ixy2_1, ixz2_1, iyz2_1 = self.mass_inertia_distribution(
node1.position.x1,
cm_offset1[0], cm_offset1[1],
node2.position.x1,
cm_offset2[0], cm_offset2[1],
mass1, mass2,
inertia1[0], inertia1[1], inertia1[2],
inertia1[3], inertia1[4], inertia1[5],
inertia2[0], inertia2[1], inertia2[2],
inertia2[3], inertia2[4], inertia2[5]
)
m2_2, cmx2_2, cmy2_2, cmz2_2, \
ixx2_2, iyy2_2, izz2_2, ixy2_2, ixz2_2, iyz2_2, \
m3, cmx3, cmy3, cmz3, ixx3, iyy3, izz3, ixy3, ixz3, iyz3 = self.mass_inertia_distribution(
node2.position.x1,
cm_offset2[0], cm_offset2[1],
node3.position.x1,
cm_offset3[0], cm_offset3[1],
mass2, mass3,
inertia2[0], inertia2[1], inertia2[2],
inertia2[3], inertia2[4], inertia2[5],
inertia3[0], inertia3[1], inertia3[2],
inertia3[3], inertia3[4], inertia3[5]
)
cmx1 -= node1.position.x1
cmx2_1 -= node2.position.x1
cmx2_2 -= node2.position.x1
cmx3 -= node3.position.x1
elif self.primary_axis == "x2":
                # permute the indices to pass the global coordinates into the beam's reference frame
# global - local beam
# y (x2) = x (x1)
# z (x3) = y (x2)
# x (x1) = z (x3)
#
# in place of these => pass these
# x y z => y z x
# xy xz yz => yz xy xz
m1, cmy1, cmz1, cmx1, \
iyy1, izz1, ixx1, iyz1, ixy1, ixz1, \
m2_1, cmy2_1, cmz2_1, cmx2_1, \
iyy2_1, izz2_1, ixx2_1, iyz2_1, ixy2_1, ixz2_1 = self.mass_inertia_distribution(
node1.position.x2,
cm_offset1[1], cm_offset1[0],
node2.position.x2,
cm_offset2[1], cm_offset2[0],
mass1, mass2,
inertia1[1], inertia1[2], inertia1[0],
inertia1[5], inertia1[3], inertia1[4],
inertia2[1], inertia2[2], inertia2[0],
inertia2[5], inertia2[3], inertia2[4]
)
m2_2, cmy2_2, cmz2_2, cmx2_2, \
iyy2_2, izz2_2, ixx2_2, iyz2_2, ixy2_2, ixz2_2, \
m3, cmy3, cmz3, cmx3, \
iyy3, izz3, ixx3, iyz3, ixy3, ixz3 = self.mass_inertia_distribution(
node2.position.x2,
cm_offset2[1], cm_offset2[0],
node3.position.x2,
cm_offset3[1], cm_offset3[0],
mass2, mass3,
inertia2[1], inertia2[2], inertia2[0],
inertia2[5], inertia2[3], inertia2[4],
inertia3[1], inertia3[2], inertia3[0],
inertia3[5], inertia3[3], inertia3[4]
)
cmy1 -= node1.position.x2
cmy2_1 -= node2.position.x2
cmy2_2 -= node2.position.x2
cmy3 -= node3.position.x2
elif self.primary_axis == "x3":
                # permute the indices to pass the global coordinates into the beam's reference frame
# global - local beam
# z (x3) = x (x1)
# x (x1) = y (x2)
# y (x2) = z (x3)
#
# in place of these => pass these
# x y z => z x y
# xy xz yz => xz yz xy
m1, cmz1, cmx1, cmy1, \
izz1, ixx1, iyy1, ixz1, iyz1, ixy1, \
m2_1, cmz2_1, cmx2_1, cmy2_1, \
izz2_1, ixx2_1, iyy2_1, ixz2_1, iyz2_1, ixy2_1 = self.mass_inertia_distribution(
node1.position.x3,
cm_offset1[0], cm_offset1[1],
node2.position.x3,
cm_offset2[0], cm_offset2[1],
mass1, mass2,
inertia1[2], inertia1[0], inertia1[1],
inertia1[4], inertia1[5], inertia1[3],
inertia2[2], inertia2[0], inertia2[1],
inertia2[4], inertia2[5], inertia2[3]
)
m2_2, cmz2_2, cmx2_2, cmy2_2, \
izz2_2, ixx2_2, iyy2_2, ixz2_2, iyz2_2, ixy2_2, \
m3, cmz3, cmx3, cmy3, \
izz3, ixx3, iyy3, ixz3, iyz3, ixy3 = self.mass_inertia_distribution(
node2.position.x3,
cm_offset2[0], cm_offset2[1],
node3.position.x3,
cm_offset3[0], cm_offset3[1],
mass2, mass3,
inertia2[2], inertia2[0], inertia2[1],
inertia2[4], inertia2[5], inertia2[3],
inertia3[2], inertia3[0], inertia3[1],
inertia3[4], inertia3[5], inertia3[3]
)
cmz1 -= node1.position.x3
cmz2_1 -= node2.position.x3
cmz2_2 -= node2.position.x3
cmz3 -= node3.position.x3
# create the Bodies and Beam3
            id_base = self.component_name + "_beam_"
index = 10 * i
body1 = Body(
id_base + str(index + 0),
node1,
m1,
addedmass1,
Vec3(cmx1, cmy1, cmz1),
ixx1, iyy1, izz1, ixy1, ixz1, iyz1
)
body2_1 = Body(
id_base + str(index + 1),
node2,
m2_1,
addedmass2, # this is always 0
Vec3(cmx2_1, cmy2_1, cmz2_1),
ixx2_1, iyy2_1, izz2_1, ixy2_1, ixz2_1, iyz2_1
)
body2_2 = Body(
id_base + str(index + 2),
node2,
m2_2,
addedmass2, # this is always 0
Vec3(cmx2_2, cmy2_2, cmz2_2),
ixx2_2, iyy2_2, izz2_2, ixy2_2, ixz2_2, iyz2_2
)
body3 = Body(
id_base + str(index + 3),
node3,
m3,
addedmass3,
Vec3(cmx3, cmy3, cmz3),
ixx3, iyy3, izz3, ixy3, ixz3, iyz3
)
bodies += [body1, body2_1, body2_2, body3]
this_beam = Beam3(id_base + str(index),
node1, node2, node3,
body1, body2_1, body2_2, body3,
gauss_first_twist,
gauss_last_twist,
gauss_first_stiffness,
gauss_last_stiffness)
beams.append(this_beam)
return bodies, body_count, beams, beam_count
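    # Index layout sketch (as built above): each beam i owns four bodies on
    # three nodes, with the mid node split into two half-element bodies:
    #     bodies[4 * i]     -> nodes[2 * i]       (first node)
    #     bodies[4 * i + 1] -> nodes[2 * i + 1]   (mid node, first half)
    #     bodies[4 * i + 2] -> nodes[2 * i + 1]   (mid node, second half)
    #     bodies[4 * i + 3] -> nodes[2 * i + 2]   (last node)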
def mass_inertia_distribution(self,
x1,
yg1, zg1,
x2,
yg2, zg2,
m1, m2,
ixx1, iyy1, izz1,
ixy1, ixz1, iyz1,
ixx2, iyy2, izz2,
ixy2, ixz2, iyz2):
"""
See the preprocessor documentation; in particular, section 0.5.
inputs:
x1: Float - location in the primary axis direction of the first node
yg1: Float - center of mass offset in a secondary axis direction for the first node
zg1: Float - center of mass offset in a secondary axis direction for the first node
x2: Float - location in the primary axis direction of the second node
yg2: Float - center of mass offset in a secondary axis direction for the second node
zg2: Float - center of mass offset in a secondary axis direction for the second node
m1: Float - mass per unit length of the first node
m2: Float - mass per unit length of the second node
ixx1: Float - inertia per unit length of the first node
iyy1: Float - inertia per unit length of the first node
izz1: Float - inertia per unit length of the first node
ixy1: Float - inertia per unit length of the first node
ixz1: Float - inertia per unit length of the first node
iyz1: Float - inertia per unit length of the first node
ixx2: Float - inertia per unit length of the second node
iyy2: Float - inertia per unit length of the second node
izz2: Float - inertia per unit length of the second node
ixy2: Float - inertia per unit length of the second node
ixz2: Float - inertia per unit length of the second node
iyz2: Float - inertia per unit length of the second node
outputs:
M1: Float - lumped mass of the first node
Xg: Float - center of mass in the primary axis direction of the first node
Yg: Float - center of mass in the secondary axis direction of the first node
Zg: Float - center of mass in the secondary axis direction of the first node
Ixx1G: Float - lumped inertia of the first node
Iyy1G: Float - lumped inertia of the first node
Izz1G: Float - lumped inertia of the first node
Ixy1G: Float - lumped inertia of the first node
Ixz1G: Float - lumped inertia of the first node
Iyz1G: Float - lumped inertia of the first node
M2: Float - lumped mass of the second node
Xg: Float - center of mass in the primary axis direction of the second node
Yg: Float - center of mass in the secondary axis direction of the second node
Zg: Float - center of mass in the secondary axis direction of the second node
Ixx2G: Float - lumped inertia of the second node
Iyy2G: Float - lumped inertia of the second node
Izz2G: Float - lumped inertia of the second node
Ixy2G: Float - lumped inertia of the second node
Ixz2G: Float - lumped inertia of the second node
Iyz2G: Float - lumped inertia of the second node
"""
        Lb = x2 - x1  # segment length along the primary axis
        M = (m1 + m2) / 2.0 * Lb  # total mass of the linearly varying segment
        # first mass moments about the origin
        XgM = (Lb / 6.0) * ((2 * m2 + m1) * Lb + 3 * x1 * (m1 + m2))
        YgM = (Lb / 6.0) * ((2 * m1 + m2) * yg1 + (2 * m2 + m1) * yg2)
        ZgM = (Lb / 6.0) * ((2 * m1 + m2) * zg1 + (2 * m2 + m1) * zg2)
        M1 = (3 * m1 + m2) / 8 * Lb  # mass lumped at the first node
        M2 = M - M1  # remainder lumped at the second node
Ixx = (Lb / 12.0) \
* (
6 * (ixx1 + ixx2)
+ m1 * (3 * yg1**2 + 2 * yg1 * yg2 + yg2**2 +
3 * zg1**2 + 2 * zg1 * zg2 + zg2**2)
+ m2 * (yg1**2 + 2 * yg1 * yg2 + 3 * yg2**2 +
zg1**2 + 2 * zg1 * zg2 + 3 * zg2**2)
)
Iyy = (Lb / 12.0) \
* (
6 * (iyy1 + iyy2)
+ m1 * (Lb**2 + 4 * Lb * x1 + 6 * x1**2 +
3 * zg1**2 + 2 * zg1 * zg2 + zg2**2)
+ m2 * (3 * Lb**2 + 8 * Lb * x1 + 6 * x1**2 +
zg1**2 + 2 * zg1 * zg2 + 3 * zg2**2)
)
Izz = (Lb / 12.0) \
* (
6 * (izz1 + izz2)
+ m1 * (Lb**2 + 4 * Lb * x1 + 6 * x1**2 +
3 * yg1**2 + 2 * yg1 * yg2 + yg2**2)
+ m2 * (3 * Lb**2 + 8 * Lb * x1 + 6 * x1**2 +
yg1**2 + 2 * yg1 * yg2 + 3 * yg2**2)
)
Ixy = (Lb / 12.0) \
* (
6 * (ixy1 + ixy2)
+ m1 * (Lb * (yg1 + yg2) + 2 * x1 * (2 * yg1 + yg2))
+ m2 * (Lb * (yg1 + 3 * yg2) + 2 * x1 * (yg1 + 2 * yg2))
)
Ixz = (Lb / 12.0) \
* (
6 * (ixz1 + ixz2)
+ m1 * (Lb * (zg1 + zg2) + 2 * x1 * (2 * zg1 + zg2))
+ m2 * (Lb * (zg1 + 3 * zg2) + 2 * x1 * (zg1 + 2 * zg2))
)
Iyz = (Lb / 12.0) \
* (
6 * (iyz1 + iyz2)
+ m1 * (yg2 * (zg1 + zg2) + yg1 * (3 * zg1 + zg2))
+ m2 * (yg1 * (zg1 + zg2) + yg2 * (zg1 + 3 * zg2))
)
Ixx1 = (Lb / 192.0) \
* (
24 * (3 * ixx1 + ixx2)
+ m1 * (45 * yg1**2 + 22 * yg1 * yg2 + 5 * yg2**2 +
45 * zg1**2 + 22 * zg1 * zg2 + 5 * zg2**2)
+ m2 * (11 * yg1**2 + 10 * yg1 * yg2 + 3 * yg2**2 +
11 * zg1**2 + 10 * zg1 * zg2 + 3 * zg2**2)
)
Iyy1 = (Lb / 192.0) \
* (
24 * (3 * iyy1 + iyy2)
+ m1 * (5 * Lb**2 + 32 * Lb * x1 + 72 * x1**2 +
45 * zg1**2 + 22 * zg1 * zg2 + 5 * zg2**2)
+ m2 * (3 * Lb**2 + 16 * Lb * x1 + 24 * x1**2 +
11 * zg1**2 + 10 * zg1 * zg2 + 3 * zg2**2)
)
Izz1 = (Lb / 192.0) \
* (
24 * (3 * izz1 + izz2)
+ m1 * (5 * Lb**2 + 32 * Lb * x1 + 72 * x1**2 +
45 * yg1**2 + 22 * yg1 * yg2 + 5 * yg2**2)
+ m2 * (3 * Lb**2 + 16 * Lb * x1 + 24 * x1**2 +
11 * yg1**2 + 10 * yg1 * yg2 + 3 * yg2**2)
)
Ixy1 = (Lb / 192.0) \
* (
24 * (3 * ixy1 + ixy2)
+ m1 * (11 * Lb * yg1 + 56 * x1 * yg1 +
5 * Lb * yg2 + 16 * x1 * yg2)
+ m2 * (5 * Lb * yg1 + 16 * x1 * yg1 + 3 * Lb * yg2 + 8 * x1 * yg2)
)
Ixz1 = (Lb / 192.0) \
* (
24 * (3 * ixz1 + ixz2)
+ m1 * (11 * Lb * zg1 + 56 * x1 * zg1 +
5 * Lb * zg2 + 16 * x1 * zg2)
+ m2 * (5 * Lb * zg1 + 16 * x1 * zg1 + 3 * Lb * zg2 + 8 * x1 * zg2)
)
Iyz1 = (Lb / 192.0) \
* (
24 * (3 * iyz1 + iyz2)
+ m1 * (45 * yg1 * zg1 + 11 * yg2 * zg1 +
11 * yg1 * zg2 + 5 * yg2 * zg2)
+ m2 * (11 * yg1 * zg1 + 5 * yg2 * zg1 +
5 * yg1 * zg2 + 3 * yg2 * zg2)
)
Xg = XgM / M
Yg = YgM / M
Zg = ZgM / M
Xg_star = x1 + (Lb / 3.0) * (2 * m1 + m2) / (3 * m1 + m2)
Yg_star = ((7 * m1 + 2 * m2) * yg1 + (2 * m1 + m2) * yg2) / (9 * m1 + 3 * m2)
Zg_star = ((7 * m1 + 2 * m2) * zg1 + (2 * m1 + m2) * zg2) / (9 * m1 + 3 * m2)
R = -1 * np.array([Xg, Yg, Zg])
R_star = -1 * np.array([Xg_star, Yg_star, Zg_star])
R_prime = np.array(
[
Xg - Xg_star,
Yg - Yg_star,
Zg - Zg_star
]
)
Ixx1G = Ixx1 + M1 \
* (
np.sum(R_prime**2) - np.sum(R_star**2)
- (R_prime[0] * R_prime[0] - R_star[0] * R_star[0])
)
Iyy1G = Iyy1 + M1 \
* (
np.sum(R_prime**2) - np.sum(R_star**2)
- (R_prime[1] * R_prime[1] - R_star[1] * R_star[1])
)
Izz1G = Izz1 + M1 \
* (
np.sum(R_prime**2) - np.sum(R_star**2)
- (R_prime[2] * R_prime[2] - R_star[2] * R_star[2])
)
Ixy1G = Ixy1 + M1 \
* (
R_prime[0] * R_prime[1] - R_star[0] * R_star[1]
)
Ixz1G = Ixz1 + M1 \
* (
R_prime[0] * R_prime[2] - R_star[0] * R_star[2]
)
Iyz1G = Iyz1 + M1 \
* (
R_prime[1] * R_prime[2] - R_star[1] * R_star[2]
)
Ixx2G = Ixx - Ixx1G - M * (np.sum(R**2) - R[0] * R[0])
Iyy2G = Iyy - Iyy1G - M * (np.sum(R**2) - R[1] * R[1])
Izz2G = Izz - Izz1G - M * (np.sum(R**2) - R[2] * R[2])
Ixy2G = Ixy - Ixy1G - M * (R[0] * R[1])
Ixz2G = Ixz - Ixz1G - M * (R[0] * R[2])
Iyz2G = Iyz - Iyz1G - M * (R[1] * R[2])
return M1, \
Xg, Yg, Zg, \
Ixx1G, Iyy1G, Izz1G, \
Ixy1G, Ixz1G, Iyz1G, \
M2, \
Xg, Yg, Zg, \
Ixx2G, Iyy2G, Izz2G, \
Ixy2G, Ixz2G, Iyz2G
### export routines ###
def export_all(self, output_directory):
self.export_node_file(output_directory)
self.export_body_file(output_directory)
self.export_beam_file(output_directory)
self.export_element_file(output_directory)
def export_node_file(self, output_directory):
output = Output("{}/{}.nodes".format(output_directory, self.component_name))
output.write_line(
"# Structural nodes for the {}".format(self.component_name))
output.write_empty_line()
output.write_line("# *** nodes ***")
for n in self.nodes:
output.write_line(str(n))
output.write_empty_line()
output.end()
def export_element_file(self, output_directory):
output = Output("{}/{}.structural".format(output_directory, self.component_name))
output.write_line("# Beam elements for the {}".format(self.component_name))
output.write_empty_line()
output.write_line("set: integer {}_beam = {};".format(self.component_name, self.mbdyn_ref_index))
output.write_line("set: integer {}_body = {};".format(self.component_name, self.mbdyn_ref_index))
output.write_empty_line()
output.write_line("# *** beam elements ***")
for beam in self.beams:
output.write_line(str(beam))
output.write_line(
"include: \"{}.beam\";".format(self.component_name))
output.write_empty_line()
output.end()
def export_body_file(self, output_directory):
output = Output("{}/{}.body".format(output_directory, self.component_name))
output.write_line("# Bodies for the {}".format(self.component_name))
output.write_empty_line()
for body in self.bodies:
output.write_line(str(body))
output.write_empty_line()
output.end()
def export_beam_file(self, output_directory):
output = Output("{}/{}.beam".format(output_directory, self.component_name))
output.write_line("# Generic beam element properties for the beams")
output.write_empty_line()
output.write_line("# *** elastic properties ***")
output.write_line("beam3: current_beam,")
output.write_line(" beam_node1, reference, node, null,")
output.write_line(" beam_node2, reference, node, null,")
output.write_line(" beam_node3, reference, node, null,")
output.write_line(" reference, mip_rf, {},".format(self.local_orientation))
output.write_line(" linear time variant viscoelastic generic, sym,")
output.write_line(" k1_11, k1_12, k1_13, k1_14, k1_15, k1_16,")
output.write_line(" k1_22, k1_23, k1_24, k1_25, k1_26,")
output.write_line(" k1_33, k1_34, k1_35, k1_36,")
output.write_line(" k1_44, k1_45, k1_46,")
output.write_line(" k1_55, k1_56,")
output.write_line(" k1_66,")
output.write_line(" const, 1.0,")
output.write_line(" proportional, {},".format(self.stiffness_constant))
output.write_line(" ramp, 1.0, 0.0, 1.0, 0.0,")
output.write_line(" reference, mip_rf, {},".format(self.local_orientation))
output.write_line(" linear time variant viscoelastic generic, sym,")
output.write_line(" k2_11, k2_12, k2_13, k2_14, k2_15, k2_16,")
output.write_line(" k2_22, k2_23, k2_24, k2_25, k2_26,")
output.write_line(" k2_33, k2_34, k2_35, k2_36,")
output.write_line(" k2_44, k2_45, k2_46,")
output.write_line(" k2_55, k2_56,")
output.write_line(" k2_66,")
output.write_line(" const, 1.0,")
output.write_line(" proportional, {},".format(self.stiffness_constant))
output.write_line(" ramp, 1.0, 0.0, 1.0, 0.0;")
output.end()
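# A minimal sanity sketch for mass_inertia_distribution (hedged: "component"
# stands in for an instance of this class, constructed elsewhere). For a
# uniform segment (m1 == m2) the lumped masses split evenly and must sum to
# m * Lb, independent of the sectional inertias:
#
#     out = component.mass_inertia_distribution(
#         0.0, 0.0, 0.0,                    # x1, yg1, zg1
#         2.0, 0.0, 0.0,                    # x2, yg2, zg2
#         10.0, 10.0,                       # m1, m2 (uniform)
#         0.0, 0.0, 0.0, 0.0, 0.0, 0.0,     # sectional inertias, node 1
#         0.0, 0.0, 0.0, 0.0, 0.0, 0.0)     # sectional inertias, node 2
#     M1, M2 = out[0], out[10]
#     assert abs(M1 - 10.0) < 1e-9 and abs(M2 - 10.0) < 1e-9  # each m * Lb / 2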
|
import unittest
from robotide.controller.basecontroller import WithNamespace
from robotide.namespace.namespace import Namespace
from robotide.preferences.settings import Settings
class TestWithNamespace(unittest.TestCase):
def test_get_all_cached_library_names(self):
with_namespace = WithNamespace()
with_namespace._set_namespace(namespace=self._create_namespace())
        print(with_namespace.get_all_cached_library_names())
def _create_namespace(self):
        # Build a minimal stand-in for Settings: a bare callable object
        # carrying only the two attributes that Namespace actually uses.
        settings = lambda: 0
        settings.get = lambda k, d: d
        settings.add_change_listener = lambda *args: 0
namespace = Namespace(settings=settings)
return namespace
if __name__ == '__main__':
unittest.main()
|
import unittest
from pybox.containers.pqueue import PQueue
class PQueueTest(unittest.TestCase):
def setUp(self):
self.q1 = PQueue()
self.q2 = PQueue([('baz', 3), ('foo', 1)])
def tearDown(self):
pass
def test_init(self):
self.assertEqual(len(self.q1), 0)
self.assertEqual(len(self.q2), 2)
def test_clear(self):
self.q1.clear()
self.q2.clear()
self.assertEqual(len(self.q1), 0)
self.assertEqual(len(self.q2), 0)
def test_enqueue(self):
self.q1.enqueue(('bar', 2))
self.q2.enqueue(('bar', 2))
self.assertEqual(len(self.q1), 1)
self.assertEqual(len(self.q2), 3)
def test_enqueueException(self):
with self.assertRaises(Exception) as context:
q3 = PQueue([('bar', 2), 'foo'])
        self.assertTrue('Element must be tuple, you entered: foo' in str(context.exception))
def test_dequeue(self):
q1Value = self.q1.dequeue()
q2Value = self.q2.dequeue()
self.assertEqual(q1Value, None)
self.assertEqual(q2Value, ('foo', 1))
def test_enqueue_dequeue(self):
q3 = PQueue()
q3.enqueue(('bar', 2))
q3.enqueue(('baz', 3))
q3.enqueue(('foo', 1))
q3Value = q3.dequeue()
self.assertEqual(q3Value, ('foo', 1))
self.assertEqual(len(q3), 2)
def test_peek(self):
self.assertEqual(self.q1.peek(), None)
self.assertEqual(self.q2.peek(), ('foo', 1))
def test_isEmpty(self):
self.assertTrue(self.q1.isEmpty())
self.assertFalse(self.q2.isEmpty())
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python3
# encoding: utf-8
import torch as th
import torch.nn as nn
default_act = 'relu'
Act_REGISTER = {}
Act_REGISTER[None] = lambda: lambda x: x
Act_REGISTER['relu'] = nn.ReLU
Act_REGISTER['elu'] = nn.ELU
Act_REGISTER['gelu'] = nn.GELU
Act_REGISTER['leakyrelu'] = nn.LeakyReLU
Act_REGISTER['tanh'] = nn.Tanh
Act_REGISTER['softplus'] = nn.Softplus
Act_REGISTER['mish'] = nn.Mish
Act_REGISTER['sigmoid'] = nn.Sigmoid
Act_REGISTER['log_softmax'] = lambda: nn.LogSoftmax(-1)
class Swish(nn.Module):
"""
https://arxiv.org/abs/1710.05941
"""
def forward(self, inp: th.Tensor) -> th.Tensor:
return inp * th.sigmoid(inp)
Act_REGISTER['swish'] = Swish
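if __name__ == '__main__':
    # Minimal usage sketch of the registry above: look an activation up by
    # name, instantiate it, and apply it to a tensor. The None entry is the
    # identity; 'swish' exercises the custom module.
    for name in (default_act, 'swish', None):
        act = Act_REGISTER[name]()
        x = th.randn(4)
        print(name, act(x))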
|
# Used by other modules to determine if proxy will be
# used instead of direct connection.
proxy = None
def set_proxy(ip):
global proxy
import re
    # Validate an ip:port string; TCP ports range up to 65535.
m = re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{2,5}$', ip)
if m:
proxy = 'http://' + m.group()
return True
else:
        return False  # just inform the caller that nothing has been changed
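if __name__ == '__main__':
    # Quick self-check of the validator above: a well-formed ip:port string
    # is accepted and stored with an http:// prefix, while anything else is
    # rejected and leaves the module-level proxy untouched.
    assert set_proxy('127.0.0.1:8080')
    assert proxy == 'http://127.0.0.1:8080'
    assert not set_proxy('not-an-address')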
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Clione Software
# Copyright (c) 2010-2013 Cidadania S. Coop. Galega
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.views.generic.list import ListView
from django.views.generic.edit import UpdateView, DeleteView
from django.views.generic.detail import DetailView
from django.views.generic import FormView
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from guardian.shortcuts import assign_perm
from core.spaces import url_names as urln
from core.spaces.models import Space, Event
from core.spaces.forms import SpaceForm, EventForm
from helpers.cache import get_or_insert_object_in_cache
class AddEvent(FormView):
"""
    Returns an empty EventForm to create a new Event. Space and author
    fields are automatically filled with the request data.
:permissions required: admin_space, mod_space
:rtype: HTML Form
:context: form, get_place
"""
form_class = EventForm
template_name = 'spaces/event_form.html'
def dispatch(self, request, *args, **kwargs):
space = get_object_or_404(Space, url=kwargs['space_url'])
if (request.user.has_perm('admin_space', space) or
request.user.has_perm('mod_space', space)):
return super(AddEvent, self).dispatch(request, *args, **kwargs)
else:
raise PermissionDenied
def get_success_url(self):
space = self.kwargs['space_url']
return reverse(urln.SPACE_INDEX, kwargs={'space_url': space})
def form_valid(self, form):
self.space = get_object_or_404(Space, url=self.kwargs['space_url'])
form_uncommited = form.save(commit=False)
form_uncommited.event_author = self.request.user
form_uncommited.space = self.space
form_uncommited.save()
form.save_m2m()
return super(AddEvent, self).form_valid(form)
def get_context_data(self, **kwargs):
context = super(AddEvent, self).get_context_data(**kwargs)
place = get_object_or_404(Space, url=self.kwargs['space_url'])
context['get_place'] = place
return context
class ViewEvent(DetailView):
"""
    View the content of an event.
:permissions required: view_space
:rtype: Object
:context: event, get_place
"""
context_object_name = 'event'
template_name = 'spaces/event_detail.html'
def dispatch(self, request, *args, **kwargs):
space = get_object_or_404(Space, url=kwargs['space_url'])
if request.user.has_perm('view_space', space):
return super(ViewEvent, self).dispatch(request, *args, **kwargs)
else:
raise PermissionDenied
def get_object(self):
return get_object_or_404(Event, pk=self.kwargs['event_id'])
def get_context_data(self, **kwargs):
context = super(ViewEvent, self).get_context_data(**kwargs)
context['get_place'] = get_object_or_404(Space,
url=self.kwargs['space_url'])
return context
class EditEvent(UpdateView):
"""
    Returns an EventForm filled with the current Event data to be edited.
:permissions required: admin_space, admin_event, mod_space, change_event
:rtype: HTML Form
:context: event, get_place
"""
model = Event
template_name = 'spaces/event_form.html'
def dispatch(self, request, *args, **kwargs):
space = get_object_or_404(Space, url=kwargs['space_url'])
event = get_object_or_404(Event, pk=kwargs['event_id'])
if (request.user.has_perm('admin_space', space) or
request.user.has_perm('mod_space', space)):
return super(EditEvent, self).dispatch(request, *args, **kwargs)
else:
raise PermissionDenied
def get_object(self):
cur_event = get_object_or_404(Event, pk=self.kwargs['event_id'])
return cur_event
def get_success_url(self):
space = self.kwargs['space_url']
return reverse(urln.SPACE_INDEX, kwargs={'space_url': space})
def form_valid(self, form):
form_uncommited = form.save(commit=False)
form_uncommited.save()
form.save_m2m()
return super(EditEvent, self).form_valid(form)
def get_context_data(self, **kwargs):
context = super(EditEvent, self).get_context_data(**kwargs)
space = get_object_or_404(Space, url=self.kwargs['space_url'])
context['get_place'] = space
return context
class DeleteEvent(DeleteView):
"""
    Returns a confirmation page before deleting the Event object.
:permissions required: admin_space, mod_space, admin_event, delete_event
:rtype: Confirmation
:context: get_place
"""
def dispatch(self, request, *args, **kwargs):
space = get_object_or_404(Space, url=kwargs['space_url'])
        event = get_object_or_404(Event, pk=kwargs['event_id'])
if (request.user.has_perm('admin_space', space) or
request.user.has_perm('mod_space', space)):
return super(DeleteEvent, self).dispatch(request, *args, **kwargs)
else:
raise PermissionDenied
def get_object(self):
return get_object_or_404(Event, pk=self.kwargs['event_id'])
def get_success_url(self):
space = self.kwargs['space_url']
return reverse(urln.SPACE_INDEX, kwargs={'space_url': space})
def get_context_data(self, **kwargs):
context = super(DeleteEvent, self).get_context_data(**kwargs)
context['get_place'] = get_object_or_404(Space,
url=self.kwargs['space_url'])
return context
class ListEvents(ListView):
"""
List all the events attached to a space.
:permissions required: view_space
:rtype: Object list
:context: event_list, get_place
"""
paginate_by = 25
context_object_name = 'event_list'
def dispatch(self, request, *args, **kwargs):
space = get_object_or_404(Space, url=kwargs['space_url'])
if request.user.has_perm('view_space', space):
return super(ListEvents, self).dispatch(request, *args, **kwargs)
else:
raise PermissionDenied
def get_queryset(self):
place = get_object_or_404(Space, url=self.kwargs['space_url'])
        objects = Event.objects.filter(space=place.id).order_by('event_date')
return objects
def get_context_data(self, **kwargs):
context = super(ListEvents, self).get_context_data(**kwargs)
context['get_place'] = get_object_or_404(Space,
url=self.kwargs['space_url'])
return context
|
from bs4 import BeautifulSoup
from selenium import webdriver
import requests
def download_url(url, save_path, chunk_size=128):
r = requests.get(url, stream=True)
with open(save_path, 'wb') as fd:
for chunk in r.iter_content(chunk_size=chunk_size):
fd.write(chunk)
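# Usage sketch (hypothetical URL): stream a remote file to disk in small
# chunks instead of loading the whole response into memory, e.g.
#     download_url('https://example.com/report.pdf', 'report.pdf')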
class DownloaderBot():
    def __init__(self, url):
        self.__url = url  # private attribute: URL of the page listing the PDF links to download
        print("Downloader object initialized!")
def set_url(self, value):
self.__url = value
def get_url(self):
return self.__url
def downloaderTceRS(self):
cont = 0
driver = webdriver.Chrome()
driver.get("http://dados.tce.rs.gov.br/dados/municipal/recebimentos/2018.html")
page_source = driver.page_source
print(page_source)
soup = BeautifulSoup(page_source, "html.parser")
print(soup)
driver.quit()
x2 = 0
for link in soup.findAll("a"):
print(link.get('href'))
if link.get("href").find("dados.tce.rs.gov.br/dados/municipal/balancete-despesa/2018/") > 0:
if link.get("href").find(".csv") > 0:
save_path = r"C:\Users\schmall\Documents\FGV\Tese\Balanços_RS\dados - despesas - 2018\\" + link.get('href').split('/')[7]
download_url(link.get('href'), save_path, chunk_size=128)
            # options = webdriver.ChromeOptions()
            #
            # preferences = {
            #     "download.default_directory": r"C:\Users\schmall\Documents\FGV\Tese\Balanços_RS\dados",
            #     "download.prompt_for_download": False,
            #     "download.directory_upgrade": True
            # }
            # options.add_experimental_option("prefs", preferences)
            #
            # driver3 = webdriver.Chrome(chrome_options=options)
            # driver3.get(link.get('href'))
            #
            # time.sleep(5)
            #
            # url_do_pdf = driver3.current_url  # grab the PDF URL, which is what we wanted from the start!
            #
            # print(url_do_pdf)
            #
            # x1 = 0
            # x2 = x2 + 1
            # if x2 > 5:
            #     while x1 == 0:
            #         count = 0
            #         li = os.listdir(r"C:\Users\schmall\Documents\FGV\Tese\Balanços_RS\dados")
            #         for x1 in li:
            #             if x1.endswith(".crdownload"):
            #                 count = count + 1
            #         if count == 0:
            #             x1 = 1
            #             x2 = 0
            #         else:
            #             x1 = 0
            # driver3.quit()
            #
            # # check the "next" button AFTER downloading all the PDFs from the first page!
            # pagina_exibe_links_pdf_driver = webdriver.Chrome()
            # pagina_exibe_links_pdf_driver.get(self.__url)
            #
            # try:
            #     botao_proximo = pagina_exibe_links_pdf_driver.find_element_by_link_text("Próximo →")
            #     # downloads are still needed on the following pages
            #     botao_proximo.click()
            #     proxima_pagina = pagina_exibe_links_pdf_driver.current_url
            #     self.__url = proxima_pagina  # update the download-links page stored on the object!
            #     pagina_exibe_links_pdf_driver.quit()  # safe to close the driver, we already have the next page to restart the loop!
            # except NoSuchElementException as exception:  # reached the last page of the navigation; the work is done!
            #     proxima_pagina_existe = False  # the loop will not run another iteration!
|
import numpy as np
import pandas as pd
from sklearn import ensemble
import synthimpute as si
def test_rf_quantile():
N = 1000
x = pd.DataFrame({"x1": np.random.randn(N), "x2": np.random.randn(N)})
# Construct example relationship.
y = x.x1 + np.power(x.x2, 3) + np.random.randn(N)
rf = ensemble.RandomForestRegressor(random_state=3)
rf.fit(x, y)
median_preds = si.rf_quantile(rf, x, 0.5)
assert median_preds.size == N
# Test multiple quantiles.
quantiles = np.arange(N) / N
multiple_q_preds = si.rf_quantile(rf, x, quantiles)
assert multiple_q_preds.size == N
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2021 Evgeni Golov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: host_errata_info
version_added: 2.1.0
short_description: Fetch information about Host Errata
description:
- Fetch information about Host Errata
author:
- "Evgeni Golov (@evgeni)"
options:
host:
description:
- Name of the host to fetch errata for.
required: true
type: str
content_view:
description:
- Calculate Applicable Errata based on a particular Content View.
- Required together with I(lifecycle_environment).
- If this is set, I(organization) also needs to be set.
required: false
type: str
lifecycle_environment:
description:
- Calculate Applicable Errata based on a particular Lifecycle Environment.
- Required together with I(content_view).
- If this is set, I(organization) also needs to be set.
required: false
type: str
extends_documentation_fragment:
- theforeman.foreman.foreman
- theforeman.foreman.foreman.infomodulewithoutname
'''
EXAMPLES = '''
- name: "List installable errata for host"
theforeman.foreman.host_errata_info:
username: "admin"
password: "changeme"
server_url: "https://foreman.example.com"
host: "host.example.com"
- name: "List applicable errata for host"
theforeman.foreman.host_errata_info:
username: "admin"
password: "changeme"
server_url: "https://foreman.example.com"
organization: "Default Organization"
host: "host.example.com"
lifecycle_environment: "Library"
content_view: "Default Organization View"
'''
RETURN = '''
host_errata:
description: List of all found errata for the host and their details
returned: success
type: list
elements: dict
'''
from ansible_collections.theforeman.foreman.plugins.module_utils.foreman_helper import (
ForemanInfoAnsibleModule
)
class ForemanHostErrataInfo(ForemanInfoAnsibleModule):
pass
def main():
module = ForemanHostErrataInfo(
foreman_spec=dict(
name=dict(invisible=True),
host=dict(type='entity', required=True),
content_view=dict(type='entity', scope=['organization']),
lifecycle_environment=dict(type='entity', flat_name='environment_id', scope=['organization']),
),
entity_opts=dict(
resource_type='host_errata',
),
required_together=[
('content_view', 'lifecycle_environment'),
],
required_by={
'content_view': 'organization',
'lifecycle_environment': 'organization',
},
)
with module.api_connection():
module.run()
if __name__ == '__main__':
main()
|
"""Test logic blocks."""
from mpf.tests.MpfFakeGameTestCase import MpfFakeGameTestCase
from mpf.tests.MpfTestCase import test_config_directory
class TestLogicBlocks(MpfFakeGameTestCase):
def get_config_file(self):
return 'config.yaml'
def get_machine_path(self):
return 'tests/machine_files/logic_blocks/'
def test_mode_selection_with_counters(self):
self.mock_event("qualify_start_mode1")
self.mock_event("qualify_start_mode2")
self.start_game()
self.start_mode("mode3")
# advance both counters to 2/3
self.post_event("qualify1_count")
self.post_event("qualify1_count")
self.post_event("qualify2_count")
self.post_event("qualify2_count")
        # post the final event for both of them
self.machine.switch_controller.process_switch("s_qualify1", 1, True)
self.machine.switch_controller.process_switch("s_qualify2", 1, True)
self.advance_time_and_run()
self.assertEventCalled("qualify_start_mode1")
self.assertEventNotCalled("qualify_start_mode2")
@test_config_directory("tests/machine_files/counters/")
def test_subscription_on_counter_values(self):
self.start_game()
self.start_mode("mode1")
self.assertLightColor("l_chest_matrix_green_2", "black")
self.assertLightColor("l_chest_matrix_green_3", "black")
self.assertLightColor("l_chest_matrix_green_4", "black")
self.assertLightColor("l_chest_matrix_green_5", "black")
self.post_event("count_up")
self.advance_time_and_run(.1)
self.assertLightColor("l_chest_matrix_green_2", "black")
self.assertLightColor("l_chest_matrix_green_3", "black")
self.assertLightColor("l_chest_matrix_green_4", "black")
self.assertLightColor("l_chest_matrix_green_5", "green")
self.post_event("count_up")
self.advance_time_and_run(.1)
self.assertLightColor("l_chest_matrix_green_2", "black")
self.assertLightColor("l_chest_matrix_green_3", "black")
self.assertLightColor("l_chest_matrix_green_4", "green")
self.assertLightColor("l_chest_matrix_green_5", "green")
self.post_event("count_up")
self.advance_time_and_run(.1)
self.assertLightColor("l_chest_matrix_green_2", "black")
self.assertLightColor("l_chest_matrix_green_3", "green")
self.assertLightColor("l_chest_matrix_green_4", "green")
self.assertLightColor("l_chest_matrix_green_5", "green")
self.post_event("count_up")
self.advance_time_and_run(.1)
self.assertLightColor("l_chest_matrix_green_2", "green")
self.assertLightColor("l_chest_matrix_green_3", "green")
self.assertLightColor("l_chest_matrix_green_4", "green")
self.assertLightColor("l_chest_matrix_green_5", "green")
self.drain_all_balls()
self.advance_time_and_run()
self.start_mode("mode1")
self.assertLightColor("l_chest_matrix_green_2", "black")
self.assertLightColor("l_chest_matrix_green_3", "black")
self.assertLightColor("l_chest_matrix_green_4", "black")
self.assertLightColor("l_chest_matrix_green_5", "black")
def test_counter_with_lights(self):
self.start_game()
self.post_event("start_mode2")
self.advance_time_and_run()
self.assertLightColor("led1", "white")
self.assertLightColor("led2", "black")
self.assertLightColor("led3", "black")
# nothing happens because it is disabled
self.post_event("counter_with_lights_count")
self.advance_time_and_run()
self.assertLightColor("led1", "white")
self.assertLightColor("led2", "black")
self.assertLightColor("led3", "black")
# advance
self.post_event("counter_with_lights_enable")
self.post_event("counter_with_lights_count")
self.advance_time_and_run()
self.assertLightColor("led1", "black")
self.assertLightColor("led2", "white")
self.assertLightColor("led3", "black")
# stop mode
self.post_event("stop_mode2")
self.advance_time_and_run()
# all off
self.assertLightColor("led1", "black")
self.assertLightColor("led2", "black")
self.assertLightColor("led3", "black")
# restart mode. should restore state
self.post_event("start_mode2")
self.advance_time_and_run()
self.assertLightColor("led1", "black")
self.assertLightColor("led2", "white")
self.assertLightColor("led3", "black")
# and complete
self.post_event("counter_with_lights_count")
self.advance_time_and_run()
self.assertLightColor("led1", "black")
self.assertLightColor("led2", "black")
self.assertLightColor("led3", "white")
def test_accrual_random_advance(self):
self.start_game()
self.mock_event("accrual1_complete1")
# should do nothing
self.post_event("accrual1_random_advance")
self.assertEqual([False, False, False], self.machine.accruals["accrual1"].value)
# enable accrual
self.post_event("accrual1_enable")
# complete one step
self.post_event("accrual1_step1a")
self.assertEqual([True, False, False], self.machine.accruals["accrual1"].value)
# should advance one of the remaining steps
self.post_event("accrual1_random_advance")
# exactly two steps should be hit
self.assertEqual(2, sum(self.machine.accruals["accrual1"].value))
self.assertEventNotCalled("accrual1_complete1")
# should complete the accrual
self.post_event("accrual1_random_advance")
self.assertEventCalled("accrual1_complete1")
def test_accruals_simple(self):
self.start_game()
self.mock_event("accrual1_complete1")
self.mock_event("accrual1_hit")
self.mock_event("accrual1_complete2")
# accrual should not yet work
self.post_event("accrual1_step1a")
self.post_event("accrual1_step2b")
self.post_event("accrual1_step3c")
self.assertEqual(0, self._events["accrual1_complete1"])
self.assertEqual(0, self._events["accrual1_complete2"])
# enable accrual
self.post_event("accrual1_enable")
# step2
self.post_event("accrual1_step2a")
self.assertEqual(0, self._events["accrual1_complete1"])
self.assertEqual(1, self._events["accrual1_hit"])
# step1
self.post_event("accrual1_step1c")
self.post_event("accrual1_step1b")
self.assertEqual(0, self._events["accrual1_complete1"])
self.assertEqual(2, self._events["accrual1_hit"])
# step 3
self.post_event("accrual1_step3c")
# accrual should fire
self.assertEqual(1, self._events["accrual1_complete1"])
self.assertEqual(1, self._events["accrual1_complete2"])
# should not work again
self.post_event("accrual1_step1a")
self.post_event("accrual1_step2a")
self.post_event("accrual1_step3a")
self.assertEqual(1, self._events["accrual1_complete1"])
self.assertEqual(1, self._events["accrual1_complete2"])
# reset but do not enable yet
self.post_event("accrual1_reset")
# nothing should happen
self.post_event("accrual1_step1a")
self.post_event("accrual1_step2a")
self.post_event("accrual1_step3a")
self.assertEqual(1, self._events["accrual1_complete1"])
self.assertEqual(1, self._events["accrual1_complete2"])
# enable for one step
self.post_event("accrual1_enable")
self.post_event("accrual1_step1a")
# disable for next
self.post_event("accrual1_disable")
self.post_event("accrual1_step2a")
# enable for third step
self.post_event("accrual1_enable")
self.post_event("accrual1_step3a")
# should not complete yet
self.assertEqual(1, self._events["accrual1_complete1"])
self.assertEqual(1, self._events["accrual1_complete2"])
self.post_event("accrual1_step2a")
# but now
self.assertEqual(2, self._events["accrual1_complete1"])
self.assertEqual(2, self._events["accrual1_complete2"])
def test_counter_simple_down(self):
self.start_game()
self.mock_event("logicblock_counter1_complete")
self.mock_event("logicblock_counter1_hit")
self.post_event("counter1_enable")
for i in range(4):
self.post_event("counter1_count")
self.assertEqual(0, self._events["logicblock_counter1_complete"])
# nothing should happen when disabled
self.post_event("counter1_disable")
for i in range(10):
self.post_event("counter1_count")
self.assertEqual(0, self._events["logicblock_counter1_complete"])
self.post_event("counter1_enable")
self.post_event("counter1_count")
self.assertEqual(1, self._events["logicblock_counter1_complete"])
self.assertEqual(5, self._events["logicblock_counter1_hit"])
# it should disable
self.post_event("counter1_count")
self.assertEqual(1, self._events["logicblock_counter1_complete"])
self.assertEqual(5, self._events["logicblock_counter1_hit"])
self.post_event("counter1_restart")
for i in range(4):
self.post_event("counter1_count")
# 4 more hits but not completed
self.assertEqual(1, self._events["logicblock_counter1_complete"])
self.assertEqual(9, self._events["logicblock_counter1_hit"])
# reset
self.post_event("counter1_reset")
for i in range(4):
self.post_event("counter1_count")
# another 4 hits still not complete
self.assertEqual(1, self._events["logicblock_counter1_complete"])
self.assertEqual(13, self._events["logicblock_counter1_hit"])
# and complete again
self.post_event("counter1_count")
self.assertEqual(2, self._events["logicblock_counter1_complete"])
self.assertEqual(14, self._events["logicblock_counter1_hit"])
def test_sequence_simple(self):
self.start_game()
self.mock_event("sequence1_complete")
self.mock_event("logicblock_sequence1_hit")
self.post_event("sequence1_enable")
# wrong order
self.post_event("sequence1_step3a")
self.post_event("sequence1_step2a")
self.post_event("sequence1_step1b")
self.assertEqual(0, self._events["sequence1_complete"])
self.assertEqual(1, self._events["logicblock_sequence1_hit"])
# still not
self.post_event("sequence1_step3b")
self.post_event("sequence1_step1a")
self.assertEqual(0, self._events["sequence1_complete"])
self.assertEqual(1, self._events["logicblock_sequence1_hit"])
# only 1 so far. now step2
self.post_event("sequence1_step2a")
self.assertEqual(0, self._events["sequence1_complete"])
self.assertEqual(2, self._events["logicblock_sequence1_hit"])
# and step 3
self.post_event("sequence1_step3b")
self.assertEqual(1, self._events["sequence1_complete"])
self.assertEqual(3, self._events["logicblock_sequence1_hit"])
# should be disabled
self.post_event("sequence1_step1a")
self.post_event("sequence1_step2a")
self.post_event("sequence1_step3a")
self.assertEqual(1, self._events["sequence1_complete"])
# enable and reset
self.post_event("sequence1_enable")
self.post_event("sequence1_reset")
        # reset in between
self.post_event("sequence1_step1a")
self.post_event("sequence1_step2a")
self.post_event("sequence1_reset")
self.post_event("sequence1_step3a")
# nothing
self.assertEqual(1, self._events["sequence1_complete"])
# again
self.post_event("sequence1_step1a")
self.assertEqual(1, self._events["sequence1_complete"])
self.post_event("sequence1_step2a")
self.assertEqual(1, self._events["sequence1_complete"])
self.post_event("sequence1_step3a")
self.assertEqual(2, self._events["sequence1_complete"])
def test_counter_in_mode(self):
self.start_game()
self.mock_event("counter2_complete")
self.mock_event("counter2_hit")
for i in range(10):
self.post_event("counter2_count")
self.assertEqual(0, self._events["counter2_complete"])
self.post_event("start_mode1")
self.assertTrue("mode1" in self.machine.modes)
for i in range(2):
self.post_event("counter2_count")
self.assertEqual(i + 1, self._events["counter2_hit"])
self.assertEqual(0, self._events["counter2_complete"])
self.assertEventCalledWith("counter2_hit", count=i + 1, remaining=2 - i, hits=i+1)
self.post_event("counter2_count")
self.assertEqual(1, self._events["counter2_complete"])
self.assertEventCalledWith("counter2_hit", count=3, hits=3, remaining=0)
# should run again
for i in range(2):
self.post_event("counter2_count")
self.assertEqual(i + 4, self._events["counter2_hit"])
self.assertEqual(1, self._events["counter2_complete"])
self.post_event("counter2_count")
self.assertEqual(2, self._events["counter2_complete"])
# stop mode
self.post_event("stop_mode1")
# nothing should happen any more
for i in range(10):
self.post_event("counter2_count")
self.assertEqual(2, self._events["counter2_complete"])
self.assertEqual(6, self._events["counter2_hit"])
def test_counter_control_events(self):
'''
Tests the add, subtract, and set_value control events
for the Counter class.
'''
def reset_event_mocks():
# Reset mocks
self.mock_event("counter6_complete")
self.mock_event("counter6_hit")
self.mock_event("counter7_complete")
self.mock_event("counter7_hit")
self.mock_event("logicblock_counter6_updated")
self.mock_event("logicblock_counter7_updated")
self.start_game()
reset_event_mocks()
# Start mode with control events and counter6
self.post_event("start_mode4")
self.assertTrue("mode4" in self.machine.modes)
        # Add zero to the counter 10 times; the counter should not complete
for i in range(10):
self.post_event("increase_counter6_0")
self.assertEventCalled("logicblock_counter6_updated")
self.assertEqual(0, self._events["counter6_complete"])
reset_event_mocks()
        # Add 3 twice, then count once, then add 3 again;
        # the final add of three completes the counter once
for i in range(0, 2):
self.post_event("increase_counter6_3")
self.assertEventCalled("logicblock_counter6_updated")
self.assertEqual(0, self._events["counter6_complete"])
reset_event_mocks()
self.post_event("counter6_count")
self.assertEventCalledWith("counter6_hit", count=7, hits=7, remaining=3)
self.post_event("increase_counter6_3")
self.assertEventCalled("logicblock_counter6_updated")
self.assertEqual(1, self._events["counter6_complete"])
# Test the adding of five to the counter
reset_event_mocks()
self.post_event("increase_counter6_5")
self.assertEventCalled("logicblock_counter6_updated")
self.assertEqual(0, self._events["counter6_complete"])
reset_event_mocks()
self.post_event("increase_counter6_5")
self.assertEventCalled("logicblock_counter6_updated")
self.assertEqual(1, self._events["counter6_complete"])
# Test subtraction
reset_event_mocks()
self.post_event("counter6_count")
self.assertEventCalledWith("counter6_hit", count=1, hits=1, remaining=9)
self.post_event("reduce_counter6_5")
self.assertEventCalled("logicblock_counter6_updated")
reset_event_mocks()
self.post_event("counter6_count")
self.assertEventCalledWith("counter6_hit", count=-3, hits=-3, remaining=13)
self.post_event("reduce_counter6_3")
self.assertEventCalled("logicblock_counter6_updated")
reset_event_mocks()
self.post_event("counter6_count")
self.assertEventCalledWith("counter6_hit", count=-5, hits=-5, remaining=15)
self.post_event("reduce_counter6_0")
self.assertEventCalled("logicblock_counter6_updated")
self.post_event("counter6_count")
self.assertEventCalledWith("counter6_hit", count=-4, hits=-4, remaining=14)
# Test Setting the Counter to a value
reset_event_mocks()
# Make sure that the counter holds a nonzero value
self.post_event("counter6_count")
self.assertEventCalledWith("counter6_hit", count=-3, hits=-3, remaining=13)
self.post_event("set_counter6_0")
self.assertEventCalled("logicblock_counter6_updated")
reset_event_mocks()
self.post_event("counter6_count")
self.assertEventCalledWith("counter6_hit", count=1, hits=1, remaining=9)
# Set the counter to a value above the completion value
self.assertEqual(0, self._events["counter6_complete"])
self.post_event("set_counter6_25")
self.assertEventCalled("logicblock_counter6_updated")
self.assertEqual(1, self._events["counter6_complete"])
# Test using counter with direction down
# Test increasing and reducing
reset_event_mocks()
self.post_event("counter7_count")
self.assertEventCalledWith("counter7_hit", count=4, hits=1, remaining=4)
self.post_event("increase_counter7_5")
self.assertEventCalled("logicblock_counter7_updated")
reset_event_mocks()
self.post_event("counter7_count")
self.assertEventCalledWith("counter7_hit", count=8, hits=-3, remaining=8)
self.post_event("reduce_counter7_5")
self.assertEventCalled("logicblock_counter7_updated")
reset_event_mocks()
self.post_event("counter7_count")
self.assertEventCalledWith("counter7_hit", count=2, hits=3, remaining=2)
self.assertEqual(0, self._events["counter7_complete"])
self.post_event("reduce_counter7_3")
self.assertEventCalled("logicblock_counter7_updated")
self.assertEqual(1, self._events["counter7_complete"])
# Test setting the value with direction down counter
reset_event_mocks()
self.assertEqual(0, self._events["counter7_complete"])
self.post_event("set_counter7_negative25")
self.assertEventCalled("logicblock_counter7_updated")
self.assertEqual(1, self._events["counter7_complete"])
self.post_event("set_counter7_0")
self.assertEqual(2, self._events["counter7_complete"])
reset_event_mocks()
self.post_event("set_counter7_3")
self.assertEventCalled("logicblock_counter7_updated")
self.post_event("counter7_count")
self.assertEventCalledWith("counter7_hit", count=2, hits=3, remaining=2)
reset_event_mocks()
self.assertPlaceholderEvaluates(2, "device.counters.counter7.value")
# nothing happens because machine.test2 is undefined
self.post_event("set_counter_placeholder")
self.assertEventNotCalled("logicblock_counter7_updated")
self.assertPlaceholderEvaluates(2, "device.counters.counter7.value")
self.machine.variables.set_machine_var("test2", 4)
self.post_event("set_counter_placeholder")
self.assertEventCalled("logicblock_counter7_updated")
self.assertPlaceholderEvaluates(4, "device.counters.counter7.value")
reset_event_mocks()
self.post_event("subtract_counter_placeholder")
self.assertEventNotCalled("logicblock_counter7_updated")
self.assertPlaceholderEvaluates(4, "device.counters.counter7.value")
reset_event_mocks()
self.machine.variables.set_machine_var("test3", 3)
self.post_event("subtract_counter_placeholder")
self.assertEventCalled("logicblock_counter7_updated")
self.assertPlaceholderEvaluates(1, "device.counters.counter7.value")
reset_event_mocks()
self.post_event("add_counter_placeholder")
self.assertEventNotCalled("logicblock_counter7_updated")
self.assertPlaceholderEvaluates(1, "device.counters.counter7.value")
reset_event_mocks()
self.machine.variables.set_machine_var("test4", 1)
self.post_event("add_counter_placeholder")
self.assertEventCalled("logicblock_counter7_updated")
self.assertPlaceholderEvaluates(2, "device.counters.counter7.value")
def test_logic_block_outside_game(self):
self.mock_event("logicblock_accrual2_complete")
# should work before game
self.post_event("accrual2_step1")
self.post_event("accrual2_step2")
self.assertEqual(1, self._events["logicblock_accrual2_complete"])
self.post_event("accrual2_restart")
self.start_game()
# should work during game
self.post_event("accrual2_step1")
self.post_event("accrual2_step2")
self.assertEqual(2, self._events["logicblock_accrual2_complete"])
self.post_event("accrual2_restart")
self.stop_game()
# should work after game
self.post_event("accrual2_step1")
self.post_event("accrual2_step2")
self.assertEqual(3, self._events["logicblock_accrual2_complete"])
def test_no_reset_on_complete(self):
self.mock_event("logicblock_accrual3_complete")
# start game
self.start_game()
# and enable
self.post_event("accrual3_enable")
# should work once
self.post_event("accrual3_step1")
self.post_event("accrual3_step2")
self.assertEqual(1, self._events["logicblock_accrual3_complete"])
# but not a second time because it disabled
self.post_event("accrual3_step1")
self.post_event("accrual3_step2")
self.assertEqual(1, self._events["logicblock_accrual3_complete"])
# enable again
self.post_event("accrual3_enable")
# still completed
self.post_event("accrual3_step1")
self.post_event("accrual3_step2")
self.assertEqual(1, self._events["logicblock_accrual3_complete"])
# should work after reset
self.post_event("accrual3_reset")
self.post_event("accrual3_step1")
self.post_event("accrual3_step2")
self.assertEqual(2, self._events["logicblock_accrual3_complete"])
# disabled again
self.post_event("accrual3_reset")
self.post_event("accrual3_step1")
self.post_event("accrual3_step2")
self.assertEqual(2, self._events["logicblock_accrual3_complete"])
# works after enable
self.post_event("accrual3_enable")
self.post_event("accrual3_step1")
self.post_event("accrual3_step2")
self.assertEqual(3, self._events["logicblock_accrual3_complete"])
def test_no_reset_and_no_disable_on_complete(self):
self.mock_event("logicblock_accrual4_complete")
# start game
self.start_game()
# and enable
self.post_event("accrual4_enable")
self.assertPlaceholderEvaluates(True, "device.accruals.accrual4.enabled")
self.assertPlaceholderEvaluates(False, "device.accruals.accrual4.completed")
# should work once
self.post_event("accrual4_step1")
self.post_event("accrual4_step2")
self.assertEqual(1, self._events["logicblock_accrual4_complete"])
self.assertPlaceholderEvaluates(True, "device.accruals.accrual4.enabled")
self.assertPlaceholderEvaluates(True, "device.accruals.accrual4.completed")
# enabled but still completed
self.post_event("accrual4_step1")
self.post_event("accrual4_step2")
self.assertEqual(1, self._events["logicblock_accrual4_complete"])
self.assertPlaceholderEvaluates(True, "device.accruals.accrual4.enabled")
self.assertPlaceholderEvaluates(True, "device.accruals.accrual4.completed")
# should work after reset
self.post_event("accrual4_reset")
self.assertPlaceholderEvaluates(True, "device.accruals.accrual4.enabled")
self.assertPlaceholderEvaluates(False, "device.accruals.accrual4.completed")
self.post_event("accrual4_step1")
self.post_event("accrual4_step2")
self.assertEqual(2, self._events["logicblock_accrual4_complete"])
self.assertPlaceholderEvaluates(True, "device.accruals.accrual4.enabled")
self.assertPlaceholderEvaluates(True, "device.accruals.accrual4.completed")
def test_reset_and_no_disable_on_complete(self):
self.mock_event("logicblock_accrual10_complete")
# start game
self.start_game()
# and enable
self.post_event("accrual10_enable")
self.assertPlaceholderEvaluates(True, "device.accruals.accrual10.enabled")
self.assertPlaceholderEvaluates(False, "device.accruals.accrual10.completed")
# should work once
self.post_event("accrual10_step1")
self.post_event("accrual10_step2")
self.assertEqual(1, self._events["logicblock_accrual10_complete"])
# and instantly reset and work again
self.assertPlaceholderEvaluates(True, "device.accruals.accrual10.enabled")
self.assertPlaceholderEvaluates(False, "device.accruals.accrual10.completed")
self.post_event("accrual10_step1")
self.post_event("accrual10_step2")
self.assertEqual(2, self._events["logicblock_accrual10_complete"])
self.assertPlaceholderEvaluates(True, "device.accruals.accrual10.enabled")
self.assertPlaceholderEvaluates(False, "device.accruals.accrual10.completed")
def test_player_change(self):
self.mock_event("logicblock_accrual5_complete")
self.machine.config['game']['balls_per_game'] = self.machine.placeholder_manager.build_int_template(2)
self.start_two_player_game()
self.advance_time_and_run()
self.post_event("start_mode1")
self.advance_time_and_run(.1)
# should work during game - player1
self.assertEqual(1, self.machine.game.player.number)
self.post_event("accrual5_step1")
self.post_event("accrual5_step2")
self.assertEqual(1, self._events["logicblock_accrual5_complete"])
# player2
self.drain_all_balls()
self.assertPlayerNumber(2)
self.post_event("start_mode1")
self.advance_time_and_run(.1)
# not yet complete
self.post_event("accrual5_step1")
self.assertEqual(1, self._events["logicblock_accrual5_complete"])
# player1 again
self.drain_all_balls()
self.assertPlayerNumber(1)
self.post_event("start_mode1")
self.advance_time_and_run(.1)
        # nothing should happen because it's disabled and completed for player1
self.post_event("accrual5_step1")
self.post_event("accrual5_step2")
self.assertEqual(1, self._events["logicblock_accrual5_complete"])
# player2 again
self.drain_all_balls()
self.assertPlayerNumber(2)
self.post_event("start_mode1")
self.advance_time_and_run(.1)
# complete it
self.post_event("accrual5_step2")
self.assertEqual(2, self._events["logicblock_accrual5_complete"])
self.post_event("stop_mode1")
self.stop_game()
# does not work after game
self.post_event("accrual5_step1")
self.post_event("accrual5_step2")
self.assertEqual(2, self._events["logicblock_accrual5_complete"])
def test_counter_hit_window(self):
self.start_game()
self.mock_event("logicblock_counter3_complete")
self.mock_event("counter_counter3_hit")
self.post_event("counter3_enable")
for i in range(10):
self.post_event("counter3_count")
self.assertEqual(0, self._events["logicblock_counter3_complete"])
# inside same window. only one hit
self.assertEqual(1, self._events["counter_counter3_hit"])
self.assertEqual(0, self._events["logicblock_counter3_complete"])
self.advance_time_and_run(1)
for i in range(3):
self.post_event("counter3_count")
self.assertEqual(0, self._events["logicblock_counter3_complete"])
self.assertEqual(2 + i, self._events["counter_counter3_hit"])
self.advance_time_and_run(1)
# it should complete
self.post_event("counter3_count")
self.assertEqual(1, self._events["logicblock_counter3_complete"])
self.assertEqual(5, self._events["counter_counter3_hit"])
def test_counter_template(self):
self.start_game()
self.mock_event("logicblock_counter4_complete")
self.mock_event("counter_counter4_hit")
self.machine.game.player.hits = 2
self.post_event("counter4_enable")
for i in range(2):
self.assertEqual(0, self._events["logicblock_counter4_complete"])
self.post_event("counter4_count")
self.assertEqual(2, self._events["counter_counter4_hit"])
self.assertEqual(1, self._events["logicblock_counter4_complete"])
self.advance_time_and_run(1)
self.machine.variables.set_machine_var("start", 1)
self.machine.game.player.hits = 5
self.mock_event("logicblock_counter4_complete")
self.mock_event("counter_counter4_hit")
self.post_event("counter4_reset")
self.post_event("counter4_enable")
for i in range(4):
self.assertEqual(0, self._events["logicblock_counter4_complete"])
self.post_event("counter4_count")
self.assertEqual(4, self._events["counter_counter4_hit"])
self.assertEqual(1, self._events["logicblock_counter4_complete"])
self.advance_time_and_run(1)
def test_counter_persist(self):
self.mock_event("logicblock_counter_persist_complete")
self.mock_event("counter_counter_persist_hit")
self.start_two_player_game()
self.post_event("start_mode1")
self.assertTrue("mode1" in self.machine.modes)
self.post_event("counter_persist_enable")
for i in range(3):
self.post_event("counter_persist_count")
self.assertEqual(i + 1, self._events["counter_counter_persist_hit"])
self.assertEqual(0, self._events["logicblock_counter_persist_complete"])
self.drain_all_balls()
self.assertPlayerNumber(2)
for i in range(10):
self.post_event("counter_persist_count")
self.drain_all_balls()
self.assertPlayerNumber(1)
self.post_event("start_mode1")
self.post_event("counter_persist_enable")
self.assertEqual(0, self._events["logicblock_counter_persist_complete"])
for i in range(2):
self.post_event("counter_persist_count")
self.assertEqual(i + 4, self._events["counter_counter_persist_hit"])
self.assertEqual(1, self._events["logicblock_counter_persist_complete"])
def test_count_without_end(self):
self.start_game()
self.post_event("counter5_count")
self.post_event("counter5_count")
self.post_event("counter5_count")
self.assertEqual(3, self.machine.counters["counter5"].value)
def test_counter_delay_timeout(self):
self.start_game()
self.mock_event("logicblock_counter9_complete")
self.mock_event("logicblock_counter9_hit")
self.post_event("counter9_enable")
for i in range(4):
self.post_event("counter9_count")
self.advance_time_and_run(.01)
self.assertEqual(0, self._events["logicblock_counter9_complete"])
# post final event to complete
self.post_event("counter9_count")
self.assertEqual(1, self._events["logicblock_counter9_complete"])
self.assertEqual(5, self._events["logicblock_counter9_hit"])
        # restart (reset and enable)
self.post_event("counter9_restart")
# 10 more hits with delay causing timeout
for i in range(10):
self.post_event("counter9_count")
self.advance_time_and_run(1)
self.assertEqual(1, self._events["logicblock_counter9_complete"])
self.assertEqual(15, self._events["logicblock_counter9_hit"])
def test_sequence_delay_timeout(self):
self.start_game()
self.mock_event("sequence2_complete")
self.mock_event("logicblock_sequence2_hit")
self.post_event("sequence2_enable")
# no timer reset
self.post_event("sequence2_step1a")
self.post_event("sequence2_step2a")
self.post_event("sequence2_step3a")
self.assertEqual(1, self._events["sequence2_complete"])
self.assertEqual(3, self._events["logicblock_sequence2_hit"])
# enable and reset
self.post_event("sequence2_enable")
self.post_event("sequence2_reset")
# timer expired
self.post_event("sequence2_step1a")
self.assertEqual(4, self._events["logicblock_sequence2_hit"])
self.advance_time_and_run(1)
self.post_event("sequence2_step2a")
self.post_event("sequence2_step3a")
self.assertEqual(1, self._events["sequence2_complete"])
self.assertEqual(4, self._events["logicblock_sequence2_hit"])
        # time expired and restart
self.post_event("sequence2_step1a")
self.advance_time_and_run(.1)
self.post_event("sequence2_step1a")
self.post_event("sequence2_step2a")
self.advance_time_and_run(.01)
self.post_event("sequence2_step3a")
self.assertEqual(2, self._events["sequence2_complete"])
self.assertEqual(8, self._events["logicblock_sequence2_hit"])
def test_accruals_delay_timeout(self):
self.start_game()
self.mock_event("accrual7_complete")
self.mock_event("accrual7_hit")
# enable accrual
self.post_event("accrual7_enable")
# no timer reset
self.post_event("accrual7_step1")
self.post_event("accrual7_step2")
self.post_event("accrual7_step3")
self.assertEqual(1, self._events["accrual7_complete"])
self.assertEqual(3, self._events["accrual7_hit"])
# time advance after each step but under timeout
self.post_event("accrual7_step1")
self.advance_time_and_run(.01)
self.post_event("accrual7_step2")
self.advance_time_and_run(.01)
self.post_event("accrual7_step3")
self.assertEqual(2, self._events["accrual7_complete"])
self.assertEqual(6, self._events["accrual7_hit"])
# timer advance after each step over timeout
self.post_event("accrual7_step1")
self.advance_time_and_run(1)
self.post_event("accrual7_step2")
self.advance_time_and_run(1)
self.post_event("accrual7_step3")
self.assertEqual(2, self._events["accrual7_complete"])
self.assertEqual(9, self._events["accrual7_hit"])
        # final two steps without additional time passed
self.post_event("accrual7_step1")
self.post_event("accrual7_step2")
self.assertEqual(3, self._events["accrual7_complete"])
self.assertEqual(11, self._events["accrual7_hit"])
|
from os import environ as env
import requests
_DARKSKY_KEY = env['DARKSKY_KEY']
_URL = 'https://api.darksky.net/forecast'
def _get_single_forecast(coord):
lat, lng = coord
url = '{url}/{key}/{lat},{lng}?exclude=minutely,hourly,daily,alerts,flags'.format(url=_URL, key=_DARKSKY_KEY,
lat=lat, lng=lng)
resp = requests.get(url)
if resp.status_code >= 400:
return {}
return resp.json()
def get_forecast(*coords):
return [_get_single_forecast(coord) for coord in coords]
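# Usage sketch (illustrative only; the coordinates below are arbitrary examples
# and DARKSKY_KEY must be present in the environment before this module loads):
# forecasts = get_forecast((40.7128, -74.0060), (51.5074, -0.1278))
# temperature = forecasts[0].get('currently', {}).get('temperature')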
|
# ====================================================================================================================
# trainModel.py
# ====================================================================================================================
## AUTHOR: Vamsi Krishna Reddy Satti
# This script trains a model matching the performance of bestModel on the provided data.
# The training time does not exceed 3 hours.
# Usage example:
# python trainModel.py -modelName bestModel -data ../cs763-assign3/data.bin -target ../cs763-assign3/labels.bin
import argparse
import pickle
import os
import torch
import torchfile
from src import criterion, layers, model, optim, utils
# Model training configuration of hyperparameters
config = {
'lr': 6e-5,
'epochs': 200,
'batch_size': 256,
}
torch.set_default_dtype(torch.float64)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # use the GPU if CUDA-capable hardware is present
# Parse the arguments
parser = argparse.ArgumentParser(description="This script trains my best model")
parser.add_argument("-modelName", dest="model_name", required=True, type=str, help="name of the model")
parser.add_argument("-data", dest="train_data_path", required=True, type=str, help="location of the training data")
parser.add_argument("-target", dest="train_labels_path", required=True, type=str, help="location of the training labels")
args = parser.parse_args()
# On Windows the default C long is 4 bytes, so force torchfile to read longs as 8 bytes.
X_train = torch.tensor(torchfile.load(args.train_data_path, force_8bytes_long=True), dtype=torch.float64).reshape(-1, 108*108).to(device)
y_train = torch.tensor(torchfile.load(args.train_labels_path, force_8bytes_long=True), dtype=torch.float64).reshape(-1).long().to(device)
# Model definition
net = model.Model()
net.add_layer(layers.Conv2d((108, 108), 1, 16, kernel_size=18, stride=2))
net.add_layer(layers.ReLU())
net.add_layer(layers.MaxPool2d(2))
net.add_layer(layers.BatchNorm2d(16))
net.add_layer(layers.Conv2d((23, 23), 16, 32, kernel_size=5, stride=2))
net.add_layer(layers.ReLU())
net.add_layer(layers.MaxPool2d(2))
net.add_layer(layers.BatchNorm2d(32))
net.add_layer(layers.Flatten())
net.add_layer(layers.Linear(5 * 5 * 32, 256))
net.add_layer(layers.ReLU())
net.add_layer(layers.BatchNorm1d(256))
net.add_layer(layers.Linear(256, 128))
net.add_layer(layers.ReLU())
net.add_layer(layers.BatchNorm1d(128))  # normalize the 128 features produced by the previous Linear layer
net.add_layer(layers.Linear(128, 64))
net.add_layer(layers.ReLU())
net.add_layer(layers.BatchNorm1d(64))
net.add_layer(layers.Linear(64, 6))
net.to(device)
net.train() # set model to train mode
# Preprocess the data
preprocess = {}
preprocess['mean'] = X_train.mean(0, keepdim=True)
preprocess['std'] = X_train.std(0, keepdim=True)
X_train -= preprocess['mean']
X_train /= preprocess['std']
X_train = X_train.reshape(-1, 1, 108, 108)
# Initialize the DataLoader
dataloader_train = utils.DataLoader((X_train, y_train), batch_size=config['batch_size'], shuffle=True)
# Loss function used is Cross Entropy Loss
# Optimizer used is Adam with a StepLR learning-rate schedule
loss_fn = criterion.CrossEntropyLoss()
optimizer = optim.Adam(net, config['lr'])
scheduler = optim.StepLR(optimizer, step_size=5, gamma=0.96)
# Training of the model happens here...
for epoch in range(config['epochs']):
print(f"[Epoch] {epoch} starts...")
scheduler.step()
for X, y in dataloader_train:
output = net(X)
loss = loss_fn(output, y)
net.zero_grad()
grad = loss_fn.backward(output, y)
net.backward(output, grad)
optimizer.step()
# Save the model to a file
for key in preprocess:
preprocess[key] = preprocess[key].cpu()
state = {
'config': config,
'model': net.cpu(),
'preprocess': preprocess
}
model_file_path = f"./{args.model_name}/model.bin"
os.makedirs(os.path.dirname(model_file_path), exist_ok=True)
with open(model_file_path, 'w+b') as file:
pickle.dump(state, file)
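# Sketch of how the saved state could be restored for evaluation (assumes the
# same `src` package is importable so pickle can rebuild the custom Model;
# `eval()` is a hypothetical counterpart to the `train()` call above):
# with open(f"./{args.model_name}/model.bin", 'rb') as file:
#     state = pickle.load(file)
# net = state['model'].to(device)
# net.eval()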
|
from deck_themer.helpers import *
# Test for experimenting with HDP and with HDP hyperparameters.
# The 'corpus' created below is necessary for both the lda_param_checker() method and the create_lda() method. The
# 'decklists' created below is only used for the lda_param_checker().
decklists, corpus, df = corpus_maker('..\\CSV_files\\obfuscated_tdm.csv')
# Deck files are somewhat proprietary, so you'll have to make your own.
# Use this for getting a general feel for what the different hyperparameters do.
lda_test_results = lda_param_checker(tw=tp.TermWeight.IDF, min_df_0=5, min_df_f=6, k_0=8, k_f=11, k_s=1, alpha_0=-1,
alpha_f=1, eta_0=0, eta_f=2, corpus=corpus, word_list=decklists, to_excel=True,
fname='..\\tests\\TestResults\\obfuscated_tdm_lda_param_checking_results_tw-IDF.xlsx')
## Use this for testing specific hyperparameters.
# lda = create_lda(min_df=5, k=9, corpus=corpus)
|
from keras import models
from keras import layers
from keras import optimizers
from matplotlib import pyplot as plt
import pickle
class NeuralNet:
def __init__(self, x_train, y_train, lr=0.001):
self.model = models.Sequential()
self.x_train = x_train
self.y_train = y_train
self.partial_x_train = None
self.partial_y_train = None
self.x_val = None
self.y_val = None
self.lr = lr
self.history = None
def divide_data(self):
self.x_val = self.x_train[:10000]
self.y_val = self.y_train[:10000]
self.partial_x_train = self.x_train[10000:]
self.partial_y_train = self.y_train[10000:]
def network(self):
self.model.add(layers.Dense(
16, activation='relu', input_shape=(10000,)))
self.model.add(layers.Dense(16, activation='relu'))
self.model.add(layers.Dense(1, activation='sigmoid'))
self.model.compile(optimizer=optimizers.RMSprop(self.lr),
loss='binary_crossentropy',
metrics=['accuracy'])
def train_model(self):
self.history = self.model.fit(self.partial_x_train,
self.partial_y_train,
epochs=20,
batch_size=512,
validation_data=(self.x_val, self.y_val))
self.save_model()
with open("history.pkl", "wb") as file:
pickle.dump(self.history.history, file)
def save_model(self):
model_json = self.model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
self.model.save_weights("model.h5")
print("Saved model to disk")
    def load_model(self):
        with open('model.json', 'r') as json_file:
            loaded_model_json = json_file.read()
        # rebuild the architecture, then load the weights into the new model
        loaded_model = models.model_from_json(loaded_model_json)
        loaded_model.load_weights("model.h5")
        print("Loaded model from disk")
        return loaded_model
|
# Reverse each line of input, stopping at the 'END' sentinel or at end of input.
try:
    while True:
        T = input()
        if T == 'END':
            break
        print(T[::-1])
except EOFError:
    pass
|
from autorecon.plugins import ServiceScan
class SMTPUserEnum(ServiceScan):
def __init__(self):
super().__init__()
self.name = 'SMTP-User-Enum'
self.tags = ['default', 'safe', 'smtp', 'email']
def configure(self):
self.match_service_name('^smtp')
async def run(self, service):
await service.execute('hydra smtp-enum://{addressv6}:{port}/vrfy -L "' + self.get_global('username_wordlist', default='/usr/share/seclists/Usernames/top-usernames-shortlist.txt') + '" 2>&1', outfile='{protocol}_{port}_smtp_user-enum_hydra_vrfy.txt')
await service.execute('hydra smtp-enum://{addressv6}:{port}/expn -L "' + self.get_global('username_wordlist', default='/usr/share/seclists/Usernames/top-usernames-shortlist.txt') + '" 2>&1', outfile='{protocol}_{port}_smtp_user-enum_hydra_expn.txt')
def manual(self, service, plugin_was_run):
service.add_manual_command('Try User Enumeration using "RCPT TO". Replace <TARGET-DOMAIN> with the target\'s domain name:', [
'hydra smtp-enum://{addressv6}:{port}/rcpt -L "' + self.get_global('username_wordlist', default='/usr/share/seclists/Usernames/top-usernames-shortlist.txt') + '" -o "{scandir}/{protocol}_{port}_smtp_user-enum_hydra_rcpt.txt" -p <TARGET-DOMAIN>'
])
|
# -*- coding: UTF-8 -*-
from SMInfoParser.StateMachineInfo import StateJumpInfo
from SMInfoParser.StateMachineInfoParser import SMInfoParser
from re import (findall,
sub)
class GraphvizSMInfoExtractor(SMInfoParser):
"""
    State machine information extractor; this class helps extract information from a Graphviz graph.
"""
def __init__(self):
pass
def get_sm_jump_info_list_from_file(self, file_path):
"""
Get state machine information from graphviz dot file.
:param file_path: path to the file
        :return: list of StateJumpInfo objects describing the transitions
"""
state_machine_content_handler = self.__StateMachineContentHandler(file_path)
return state_machine_content_handler.get_transition_list()
class __StateMachineContentHandler():
__transition_list = None
def __init__(self, file_path):
if ".dot" not in file_path:
raise RuntimeError("Please use '.dot' file")
self.__update_transitions_list(file_path)
self.__transition_list = self.__sort_list(self.__transition_list)
def get_transition_list(self):
return self.__transition_list
def __update_transitions_list(self, file_path):
state_name_dict = {}
self.__transition_list = []
with open(file_path, 'r') as f:
lines = f.readlines()
for line in lines: # find state symbol-name pair
# has label but no transition symbol, it's a state, get the name from it
if '->' not in line and 'label' in line:
state_symbol = line.split("[")[0]
state_symbol = sub(r'\s', "", state_symbol)
                state_name = findall(r'label="[0-9a-zA-Z_]+', line)[0]
state_name = state_name.split('"')[1]
state_name_dict[state_symbol] = state_name
for line in lines: # find transition
if '->' in line and 'label' in line:
state_jump_info = StateJumpInfo()
line = sub(r'\s', "", line)
states = line.split('[')[0]
from_state, to_state = states.split('->')
                transition = findall(r'label="[0-9a-zA-Z_\\n]+', line)[0]
transition = transition.split('"')[1]
state_jump_info.condition, state_jump_info.action = transition.split("\\n")
state_jump_info.from_state = state_name_dict[from_state]
state_jump_info.to_state = state_name_dict[to_state]
self.__transition_list.append(state_jump_info)
@staticmethod
def __sort_list(st_list):
ret_list = []
state_list = []
for item in st_list:
if item.from_state not in state_list:
state_list.append(item.from_state)
        state_list.sort(key=lambda x: int(findall(r'[0-9]+', x)[0]))  # numeric sort by the leading state number
for state in state_list:
if "2WAIT_FOR_STATION_RES" in state:
state = state
state_st_list = []
for item in st_list:
if item.from_state == state:
state_st_list.append(item)
state_st_list.sort(key=lambda x: int(findall(r'^[0-9]+',x.condition)[0]))
for item in state_st_list:
ret_list.append(item)
return ret_list
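# Usage sketch (the .dot path is a hypothetical example):
# extractor = GraphvizSMInfoExtractor()
# for jump in extractor.get_sm_jump_info_list_from_file('state_machine.dot'):
#     print(jump.from_state, '->', jump.to_state, jump.condition, jump.action)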
|
from .entity import Entity
class Player(Entity):
pass
class Gorilla(Player):
type = 'gorilla'
|
"""Documentation related tests."""
|
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
from tests.tools import *
from azurelinuxagent.common.exception import HttpError, ResourceGoneError
from azurelinuxagent.common.future import httpclient
from azurelinuxagent.common.utils.cryptutil import CryptUtil
DATA_FILE = {
"version_info": "wire/version_info.xml",
"goal_state": "wire/goal_state.xml",
"hosting_env": "wire/hosting_env.xml",
"shared_config": "wire/shared_config.xml",
"certs": "wire/certs.xml",
"ext_conf": "wire/ext_conf.xml",
"manifest": "wire/manifest.xml",
"ga_manifest" : "wire/ga_manifest.xml",
"trans_prv": "wire/trans_prv",
"trans_cert": "wire/trans_cert",
"test_ext": "ext/sample_ext-1.3.0.zip"
}
DATA_FILE_NO_EXT = DATA_FILE.copy()
DATA_FILE_NO_EXT["goal_state"] = "wire/goal_state_no_ext.xml"
DATA_FILE_EXT_NO_SETTINGS = DATA_FILE.copy()
DATA_FILE_EXT_NO_SETTINGS["ext_conf"] = "wire/ext_conf_no_settings.xml"
DATA_FILE_EXT_NO_PUBLIC = DATA_FILE.copy()
DATA_FILE_EXT_NO_PUBLIC["ext_conf"] = "wire/ext_conf_no_public.xml"
DATA_FILE_EXT_AUTOUPGRADE = DATA_FILE.copy()
DATA_FILE_EXT_AUTOUPGRADE["ext_conf"] = "wire/ext_conf_autoupgrade.xml"
DATA_FILE_EXT_INTERNALVERSION = DATA_FILE.copy()
DATA_FILE_EXT_INTERNALVERSION["ext_conf"] = "wire/ext_conf_internalversion.xml"
DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION = DATA_FILE.copy()
DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION["ext_conf"] = "wire/ext_conf_autoupgrade_internalversion.xml"
DATA_FILE_EXT_ROLLINGUPGRADE = DATA_FILE.copy()
DATA_FILE_EXT_ROLLINGUPGRADE["ext_conf"] = "wire/ext_conf_upgradeguid.xml"
DATA_FILE_EXT_SEQUENCING = DATA_FILE.copy()
DATA_FILE_EXT_SEQUENCING["ext_conf"] = "wire/ext_conf_sequencing.xml"
class WireProtocolData(object):
def __init__(self, data_files=DATA_FILE):
self.emulate_stale_goal_state = False
self.call_counts = {
"comp=versions" : 0,
"/versions" : 0,
"goalstate" : 0,
"hostingenvuri" : 0,
"sharedconfiguri" : 0,
"certificatesuri" : 0,
"extensionsconfiguri" : 0,
"extensionArtifact" : 0,
"manifest.xml" : 0,
"manifest_of_ga.xml" : 0,
"ExampleHandlerLinux" : 0
}
self.version_info = load_data(data_files.get("version_info"))
self.goal_state = load_data(data_files.get("goal_state"))
self.hosting_env = load_data(data_files.get("hosting_env"))
self.shared_config = load_data(data_files.get("shared_config"))
self.certs = load_data(data_files.get("certs"))
self.ext_conf = load_data(data_files.get("ext_conf"))
self.manifest = load_data(data_files.get("manifest"))
self.ga_manifest = load_data(data_files.get("ga_manifest"))
self.trans_prv = load_data(data_files.get("trans_prv"))
self.trans_cert = load_data(data_files.get("trans_cert"))
self.ext = load_bin_data(data_files.get("test_ext"))
def mock_http_get(self, url, *args, **kwargs):
content = None
resp = MagicMock()
resp.status = httpclient.OK
# wire server versions
if "comp=versions" in url:
content = self.version_info
self.call_counts["comp=versions"] += 1
# HostPlugin versions
elif "/versions" in url:
content = '["2015-09-01"]'
self.call_counts["/versions"] += 1
elif "goalstate" in url:
content = self.goal_state
self.call_counts["goalstate"] += 1
elif "hostingenvuri" in url:
content = self.hosting_env
self.call_counts["hostingenvuri"] += 1
elif "sharedconfiguri" in url:
content = self.shared_config
self.call_counts["sharedconfiguri"] += 1
elif "certificatesuri" in url:
content = self.certs
self.call_counts["certificatesuri"] += 1
elif "extensionsconfiguri" in url:
content = self.ext_conf
self.call_counts["extensionsconfiguri"] += 1
else:
# A stale GoalState results in a 400 from the HostPlugin
# for which the HTTP handler in restutil raises ResourceGoneError
if self.emulate_stale_goal_state:
if "extensionArtifact" in url:
self.emulate_stale_goal_state = False
self.call_counts["extensionArtifact"] += 1
raise ResourceGoneError()
else:
raise HttpError()
# For HostPlugin requests, replace the URL with that passed
# via the x-ms-artifact-location header
if "extensionArtifact" in url:
self.call_counts["extensionArtifact"] += 1
if "headers" not in kwargs or \
"x-ms-artifact-location" not in kwargs["headers"]:
raise Exception("Bad HEADERS passed to HostPlugin: {0}",
kwargs)
url = kwargs["headers"]["x-ms-artifact-location"]
if "manifest.xml" in url:
content = self.manifest
self.call_counts["manifest.xml"] += 1
elif "manifest_of_ga.xml" in url:
content = self.ga_manifest
self.call_counts["manifest_of_ga.xml"] += 1
elif "ExampleHandlerLinux" in url:
content = self.ext
self.call_counts["ExampleHandlerLinux"] += 1
resp.read = Mock(return_value=content)
return resp
else:
raise Exception("Bad url {0}".format(url))
resp.read = Mock(return_value=content.encode("utf-8"))
return resp
def mock_crypt_util(self, *args, **kw):
        # Partially patch instance method of class CryptUtil
cryptutil = CryptUtil(*args, **kw)
cryptutil.gen_transport_cert = Mock(side_effect=self.mock_gen_trans_cert)
return cryptutil
def mock_gen_trans_cert(self, trans_prv_file, trans_cert_file):
with open(trans_prv_file, 'w+') as prv_file:
prv_file.write(self.trans_prv)
with open(trans_cert_file, 'w+') as cert_file:
cert_file.write(self.trans_cert)
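# Usage sketch (an assumption about how these mocks are typically wired up, not
# taken from this file): tests construct WireProtocolData(DATA_FILE) and patch
# restutil.http_get with mock_http_get so that protocol requests are answered
# from the wire/* fixture files declared above.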
|
import tkinter as tk
import os, time
from xml.dom.minidom import parse
from awesometkinter.bidirender import add_bidi_support # https://github.com/Aboghazala/AwesomeTkinter
import arabic_reshaper # https://github.com/mpcabd/python-arabic-reshaper
import vlc # pip install python-vlc (https://github.com/oaubert/python-vlc)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class FormSuraScrollable(tk.Frame):
def __init__(self, sura):
self.parent=tk.Toplevel()
self.parent.wm_geometry("630x700") # width x height
tk.Frame.__init__(self, self.parent)
self.sura = sura
        self.parent.protocol("WM_DELETE_WINDOW", self.callback)  # called when the user closes the window
self.canvas = tk.Canvas(self, borderwidth=0, background="white")
self.frame = tk.Frame(self.canvas, background="white")
self.vsb = tk.Scrollbar(self, orient="vertical", command=self.canvas.yview)
self.canvas.configure(yscrollcommand=self.vsb.set)
self.vsb.pack(side="right", fill="y")
self.canvas.pack(side="left", fill="both", expand=True)
self.canvas.create_window((0,0), window=self.frame, anchor="nw",tags="self.frame")
self.frame.bind("<Configure>", self.onFrameConfigure)
self._init_widgets()
def get_ayas(self):
all_ayas = parse(os.path.join(BASE_DIR, 'static/quran-ayas.xml'))
sura_ayas = all_ayas.getElementsByTagName('sura')[self.sura].getElementsByTagName('aya')
return [aya_text.getAttribute('text') for aya_text in sura_ayas]
def play_audio(self,fname):
print(f'going to play {fname}')
p=vlc.MediaPlayer(fname)
p.play()
time.sleep(0.5) # sleep because it needs time to start playing
while p.is_playing():
time.sleep(0.5) # sleep to use less CPU
def lbl_click(self,event,n,lbl):
if event.num != 1:
# if not left button clicked
return
lbl['fg']='red'
self.frame.update()
fname = os.path.join(BASE_DIR,'static','audio',f'S{self.sura+1}_{n+1}_') # f'S98_1_0.bin'
if os.path.exists(f'{fname}0.bin'):
print('only 1 file')
self.play_audio(f'{fname}0.bin')
elif os.path.exists(f'{fname}1.bin'):
print('more than 1 file')
# eg 5-2,6-2,8-3
for i in range(20):
if os.path.exists(f'{fname}{i+1}.bin'):
self.play_audio(f'{fname}{i+1}.bin')
else:
break
else:
            print("file doesn't exist")
lbl['fg']='black'
def _init_widgets(self):
for n,aya_text in enumerate(self.get_ayas()):
# https://github.com/googlefonts/noto-fonts/tree/main/hinted/ttf/NotoSansArabic
lbl = tk.Label(self.frame, anchor="e", width=35, font=("Noto Sans Arabic", 22), bg='white',pady=5, relief = 'ridge') # bold, lighter,
lbl.grid(column = 0, row = n)
ctxt=arabic_reshaper.reshape(aya_text)
wrapped_text = ''
            for i in range(0, len(ctxt), 40):
                wrapped_text += f'{ctxt[i:i+40]}\n'  # letter wrap
add_bidi_support(lbl)
lbl.set(f'({n+1}) {wrapped_text}')
lbl.bind( "<Button>", lambda event, lbl_n=n, lbl=lbl: self.lbl_click(event, lbl_n, lbl))
def callback(self):
        print('user closed the window')
self.parent.destroy()
def onFrameConfigure(self, event):
'''Reset the scroll region to encompass the inner frame'''
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
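# Usage sketch: sura indices are 0-based here, so FormSuraScrollable(97) shows
# sura 98 and plays audio files named like S98_<aya>_<part>.bin (see lbl_click).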
|
import os
import PIL.Image
import cv2
import numpy as np
import pytest
def test_opencv_avc1():
filename = 'test.mp4'
if os.path.exists(filename):
os.unlink(filename)
width = height = 256
# Define the codec and create VideoWriter object
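    # NOTE: 'avc1' (H.264) needs an OpenCV build with an H.264-capable backend
    # (e.g. FFmpeg/openh264); without one the writer can fail silently, which
    # out.isOpened() would reveal.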
fourcc = cv2.VideoWriter_fourcc(*'avc1')
out = cv2.VideoWriter(filename, fourcc, 20.0, (width, height))
assert os.path.exists(filename), 'No video file created'
for frame in sorted(os.listdir("frames")):
img = PIL.Image.open("frames/" + frame)
tmp_img = cv2.cvtColor(np.asarray(img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
out.write(tmp_img)
# Release everything if job is finished
out.release()
assert os.stat(filename).st_size > 0, 'Video file is empty'
@pytest.mark.skip
def test_opencv_mp4v():
filename = 'test.mp4'
if os.path.exists(filename):
os.unlink(filename)
width = height = 256
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(filename, fourcc, 20.0, (width, height))
assert os.path.exists(filename), 'No video file created'
for frame in sorted(os.listdir("frames")):
img = PIL.Image.open("frames/" + frame)
tmp_img = cv2.cvtColor(np.asarray(img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
out.write(tmp_img)
# Release everything if job is finished
out.release()
assert os.stat(filename).st_size > 0, 'Video file is empty'
# import imageio
# frame_arr = []
# for frame in sorted(os.listdir("frames")):
# img = PIL.Image.open("frames/" + frame)
# tmp_img = np.asarray(img, dtype=np.uint8)
# frame_arr.append(tmp_img)
# imageio.mimwrite('test.gif', frame_arr, "GIF", fps=60)
|
import sys, os
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../')
import pytest
from unittest import TestCase
from src.builder import Builder
# Temporarily skipped
# @pytest.mark.skipif()
class BuilderTest(TestCase):
"""測試 Builder"""
def setUp(self):
"""測試前先建立好環境"""
self.user = 'Oscar'
self.keyword = '愛吃味'
self.keyword_builder = Builder(self.keyword, self.user)
self.song_url = 'https://www.youtube.com/watch?v=7M6nsbieMks'
self.song_builder = Builder(self.song_url, self.user)
self.song_list_url = 'https://www.youtube.com/playlist?list=PLRR3Za6-4AAL_VvOXL-eqPIWnFfB8QmD8'
self.song_list_builder = Builder(self.song_list_url, self.user)
def tearDown(self):
"""測試結束後收拾環境"""
pass
def test_keyword_builder(self):
song = self.keyword_builder.get_item()
self.assertEqual('7M6nsbieMks', song.info['id'])
self.assertEqual(
'https://www.youtube.com/watch?v=7M6nsbieMks', song.info['url'])
self.assertEqual(
'Trout Fresh/呂士軒 - 愛吃味 (Official Music Video)', song.info['title'])
self.assertEqual(258, song.info['duration'])
self.assertEqual('SmashRegz', song.info['uploader'])
self.assertEqual(self.user, song.info['request'])
# self.assertEqual('./downloads/7M6nsbieMks.mp3', song.file_locat)
def test_song_builder(self):
song = self.song_builder.get_item()
self.assertEqual('7M6nsbieMks', song.info['id'])
self.assertEqual(
'https://www.youtube.com/watch?v=7M6nsbieMks', song.info['url'])
self.assertEqual(
'Trout Fresh/呂士軒 - 愛吃味 (Official Music Video)', song.info['title'])
self.assertEqual(258, song.info['duration'])
self.assertEqual('SmashRegz', song.info['uploader'])
self.assertEqual(self.user, song.info['request'])
# self.assertEqual('./downloads/7M6nsbieMks.mp3', song.file_locat)
def test_song_list_builder(self):
song_list = self.song_list_builder.get_item()
self.assertEqual('TEST', song_list.info['title'])
self.assertEqual(454, song_list.info['duration'])
self.assertEqual('俊廷江', song_list.info['uploader'])
it = iter(song_list)
song = next(it)
self.assertEqual(
'Snail\'s House - Grape Soda [Tasty Release]', song.info['title'])
self.assertEqual(194, song.info['duration'])
self.assertEqual('Tasty', song.info['uploader'])
song = next(it)
self.assertEqual(
'TheFatRat - Never Be Alone [Tasty Release]', song.info['title'])
self.assertEqual(260, song.info['duration'])
self.assertEqual('Tasty', song.info['uploader'])
|
from fjsp import Solver, Problem, Job, Operation, Machine, Task, Resource
from typing import List, Tuple
import numpy as np
class Chromosome:
def __init__(self, machine_selection: List[int], operation_sequence: List[int]):
self.machine_selection = machine_selection
self.operation_sequence = operation_sequence
self.fitness = 0
def set_fitness(self, fitness):
self.fitness = fitness
def __str__(self) -> str:
return "[" + (", ".join(str(i) for i in (self.machine_selection + self.operation_sequence))) + "]\n"
def __repr__(self):
return str(self)
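# Encoding note (illustrative example, not taken from the original source): for
# two jobs with two operations each, machine_selection = [2, 1, 3, 1] means job
# 1's first operation runs on the 2nd machine of its alternative set, its second
# operation on the 1st, and so on, while operation_sequence = [1, 2, 2, 1] lists
# job ids, where the k-th occurrence of job j denotes its k-th operation.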
class ParentSelector:
def __init__(self, name):
self.name = name
def select_parent(self, population: List[Chromosome]) -> Chromosome:
pass
class RoulleteWheel(ParentSelector):
def __init__(self):
super(RoulleteWheel, self).__init__("roullete_wheel")
def select_parent(self, population: List[Chromosome]):
total_fitness = np.sum([c.fitness for c in population])
p = np.random.rand()
pointer = 0
for i, c in enumerate(population):
            r = c.fitness / total_fitness
pointer += r
if p < pointer:
return i
return len(population) - 1
class Tournament(ParentSelector):
def __init__(self):
super(Tournament, self).__init__("tournament")
    def select_parent(self, population: List[Chromosome], k=3):
        candidates = np.random.choice(len(population), size=k, replace=False)
        best_candidate = candidates[0]
        for i in candidates:
            # fitness is the makespan here, so lower is better
            if population[i].fitness < population[best_candidate].fitness:
                best_candidate = i
        return best_candidate
class Crossover:
def __init__(self, name: str):
self.name = name
def crossover(self, p1: List[int], p2: List[int]) -> Tuple[Chromosome, Chromosome]:
pass
class TwoPointCrossover(Crossover):
def __init__(self):
super(TwoPointCrossover, self).__init__("two_point")
def crossover(self, p1: List[int], p2: List[int]) -> Tuple[List[int], List[int]]:
points = np.random.choice(len(p1), size=2, replace=False)
points.sort()
ms1 = p1[:points[0]] + p2[points[0]:points[1]] + p1[points[1]:]
ms2 = p2[:points[0]] + p1[points[0]:points[1]] + p2[points[1]:]
return ms1, ms2
class UniformCrossover(Crossover):
def __init__(self):
super(UniformCrossover, self).__init__("uniform")
    def crossover(self, p1: List[int], p2: List[int]) -> Tuple[List[int], List[int]]:
        swap = np.random.randint(2, size=len(p1))
        # copy as lists so later list-slicing crossovers keep working on offspring
        ms1 = list(p1)
        ms2 = list(p2)
        for i, j in enumerate(swap):
            if j == 1:
                # swap the i-th gene between the two offspring
                ms1[i], ms2[i] = ms2[i], ms1[i]
        return ms1, ms2
class POXCrossover(Crossover):
def __init__(self):
super(POXCrossover, self).__init__("pox")
def crossover(self, p1: List[int], p2: List[int]) -> Tuple[List[int], List[int]]:
        # the sub-joint set is the jobs common to both parents
        sub_joints = list(set(p1) & set(p2))
# divide into 2 parts
np.random.shuffle(sub_joints)
js1 = sub_joints[:len(sub_joints)//2]
js2 = sub_joints[len(sub_joints)//2:]
c1, c2 = [], []
j = 0
for i in p2:
if i not in js1:
c1.append(i)
j += 1
while j < len(p1) and p1[j] in js1:
c1.append(p1[j])
j += 1
j = 0
for i in p1:
if i not in js2:
c2.append(i)
j += 1
while j < len(p2) and p2[j] in js2:
c2.append(p2[j])
j += 1
return c1, c2
class Mutator:
def __init__(self, name, p: float):
self.name = name
self.p = p
def mutate(self, p: List[int]) -> List[int]:
pass
class MSMutator(Mutator):
def __init__(self, p: float):
super(MSMutator, self).__init__("ms", p)
def mutate(self, p: List[int], problem: Problem) -> List[int]:
        pc = list(p)  # work on a list copy so offspring keep list semantics
for i, m in enumerate(p):
prob = np.random.rand()
if prob < self.p:
opr = problem.get_operation_by_index(i)
pc[i] = opr.get_lowest_machine() + 1
return pc
class OSMutator(Mutator):
def __init__(self, p: float):
super(OSMutator, self).__init__("os", p)
def mutate(self, p: List[int]) -> List[int]:
prob = np.random.rand()
        pc = list(p)  # work on a list copy so offspring keep list semantics
if prob < self.p:
np.random.shuffle(pc)
return pc
class GeneticAlgorithm(Solver):
def __init__(self):
super(GeneticAlgorithm, self).__init__("Genetic Algorithm")
self.parent_selectors: List[ParentSelector] = [
RoulleteWheel(),
Tournament()
]
self.crossovers: List[Crossover] = [
TwoPointCrossover(),
UniformCrossover(),
POXCrossover()
]
def get_parent_selector(self, selector) -> ParentSelector:
for p in self.parent_selectors:
if p.name == selector:
return p
raise ValueError(f"Parent selector {selector} is not defined")
def get_crossover(self, crossover) -> Crossover:
for c in self.crossovers:
if c.name == crossover:
return c
raise ValueError(f"Crossover {crossover} is not defined")
def global_selection(self) -> Chromosome:
machine_selection = []
operation_sequence = []
# 1. Create a new array to record all machines’ processing time, initialize each element to 0;
time_array = [0 for _ in range(self.problem.n_machine)]
# 2. Select a job randomly and insure one job to be selected only once, then select the first operation of the job;
ms_temp = [[] for _ in range(len(self.problem.jobs))]
for job in self.problem.get_shuffled_job():
ms: List[int] = []
for operation in job.operations:
# 3. Add the processing time of each machine in the available machines and the corresponding
# machine’s time in the time array together
added_time = []
for machine in operation.machines:
added_time.append(time_array[machine.index] + machine.operation_time)
# 4. Compare the added time to find the shortest time, then select the index k of the machine which has the shortest
# time. If there is the same time among different machines, a machine is selected randomly among them;
k = np.argmin(added_time)
# 5. Set the allele which corresponds to the current operation in the MS part to k;
ms.append(k + 1)
# 6. Add the current selected machine’s processing time and its corresponding allele in the
# time array together in order to update the time array;
selected_machine = operation.machines[k]
time_array[selected_machine.index] += selected_machine.operation_time
# 7. Select the next operation of the current job, and execute
# Step 3 to Step 6 until all operations of the current job are
# selected, then go to Step 8;
# 8. Go to step 2 until all jobs are all selected once
# set the operation sequence allele
operation_sequence.append(job.id)
ms_temp[job.index] = ms
for ms in ms_temp:
for i in ms:
machine_selection.append(i)
np.random.shuffle(operation_sequence)
return Chromosome(machine_selection, operation_sequence)
def local_selection(self):
machine_selection = []
operation_sequence = []
# 1. In order to record all machines’ processing time, create a
# new array (called time array), the length equals to L, and
# set each element 0;
# 2. Select the first job, and its first operation;
ms_temp = [[] for _ in range(len(self.problem.jobs))]
for job in self.problem.get_shuffled_job():
time_array = [0 for _ in range(self.problem.L)]
ms: List[int] = []
for operation in job.operations:
# 3. Set each allele 0 in the array;
# skip
# 4. Add the processing time of each machine in the alternative
# machine set and the corresponding machines’ time
# in the array together;
added_time = []
for machine in operation.machines:
added_time.append(time_array[machine.index] + machine.operation_time)
# 5. Compare the added time to find the shortest time, then select the index k of the machine which has the shortest
# time. If there is the same time among different machines, a machine is selected randomly among them;
k = np.argmin(added_time)
# 6. Set the allele which corresponds to the current operation in the MS part to k;
ms.append(k + 1)
# 7. Add the current selected machine’s processing time and its corresponding allele in the
# time array together in order to update the time array;
selected_machine = operation.machines[k]
time_array[selected_machine.index] += selected_machine.operation_time
# 8. Select the next operation of the current job, and go to
# Step 4 until all the operations of the current job are
# selected, then go to Step 9;
# 9. Select the next job, and select the first operation of the current job;
# 10. Go to Step 3 until all jobs are selected once
# set the operation sequence allele
operation_sequence.append(job.id)
ms_temp[job.index] = ms
for ms in ms_temp:
for i in ms:
machine_selection.append(i)
np.random.shuffle(operation_sequence)
return Chromosome(machine_selection, operation_sequence)
def random_selection(self):
machine_selection = []
operation_sequence = []
for job in self.problem.jobs:
for operation in job.operations:
selected_machine_idx = operation.get_random_machine(return_index=True)
machine_selection.append(selected_machine_idx + 1)
operation_sequence.append(job.id)
np.random.shuffle(operation_sequence)
return Chromosome(machine_selection, operation_sequence)
def init_population(self, population_amount, gs, ls, rs):
        # use a tolerance since gs + ls + rs is a float sum (e.g. .6 + .3 + .1)
        assert abs(gs + ls + rs - 1) < 1e-9, "The initialization population fractions must sum to 1"
self.population: List[Chromosome] = []
for _ in range(int(gs * population_amount)):
chromosome = self.global_selection()
self.population.append(chromosome)
for _ in range(int(ls * population_amount)):
chromosome = self.local_selection()
self.population.append(chromosome)
for _ in range(int(rs * population_amount)):
chromosome = self.random_selection()
self.population.append(chromosome)
def is_valid_chromosome(self, chromosome: Chromosome) -> bool:
for i, m in enumerate(chromosome.machine_selection):
opr = self.problem.get_operation_by_index(i)
if opr.get_machine_by_id(m) == None:
return False
return True
def fix_chromosome(self, chromosome: Chromosome) -> Chromosome:
for i, m in enumerate(chromosome.machine_selection):
opr = self.problem.get_operation_by_index(i)
chromosome.machine_selection[i] = np.min([m, len(opr.machines)])
return chromosome
def decode_chromosome(self, chromosome: Chromosome):
# 1. Convert machine selection to machine matrix and time matrix
machine_matrix = []
time_matrix = []
i = 0
for job in self.problem.jobs:
used_machine = []
used_time = []
for operation in job.operations:
machine_idx = chromosome.machine_selection[i]
used_machine.append(operation.machines[machine_idx - 1].id)
used_time.append(operation.machines[machine_idx - 1].operation_time)
i += 1
machine_matrix.append(used_machine)
time_matrix.append(used_time)
# 2. Decode operation sequence
resources: List[Resource] = [Resource(i + 1) for i in range(self.problem.n_machine)]
# variable to track current operation on job-n. Default is 1st operation
current_job_operations = [1 for _ in range(len(self.problem.jobs))]
for job_id in chromosome.operation_sequence:
operation_id = current_job_operations[job_id - 1]
job = self.problem.get_job_by_id(job_id)
if job == None:
raise ValueError(f"Job with id {job_id} is not found")
operation = job.get_operation_by_id(operation_id)
if operation == None:
raise ValueError(f"Operation with id {operation_id} is not found")
selected_machine_id = machine_matrix[job.index][operation.index]
selected_machine = operation.get_machine_by_id(selected_machine_id)
resource = resources[selected_machine.index]
# find all idle time
idle_times = resource.find_idle_time()
# let's check if the operation can fit in the idle time
# 1. select idle time that the start_time is >= last operation
last_operation = job.get_operation_by_id(operation_id - 1)
last_operation_time = 0
if last_operation != None:
# there is last operation, it means this operation need to be inserted after the last operation
last_operation_machine = machine_matrix[job.index][last_operation.index]
last_machine = last_operation.get_machine_by_id(last_operation_machine)
last_resource = resources[last_machine.index]
last_task = last_resource.find_operation(job_id, last_operation.id)
if last_task != None:
last_operation_time = last_task.get_end() # start + duration
# 2. check if the operation can fit in
is_fit = False
for (start, end) in idle_times:
tb = np.max([start, last_operation_time])
if tb + selected_machine.operation_time <= end:
# its fit :), lets put it in there
# print('insert', (start, end), tb, selected_machine.operation_time)
resource.add_task(operation, tb)
is_fit = True
break
if not is_fit:
# the operation is not fit in any idle time, so put it in the last operation
last_resource_time = resource.get_last_operation_time()
tb = np.max([last_resource_time, last_operation_time])
# print('add_last', job_id, operation_id, '=>', last_resource_time, last_operation_time)
resource.add_task(operation, tb)
# increment the operation id for next operation
current_job_operations[job_id - 1] += 1
return resources
def calculate_fitness(self, chromosome) -> int:
resources = self.decode_chromosome(chromosome)
makespan = 0
for resource in resources:
makespan = np.max([resource.get_last_operation_time(), makespan])
return makespan
def evaluate(self):
for i in range(len(self.population)):
fitness = self.calculate_fitness(self.population[i])
self.population[i].set_fitness(fitness)
        # sort population in place by fitness (makespan), best (lowest) first
        self.population.sort(key=lambda c: c.fitness)
def solve(self, problem: Problem, population_amount=100, gs=.6, ls=.3, rs=.1, parent_selector='tournament', pm=.1, iter=100, selected_offspring=.5) -> List[Resource]:
self.problem = problem
# print(self.global_selection().machine_selection)
self.init_population(population_amount, gs, ls, rs)
self.evaluate()
selector = self.get_parent_selector(parent_selector)
two_point_crossover = self.get_crossover("two_point")
uniform_crossover = self.get_crossover("uniform")
pox_crossover = self.get_crossover("pox")
os_mutator = OSMutator(pm)
ms_mutator = MSMutator(pm)
print("========== Before =============")
top_3 = self.population[:3]
for i, c in enumerate(top_3):
print(f"Top {i+1}")
print("Machine Selection:", c.machine_selection)
print("Operation Sequence:", c.operation_sequence)
print("Fitness/Makespan:", c.fitness)
print("==========================================")
        crossover_amount = 0
        mutation_amount = 0
        for i in range(iter):
            print("Generation", i+1)
            # rebuild the offspring pool from scratch each generation
            new_population: List[Chromosome] = []
            while len(new_population) < population_amount:
# select 2 parent
p1_idx = selector.select_parent(self.population)
p2_idx = selector.select_parent(self.population)
p1 = self.population[p1_idx]
p2 = self.population[p2_idx]
if crossover_amount < 3:
print("Before Crossover")
print("Selected Parent 1:")
print("Machine Selection:", p1.machine_selection)
print("Operation Sequence:", p1.operation_sequence)
print("Selected Parent 2:")
print("Machine Selection:", p2.machine_selection)
print("Operation Sequence:", p2.operation_sequence)
if len(new_population) < population_amount // 2:
ms1, ms2 = two_point_crossover.crossover(p1.machine_selection, p2.machine_selection)
else:
ms1, ms2 = uniform_crossover.crossover(p1.machine_selection, p2.machine_selection)
os1, os2 = pox_crossover.crossover(p1.operation_sequence, p2.operation_sequence)
c1 = Chromosome(ms1, os1)
c2 = Chromosome(ms2, os2)
c1 = self.fix_chromosome(c1)
c2 = self.fix_chromosome(c2)
new_population.append(c1)
new_population.append(c2)
if crossover_amount < 3:
print("After Crossover")
print("Offspring 1:")
print("Machine Selection:", c1.machine_selection)
print("Operation Sequence:", c2.operation_sequence)
print("Offspring 2:")
print("Machine Selection:", c1.machine_selection)
print("Operation Sequence:", c2.operation_sequence)
crossover_amount += 1
for i, c in enumerate(new_population):
# do mutation if p < pm
p = np.random.rand()
if p < pm:
if mutation_amount < 3:
print("Before Mutation")
print("Machine Selection:", c.machine_selection)
print("Operation Sequence:", c.operation_sequence)
ms = ms_mutator.mutate(c.machine_selection, self.problem)
os = os_mutator.mutate(c.operation_sequence)
new_population[i] = Chromosome(ms, os)
if mutation_amount < 3:
print("After Mutation")
print("Machine Selection:", ms)
print("Operation Sequence:", os)
mutation_amount += 1
# self.population = new_population
for i in range(len(new_population)):
fitness = self.calculate_fitness(new_population[i])
new_population[i].set_fitness(fitness)
            new_population.sort(key=lambda c: c.fitness)
# set top-t% from new population
t = int(selected_offspring*population_amount)
self.population[-t:] = new_population[:t]
# re-evaluate the new population
self.evaluate()
best_chromosome = self.population[0]
print("Best fitness:", best_chromosome.fitness)
print("========== After ============")
# get the best chromosome
top_3 = self.population[:3]
for i, c in enumerate(top_3):
print(f"Top {i+1}")
print("Machine Selection:", c.machine_selection)
print("Operation Sequence:", c.operation_sequence)
print("Fitness/Makespan:", c.fitness)
print("==========================================")
resources = self.decode_chromosome(best_chromosome)
return resources
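# Usage sketch (hypothetical instance; how a Problem is built depends on the
# fjsp package's own loaders, so the first line is a placeholder):
# problem = Problem(...)
# ga = GeneticAlgorithm()
# resources = ga.solve(problem, population_amount=100, iter=50)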
|
from os.path import join
import json
def merge_json_files(dirname, fnames, outname):
    """Concatenate the JSON arrays in fnames and write the result to outname."""
    data = []
    for fname in fnames:
        with open(join(dirname, fname)) as f:
            data.extend(json.load(f))
    with open(join(dirname, outname), 'w') as f:
        json.dump(data, f, indent=2)
dirname = 'ull_both_5_100'
fnames = [
"gen_nucleus_test_1_0-125.json",
"gen_nucleus_test_1_125-315.json",
"gen_nucleus_test_1_315-500.json",
]
outname = "gen_nucleus_test_1_0-500.json"
merge_json_files(dirname, fnames, outname)
|
# ************
# File: vnnlib_parser.py
# Top contributors (to current version):
# Panagiotis Kouvaros (panagiotis.kouvaros@gmail.com)
# This file is part of the Venus project.
# Copyright: 2019-2021 by the authors listed in the AUTHORS file in the
# top-level directory.
# License: BSD 2-Clause (see the file LICENSE in the top-level directory).
# Description: Parses a VNNLIB specification.
# ************
import os
import math
import collections
import torch
import numpy as np
from sly import Lexer
from sly import Parser
from venus.specification.formula import Formula, StateCoordinate, VarConstConstraint, VarVarConstraint, ConjFormula, \
DisjFormula, NAryConjFormula, NAryDisjFormula, NegationFormula
from venus.specification.specification import Specification
from venus.network.node import Input
from venus.bounds.bounds import Bounds
class VNNLIBParser:
def __init__(self, pf, input_shape, config):
self.pf = pf
self.input_shape = input_shape
self.X_SZ = np.prod(input_shape)
self.config = config
def parse(self):
with open(self.pf, 'r') as f:
string = f.read()
lexer = VNNLexer()
parser = VNNParser(self.X_SZ, self.config)
i_b, o_f, i_cl = parser.parse(lexer.tokenize(string))
specs = []
if len(i_cl) == 0:
bounds = Bounds(
i_b[0].reshape(self.input_shape),
i_b[1].reshape(self.input_shape)
)
specs.append(
Specification(
Input(bounds, self.config),
NegationFormula(o_f).to_NNF(),
os.path.basename(self.pf)[1]
)
)
else:
for clause in i_cl:
if not o_f is None and not clause[1] is None:
f = ConjFormula(o_f, clause[1])
elif not o_f is None and clause[1] is None:
f = o_f
elif o_f is None and not clause[1] is None:
f = clause[1]
else:
raise Exception('No output constraints found')
specs.append(
Specification(
Input(clause[0][0], clause[0][1]),
NegationFormula(f).to_NNF(),
os.path.basename(self.pf)[1]
)
)
return specs
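# Illustrative fragment of the VNNLIB subset this grammar accepts (hypothetical
# property; X_* are network inputs and Y_* outputs):
#   (declare-const X_0 Real)
#   (declare-const Y_0 Real)
#   (assert (>= X_0 -0.5))
#   (assert (<= X_0 0.5))
#   (assert (>= Y_0 1.0))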
class VNNLexer(Lexer):
    tokens = {LE, GE, ASSERT, AND, OR, INPUT, OUTPUT, NUM, LPAR, RPAR, UNDERSCR, CONST, REAL}
ignore = ' \t'
LE = r'<='
GE = r'>='
ASSERT = r'assert'
AND = r'and'
OR = r'or'
INPUT = r'X'
OUTPUT = r'Y'
LPAR = r'\('
RPAR = r'\)'
UNDERSCR = r'_'
CONST = 'declare-const'
REAL = r'Real'
# @_(r'[-+]?([0-9]*\.[0-9]+|[0-9]+)')
@_(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?')
def NUM(self, t):
t.value = float(t.value)
return t
@_(r';.*')
def COMMENT(self, t):
pass
# newline tracking
@_(r'\n+')
def newline(self, t):
        self.lineno += t.value.count('\n')
class VNNParser(Parser):
tokens = VNNLexer.tokens
TermTuple = collections.namedtuple('term_tuple', ['type', 'index', 'sense', 'bound'])
def __init__(self, X_SZ, config):
self.env = { }
self.X_SZ = X_SZ
self.config = config
self.i_b = [
torch.ones(
self.X_SZ,
dtype=config.PRECISION,
device=config.DEVICE
) * -math.inf,
torch.ones(
self.X_SZ,
dtype=config.PRECISION,
device=config.DEVICE
) * math.inf
]
self.o_f = None
self.i_cl = []
@_('statement')
def statements(self, p):
return self.i_b, self.o_f, self.i_cl
@_('statement statements')
def statements(self, p):
return self.i_b, self.o_f, self.i_cl
@_('input_statement')
def statement(self, p):
pass
@_('output_statement')
def statement(self, p):
if self.o_f is None:
self.o_f = p.output_statement
else:
self.o_f = ConjFormula(self.o_f, p.output_statement)
@_('LPAR CONST input_id REAL RPAR')
def statement(self, p):
pass
@_('LPAR CONST output_id REAL RPAR')
def statement(self, p):
pass
@_('LPAR ASSERT input_term RPAR')
def input_statement(self, p):
pass
@_('LPAR ASSERT LPAR OR input_and_clauses RPAR RPAR')
def input_statement(self, p):
self.i_cl = p.input_and_clauses
@_('input_and_clause')
def input_and_clauses(self, p):
return [p.input_and_clause]
@_('input_and_clause input_and_clauses')
def input_and_clauses(self, p):
return p.input_and_clauses + [p.input_and_clause]
@_('LPAR AND iio_terms RPAR')
def input_and_clause(self, p):
i_b = [
torch.ones(
self.X_SZ,
dtype=self.config.PRECISION,
device=self.config.DEVICE
) * -math.inf,
torch.ones(
self.X_SZ,
dtype=self.config.PRECISION,
device=self.config.DEVICE
) * math.inf
]
o_f_terms = []
for term in p.iio_terms:
if term.type == 'input':
if term.sense == 'le':
i_b[1][term.index] = term.bound
elif term.sense == 'ge':
i_b[0][term.index] = term.bound
else:
raise Exception(f'Unexpected term sense {term.sense}')
elif term.type == 'output':
if term.sense == 'le':
constr = VarConstConstraint(term.index, Formula.Sense.LE, term.bound)
elif term.sense == 'ge':
constr = VarConstConstraint(term.index, Formula.Sense.GE, term.bound)
else:
raise Exception(f'Unexpected term sense {term.sense}')
o_f_terms.append(constr)
else:
raise Exception(f'Unexpected term type {term.type}')
o_f = None if len(o_f_terms) == 0 else NAryConjFormula(o_f_terms)
return (i_b, o_f)
@_('io_input_term io_terms')
def iio_terms(self, p):
return [p.io_input_term] + p.io_terms
@_('io_term')
def io_terms(self, p):
return [p.io_term]
@_('io_term io_terms')
def io_terms(self, p):
return [p.io_term] + p.io_terms
@_('io_input_term')
def io_term(self, p):
return p.io_input_term
@_('io_output_term')
def io_term(self, p):
return p.io_output_term
@_('LPAR LE input_id NUM RPAR')
def io_input_term(self, p):
return VNNParser.TermTuple('input', p.input_id, 'le', p.NUM)
@_('LPAR GE input_id NUM RPAR')
def io_input_term(self, p):
return VNNParser.TermTuple('input', p.input_id, 'ge', p.NUM)
@_('LPAR LE output_id NUM RPAR')
def io_output_term(self, p):
return VNNParser.TermTuple('output', p.output_id, 'le', p.NUM)
@_('LPAR GE output_id NUM RPAR')
def io_output_term(self, p):
return VNNParser.TermTuple('output', p.output_id, 'ge', p.NUM)
@_('LPAR LE output_id output_id RPAR')
def io_output_term(self, p):
return VNNParser.TermTuple('output', p.output_id0, 'le', p.output_id1)
@_('LPAR GE output_id output_id RPAR')
def io_output_term(self, p):
return VNNParser.TermTuple('output', p.output_id0, 'ge', p.output_id1)
@_('LPAR LE input_id NUM RPAR')
def input_term(self, p):
self.i_b[1][p.input_id] = p.NUM
@_('LPAR GE input_id NUM RPAR')
def input_term(self, p):
self.i_b[0][p.input_id] = p.NUM
@_('INPUT UNDERSCR NUM')
def input_id(self, p):
return int(p.NUM)
@_('LPAR ASSERT output_term RPAR')
def output_statement(self, p):
return p.output_term
@_('LPAR ASSERT output_logic_clause RPAR')
def output_statement(self, p):
return p.output_logic_clause
@_('output_and_clause')
def output_logic_clause(self, p):
return p.output_and_clause
@_('output_or_clause')
def output_logic_clause(self, p):
return p.output_or_clause
@_('output_logic_clause')
def output_logic_clauses(self, p):
return [p.output_logic_clause]
@_('output_logic_clause output_logic_clauses')
def output_logic_clauses(self, p):
return p.output_logic_clauses + [p.output_logic_clause]
@_('LPAR AND output_logic_clauses RPAR')
def output_and_clause(self, p):
if len(p.output_logic_clauses) == 1:
return p.output_logic_clauses[0]
elif len(p.output_logic_clauses) == 2:
return ConjFormula(p.output_logic_clauses[0], p.output_logic_clauses[1])
else:
return NAryConjFormula(p.output_logic_clauses)
@_('LPAR AND output_terms RPAR')
def output_and_clause(self, p):
if len(p.output_terms) == 1:
return p.output_terms[0]
elif len(p.output_terms) == 2:
return ConjFormula(p.output_terms[0], p.output_terms[1])
else:
return NAryConjFormula(p.output_terms)
@_('LPAR OR output_logic_clauses RPAR')
def output_or_clause(self, p):
if len(p.output_logic_clauses) == 1:
return p.output_logic_clauses[0]
elif len(p.output_logic_clauses) == 2:
return DisjFormula(p.output_logic_clauses[0], p.output_logic_clauses[1])
else:
return NAryDisjFormula(p.output_logic_clauses)
@_('LPAR OR output_terms RPAR')
def output_or_clause(self, p):
if len(p.output_terms) == 1:
return p.output_terms[0]
elif len(p.output_terms) == 2:
return DisjFormula(p.output_terms[0], p.output_terms[1])
else:
return NAryDisjFormula(p.output_terms)
@_('output_term output_terms')
def output_terms(self, p):
return p.output_terms + [p.output_term]
@_('output_term')
def output_terms(self, p):
return [p.output_term]
@_('LPAR LE output_id NUM RPAR')
def output_term(self, p):
return VarConstConstraint(p.output_id, Formula.Sense.LE, p.NUM)
@_('LPAR GE output_id NUM RPAR')
def output_term(self, p):
return VarConstConstraint(p.output_id, Formula.Sense.GE, p.NUM)
@_('LPAR LE output_id output_id RPAR')
def output_term(self, p):
return VarVarConstraint(p.output_id0, Formula.Sense.LE, p.output_id1)
@_('LPAR GE output_id output_id RPAR')
def output_term(self, p):
return VarVarConstraint(p.output_id0, Formula.Sense.GE, p.output_id1)
@_('OUTPUT UNDERSCR NUM')
def output_id(self, p):
return StateCoordinate(int(p.NUM))
|
# coding=utf-8
# noinspection PyUnresolvedReferences
import maya.cmds as cmds
#
from LxBasic import bscMethods
from LxPreset import prsConfigure
#
none = ''
#
def setOutProxy(fileString_, renderer, exportMode=0):
temporaryFile = bscMethods.OsFile.temporaryName(fileString_)
# Export
if renderer == prsConfigure.Utility.DEF_value_renderer_arnold:
setOutArnoldProxy(temporaryFile, exportMode)
elif renderer == prsConfigure.Utility.DEF_value_renderer_redshift:
setOutRedshiftProxy(temporaryFile, exportMode)
#
bscMethods.OsFile.copyTo(temporaryFile, fileString_)
#
def setOutArnoldProxy(fileString_, exportMode=0):
option = '-mask 255;-lightLinks 1;-shadowLinks 1;'
if exportMode == 0:
cmds.file(
fileString_,
force=1,
options=option,
type='ASS Export',
preserveReferences=0,
constructionHistory=1,
exportSelected=1
)
#
elif exportMode == 1:
cmds.file(
fileString_,
force=1,
options=option,
type='ASS Export',
preserveReferences=0,
constructionHistory=1,
exportAll=1
)
#
def setOutRedshiftProxy(fileString_, exportMode=0):
option = 'exportConnectivity=1;enableCompression=1;'
if exportMode == 0:
cmds.file(
fileString_,
force=1,
options=option,
type='Redshift Proxy',
preserveReferences=0,
constructionHistory=1,
exportSelected=1
)
#
elif exportMode == 1:
cmds.file(
fileString_,
force=1,
options=option,
type='Redshift Proxy',
preserveReferences=0,
constructionHistory=1,
exportAll=1
)
#
def getArnoldProxyFile(proxyNode):
attr = proxyNode + '.dso'
return cmds.getAttr(attr)
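# Usage sketch (illustrative paths; the renderer constants come from prsConfigure):
#
#   setOutProxy('/temp/asset.ass', prsConfigure.Utility.DEF_value_renderer_arnold)
#   setOutProxy('/temp/asset.rs', prsConfigure.Utility.DEF_value_renderer_redshift, exportMode=1)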
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import six
from google.api_core.gapic_v2.dispatch import dispatch
@pytest.mark.skipif(six.PY2, reason='dispatch only works on Python 3.')
def test_dispatch():
class Foo(object):
@dispatch
def bar(self, number, letter):
return 'Brought by the letter {} and the number {}.'.format(
letter, number,
)
@bar.register(str)
def _bar_with_string(self, letter):
return self.bar(11, letter)
foo = Foo()
assert foo.bar(8, 'L') == 'Brought by the letter L and the number 8.'
assert foo.bar('Z') == 'Brought by the letter Z and the number 11.'
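# For comparison, the standard library offers similar per-type dispatch via
# functools.singledispatchmethod (Python 3.8+). Kept in comment form so this
# module still imports under Python 2:
#
#   from functools import singledispatchmethod
#
#   class Baz(object):
#       @singledispatchmethod
#       def bar(self, number, letter):
#           return 'Brought by the letter {} and the number {}.'.format(letter, number)
#
#       @bar.register
#       def _(self, letter: str):
#           return self.bar(11, letter)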
|
"""
Tidy Scenario: A robot uses its hand to tidy up the table, pushing the objects into a bin/cart etc.
EE = 'End Effector', which is the gripper / suction cup
"""
import torch
import random
import stillleben as sl
from sl_cutscenes.constants import SCENARIO_DEFAULTS
import sl_cutscenes.constants as CONSTANTS
from sl_cutscenes.scenarios.scenario import Scenario
class TidyScenario(Scenario):
def __init__(self, cfg, scene):
self.name = "Tidy"
self.config = SCENARIO_DEFAULTS["scenes"]["tidy"]
self.prep_time = 1.000 # during this time (in s), the scene will not be rendered
self.remaining_pause = 0.000 # pause time remaining for the gripper
self.allow_multiple_cameras = False
self.max_waypoint_deviation = 0.02 # in m
self.max_velocity = 0.5 # in m/s
self.acceleration = 1.0 # in m/s²
self.ee = None
self.robot_sim = None
super(TidyScenario, self).__init__(cfg, scene) # also calls reset_sim()
@property
def ee_pose(self):
return self.ee.pose()
@property
def ee_t(self):
return self.ee.pose()[:3, 3]
def can_render(self):
"""
:return: True if scene has been prepared and can be rendered, False otherwise.
"""
return self.sim_t > self.prep_time and self.ee is not None
def load_meshes_(self):
"""
SCENARIO-SPECIFIC
"""
self.mesh_loader.load_meshes(CONSTANTS.NO_POOL_TABLE)
self.mesh_loader.load_meshes(CONSTANTS.YCBV_OBJECTS)
self.mesh_loader.load_meshes(CONSTANTS.SUCTION_GRIPPER)
def setup_objects_(self):
"""
SCENARIO-SPECIFIC
"""
table_info_mesh, ycbv_info_meshes, self.ee_mesh = self.mesh_loader.get_meshes()
# place table
table_mod = {"mod_pose": CONSTANTS.TABLE_POSE}
self.table = self.add_object_to_scene(table_info_mesh, True, **table_mod)
self.table = self.update_object_height(cur_obj=self.table)
self.z_offset = self.table.pose()[2, -1]
        # drop 3 random YCB-Video objects onto the table
for obj_info_mesh in random.choices(ycbv_info_meshes, k=3):
print(" >>> trying to add object")
mod_t = torch.tensor([
random.uniform(self.config["pos"]["x_min"], self.config["pos"]["x_max"]),
random.uniform(self.config["pos"]["y_min"], self.config["pos"]["y_max"]),
random.uniform(self.config["pos"]["z_min"], self.config["pos"]["z_max"])
])
obj_mod = {"mod_t": mod_t}
obj = self.add_object_to_scene(obj_info_mesh, False, **obj_mod)
obj = self.update_object_height(cur_obj=obj, objs=[self.table])
# removing last object if colliding with anything else
if self.is_there_collision():
print(" >>> object colliding!")
self.remove_obj_from_scene(obj)
def setup_robot_sim(self):
if not self.objects_loaded:
self.setup_objects()
# set up end effector (ee)
ee_pose = CONSTANTS.END_EFFECTOR_POSE
init_z = random.uniform(self.config["endeffector_pos"]["z_min"], self.config["endeffector_pos"]["z_max"])
ee_t = torch.tensor([
self.config["endeffector_pos"]["x"],
self.config["endeffector_pos"]["y_1"] if random.random() < 0.5
else self.config["endeffector_pos"]["y_2"],
init_z
])
ee_pose[:3, 3] = ee_t
ee_mod = {"mod_pose": ee_pose}
self.start_ee_pose_ = ee_pose
self.ee = self.add_object_to_scene(self.ee_mesh, is_static=False, **ee_mod)
self.ee = self.update_object_height(cur_obj=self.ee, objs=[self.table])
self.table_height = self.ee.pose()[2, 3] - init_z
self.ee_velocity = 0.0
# set up the waypoints the ee has to reach
self.waypoints = [
torch.tensor([
random.uniform(self.config["waypoint_pos"]["x_min"], self.config["waypoint_pos"]["x_max"]),
self.ee_t[1],
random.uniform(self.config["waypoint_pos"]["z_min"], self.config["waypoint_pos"]["z_max"])
+ self.table_height,
]),
torch.tensor([
random.uniform(self.config["waypoint_pos"]["x_min"], self.config["waypoint_pos"]["x_max"]),
self.ee_t[1] * -1,
random.uniform(self.config["waypoint_pos"]["z_min"], self.config["waypoint_pos"]["z_max"])
+ self.table_height,
]),
]
# set up the robot simulation
self.robot_sim = sl.ManipulationSim(self.scene, self.ee, self.start_ee_pose_)
self.robot_sim.set_spring_parameters(3000.0, 10.0, 100.0) # stiffness, damping, force_limit
def setup_cameras_(self):
"""
SCENARIO-SPECIFIC
"""
# TODO set camera to robot
self.cameras = [
self.update_camera_height(camera=cam, objs=[self.table]) for cam in self.cameras
]
def simulate(self):
self.sim_t += self.sim_dt
# add robot after preparation time to ensure that the objects are not falling anymore
if self.sim_t > self.prep_time and self.ee is None:
self.setup_robot_sim()
# if paused or gripper is not set up or no waypoints remaining -> simulate object physics without gripper
if self.ee is None or self.remaining_pause > 0 or len(self.waypoints) < 1:
self.sim_step_()
if self.remaining_pause > 0:
self.remaining_pause -= self.sim_dt
# if gripper is loaded and there is another unreached waypoint for it: move the robot
else:
cur_waypoint = self.waypoints[0]
pose_delta = cur_waypoint - self.start_ee_pose_[:3, 3]
pose_delta_norm = torch.linalg.norm(pose_delta)
pose_delta_normalized = pose_delta / pose_delta_norm
# reached current waypoint -> pop it and pause briefly
if pose_delta_norm < self.max_waypoint_deviation:
_ = self.waypoints.pop(0)
self.ee_velocity = 0.0
self.remaining_pause += 0.300 # pause for a bit
# else: adjust movement velocity according to distance to waypoint
else:
ideal_velocity = pose_delta_norm * 2.0
acceleration = self.acceleration * self.sim_dt
if self.ee_velocity < ideal_velocity:
self.ee_velocity = min(self.ee_velocity + acceleration, self.max_velocity)
elif self.ee_velocity >= ideal_velocity:
self.ee_velocity = max(self.ee_velocity - acceleration, 0.0)
# calculate new gripper pose with calculated delta vector and velocity
ee_pose = self.start_ee_pose_
ee_pose[:3, 3] += self.ee_velocity * self.sim_dt * pose_delta_normalized
self.robot_sim.step(ee_pose, self.sim_dt) # TODO move to sim_step()
self.start_ee_pose_ = ee_pose
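# A dependency-free sketch of the velocity update in simulate() above: the
# target speed shrinks with the remaining distance (the 2.0 gain mirrors the
# code), so the gripper accelerates toward a waypoint and decelerates on
# approach. Defaults are the scenario's constants; the name is illustrative.
def _waypoint_velocity_sketch(velocity, distance, dt, acceleration=1.0, max_velocity=0.5):
    ideal = distance * 2.0
    step = acceleration * dt
    if velocity < ideal:
        return min(velocity + step, max_velocity)
    return max(velocity - step, 0.0)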
|
from __future__ import print_function, division
import torch
import torch.nn as nn
class ConvBlock(nn.Module):
def __init__(self, inplanes, outplanes, name, nums=3,
kernel_size=3, padding=1, stride=1):
super(ConvBlock, self).__init__()
self.nums = nums
self.relu = nn.ReLU(True)
        if isinstance(name, str):
            self.name = name
        else:
            raise TypeError("name should be a str")
        for i in range(self.nums):
            self.add_module('conv' + self.name + "_" + str(i),
                            nn.Conv2d(inplanes, outplanes, padding=padding,
                                      kernel_size=kernel_size, stride=stride))
            self.add_module('conv' + self.name + "_" + str(i) + "_bn", nn.BatchNorm2d(outplanes))
            inplanes = outplanes
self.initial()
def forward(self, x):
net = x
for i in range(self.nums):
net = self._modules['conv' + self.name + "_" + str(i)](net)
net = self._modules['conv' + self.name + "_" + str(i) + "_bn"](net)
net = self.relu(net)
return net
    def initial(self):
        # Kaiming init for conv weights; BatchNorm starts as identity (weight=1, bias=0).
        # The underscore variants are the current in-place initializers; the
        # non-underscore names were deprecated in torch 0.4.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
class Decoder(nn.Module):
def __init__(self, opt):
super(Decoder, self).__init__()
self.opt = opt
self.relu = nn.ReLU()
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
self.deconv1_1_new = nn.ConvTranspose2d(512, 512, (4, 4), 1, 0)
self.deconv1_1_bn = nn.BatchNorm2d(512)
self.convblock1 = ConvBlock(512, 512, "1", nums=2)
self.convblock2 = ConvBlock(512, 512, "2", nums=3)
self.convblock3 = ConvBlock(512, 256, "3", nums=4)
self.convblock4 = ConvBlock(256 + 160, 256, "4", nums=4)
self.convblock5 = ConvBlock(256, 128, "5", nums=3)
self.convblock6 = ConvBlock(128, 64, "6", nums=2)
self.conv7_1 = nn.ConvTranspose2d(64, 32, 3, 1, 1)
self.conv7_1_bn = nn.BatchNorm2d(32)
self.conv7_2 = nn.ConvTranspose2d(32, 3, 3, 1, 1)
self.tanh = nn.Tanh()
def forward(self, id_feature, mouth_feature):
id_feature0 = id_feature[0].contiguous().view(-1, self.opt.feature_length, 1, 1)
mouth_feature = mouth_feature.contiguous().view(-1, self.opt.feature_length, 1, 1)
whole_feature = torch.cat((id_feature0, mouth_feature), dim=1)
net = self.deconv1_1_new(whole_feature)
net = self.relu(self.deconv1_1_bn(net))
for i in range(6):
if i == 3:
net = torch.cat((id_feature[i], net), 1)
net = self._modules['convblock' + str(i + 1)](net)
net = self.upsample(net)
net = self.conv7_1(net)
net = self.relu(self.conv7_1_bn(net))
net = self.conv7_2(net)
net = self.tanh(net)
net = (net + 1) / 2.0
return net
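# A shape-check sketch (assumptions: opt.feature_length == 256, so the
# concatenated identity+mouth feature matches deconv1_1_new's 512 input
# channels, and id_feature[3] supplies the 160 extra channels at 32x32 that
# convblock4 expects):
#
#   from types import SimpleNamespace
#   decoder = Decoder(SimpleNamespace(feature_length=256)).eval()
#   id_feature = [torch.randn(1, 256), None, None, torch.randn(1, 160, 32, 32), None, None]
#   with torch.no_grad():
#       out = decoder(id_feature, torch.randn(1, 256))  # -> (1, 3, 256, 256), values in [0, 1]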
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import mxnet as mx
import numpy as np
import sklearn
from mxnet import ndarray as nd
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import face_image
def main(args):
ctx = []
cvd = os.environ['CUDA_VISIBLE_DEVICES'].strip()
if len(cvd) > 0:
for i in range(len(cvd.split(','))):
ctx.append(mx.gpu(i))
if len(ctx) == 0:
ctx = [mx.cpu()]
print('use cpu')
else:
print('gpu num:', len(ctx))
ctx_num = len(ctx)
path_imgrec = os.path.join(args.input, 'train.rec')
path_imgidx = os.path.join(args.input, 'train.idx')
imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type
outf = open(os.path.join(args.input, 'c2c'), 'w')
s = imgrec.read_idx(0)
header, _ = mx.recordio.unpack(s)
assert header.flag > 0
print('header0 label', header.label)
header0 = (int(header.label[0]), int(header.label[1]))
# assert(header.flag==1)
imgidx = range(1, int(header.label[0]))
id2range = {}
seq_identity = range(int(header.label[0]), int(header.label[1]))
for identity in seq_identity:
s = imgrec.read_idx(identity)
header, _ = mx.recordio.unpack(s)
id2range[identity] = (int(header.label[0]), int(header.label[1]))
print('id2range', len(id2range))
prop = face_image.load_property(args.input)
image_size = prop.image_size
print('image_size', image_size)
vec = args.model.split(',')
prefix = vec[0]
epoch = int(vec[1])
print('loading', prefix, epoch)
model = mx.mod.Module.load(prefix, epoch, context=ctx)
model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))], label_shapes=[('softmax_label', (args.batch_size,))])
nrof_images = 0
nrof_removed = 0
idx = 1
id2label = {}
pp = 0
    for _id, v in id2range.items():  # dict.iteritems() is Python 2 only
pp += 1
if pp % 100 == 0:
print('processing id', pp)
_list = range(*v)
ocontents = []
for i in range(len(_list)):
_idx = _list[i]
# print('_idx', _id, _idx)
s = imgrec.read_idx(_idx)
ocontents.append(s)
# continue
embeddings = None
headers = [None] * len(ocontents)
# print(len(ocontents))
ba = 0
while True:
bb = min(ba + args.batch_size, len(ocontents))
if ba >= bb:
break
_batch_size = bb - ba
_batch_size2 = max(_batch_size, ctx_num)
data = nd.zeros((_batch_size2, 3, image_size[0], image_size[1]))
label = nd.zeros((_batch_size2,))
count = bb - ba
ii = 0
for i in range(ba, bb):
header, img = mx.recordio.unpack(ocontents[i])
headers[i] = header
img = mx.image.imdecode(img)
img = nd.transpose(img, axes=(2, 0, 1))
data[ii][:] = img
label[ii][:] = header.label[0]
ii += 1
while ii < _batch_size2:
data[ii][:] = data[0][:]
label[ii][:] = label[0][:]
ii += 1
db = mx.io.DataBatch(data=(data,), label=(label,))
model.forward(db, is_train=False)
net_out = model.get_outputs()
net_out = net_out[0].asnumpy()
if embeddings is None:
embeddings = np.zeros((len(ocontents), net_out.shape[1]))
embeddings[ba:bb, :] = net_out[0:_batch_size, :]
ba = bb
embeddings = sklearn.preprocessing.normalize(embeddings)
emb_mean = np.mean(embeddings, axis=0, keepdims=True)
emb_mean = sklearn.preprocessing.normalize(emb_mean)
sim = np.dot(embeddings, emb_mean.T)
# print(sim.shape)
sims = sim.flatten()
assert len(_list) == len(sims)
assert len(_list) == len(ocontents)
for i in range(len(ocontents)):
_sim = sims[i]
_idx = _list[i]
_header = headers[i]
# TODO
outf.write("%d,%f,%d\n" % (_idx, _sim, int(_header.label[1])))
outf.close()
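def c2c_scores(embeddings):
    """Self-contained sketch of the per-identity score computed above:
    L2-normalize the embeddings, re-normalize their mean (the "center"), and
    score each image by its dot product (cosine similarity) with that center."""
    emb = sklearn.preprocessing.normalize(embeddings)
    center = sklearn.preprocessing.normalize(np.mean(emb, axis=0, keepdims=True))
    return np.dot(emb, center.T).flatten()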
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
# general
parser.add_argument('--input', default='', type=str, help='')
parser.add_argument('--model', default='../model/softmax,50', help='path to load model.')
parser.add_argument('--batch-size', default=32, type=int, help='')
args = parser.parse_args()
main(args)
|
import click
import sys
def init_subroutine():
global subroutine_stack
subroutine_stack = []
def push_subroutine(cmd_name: str):
global subroutine_stack
subroutine_stack.append(cmd_name)
def pop_subroutine():
    # list.pop() mutates in place, so no global rebinding is needed
    return subroutine_stack.pop()
def get_subroutine():
return subroutine_stack[-1]
def _clog(message: str):
click.echo(click.style(f"[{get_subroutine()}]", bg='magenta', fg='white'), nl=False)
click.echo(message)
def clog(*messages):
val = ''
for message in messages:
val = val + ' ' + str(message)
_clog(val)
def cerr(message: str):
    click.echo(click.style(f"[{get_subroutine()}]", bg='magenta', fg='white'), nl=False)
    click.echo(click.style(f" {message}", fg='bright_red'))
def csuccess(message: str):
    click.echo(click.style(f"[{get_subroutine()}]", bg='magenta', fg='white'), nl=False)
    click.echo(click.style(f" {message}", fg='green'))
def cexit(message: str):
cerr(f'{message}, exiting program.')
sys.exit(1)
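# A small demo of the helpers above; the prefixes shown in the comments are
# illustrative of the styled output.
if __name__ == '__main__':
    init_subroutine()
    push_subroutine('demo')
    clog('starting', 3, 'tasks')   # [demo] starting 3 tasks
    csuccess('done')               # [demo] done
    pop_subroutine()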
|
import pytest
from arc import ARC
@pytest.fixture(scope="module")
def decomposition_samples() -> ARC:
return ARC(idxs={8, 10, 16, 17, 30})
# NOTE: This test is a little ambiguous, as the red object
# might reasonably be a rectangle missing 2 points, or a line trace.
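# The ids asserted below appear to follow "Shape(HxW)@(row, col, color)",
# e.g. "Rect(2x2)@(10, 3, 8)"; the meaning of the trailing tuple is inferred
# from the samples rather than documented.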
def test_8(decomposition_samples: ARC):
board = decomposition_samples.tasks[8].cases[0].input
board.decompose()
child_names = sorted([kid.id for kid in board.rep.children])
assert child_names == [
"Cluster(2x4)@(2, 0, 2)",
"Rect(14x9)@(0, 0, 0)",
"Rect(2x2)@(10, 3, 8)",
]
def test_10(decomposition_samples: ARC):
board = decomposition_samples.tasks[10].cases[0].input
board.decompose()
child_names = sorted([kid.id for kid in board.rep.children])
assert child_names == [
"Line(3x1)@(6, 7, 5)",
"Line(6x1)@(3, 3, 5)",
"Line(8x1)@(1, 1, 5)",
"Line(9x1)@(0, 5, 5)",
"Rect(9x9)@(0, 0, 0)",
]
def test_16(decomposition_samples: ARC):
board = decomposition_samples.tasks[16].cases[0].input
board.decompose()
child_names = sorted([kid.id for kid in board.rep.children])
assert child_names == ["Cell(1x3)@(0, 0, 10)"]
grandchild_names = sorted([kid.id for kid in board.rep.children[0]])
assert grandchild_names == [
"Dot@(0, 0, 3)",
"Dot@(0, 1, 1)",
"Dot@(0, 2, 2)",
]
hier_repr = str(board).replace(" ", "").split("\n")
assert hier_repr == [
"[Tile]Pattern(3x3)@(0,0,10)(1ch,9pts,15p)",
"Generating(V:2)",
"[Cell]Cell(1x3)@(0,0,10)(3ch,3pts,10p)",
"Dot@(0,0,3)",
"Dot@(0,1,1)",
"Dot@(0,2,2)",
]
# Repeating call with an empty processing queue should do nothing
board.decompose()
child_names = sorted([kid.id for kid in board.rep.children])
assert child_names == ["Cell(1x3)@(0, 0, 10)"]
grandchild_names = sorted([kid.id for kid in board.rep.children[0]])
assert grandchild_names == [
"Dot@(0, 0, 3)",
"Dot@(0, 1, 1)",
"Dot@(0, 2, 2)",
]
# Initializing and allowing no processes should leave the board in raw state
board.decompose(characteristic="", init=True)
assert board.current == ""
assert board.proc_q == [""]
# Only allowing Processes.Background gives a different result
board.decompose(characteristic="B", init=True)
child_names = sorted([kid.id for kid in board.rep.children])
assert child_names == [
"Line(3x1)@(0, 1, 1)",
"Rect(3x2)@(0, 1, 2)",
"Rect(3x3)@(0, 0, 3)",
]
def test_17(decomposition_samples: ARC):
board = decomposition_samples.tasks[17].cases[0].input
board.decompose(max_iter=3)
child_names = sorted([kid.id for kid in board.rep.children])
assert child_names == [
"Cluster(15x17)@(4, 3, 0)",
"Pattern(21x21)@(0, 0, 10)",
]
def test_30(decomposition_samples: ARC):
board = decomposition_samples.tasks[30].cases[0].input
board.decompose()
child_names = sorted([kid.id for kid in board.rep.children])
assert child_names == [
"Rect(2x2)@(0, 1, 2)",
"Rect(2x2)@(1, 7, 1)",
"Rect(2x2)@(2, 4, 4)",
"Rect(5x10)@(0, 0, 0)",
]
|
from airflow.contrib.kubernetes.kubernetes_request_factory.pod_request_factory import (
SimplePodRequestFactory as AirflowSimplePodRequestFactory,
)
from airflow.contrib.kubernetes.secret import Secret
class DbndPodRequestFactory(AirflowSimplePodRequestFactory):
    def create(self, pod):
        req = super(DbndPodRequestFactory, self).create(pod=pod)
        self.extract_extended_resources(pod, req)
        return req
    def extract_extended_resources(self, pod, req):
# type: (Pod, Dict) -> None
r = pod.resources
        if not r or (not r.requests and not r.limits):
return
req["spec"]["containers"][0].setdefault("resources", {})
resources = req["spec"]["containers"][0]["resources"]
if r.requests:
resources.setdefault("requests", {})
resources["requests"].update(**r.requests)
if r.limits:
resources.setdefault("limits", {})
resources["limits"].update(**r.limits)
def extract_node_affinity(self, pod, req):
if not hasattr(pod, "node_affinity"):
return
nodeAffinity = req["spec"].setdefault("nodeSelector", {})
nodeAffinity.update(pod.node_affinity)
def extract_volume_secrets(self, pod, req):
vol_secrets = [s for s in pod.secrets if s.deploy_type == "volume"]
if any(vol_secrets):
req["spec"]["containers"][0]["volumeMounts"] = req["spec"]["containers"][
0
].get("volumeMounts", [])
req["spec"]["volumes"] = req["spec"].get("volumes", [])
for idx, vol in enumerate(vol_secrets): # type: Secret
vol_id = "secretvol" + str(idx)
volumeMount = {
"mountPath": vol.deploy_target,
"name": vol_id,
"readOnly": True,
}
if vol.key:
volumeMount["subPath"] = vol.key
req["spec"]["containers"][0]["volumeMounts"].append(volumeMount)
req["spec"]["volumes"].append(
{"name": vol_id, "secret": {"secretName": vol.secret}}
)
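    # Illustration (assumed inputs) of what extract_volume_secrets adds for
    # Secret(deploy_type="volume", deploy_target="/etc/creds",
    # secret="my-k8s-secret", key="token"):
    #
    #   containers[0]["volumeMounts"] += [{"mountPath": "/etc/creds",
    #                                      "name": "secretvol0",
    #                                      "readOnly": True,
    #                                      "subPath": "token"}]
    #   req["spec"]["volumes"] += [{"name": "secretvol0",
    #                               "secret": {"secretName": "my-k8s-secret"}}]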
|
import logging
import os
import re
from pathlib import Path
from typing import Dict
import anndata as ad
import pandas as pd
from sqlalchemy.orm import Session
from histocat.core.acquisition import service as acquisition_service
from histocat.core.dataset import service as dataset_service
from histocat.core.dataset.dto import DatasetCreateDto, DatasetUpdateDto
from histocat.core.dataset.models import CELL_FILENAME, DatasetModel
from histocat.core.notifier import Message
from histocat.core.project import service as project_service
from histocat.core.redis_manager import UPDATES_CHANNEL_NAME, redis_manager
from histocat.core.slide import service as slide_service
from histocat.worker.io.utils import copy_file
logger = logging.getLogger(__name__)
PANEL_CSV_FILE = "panel.csv"
def _report_error(project_id: int, message: str):
"""Log error message and send it to the client via websocket"""
logger.warning(message)
redis_manager.publish(UPDATES_CHANNEL_NAME, Message(project_id, "error", message))
def import_dataset(
db: Session,
input_folder: Path,
project_id: int,
masks_folder: str,
regionprops_folder: str,
intensities_folder: str,
):
"""Import dataset from the folder compatible with 'steinbock' format."""
# Validate data
project = project_service.get(db, id=project_id)
if not project:
_report_error(project_id, f"Dataset Import Error: project [id: {project_id}] does not exist")
return
# Find data source folder where panel file resides
src_folder = None
for path in input_folder.rglob(PANEL_CSV_FILE):
src_folder = path.parent
break
if src_folder is None:
_report_error(project_id, f"Dataset Import Error: panel file is missing")
return
mask_files = sorted(Path(src_folder / masks_folder).rglob("*.tiff"))
if len(mask_files) == 0:
_report_error(project_id, f"Dataset Import Error: mask files are missing in folder '{masks_folder}'")
return
regionprops_files = sorted(Path(src_folder / regionprops_folder).rglob("*.csv"))
if len(regionprops_files) == 0:
_report_error(
project_id, f"Dataset Import Error: regionprops files are missing in folder '{regionprops_folder}'"
)
return
intensities_files = sorted(Path(src_folder / intensities_folder).rglob("*.csv"))
if len(intensities_files) == 0:
_report_error(
project_id, f"Dataset Import Error: intensities files are missing in folder '{intensities_folder}'"
)
return
# Postpone dataset db entry creation until input data validated
create_params = DatasetCreateDto(project_id=project_id, origin="steinbock", status="pending")
dataset = dataset_service.create(db, params=create_params)
dst_folder = Path(dataset.location)
os.makedirs(dst_folder, exist_ok=True)
# Import panel data
panel_df = _import_panel(os.path.join(src_folder, PANEL_CSV_FILE))
# Metadata dictionary
meta = {}
masks = {}
acquisition_id_mapping = {}
for mask_file in mask_files:
result = _import_mask(db, mask_file, dataset)
if result is not None:
mask_meta, slide_name, acquisition_origin_id = result
acquisition_id = mask_meta.get("acquisition").get("id")
image_number = mask_meta.get("acquisition").get("origin_id")
masks[acquisition_id] = mask_meta
acquisition_id_mapping[f"{slide_name}_{image_number}"] = acquisition_id
meta["masks"] = masks
    # DataFrame.append was removed in pandas 2.0; collect frames and concatenate once
    regionprops_dfs = []
    for regionprops_file in regionprops_files:
        slide_name, acquisition_origin_id, df = _import_regionprops(regionprops_file, acquisition_id_mapping)
        regionprops_dfs.append(df)
    regionprops_df = pd.concat(regionprops_dfs)
    regionprops_df.reset_index(inplace=True, drop=True)
    regionprops_df["CellId"] = regionprops_df.index
    intensities_dfs = []
    for intensities_file in intensities_files:
        slide_name, acquisition_origin_id, df = _import_intensities(intensities_file, acquisition_id_mapping)
        intensities_dfs.append(df)
    intensities_df = pd.concat(intensities_dfs)
    intensities_df.reset_index(inplace=True, drop=True)
    intensities_df["CellId"] = intensities_df.index
var_names = []
x_df = pd.DataFrame()
for index, row in panel_df.iterrows():
channel = row["channel"]
name = row["name"]
# TODO: check intensity
x_df[channel] = intensities_df[name]
var_names.append(channel)
var = pd.DataFrame(index=var_names)
var["Channel"] = var.index
X_counts = x_df.to_numpy()
adata = ad.AnnData(X_counts, obs=regionprops_df, var=var, dtype="float32")
dst_uri = dst_folder / CELL_FILENAME
adata.write_h5ad(dst_uri)
acquisition_ids = sorted(list(masks.keys()))
update_params = DatasetUpdateDto(
name=f"Dataset {dataset.id}", status="ready", acquisition_ids=acquisition_ids, channels=var_names, meta=meta
)
dataset = dataset_service.update(db, item=dataset, params=update_params)
redis_manager.publish(UPDATES_CHANNEL_NAME, Message(project_id, "dataset_imported"))
def _import_panel(path: str):
panel_df = pd.read_csv(path)
return panel_df
def _import_mask(db: Session, filepath: Path, dataset: DatasetModel):
p = re.compile("(?P<Name>.*)_(?P<AcquisitionID>[0-9]+).tiff")
slide_name, acquisition_origin_id = p.findall(filepath.name)[0]
slide = slide_service.get_by_name(db, project_id=dataset.project_id, name=slide_name)
if slide is None:
return None
acquisition = acquisition_service.get_by_origin_id(db, slide_id=slide.id, origin_id=acquisition_origin_id)
location = copy_file(str(filepath), dataset.location)
meta = {
"location": location,
"slide": {"id": slide.id, "origin_id": slide.origin_id},
"acquisition": {"id": acquisition.id, "origin_id": acquisition.origin_id},
}
return meta, slide_name, acquisition_origin_id
def _import_regionprops(filepath: Path, acquisition_id_mapping: Dict[str, int]):
p = re.compile("(?P<Name>.*)_(?P<AcquisitionID>[0-9]+).csv")
slide_name, acquisition_origin_id = p.findall(filepath.name)[0]
df = pd.read_csv(filepath)
df.rename(columns={"Object": "ObjectNumber", "centroid-0": "CentroidY", "centroid-1": "CentroidX"}, inplace=True)
df["ImageNumber"] = acquisition_origin_id
df["AcquisitionId"] = acquisition_id_mapping.get(f"{slide_name}_{acquisition_origin_id}")
return (
slide_name,
acquisition_origin_id,
df[["ObjectNumber", "ImageNumber", "AcquisitionId", "CentroidX", "CentroidY"]],
)
def _import_intensities(filepath: Path, acquisition_id_mapping: Dict[str, int]):
p = re.compile("(?P<Name>.*)_(?P<AcquisitionID>[0-9]+).csv")
slide_name, acquisition_origin_id = p.findall(filepath.name)[0]
df = pd.read_csv(filepath)
df.rename(columns={"Object": "ObjectNumber"}, inplace=True)
df["ImageNumber"] = acquisition_origin_id
df["AcquisitionId"] = acquisition_id_mapping.get(f"{slide_name}_{acquisition_origin_id}")
return slide_name, acquisition_origin_id, df
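# The importers above rely on the steinbock filename convention
# "<slide>_<acquisition>.<ext>"; a quick illustration of the capture:
#
#   re.compile(r"(?P<Name>.*)_(?P<AcquisitionID>[0-9]+)\.tiff").findall("slideA_003.tiff")
#   # -> [('slideA', '003')]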
|
from transitions import Machine
#from transitions.extensions import GraphMachine as Machine
from utils import send_text_message
from utils import send_image_url
from utils import send_button_message
from data import data
class TocMachine(Machine):
def __init__(self, **machine_configs):
self.machine = Machine(
model=self,
**machine_configs
)
def user_sent_something(self, event):
return True
def is_travelling_for_adventure(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'adventure' or text.lower() == '1'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'adventure' or text.lower() == '1'
return False
def is_travelling_for_historical(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'historical buildings' or text.lower() == '2'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'historical buildings' or text.lower() == '2'
return False
def is_travelling_for_naturals(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'naturals' or text.lower() == '3'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'naturals' or text.lower() == '3'
return False
def is_travelling_for_local_food(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'local food' or text.lower() == '4'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'local food' or text.lower() == '4'
return False
def on_enter_purpose(self, event):
print("I'm entering purpose")
sender_id = event['sender']['id']
        response = send_text_message(sender_id, data['text'])
# response = send_image_url(sender_id, "./fsm.png", 'png')
# send_button_message(sender_id, "select one", data['buttons'])
# def on_exit_purpose(self):
# print('Leaving purpose')
def on_enter_adventure(self, event):
print("I'm entering adventure")
sender_id = event['sender']['id']
send_text_message(sender_id, data['Adventure']['text_to_send'])
send_button_message(sender_id, data['Adventure']['text_button'], data['Adventure']['buttons'])
# def on_exit_adventure(self):
# print('Leaving adventure')
def on_enter_historical(self, event):
print("I'm entering historical")
sender_id = event['sender']['id']
send_text_message(sender_id, data['Historical Buildings']['text_to_send'])
send_button_message(sender_id, data['Historical Buildings']['text_button'], data['Historical Buildings']['buttons'])
# def on_exit_historical(self):
# print('Leaving historical')
def on_enter_naturals(self, event):
print("I'm entering naturals")
sender_id = event['sender']['id']
send_text_message(sender_id, data['Naturals']['text_to_send'])
send_button_message(sender_id, data['Naturals']['text_button'], data['Naturals']['buttons'])
# def on_exit_naturals(self):
# print('Leaving naturals')
def on_enter_local_food(self, event):
print("I'm entering local food")
sender_id = event['sender']['id']
send_text_message(sender_id, data['Local Food']['text_to_send'])
send_button_message(sender_id, data['Local Food']['text_button'], data['Local Food']['buttons'])
# def on_exit_local_food(self):
# print('Leaving local food')
#########################################################################
def is_adventure_to_raja_ampat(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'raja ampat islands, west papua'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'raja ampat islands, west papua'
return False
def is_adventure_to_gili_island(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'gili islands, lombok'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'gili islands, lombok'
return False
def is_adventure_to_mount_bromo(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'mount bromo, east java'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'mount bromo, east java'
return False
def on_enter_raja_ampat(self, event):
print("I'm entering raja ampat")
sender_id = event['sender']['id']
send_text_message(sender_id, data['Adventure']['Raja Ampat Islands, West Papua']['text'])
send_image_url(sender_id, data['Adventure']['Raja Ampat Islands, West Papua']['image'], data['Adventure']['Raja Ampat Islands, West Papua']['image_type'])
send_button_message(sender_id, "quick options:", data['Adventure']['Raja Ampat Islands, West Papua']['buttons'])
def on_enter_gili_island(self, event):
print("I'm entering gili island")
sender_id = event['sender']['id']
send_text_message(sender_id, data['Adventure']['Gili Islands, Lombok']['text'])
send_image_url(sender_id, data['Adventure']['Gili Islands, Lombok']['image'], data['Adventure']['Gili Islands, Lombok']['image_type'])
send_button_message(sender_id, "quick options:", data['Adventure']['Gili Islands, Lombok']['buttons'])
def on_enter_mount_bromo(self, event):
print("I'm entering mount_bromo")
sender_id = event['sender']['id']
send_text_message(sender_id, data['Adventure']['Mount Bromo, East Java']['text'])
send_image_url(sender_id, data['Adventure']['Mount Bromo, East Java']['image'], data['Adventure']['Mount Bromo, East Java']['image_type'])
send_button_message(sender_id, "quick options:", data['Adventure']['Mount Bromo, East Java']['buttons'])
#########################################################################
def is_historical_to_borobudur(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'borobudur temple, magelang'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'borobudur temple, magelang'
return False
def is_historical_to_prambanan(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'prambanan temple, yogyakarta'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'prambanan temple, yogyakarta'
return False
def is_historical_to_old_town(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'kota tua (old town), jakarta'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'kota tua (old town), jakarta'
return False
def on_enter_borobudur(self, event):
print("I'm entering borobudur")
sender_id = event['sender']['id']
send_text_message(sender_id, data['Historical Buildings']['Borobudur Temple, Magelang']['text'])
send_image_url(sender_id, data['Historical Buildings']['Borobudur Temple, Magelang']['image'], data['Historical Buildings']['Borobudur Temple, Magelang']['image_type'])
send_button_message(sender_id, "quick options:", data['Historical Buildings']['Borobudur Temple, Magelang']['buttons'])
def on_enter_prambanan(self, event):
print("I'm entering prambanan")
sender_id = event['sender']['id']
send_text_message(sender_id, data['Historical Buildings']['Prambanan Temple, Yogyakarta']['text'])
send_image_url(sender_id, data['Historical Buildings']['Prambanan Temple, Yogyakarta']['image'], data['Historical Buildings']['Prambanan Temple, Yogyakarta']['image_type'])
send_button_message(sender_id, "quick options:", data['Historical Buildings']['Prambanan Temple, Yogyakarta']['buttons'])
def on_enter_old_town(self, event):
print("I'm entering old town")
sender_id = event['sender']['id']
send_text_message(sender_id, data['Historical Buildings']['Kota Tua (Old Town), Jakarta']['text'])
send_image_url(sender_id, data['Historical Buildings']['Kota Tua (Old Town), Jakarta']['image'], data['Historical Buildings']['Kota Tua (Old Town), Jakarta']['image_type'])
send_button_message(sender_id, "quick options:", data['Historical Buildings']['Kota Tua (Old Town), Jakarta']['buttons'])
#########################################################################
def is_naturals_in_bali(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'bali, the island of gods'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'bali, the island of gods'
return False
def is_naturals_in_lake_toba(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'lake toba, north sumatra'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'lake toba, north sumatra'
return False
def is_naturals_in_dieng_plateau(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'dieng plateau'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'dieng plateau'
return False
def on_enter_bali(self, event):
print("I'm entering bali")
sender_id = event['sender']['id']
send_text_message(sender_id, data['Naturals']['Bali, the Island of Gods']['text'])
send_image_url(sender_id, data['Naturals']['Bali, the Island of Gods']['image'], data['Naturals']['Bali, the Island of Gods']['image_type'])
send_button_message(sender_id, "quick options:", data['Naturals']['Bali, the Island of Gods']['buttons'])
def on_enter_lake_toba(self, event):
print("I'm entering lake toba")
sender_id = event['sender']['id']
send_text_message(sender_id, data['Naturals']['Lake Toba, North Sumatra']['text'])
send_image_url(sender_id, data['Naturals']['Lake Toba, North Sumatra']['image'], data['Naturals']['Lake Toba, North Sumatra']['image_type'])
send_button_message(sender_id, "quick options:", data['Naturals']['Lake Toba, North Sumatra']['buttons'])
def on_enter_dieng_plateau(self, event):
print("I'm entering dieng plateau")
sender_id = event['sender']['id']
send_text_message(sender_id, data['Naturals']['Dieng Plateau']['text'])
send_image_url(sender_id, data['Naturals']['Dieng Plateau']['image'], data['Naturals']['Dieng Plateau']['image_type'])
send_button_message(sender_id, "quick options:", data['Naturals']['Dieng Plateau']['buttons'])
#########################################################################
def is_local_food_satay(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'satay'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'satay'
return False
def is_local_food_rendang(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'rendang'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'rendang'
return False
def is_local_food_fried_rice(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'fried rice'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'fried rice'
return False
def on_enter_satay(self, event):
print("I'm entering satay")
sender_id = event['sender']['id']
send_text_message(sender_id, data['Local Food']['Satay']['text'])
send_image_url(sender_id, data['Local Food']['Satay']['image'], data['Local Food']['Satay']['image_type'])
send_button_message(sender_id, "quick options:", data['Local Food']['Satay']['buttons'])
def on_enter_rendang(self, event):
print("I'm entering rendang")
sender_id = event['sender']['id']
send_text_message(sender_id, data['Local Food']['Rendang']['text'])
send_image_url(sender_id, data['Local Food']['Rendang']['image'], data['Local Food']['Rendang']['image_type'])
send_button_message(sender_id, "quick options:", data['Local Food']['Rendang']['buttons'])
def on_enter_fried_rice(self, event):
print("I'm entering fried rice")
sender_id = event['sender']['id']
send_text_message(sender_id, data['Local Food']['Fried Rice']['text'])
send_image_url(sender_id, data['Local Food']['Fried Rice']['image'], data['Local Food']['Fried Rice']['image_type'])
send_button_message(sender_id, "quick options:", data['Local Food']['Fried Rice']['buttons'])
############################################################
def is_next(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'see next recommended place'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'see next recommended place'
return False
def is_select_another_place(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'select another place'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'select another place'
return False
def is_change_travel_purpose(self, event):
if event.get("message"):
text = event['message']['text']
return text.lower() == 'change my travel purpose'
elif (event.get("postback")):
text = event['postback']['payload']
return text.lower() == 'change my travel purpose'
return False
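    # Every is_* guard above shares one shape; a sketch of a reusable helper
    # (name illustrative) that could replace them, e.g.
    # self._payload_matches(event, 'see next recommended place') for is_next:
    @staticmethod
    def _payload_matches(event, *accepted):
        if event.get("message"):
            text = event['message']['text']
        elif event.get("postback"):
            text = event['postback']['payload']
        else:
            return False
        return text.lower() in accepted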
|
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.v3_gtsabbreviation import (
v3GTSAbbreviation as v3GTSAbbreviation_,
)
__all__ = ["v3GTSAbbreviation"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class v3GTSAbbreviation(v3GTSAbbreviation_):
"""
v3 Code System GTSAbbreviation
Open Issue: It appears that the printnames are suboptimal and should
be improved for many of the existing codes.
Status: active - Version: 2018-08-12
http://terminology.hl7.org/ValueSet/v3-GTSAbbreviation
"""
class Meta:
resource = _resource
|
import unittest
import pinocchio as pin
pin.switchToNumpyMatrix()
import numpy as np
@unittest.skipUnless(pin.WITH_HPP_FCL, "Needs HPP-FCL")
class TestGeometryObjectBindings(unittest.TestCase):
def setUp(self):
self.model = pin.buildSampleModelHumanoid()
self.collision_model = pin.buildSampleGeometryModelHumanoid(self.model)
def test_name_get_set(self):
col = self.collision_model.geometryObjects[0]
self.assertTrue(col.name == 'rleg_shoulder_object')
col.name = 'new_collision_name'
self.assertTrue(col.name == 'new_collision_name')
def test_parent_get_set(self):
col = self.collision_model.geometryObjects[0]
self.assertTrue(col.parentJoint == 2)
col.parentJoint = 3
self.assertTrue(col.parentJoint == 3)
def test_placement_get_set(self):
m = pin.SE3.Identity()
new_m = pin.SE3.Random()
col = self.collision_model.geometryObjects[0]
self.assertTrue(np.allclose(col.placement.homogeneous,m.homogeneous))
col.placement = new_m
self.assertTrue(np.allclose(col.placement.homogeneous , new_m.homogeneous))
def test_meshpath_get(self):
col = self.collision_model.geometryObjects[0]
self.assertTrue(col.meshPath is not None)
def test_scale(self):
scale = np.array([1.,2.,3.])
pin.setGeometryMeshScales(self.collision_model,scale)
for obj in self.collision_model.geometryObjects:
self.assertTrue(obj.meshScale[0] == scale[0])
self.assertTrue(obj.meshScale[1] == scale[1])
self.assertTrue(obj.meshScale[2] == scale[2])
def test_scalar_scale(self):
scale = 2.
vec = np.array([scale]*3)
pin.setGeometryMeshScales(self.collision_model,scale)
for obj in self.collision_model.geometryObjects:
self.assertTrue(np.allclose(obj.meshScale, vec))
def test_create_data(self):
collision_data = self.collision_model.createData()
self.assertEqual(len(collision_data.oMg), self.collision_model.ngeoms)
def test_create_datas(self):
collision_data = self.collision_model.createData()
self.assertEqual(len(collision_data.oMg), self.collision_model.ngeoms)
data_2, collision_data_2 = pin.createDatas(self.model, self.collision_model)
self.assertTrue(self.model.check(data_2))
self.assertEqual(len(collision_data_2.oMg), self.collision_model.ngeoms)
def test_copy(self):
collision_model_copy = self.collision_model.copy()
self.assertEqual(self.collision_model.ngeoms,collision_model_copy.ngeoms)
collision_data = self.collision_model.createData()
collision_data_copy = collision_data.copy()
self.assertEqual(len(collision_data.oMg),len(collision_data_copy.oMg))
if __name__ == '__main__':
unittest.main()
|
from helper import *
import math
from queue import PriorityQueue
#from path_finding import *
class Node():
def __init__(self, x, y):
self.coords = (x, y)
self.estimate = 0
self.cost = 0
def __gt__(self, other):
return self.estimate > other.estimate
def __eq__(self, other):
return self.estimate == other.estimate
def manhattan(coord1, coord2):
return abs(coord1[0]-coord2[0]) + abs(coord1[1]-coord2[1])
def normalize_tiles(gameMap, playerPos, housePos):
tiles = gameMap.tiles
max_x = 0
max_y = 0
for row in tiles:
for tile in row:
if tile.Position.x > max_x:
max_x = tile.Position.x
if tile.Position.y > max_y:
max_y = tile.Position.y
overflow_left = 0
overflow_up = 0
overflow_right = 0
overflow_down = 0
if playerPos.x - 10 < 0:
overflow_left = abs(playerPos.x-10)
if playerPos.y - 10 < 0:
overflow_up = abs(playerPos.y-10)
if playerPos.x + 10 > max_x:
overflow_right = (playerPos.x+10) - max_x
if playerPos.y + 10 > max_y:
overflow_down = (playerPos.y+10) - max_y
result = [[' ' for _ in range(255)] for _ in range(255)]
for row in tiles:
for tile in row:
coord = tile.Position
#if coord.x > max_x - overflow_left:
# coord.x = 254 - (max_x - coord.x)
#if coord.y > max_y - overflow_up:
# coord.y = 254 - (max_y - coord.y)
#if coord.y < overflow_down:
# coord.y = max_y + coord.y + 1
#if coord.x < overflow_right:
# coord.x = max_x + coord.x + 1
if tile.TileContent == TileContent.Empty:
result[coord.y][coord.x] = ' '
if tile.TileContent == TileContent.Resource:
result[coord.y][coord.x] = 'R'
if tile.TileContent == TileContent.House:
if housePos.x == coord.x and housePos.y == coord.y:
result[coord.y][coord.x] = ' '
else:
result[coord.y][coord.x] = '#'
if tile.TileContent == TileContent.Wall:
result[coord.y][coord.x] = 'T'
if tile.TileContent == TileContent.Lava:
result[coord.y][coord.x] = '#'
if tile.TileContent == TileContent.Player:
result[coord.y][coord.x] = 'P'
if tile.TileContent == TileContent.Shop:
result[coord.y][coord.x] = 'S'
return result
def print_view(maze):
output = ""
for row in maze:
for col in row:
output += col
output += '\n'
print(output)
def reconstruct_path(maze, paths, node):
result = []
while paths[node] != None:
prev = paths[node]
move = (node[0]-prev[0], node[1]-prev[1])
result.insert(0, move)
node = prev
return result
def solve_path(maze, start, goal):
#print_view(maze)
costs = {start.coords: 0}
paths = {start.coords: None}
queue = PriorityQueue()
queue.put(start)
print(start.coords, goal.coords)
while not queue.empty():
current_node = queue.get()
#print(current_node.coords)
#print(current_node.coords)
if current_node.coords == goal.coords:
return reconstruct_path(maze, paths, current_node.coords)
neighbors = get_neighbors(maze, current_node, goal)
for neighbor in neighbors:
neighbor.cost = current_node.cost + cost(maze, current_node, neighbor)
neighbor.estimate = neighbor.cost + cost_estimate(maze, neighbor, goal)
if neighbor.coords not in costs or costs[neighbor.coords] > neighbor.cost:
paths[neighbor.coords] = current_node.coords
queue.put(neighbor)
costs[neighbor.coords] = neighbor.cost
return None
def cost(maze, node, neighbor):
return 1
def cost_estimate(maze, node, goal):
return manhattan(node.coords, goal.coords)
def get_neighbors(maze, node, goal):
result = []
left = (node.coords[0]-1, node.coords[1])
up = (node.coords[0], node.coords[1]-1)
right = (node.coords[0]+1, node.coords[1])
down = (node.coords[0], node.coords[1]+1)
Positions = [up, right, down, left]
for pos in Positions:
#if pos[0] < 0:
# pos[0] = 254
#if pos[1] < 0:
# pos[1] = 254
#if pos[0] >= 255:
# pos[0] = 0
#if pos[1] >= 255:
# pos[1] = 0
if pos == goal.coords or maze[pos[1]][pos[0]] == ' ' or maze[pos[1]][pos[0]] == 'T':
result.append(Node(pos[0], pos[1]))
return result
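# A tiny illustration of solve_path: on a 5x5 grid whose '#' border keeps
# get_neighbors in bounds, the result is a list of (dx, dy) unit moves, e.g.
#
#   demo = [list('#####'), list('#   #'), list('#   #'), list('#   #'), list('#####')]
#   solve_path(demo, Node(1, 1), Node(3, 3))  # e.g. [(1, 0), (1, 0), (0, 1), (0, 1)]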
RESSOURCE_BY_BLOC = 1000 # guess
RESSOURCE_BY_PLAYER = 10000
class Bot:
def __init__(self):
self.peace = 0
self.prev_score = 0
self.count = 1
pass
def sortClosest(self, tiles, type):
ressources = []
for row_index, row in enumerate(tiles):
for col_index, tile in enumerate(row):
#print(tile.TileContent)
if tile.TileContent == type:
ressources.append(Node(tile.Position.x, tile.Position.y))
nodeOwnPos = Node(self.PlayerInfo.Position.x, self.PlayerInfo.Position.y)
ressources.sort(key=lambda x: manhattan(nodeOwnPos.coords, x.coords))
return ressources
    def evaluateRessource(self):
        # Currently disabled: this early return makes the body below unreachable.
        return -math.inf
closestRessource = self.sortClosest(self.gameMap.tiles, 4)
for ressource in closestRessource:
pos = Node(self.PlayerInfo.Position.x, self.PlayerInfo.Position.y)
path = solve_path(self.gameMap._tiles, pos, ressource)
if path != None:
self.ressourcePath = path
break
if len(closestRessource) == 0:
return -math.inf
return RESSOURCE_BY_BLOC / len(path)
def evaluatekill(self):
closestplayer = self.sortClosest(self.gameMap.tiles, TileContent.Player)[1:]
for player in closestplayer:
pos = Node(self.PlayerInfo.Position.x, self.PlayerInfo.Position.y)
path = solve_path(self.gameMap._tiles, pos, player)
if path != None:
self.killingPath = path
break
if len(closestplayer) == 0:
print("wtf")
return -math.inf
print("test", len(path), RESSOURCE_BY_PLAYER / len(path))
return RESSOURCE_BY_PLAYER / len(path)
    def evaluateUpgrade(self):
        # Currently disabled: this early return makes the body below unreachable.
        return -math.inf, UpgradeType.AttackPower
essentialItems = ["Sword", "Shield", "Backpack"]
totalRessources = self.PlayerInfo.totalRessources
level = self.PlayerInfo.getUpgradeLevel(self, self.PlayerInfo.CarryingCapacity)
priority = -math.inf, None
if(level <= 3):
if level == 1 and totalRessources >= 10000:
priority = math.inf, UpgradeType.CarryingCapacity
if level == 2 and totalRessources >= 15000:
priority = math.inf, UpgradeType.CarryingCapacity
if level == 3 and totalRessources >= 25000:
priority = math.inf, UpgradeType.CarryingCapacity
else:
if all(i in essentialItems for i in self.playerInfo.carriedItems):
if level == 4 and totalRessources >= 50000:
priority = math.inf, UpgradeType.CarryingCapacity
return priority
def evaluatePurchase(self):
return 0
    def go_home(self, gameMap):
        # Currently disabled: this early return makes the body below (which
        # also references an undefined `tiles`) unreachable.
        return
#tiles = normalize_tiles(gameMap, Point(self.ownPos.coords[0], self.ownPos.coords[1]))
pos = self.PlayerInfo.Position
pos = Node(pos.x, pos.y)
house = self.PlayerInfo.HouseLocation
house = Node(house.x, house.y)
path = solve_path(tiles, pos, house)
if path != None:
point = Point(path[0][0], path[0][1])
return create_move_action(point)
else:
print("something bad happened")
return create_move_action(Point(-1, 0))
def before_turn(self, playerInfo):
"""
Gets called before ExecuteTurn. This is where you get your bot's state.
:param playerInfo: Your bot's current state.
"""
self.PlayerInfo = playerInfo
def execute_turn(self, gameMap, visiblePlayers):
"""
This is where you decide what action to take.
:param gameMap: The gamemap.
:param visiblePlayers: The list of visible players.
"""
self.peace -= 1
if self.prev_score < self.PlayerInfo.Score:
self.count += 1
self.prev_score = self.PlayerInfo.Score
if self.count % 3 == 0:
self.peace = 25
self.count = 2
try:
pass
#prev_score = int(StorageHelper.read("points"))
#if prev_score < self.playerInfo.Score:
# StorageHelper.write("peace", 10)
except:
pass
#StorageHelper.write("points", self.PlayerInfo.Score)
self.ownPos = Node(self.PlayerInfo.Position.x, self.PlayerInfo.Position.y)
self.housePos = Node(self.PlayerInfo.HouseLocation.x, self.PlayerInfo.HouseLocation.y)
self.gameMap = gameMap
self.gameMap._tiles = normalize_tiles(self.gameMap, self.PlayerInfo.Position, self.PlayerInfo.HouseLocation)
self.visiblePlayers = visiblePlayers
# GO KILLING LEFT
if len(self.sortClosest(self.gameMap.tiles, TileContent.Player)) == 1 or self.peace > 0:
print("test")
pos = Node(self.PlayerInfo.Position.x, self.PlayerInfo.Position.y)
print("yMin: ", self.gameMap.yMin)
yMin = self.gameMap.yMin
xMin = self.gameMap.xMin
if yMin < 0:
yMin = 255 + yMin
if xMin < 0:
xMin = 255 + xMin
Target = Node(xMin, self.PlayerInfo.Position.y)
self.path = solve_path(self.gameMap._tiles, pos, Target)
return self.move(self.path)
#create_move_action(self.path[0])
Costs = {"ressource": -math.inf, "kill": -math.inf, "upgrade": -math.inf}
Costs["getRessource"] = self.evaluateRessource()
Costs["goKill"] = self.evaluatekill()
Costs["goUpgrade"], item = self.evaluateUpgrade()
print(Costs)
nextPlan = max(Costs, key=Costs.get)
print(nextPlan)
# PLAN
nextAction = ""
if nextPlan == "getRessource":
self.path = self.ressourcePath
            next_node = Node(self.path[0][0] + self.ownPos.coords[0], self.path[0][1] + self.ownPos.coords[1])
if len(self.path) < 2 and next_node.coords != self.housePos.coords:
nextAction = "collect"
else:
nextAction = "move"
elif nextPlan == "goKill":
self.path = self.killingPath
print(self.path)
return self.move(self.path)
elif nextPlan == "goUpgrade":
self.path = self.upgradePath
if len(self.path) < 2:
nextAction = "upgrade"
else:
nextAction = "move"
        # ACTION: dispatch on nextAction; path entries are (dx, dy) deltas,
        # wrapped in Point as in move()
        if nextAction == "move":
            return create_move_action(Point(self.path[0][0], self.path[0][1]))
        elif nextAction == "collect":
            return create_collect_action(Point(self.path[0][0], self.path[0][1]))
        elif nextAction == "attack":
            return create_attack_action(Point(self.path[0][0], self.path[0][1]))
        elif nextAction == "upgrade":
            return create_upgrade_action(item)
        else:
            return create_move_action(Point(-1, 0))
def after_turn(self):
"""
Gets called after executeTurn
"""
pass
def move(self, path):
next_node = Point(path[0][0]+self.ownPos.coords[0], path[0][1]+self.ownPos.coords[1])
if self.gameMap.getTileAt(next_node) == TileContent.Wall or self.gameMap.getTileAt(next_node) == TileContent.Player:
return create_attack_action(Point(path[0][0], path[0][1]))
else:
return create_move_action(Point(path[0][0], path[0][1]))
|
from utipy.string.letter_strings import letter_strings
def test_letter_strings():
# n: int, num_chars: Optional[int] = None, upper: bool = False, descending: bool = False
assert letter_strings(n=3) == ["a", "b", "c"]
# 2 chars
ls = letter_strings(n=27)
assert len(ls) == 27
assert ls[:3] == ["aa", "ab", "ac"]
# Start with 1 chars, end with 2
ls = letter_strings(n=27, num_chars=1)
assert len(ls) == 27
assert ls[:3] == ["a", "b", "c"]
assert ls[-1] == "aa"
# 3 chars
ls = letter_strings(n=27, num_chars=3)
assert len(ls) == 27
assert ls[:3] == ["aaa", "aab", "aac"]
assert ls[-1] == "aba"
# Uppercase
ls = letter_strings(n=3, upper=True)
assert ls == ["A", "B", "C"]
# Descending
ls = letter_strings(n=3, descending=True)
assert ls == ["z", "y", "x"]
|
# coding: utf8
'''
LintCode: http://www.lintcode.com/zh-cn/problem/balanced-binary-tree/
93. Balanced Binary Tree
Given a binary tree, determine whether it is height-balanced. For this
problem, a height-balanced binary tree is defined as one in which the depths
of the two subtrees of every node differ by at most 1.
Example
Given the binary trees A={3,9,20,#,#,15,7} and B={3,#,20,15,7}:
    A)   3          B)   3
        / \               \
       9  20              20
          / \            /  \
         15  7          15   7
Binary tree A is height-balanced; B is not.
'''
# This is similar to finding the maximum depth of a binary tree: recurse to get each
# subtree's depth, adding one extra termination check. If the left and right subtree
# depths ever differ by more than 1, return -1; once a -1 appears, the rest of the
# recursion can skip further checks, since the tree is already known to be unbalanced.
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
class Solution:
"""
@param root: The root of binary tree.
@return: True if this Binary tree is Balanced, or false.
"""
def isBalanced(self, root):
# write your code here
return self.maxDepth(root) != -1
def maxDepth(self, node):
        if node is None:
return 0
left_depth = self.maxDepth(node.left)
right_depth = self.maxDepth(node.right)
if left_depth == -1 or right_depth == -1 or abs(left_depth - right_depth) > 1:
return -1
return max(left_depth, right_depth) + 1
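if __name__ == '__main__':
    # Quick check using tree B from the docstring (expected: False)
    root = TreeNode(3)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    print(Solution().isBalanced(root))  # False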
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 14 15:59:11 2017
@author: af5u13
"""
#exec(open("/home/andi/dev/visr/src/python/scripts/testPanningCalculator.py").read())
import visr
import panning
import numpy as np
#conffile = """<?xml version=\"1.0\" encoding=\"utf-8\"?>
#<panningConfiguration dimension=\"2\" infinite=\"false\">
# <outputEqConfiguration numberOfBiquads=\"1\" type=\"iir\">
# <filterSpec name=\"lowpass\">
# <biquad a1=\"-1.9688283\" a2=\"0.96907117\" b0=\"6.0729856e-05\" b1=\"0.00012145971\" b2=\"6.0729856e-05\"/>
# </filterSpec>
# <filterSpec name=\"highpass\">
# <biquad a1=\"-1.9688283\" a2=\"0.96907117\" b0=\"-0.98447486\" b1=\"1.9689497\" b2=\"-0.98447486\"/>
# </filterSpec>
# </outputEqConfiguration>
# <loudspeaker channel=\"2\" delay=\"0\" eq=\"highpass\" gainDB=\"0\" id=\"T-030\">
# <cart x=\"2.17\" y=\"1.36\" z=\"0\"/>
# </loudspeaker>
# <loudspeaker channel=\"1\" delay=\"9.8764e-05\" eq=\"highpass\" gainDB=\"-0.35712\" id=\"T+030\">
# <cart x=\"2.15\" y=\"-1.22\" z=\"0.01\"/>
# </loudspeaker>
# <virtualspeaker eq=\"highpass\" id=\"3\">
# <cart x=\"-1\" y=\"0\" z=\"0\"/>
# <route lspId=\"T-030\" gainDB=\"0\"/>
# <route lspId=\"T+030\" gainDB=\"0\"/>
# </virtualspeaker>
# <subwoofer assignedLoudspeakers=\"T-030, T+030\" channel=\"3\" delay=\"0\" eq=\"lowpass\" gainDB=\"0\" weights=\"1.000000, 1.000000\"/>
# <triplet l1=\"T+030\" l2=\"3\"/>
# <triplet l1=\"T-030\" l2=\"T+030\"/>
# <triplet l1=\"3\" l2=\"T-030\"/>
#</panningConfiguration>"""
filename = 'C:/Local/gc1y17/visr/config/isvr/audiolab_stereo_1sub_with_rerouting.xml'
conffile = open(filename,'r')
lc = panning.LoudspeakerArray(filename)
#lc.loadXmlString(conffile)
print("FILE", conffile.read())
numRegSpeakers = lc.numberOfRegularLoudspeakers
numTotSpeakers = lc.totNumberOfLoudspeakers
numSub = lc.numberOfSubwoofers
numTrip = lc.numberOfTriplets
is2D = lc.is2D
print( '\n','GENERAL CONFIGURATION ')
print('Number of regular speakers: ', numRegSpeakers)
print('Number of virtual speakers: ', numTotSpeakers-numRegSpeakers)
print('Total number speakers (virtual included): ',numTotSpeakers)
print('Number of subwoofers: ', numSub)
print('Number of triplets: ', numTrip)
print('2D layout: ',is2D)
print('Infinite distance: ', lc.isInfinite)
print( '\n','LIST OF LOUDSPEAKER CHANNELS')
print('Loudspeaker channel indices: ', lc.channelIndices())
print('Detail:')
for idx in range(0,numRegSpeakers):
# THE METHODS getSpeakerChannel and getSpeakerChannelIndex are redundant in the binding
#print('Channel of loudspeaker at index ',idx,': ',lc.getSpeakerChannel(idx))
print('Channel index of loudspeaker at index ',idx,': ',lc.getSpeakerChannelIndex(idx))
print( '\n','LIST OF SUBWOOFER CHANNELS')
print('Subwoofer indices: ',lc.subwooferChannelIndices())
print('Detail:')
for idx in range(0,numSub):
print('Channel index of subwoofer at index ',idx,': ',lc.subwooferChannelIndex(idx))
print( '\n','LIST OF TRIPLETS')
for idx in range(0,numTrip):
if is2D:
print('Triplet at index ',idx,': ',lc.getTriplet(idx)[0:2])
else:
print('Triplet at index ',idx,': ',lc.getTriplet(idx))
|
import math
import matplotlib.pyplot as plt
import numpy
import pandas
import scipy
import numpy.linalg as linalg
import sklearn.cluster as cluster
import sklearn.neighbors as neighbors
Spiral = pandas.read_csv('C:\\Users\\minlam\\Documents\\IIT\\Machine Learning\\Data\\jain.csv',
delimiter=',')
nObs = Spiral.shape[0]
plt.scatter(Spiral['x'], Spiral['y'], c = Spiral['group'])
plt.xlabel('x')
plt.ylabel('y')
plt.grid(True)
plt.show()
trainData = Spiral[['x','y']]
kmeans = cluster.KMeans(n_clusters=2, random_state=0).fit(trainData)
print("Cluster Centroids = \n", kmeans.cluster_centers_)
Spiral['KMeanCluster'] = kmeans.labels_
for i in range(2):
print("Cluster Label = ", i)
print(Spiral.loc[Spiral['KMeanCluster'] == i])
plt.scatter(Spiral['x'], Spiral['y'], c = Spiral['KMeanCluster'])
plt.xlabel('x')
plt.ylabel('y')
plt.grid(True)
plt.show()
# Sweep the number of nearest neighbors from 1 through 19
for numberOfNeighbors in numpy.arange(1, 20):
trainData = Spiral[['x','y']]
kNNSpec = neighbors.NearestNeighbors(n_neighbors = numberOfNeighbors, algorithm = 'brute', metric = 'euclidean')
nbrs = kNNSpec.fit(trainData)
d3, i3 = nbrs.kneighbors(trainData)
# Retrieve the distances among the observations
distObject = neighbors.DistanceMetric.get_metric('euclidean')
distances = distObject.pairwise(trainData)
# Create the Adjacency matrix
Adjacency = numpy.zeros((nObs, nObs))
for i in range(nObs):
for j in i3[i]:
Adjacency[i,j] = math.exp(- (distances[i][j])**2 )
# Make the Adjacency matrix symmetric
Adjacency = 0.5 * (Adjacency + Adjacency.transpose())
# Create the Degree matrix
Degree = numpy.zeros((nObs, nObs))
for i in range(nObs):
    Degree[i, i] = Adjacency[i, :].sum()
# Create the Laplacian matrix
Lmatrix = Degree - Adjacency
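# For the unnormalised Laplacian, the multiplicity of the eigenvalue 0 equals the
# number of connected components, so the smallest eigenvalues indicate the cluster count.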
# Obtain the eigenvalues and the eigenvectors of the Laplacian matrix
evals, evecs = linalg.eigh(Lmatrix)
# Part (d)
averageTraceL = numpy.trace(Lmatrix) / nObs
for i in range(numberOfNeighbors):
threshEigenvalue = evals[i,] / averageTraceL
print("i = %2d, Eigenvalue = %.14e %.14e" % (i, evals[i,], threshEigenvalue))
# Series plot of the smallest eigenvalues (the first numberOfNeighbors of them) to determine the number of clusters
sequence = numpy.arange(1,(numberOfNeighbors+1),1)
plt.plot(sequence, evals[0:numberOfNeighbors,], marker = "o")
plt.xlabel('Sequence')
plt.ylabel('Eigenvalue')
plt.xticks(sequence)
plt.grid("both")
plt.show()
Z = evecs[:,[0,1]]
# Perform 2-cluster K-mean on the first two eigenvectors
kmeans_spectral = cluster.KMeans(n_clusters = 2, random_state = 0).fit(Z)
Spiral['SpectralCluster'] = kmeans_spectral.labels_
plt.scatter(Spiral['x'], Spiral['y'], c = Spiral['SpectralCluster'])
plt.xlabel('x')
plt.ylabel('y')
plt.grid(True)
plt.show()
|
#!/usr/bin/env python3
import asyncio
from async_generator import aclosing
from amqproto.adapters.asyncio_adapter import AsyncioConnection, run
async def main():
try:
async with AsyncioConnection(host='localhost') as connection:
async with connection.get_channel() as channel:
await channel.queue_declare('task_queue', durable=True)
await channel.basic_qos(prefetch_count=1)
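# prefetch_count=1 tells the broker not to dispatch a new message to this
# consumer until the previous one has been acknowledged (fair dispatch).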
await channel.basic_consume('task_queue')
print(' [x] Waiting for messages. To exit press CTRL+C')
async with aclosing(channel.delivered_messages()) as messages:
async for message in messages:
body = message.body.decode('utf-8')
to_sleep = body.count('.')
print(" [x] Received %r, sleeping for %d seconds" % (
body, to_sleep
))
await asyncio.sleep(to_sleep)
print(" [x] Done")
await channel.basic_ack(
delivery_tag=message.delivery_info.delivery_tag
)
except asyncio.CancelledError:
print(' [x] Bye!')
run(main())
|
from normality import collapse_spaces, stringify
from pprint import pprint # noqa
from datetime import datetime
from lxml import html
from opensanctions import constants
MAX_RESULTS = 160
SEEN = set()
COUNTRIES_URL = "https://www.interpol.int/en/How-we-work/Notices/View-Red-Notices"
SEXES = {
"M": constants.MALE,
"F": constants.FEMALE,
}
def parse_date(date):
if date:
try:
date = datetime.strptime(date, "%Y/%m/%d")
except ValueError:
date = datetime.strptime(date, "%Y")
return date.date()
def get_value(el):
if el is None:
return
text = stringify(el.get("value"))
if text is not None:
return collapse_spaces(text)
def get_countries(context):
res = context.http.get(COUNTRIES_URL)
doc = html.fromstring(res.text)
path = ".//select[@id='arrestWarrantCountryId']//option"
options = doc.findall(path)
return [get_value(el) for el in options]
def crawl_notice(context, notice):
url = notice.get("_links", {}).get("self", {}).get("href")
if url in SEEN:
return
SEEN.add(url)
res = context.http.get(url)
# if res.status_code == 403:
# context.log.warning("Blocked by INTERPOL", url=res.url, country=country)
# return
# if not res.from_cache:
# time.sleep(0.5)
notice = res.json()
first_name = notice["forename"] or ""
last_name = notice["name"] or ""
dob = notice["date_of_birth"]
warrants = [
(warrant["charge"], warrant["issuing_country_id"])
for warrant in notice["arrest_warrants"] # noqa
]
entity = context.make("Person")
entity.make_id("INTERPOL", notice.get("entity_id"))
entity.add("name", first_name + " " + last_name)
entity.add("firstName", first_name)
entity.add("lastName", last_name)
entity.add("nationality", notice.get("nationalities"))
# TODO: make this a Sanction:
for charge, country in warrants:
entity.add("program", country)
entity.add("summary", charge)
entity.add("gender", SEXES.get(notice.get("sex_id")))
entity.add("birthPlace", notice.get("place_of_birth"))
entity.add("birthDate", parse_date(dob))
entity.add("sourceUrl", url)
# entity.add("keywords", "REDNOTICE")
# entity.add("topics", "crime")
context.emit(entity)
def crawl_country(context, country, age_max=120, age_min=0):
params = {
"ageMin": int(age_min),
"ageMax": int(age_max),
"arrestWarrantCountryId": country,
"resultPerPage": MAX_RESULTS,
}
res = context.http.get(context.dataset.data.url, params=params)
# if res.status_code == 403:
# context.log.warning("Blocked by INTERPOL", url=res.url, country=country)
# return
# if not res.from_cache:
# time.sleep(0.5)
data = res.json()
notices = data.get("_embedded", {}).get("notices", [])
for notice in notices:
crawl_notice(context, notice)
total = data.get("total")
# pprint((country, total, age_max, age_min))
if total > MAX_RESULTS:
age_range = age_max - age_min
if age_range > 1:
age_split = age_min + (age_range // 2)
crawl_country(context, country, age_max, age_split)
crawl_country(context, country, age_split, age_min)
elif age_range == 1:
crawl_country(context, country, age_max, age_max)
crawl_country(context, country, age_min, age_min)
def crawl(context):
for country in get_countries(context):
if country is not None:
crawl_country(context, country)
|
"""Class for RESQML Earth Model Interpretation organizational objects."""
from ._utils import (equivalent_extra_metadata, extract_has_occurred_during, equivalent_chrono_pairs,
create_xml_has_occurred_during)
import resqpy.olio.uuid as bu
import resqpy.olio.xml_et as rqet
from resqpy.olio.base import BaseResqpy
from resqpy.olio.xml_namespaces import curly_namespace as ns
from .organization_feature import OrganizationFeature
class EarthModelInterpretation(BaseResqpy):
"""Class for RESQML Earth Model Interpretation organizational objects."""
# TODO: add support for StratigraphicColumn reference and other optional references
resqml_type = 'EarthModelInterpretation'
valid_domains = ('depth', 'time', 'mixed')
def __init__(self,
parent_model,
root_node = None,
uuid = None,
title = None,
organization_feature = None,
domain = 'depth',
extra_metadata = None):
"""Initialises an earth model interpretation organisational object."""
self.domain = domain
self.organization_feature = organization_feature # InterpretedFeature RESQML field
self.feature_root = None if self.organization_feature is None else self.organization_feature.root
self.has_occurred_during = (None, None)
if (not title) and organization_feature is not None:
title = organization_feature.feature_name
super().__init__(model = parent_model,
uuid = uuid,
title = title,
extra_metadata = extra_metadata,
root_node = root_node)
def _load_from_xml(self):
self.domain = rqet.find_tag_text(self.root, 'Domain')
interp_feature_ref_node = rqet.find_tag(self.root, 'InterpretedFeature')
assert interp_feature_ref_node is not None
self.feature_root = self.model.referenced_node(interp_feature_ref_node)
if self.feature_root is not None:
self.organization_feature = OrganizationFeature(self.model,
uuid = self.feature_root.attrib['uuid'],
feature_name = self.model.title_for_root(self.feature_root))
self.has_occurred_during = extract_has_occurred_during(self.root)
def is_equivalent(self, other, check_extra_metadata = True):
"""Returns True if this interpretation is essentially the same as the other; otherwise False."""
if other is None or not isinstance(other, EarthModelInterpretation):
return False
if self is other or bu.matching_uuids(self.uuid, other.uuid):
return True
if self.organization_feature is not None:
if not self.organization_feature.is_equivalent(other.organization_feature):
return False
elif other.organization_feature is not None:
return False
if self.root is not None and other.root is not None:
if rqet.citation_title_for_node(self.root) != rqet.citation_title_for_node(other.root):
return False
elif self.root is not None or other.root is not None:
return False
if check_extra_metadata and not equivalent_extra_metadata(self, other):
return False
return self.domain == other.domain and equivalent_chrono_pairs(self.has_occurred_during,
other.has_occurred_during)
def create_xml(self,
organization_feature_root = None,
add_as_part = True,
add_relationships = True,
originator = None,
title_suffix = None,
reuse = True):
"""Creates an earth model interpretation organisational xml node from an earth model interpretation object."""
# note: related organization feature node should be created first and referenced here
if not self.title:
self.title = self.organization_feature.feature_name
if title_suffix:
self.title += ' ' + title_suffix
if reuse and self.try_reuse():
return self.root
emi = super().create_xml(add_as_part = False, originator = originator)
if self.organization_feature is not None:
of_root = self.organization_feature.root
if of_root is not None:
if organization_feature_root is None:
organization_feature_root = of_root
else:
assert of_root is organization_feature_root, 'organization feature mismatch'
assert organization_feature_root is not None, 'interpreted feature not established for model interpretation'
assert self.domain in self.valid_domains, 'illegal domain value for earth model interpretation'
dom_node = rqet.SubElement(emi, ns['resqml2'] + 'Domain')
dom_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'Domain')
dom_node.text = self.domain
self.model.create_ref_node('InterpretedFeature',
self.model.title_for_root(organization_feature_root),
organization_feature_root.attrib['uuid'],
content_type = 'obj_OrganizationFeature',
root = emi)
create_xml_has_occurred_during(self.model, emi, self.has_occurred_during)
if add_as_part:
self.model.add_part('obj_EarthModelInterpretation', self.uuid, emi)
if add_relationships:
self.model.create_reciprocal_relationship(emi, 'destinationObject', organization_feature_root,
'sourceObject')
return emi
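# Minimal usage sketch (illustrative; assumes an existing resqpy Model `model` and
# an OrganizationFeature `feature` created elsewhere):
# emi = EarthModelInterpretation(model, organization_feature = feature, domain = 'depth')
# emi.create_xml()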
|
# Introduction to Python
In this second workshop we'll take our first look at programming in Python. We'll cover the key data types (including NumPy arrays), as well as functions and control flow statements. You could do an entire course focused on learning Python as a general purpose programming language. For our goal, we're going to focus on how to use Python and some of its key libraries for data science work. The language has been around since 1991 and is named in tribute to Monty Python...
<center>

</center>
There are a couple of really good introductory books on using Python for data science that I recommend.
<center>
[](https://www.amazon.co.uk/Python-Data-Analysis-Wes-Mckinney-dp-1491957662/dp/1491957662/ref=dp_ob_title_bk) [](https://www.amazon.co.uk/Python-Data-Science-Handbook-Techniques/dp/1491912057/ref=sr_1_1?crid=KFHZFNZYHQTL&dchild=1&keywords=python+data+science+handbook&qid=1619688682&s=books&sprefix=python+data+science+%2Cstripbooks%2C157&sr=1-1)
</center>
%%HTML
<div style="text-align: center">
<iframe width="560" height="315" src="https://youtube.com/embed/i5NelhnFbrQ" frameborder="0" allowfullscreen></iframe>
</div>
## Similarities Between Python and R
Both Python and R are high-level interpreted languages - this means that Python and R scripts can only be run if you have a Python or R interpreter installed on the machine you’re using.
Both Python and R are typically used with libraries or packages that can hugely expand the functionality of the basic language.
When doing data science in R or Python, you’re likely using the base language plus packages such as `{dplyr}` and `{tidyr}` in R and libraries such as `numpy` and `pandas` in Python.
Reproducibility is important so you need to keep track of the packages/libraries you’re using so that you can recreate your analysis at some future point (or so that someone else can recreate your analysis).
## Differences Between Python and R
Python is a general purpose programming language, while R is a specialised statistical programming language (although it can sometimes be used for more general programming tasks).
As a language, Python code is more readable than R code and there's a consistency in the Python language that you don't get in R (e.g., differences in base R vs. Tidyverse syntax vs. formula syntax). While R has largely maintained backwards compatibility, Python has not (e.g., the breaking changes between Python 2.x and 3.x).
Python is an object-oriented programming language and this gives you flexibility to use methods to work with new objects.
You can do a lot more in Python than in R, but Python lacks some of the statistical model libraries that are present in R (although it has lots more machine learning libraries).
### Indentation Has Meaning In Python
One of the biggest differences between Python and R is that **in Python indentation has meaning** - in R indentation can make your scripts look nice (but has no intrinsic meaning). In Python, indentation is used in place of `{ }` in order to group together statements.
Consider a for loop to print the numbers 1 to 10.
In Python:
for i in range(1,11):
print(i)
In R:
for (i in 1:10) {
print(i)
}
### Python Uses Zero-Based Indexing
In Python, indexing starts at 0 but in R indexing starts at 1. In practice, this means that if we want to reference an element in a list (for example) we need to remember the initial element is at position 0 in Python, but position 1 in R.
So, to refer to the initial element `('apple')` we need to do the following:
In Python:
my_list = ['apple', 'banana', 'pear']
my_list[0]
In R:
my_list <- list('apple', 'banana', 'pear')
my_list[1]
In both these cases the output will be `apple`.
%%HTML
<div style="text-align: center">
<iframe width="250" height="250" src="https://giphy.com/embed/LmNwrBhejkK9EFP504" frameBorder="0" class="giphy-embed" allowFullScreen></iframe><p><a href="https://giphy.com/gifs/memecandy-LmNwrBhejkK9EFP504">via GIPHY</a></p>
</div>
## Data Types and Variable Assignment
%%HTML
<div style="text-align: center">
<iframe width="560" height="315" src="https://youtube.com/embed/DxZ0eTWYmnE" frameborder="0" allowfullscreen></iframe>
</div>
We will be writing our code using a Jupyter Notebook. On macOS, you are going to be entering the commands below that start up a Jupyter Notebook using Terminal, and on Windows you will be using the Anaconda Prompt. Jupyter Notebooks allow us to write text in Markdown format (as I have written in this block) alongside the Python code and output. If you have completed the previous workshop, you should be able to launch a Jupyter Notebook on your own machine. Make sure you have a conda environment activated (type `conda activate` if you don't). Set up a new Notebook by typing `jupyter notebook` in a terminal window. This should launch Jupyter Notebooks running in your browser.
conda activate
jupyter notebook
Start a new Python script in your `Python (data_science)` environment.

Some of the most common data types in Python are integer (`int`) and floating point (`float`) for representing numbers, and strings (`str`) for representing text. We can assign values to variables - the data type is dynamically inferred by Python (in contrast to other languages such as C++ where they have to be explicitly declared). You can check what type a variable is by using `type()`.
Try running the following two lines of Python code in your Jupyter Notebook.
my_name = 'Andrew'
print(type(my_name))
In the output, you should see that the variable `my_name` has been identified as type `str` as it contains text.
Assigning values to variables is a key component in scripting/coding. You can use variables to store values that you need to be able to access later, and variables can be re-assigned as your script progresses (maybe you want to store temporary values in a variable). You can print the contents of a variable using the `print(variable_name)` function in Python. In the code below, we create two variables. The `print` function can take multiple arguments so we can print the values of the two variables with the line `print(first_variable_name, second_variable_name)`.
my_favourite_number_text = 'My favourite number is:'
my_favourite_number = 25
print(my_favourite_number_text, my_favourite_number)
Modify the above code so that it displays the following:
25 is my favourite number.
```{admonition} Click the button to reveal answer
:class: dropdown
my_favourite_number = 25
my_favourite_number_text = "is my favourite number."
print(my_favourite_number, my_favourite_number_text)
```
You might have come up with a slightly different solution. It's important to remember there are often several (sometimes many) ways to achieve the same task - even in quite simple cases.
Let’s now assign a different number to the `my_favourite_number` variable and change the `my_favourite_number_text` back to what it was originally.
my_favourite_number = 16
my_favourite_number_text = 'My favourite number is:'
print(my_favourite_number_text, my_favourite_number)
## Lists
%%HTML
<div style="text-align: center">
<iframe width="560" height="315" src="https://youtube.com/embed/bINIwJnLAn4" frameborder="0" allowfullscreen></iframe>
</div>
Lists can be assigned to variables. In Python lists are indicated by square brackets, `[` and `]`, and contain elements - often of the same type - for example, lists of integers, strings etc. Lists can also contain a mix of elements of different types (but you may want to avoid this). The next bit of code creates a list called `my_numbers` that contains 4 integer elements. Remember, in contrast to R, Python uses zero-position indexing. So the first element in the list is at position 0, the second at position 1 etc. We can index an element in the list using square brackets. Let's index the second element...
my_numbers = [10, 20, 30, 40]
my_numbers[1]
The next list is a list of strings. How would you go about indexing the fourth element?
my_names = ['Andrew', 'Suzanne', 'Eliza', 'Seb']
```{admonition} Click the button to reveal answer
:class: dropdown
my_names[3]
```
Elements in lists can be changed (i.e., lists are mutable):
my_names[0] = 'Iggy Pop'
print(my_names)
Lists can be sliced using `:`. The following will slice from the third element to the end of the list.
my_names[2:]
While the line below will slice from the start of the list up to (but *excluding*) the third element.
my_names[:2]
We can also specify the start and stop points of the slicing as follows.
my_names[1:3]
## Tuples
%%HTML
<div style="text-align: center">
<iframe width="560" height="315" src="https://youtube.com/embed/i5FOo0LtT9M" frameborder="0" allowfullscreen></iframe>
</div>
Tuples are like lists, except they are *immutable* - in other words, their contents cannot be changed later. While lists are created using *square* brackets, tuples are created using *round* brackets. But like lists, you use *square* brackets to reference elements in a tuple.
my_tuple = (10, 20, 30, 40)
my_tuple[3]
If you try to change an element in a tuple, an error will be generated.
my_tuple[3] = 5
Tuples can be sliced too. The following will slice from the start up to (but excluding) the fourth element.
my_tuple[:3]
How would you slice `my_tuple` so that it contains only the second and third elements?
:::{admonition} Click the button to reveal answer
:class: dropdown
my_tuple[1:3]
:::
If you wanted to, you could map the above output onto a new variable called `my_sliced_tuple` and use the operator `==` (more on this later) to check that they are the same.
my_sliced_tuple = my_tuple[:3]
my_sliced_tuple == my_tuple[:3]
## Arrays
%%HTML
<div style="text-align: center">
<iframe width="560" height="315" src="https://youtube.com/embed/YvfGr8ZN5-k" frameborder="0" allowfullscreen></iframe>
</div>
An array is a data structure consisting of a collection of elements, each identified by at least one array index (or key). Arrays are core data structures in data science and machine learning. For example, you can store images as 2-dimensional arrays representing pixel brightness across the area of the image.
Data frames and tibbles in R are types of 2-dimensional arrays - data stored in rectangular format with rows and columns. Arrays don’t have to just be in two dimensions but it can be tricky imagining more dimensions...
NumPy arrays are more efficient than Python's built-in sequences, and the advantage grows as the arrays get larger.
In the code below we're importing the `numpy` package as `np` (this is the conventional alias for this package). We then set our randomisation seed to ensure reproducibility. Remember, computers can't generate true random numbers, so they use an algorithm. We can fix the start of this algorithmic generation procedure to ensure that if we re-run our code we get the same random numbers. We then create an array of random integers from 0 (inclusive) to 10 (exclusive) that has 3 rows and 4 columns. We use the NumPy routine `random` and the operation `randint` to generate this array. We need to specify the low and high values of the range we're sampling from, and the shape of the array (number of rows by number of columns) we want to generate.
import numpy as np
np.random.seed(1234)
my_array = np.random.randint(low=0, high=10, size=(3, 4))
my_array
We can then check the shape of the array using `.shape`
my_array.shape
## Getting Help
In your Jupyter Notebook you can type `help()` and put in the brackets the name of the module and function you want help with. Most help files are incredibly useful and will clearly describe what a function does, what it takes as its input parameters, and what it returns in its output. Below is what I get when I ask for help with the `shape` operation in `numpy`.
help(np.shape)
## Variable Assignments are References (not Copies)
%%HTML
<div style="text-align: center">
<iframe width="560" height="315" src="https://youtube.com/embed/BnbjqxFZRlk" frameborder="0" allowfullscreen></iframe>
</div>
Assignment in Python involves creating bindings between a target and an object in computer memory - not just simple copying - this can easily trip you up if you assume that assignment creates a new **copy** of the original object.
my_old_names = ['Andrew', 'Suzanne', 'Eliza', 'Seb']
my_new_names = my_old_names
print(my_new_names)
my_old_names[0] = 'this is surprising'
print(my_new_names)
Note that in the above code we're changing the first element in the list `my_old_names` but keeping the variable `my_new_names` the same as it was (or so we think). But if we now print `my_new_names` we see that this list reflects the change we made to `my_old_names`. This will catch you out unless you realise that both variable names are pointing to the same contents in the computer's memory.
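If you actually want an independent copy, ask for one explicitly - for example with the list's `.copy()` method (a shallow copy; the variable names below are just for illustration). Changing the original then leaves the copy untouched.
my_old_names = ['Andrew', 'Suzanne', 'Eliza', 'Seb']
my_copied_names = my_old_names.copy()
my_old_names[0] = 'this is no longer surprising'
print(my_copied_names)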
## Functions
%%HTML
<div style="text-align: center">
<iframe width="560" height="315" src="https://youtube.com/embed/zvIEzvX5zEU" frameborder="0" allowfullscreen></iframe>
</div>
Python has a number of built-in functions such as `print()`, `abs()`, `bin()` etc. You can see the full list [here](https://docs.python.org/3/library/functions.html). Generally, functions take an input, do something with the input, and then output the result. You can use the `help()` function to get help on other functions.
help(print)
## User-Defined Functions
If you find yourself writing the same chunk of code again and again, you might want to turn it into a function. For example, imagine I want to display someone’s BMI on the basis of knowing their height (in metres) and mass (in kg). The formula for BMI is: BMI = kg/m2. Let’s write a function that takes as its input someone’s weight and height, and then returns their BMI. We use `def` to define our function that we're calling `bmi`. It takes two parameters, `weight` and `height`. Inside the body of the function it creates a new variable called `bmi_value` which is `weight` divided by `height` squared. The function then returns this value.
We can call the function to work out the BMI of someone with a weight of 87 kg and a height of 1.8 metres with `bmi(87, 1.8)`.
Note that in the code below we are using indentation for the body of the function. Indentation in Python is important and is meaningful (i.e., it's not an aesthetic decision). Indentation is used to indicate a block of code. The convention is to indent each line of a block by 4 spaces (don't use tab).
def bmi(weight, height):
bmi_value = weight/(height*height)
return bmi_value
bmi(87, 1.8)
It is worth noting that functions don't have to take any input parameters. For example, in the trivial function below - which prints "Hello world!" - we can call the function with `hello_world()`
def hello_world():
print('Hello world!')
hello_world()
Your challenge is to write a function that calculates Cohen's d, a common measure of effect size, for an experiment given two means of 1020 and 1000, and a pooled standard deviation (SD) of 50. The equation for Cohen's d is:
$$
\textit{Cohen's d} = \frac{mean_1 - mean_2}{pooled\: SD}
$$
Your function will need to take three arguments (the two means plus the pooled SD) and return the Cohen's d value. Try that now.
:::{admonition} One possible solution is below.
:class: dropdown
def cohen_d(mean1, mean2, sd):
effect_size = (mean1 - mean2) / sd
return effect_size
cohen_d(1020, 1000, 50)
:::
## Control Flow Statements - For Loops
%%HTML
<div style="text-align: center">
<iframe width="560" height="315" src="https://youtube.com/embed/vQrhJMd6Hb8" frameborder="0" allowfullscreen></iframe>
</div>
We can run the same command or chunk of code any number of times by placing it within a `for` loop. In the following, we print the phrase `Hello world!` five times. In Python, the code block within the loop that is to be repeated needs to be indented.
for i in range(0, 5):
print('Hello world!')
We can also iterate over elements in an array using a `for` loop. In the following example, we iterate through the elements in our list and print each element.
my_names = ['Andrew', 'Suzanne', 'Eliza', 'Seb']
for element in my_names:
print(element)
### Iterating Over an Array
%%HTML
<div style="text-align: center">
<iframe width="560" height="315" src="https://youtube.com/embed/Uu_jm-Ukp_U" frameborder="0" allowfullscreen></iframe>
</div>
In the same way we can iterate over lists, we can iterate over arrays row-by-row. Let’s create a 2-dimensional array called `vital_stats` with the weights and heights of three individuals. The first row of the array will be their weights, and the second row their heights. The third will be their names.
weights = np.array([70, 60, 90])
heights = np.array([1.67, 1.77, 1.78])
names = np.array(['Iggy', 'David', 'Lou'])
vital_stats = np.array((weights, heights, names))
print(vital_stats)
We see that in a for loop we can iterate over the rows in our array.
for index in vital_stats:
print(index)
What we really want to do is iterate over columns. Luckily there is a transpose attribute (`.T`) built into NumPy arrays that produces the transposed array (i.e., columns and rows swapped).
for index in vital_stats.T:
print(index)
We can cycle through the rows of this transposed array (with the rows now corresponding to the weight, height and name of each individual), and pass these values to the `bmi` function that we wrote. Each person’s weight is in column 0 of the array, and each person’s height in column 1. Their name is in column 2. The following loop cycles through our NumPy array person by person and calls the `bmi` function for each person in the array before printing out the result (rounded to zero decimal places using the `round` function). Note that the elements in the array we have created are all of type `str` as this is the only way to represent the elements as being of the same type (which is a requirement of NumPy arrays). We can convert each number that is currently of type `str` to a number with `float` so that we can pass it to the `bmi` function.
for person in vital_stats.T:
weight = float(person[0])
height = float(person[1])
print(person[2], 'has a BMI of', round(bmi(weight, height)))
The following will be a group-based activity which you will do in class. Imagine three experiments. Each is a between participants design with two groups. For Experiments 1, 2, and 3 the mean of group 1 is always 500. For Experiments 1, 2, and 3 the means of group 2 are 485, 490, and 495 respectively. The pooled SD for all groups is 10. Write the code that will produce Cohen's d from a `numpy` array that contains the means and the pooled SD for each of the three experiments.
:::{admonition} One possible solution is below.
:class: dropdown
experiment1 = np.array([500, 485, 10])
experiment2 = np.array([500, 490, 10])
experiment3 = np.array([500, 495, 10])
all_experiments = np.array((experiment1, experiment2, experiment3))
def cohen_d(mean1, mean2, sd):
effect_size = (mean1-mean2)/sd
return effect_size
for experiment in all_experiments:
mean1 = experiment[0]
mean2 = experiment[1]
sd = experiment[2]
print("For a group one mean of", mean1, "and a group two mean of", mean2, "and a pooled sd of ", sd, "Cohen's d is", cohen_d(mean1, mean2, sd))
:::
## Operators
%%HTML
<div style="text-align: center">
<iframe width="560" height="315" src="https://youtube.com/embed/2UphPf9kpew" frameborder="0" allowfullscreen></iframe>
</div>
The kinds of logical operators that you've come across in `R` are also used in `Python`. They result in `True` or `False`. For example, for variables `a` and `b`:
Equals: a == b
Not Equals: a != b
Less than: a < b
Less than or equal to: a <= b
Greater than: a > b
Greater than or equal to: a >= b
These expressions can be used in lots of contexts including in control flow statements where evaluation of an expression determines how the control flow statement is interpreted. In Python the logical operator AND is represented by `and`, OR by `or`, and NOT by `not`.
a = 5
b = 6
a == b
a != b
a < b
a <= b
a > b
a >= b
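Python's boolean operators can combine these comparisons. Using the same `a` and `b` as above, the lines below evaluate to `True`, `False` and `True` respectively.
a < b and a != b
a > b or a == b
not a > b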
You can also compare NumPy arrays element by element using logical operators as follows.
a = np.array([1, 2, 3])
b = np.array([3, 2, 1])
a == b
a < b
You can also apply arithmetic operations to arrays on an element by element basis.
a + b
## Control Flow Statements - While Loops
%%HTML
<div style="text-align: center">
<iframe width="560" height="315" src="https://youtube.com/embed/dmteK2wKjv4" frameborder="0" allowfullscreen></iframe>
</div>
Code inside a while loop runs as long as the while condition evaluates to `True`. For example, we set a counter, `i`, to equal zero. The code in the `while` loop will run as long as the counter, `i`, does not equal 2. Each time we run the code in the while loop, we increment `i` by 1. This means that this loop will run exactly twice, each time printing the `i`th element in the list.
my_names = ['Andrew', 'Suzanne', 'Eliza', 'Seb']
i = 0
while i != 2:
print(my_names[i])
i += 1
## Control Flow Statements - Conditionals
%%HTML
<div style="text-align: center">
<iframe width="560" height="315" src="https://youtube.com/embed/WBDUYuQ-r7I" frameborder="0" allowfullscreen></iframe>
</div>
`If` statements (and the related `elif` and `else`) are conditional statements such that if they evaluate as `True` the associated code chunk is run, else some other code chunk is run. In the following example, the `else` statement catches any cases where neither the `if` nor `elif` statements evaluate as `True`.
my_first_number = 5
my_second_number = 6
if my_first_number < my_second_number:
print(my_first_number, 'is less than', my_second_number)
elif my_first_number > my_second_number:
print(my_first_number, 'is greater than', my_second_number)
else:
print(my_first_number, 'is equal to', my_second_number)
What do you think will happen if you set the values of both numbers to be the same - and delete the last two lines of code (i.e., the line beginning `else:` and the following one)?
```{admonition} Click the button to reveal answer
:class: dropdown
Nothing happens. The `if` statement evaluates to False, not True, as does the `elif` statement. As nothing evaluates to True (and we didn't write any code that deals with other outcomes) nothing happens.
```
|
# -*- coding:utf-8 -*-
# Created by Hans-Thomas on 2011-05-11.
#=============================================================================
# logger.py --- Logger meta class
#=============================================================================
import logging
class MetaLogger(type):
def __new__(self, classname, bases, classdict):
classdict.setdefault('log', logging.getLogger('%s.%s' % (classdict['__module__'], classname)))
return type.__new__(self, classname, bases, classdict)
class Logger(object, metaclass=MetaLogger):
    pass  # Python 3 metaclass syntax; the Python 2 style `__metaclass__` attribute is silently ignored by Python 3
#.............................................................................
# logger.py
|
from typing import Any
try:
import cv2
import numpy as np
except ImportError:
raise ImportError(
"cv2 is not installed. Please install it with `pip install opencv-python`."
)
try:
import mss
except ImportError:
raise ImportError("mss is not installed. Please install it with `pip install mss`.")
try:
import pyautogui
except ImportError:
raise ImportError(
"pyautogui is not installed. Please install it with `pip install pyautogui`."
)
from gurun.node import Node
class ScreenshotPAG(Node):
def run(self, filename: str = None, *args: Any, **kwargs: Any) -> np.ndarray:
image = pyautogui.screenshot()
if filename is not None:
image.save(filename)
return cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
class ScreenshotMMS(Node):
def __init__(self, monitor: int = 0, **kwargs: Any) -> None:
super().__init__(**kwargs)
self._monitor = monitor
def run(self, filename: str = None, *args: Any, **kwargs: Any) -> np.ndarray:
with mss.mss() as sct:
output = np.array(sct.grab(sct.monitors[self._monitor]))[:, :, :3]
if filename is not None:
cv2.imwrite(filename, output)
return output
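# Minimal usage sketch (illustrative; assumes a display is available and that
# gurun's Node base class needs no extra constructor arguments):
# node = ScreenshotMMS(monitor=0)
# frame = node.run(filename="capture.png")  # BGR ndarray, also written to disk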
|
import requests
class Nacos:
"""
Nacos api
"""
def __init__(self, url, namespace):
"""
:param url: address of the Nacos server
:param namespace: namespace of the application
"""
self.url = url
self.namespace = namespace
def get_config(self, data_id, group="DEFAULT_GROUP"):
"""
Get the value of a configuration item.
:param data_id: id of the config
:param group: group the config belongs to
:return: the config value; raises ConfigNotFoundException if the config does not exist
"""
params = {"tenant": self.namespace, "dataId": data_id, "group": group}
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}
config_request = requests.get(url=f"{self.url}/nacos/v1/cs/configs", params=params, headers=headers)
config_value = config_request.text
if config_value == "config data not exist\n":
raise ConfigNotFoundException(data_id)
return config_value
class ConfigNotFoundException(Exception):
def __init__(self, config_name):
self.config_name = config_name
def __str__(self):
return f"Error!!!Can't get {self.config_name} from Nacos Server,please check your dataId."
|
from typing import List
from sqlalchemy.orm import Session
from fastapi import APIRouter, Depends, status, Response
from util.public import get_dirs_by_uid
from schema import DirectoryOut, DirectoryBase, BookmarkOut
from models import User, Directory
from database import get_db
from router.client.c_auth import get_current_user
from util.public import get_dir_by_name_and_uid, get_dir_by_id
from util import error
router = APIRouter(prefix="/client/d", tags=["Client-Directory"])
@router.post("/", response_model=DirectoryOut)
async def create_directory(payload: DirectoryBase, db: Session = Depends(get_db), c_user: User = Depends(get_current_user)):
dir_in_db = await get_dir_by_name_and_uid(name=payload.name, uid=c_user.id, db=db)
if dir_in_db:
error.existed_error(f"directory '{payload.name}' existed")
new_dir = Directory(name=payload.name,
                    description=payload.description,
                    user_id=c_user.id)
db.add(new_dir)
db.commit()
res = await get_dir_by_name_and_uid(name=payload.name, uid=c_user.id, db=db)
return res
@router.delete("/{id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_dir_by_id(id: str, c_user: User = Depends(get_current_user), db: Session = Depends(get_db)):
dir_in_db: Directory = await get_dir_by_id(id=id, db=db)
if not dir_in_db:
error.notfound_error(f"not found directory '{id}'")
if dir_in_db.user_id != c_user.id:
error.auth_failed("permission denied")
db.delete(dir_in_db)
db.commit()
return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.put("/{id}", response_model=DirectoryOut)
async def update_directory(id: str, payload: DirectoryBase, c_user: User = Depends(get_current_user), db: Session = Depends(get_db)):
dir_in_db: Directory = await get_dir_by_id(id=id, db=db)
if not dir_in_db:
error.notfound_error(f"not found directory '{id}'")
if dir_in_db.user_id != c_user.id:
error.auth_failed("permission denied")
dup = await get_dir_by_name_and_uid(name=payload.name, uid=c_user.id, db=db)
if dup and str(dup.id) != id:
error.existed_error(f"directory '{payload.name}' existed")
dir_in_db.name = payload.name
dir_in_db.description = payload.description
db.commit()
return await get_dir_by_id(id=id, db=db)
@router.get("/", response_model=List[DirectoryOut])
async def get_directories(c_user: User = Depends(get_current_user), db: Session = Depends(get_db)):
return await get_dirs_by_uid(uid=c_user.id, db=db)
@router.get("/{id}", response_model=List[BookmarkOut])
async def get_bookmarks_by_dir_id(id: str, db: Session = Depends(get_db), c_user: User = Depends(get_current_user)):
dir_in_db: Directory = await get_dir_by_id(id=id, db=db)
if not dir_in_db:
error.notfound_error(f"not found directory '{id}'")
if dir_in_db.user_id != c_user.id:
error.auth_failed("permission denied")
return dir_in_db.bookmarks
|
RSA_KEY_LEN = 512
AES_KEY_LEN = 32  # tentatively set to 32
UPDATE_KEY_DURATION = 60 * 10  # interval between key updates (seconds)
|
from django.urls import path
from . import views
app_name = 'oidc_provider'
urlpatterns = [
path('.well-known/<str:service>', views.well_known,
name="_well_known"),
path('registration', views.registration,
name="registration"),
path('registration_read', views.registration_read,
name="registration_read"),
path('authorization', views.authorization,
name="authorization"),
path('verify/oidc_user_login/', views.verify_user,
name="verify_user"),
path('token', views.token, name="token"),
path('userinfo', views.userinfo, name="userinfo"),
path('introspection', views.introspection, name="introspection"),
path('check_session_iframe', views.check_session_iframe,
name="check_session_iframe"),
path('session', views.session_endpoint, name="session"),
# logout
path('verify_logout', views.verify_logout,
name="verify_logout"),
path('post_logout', views.post_logout, name="post_logout"),
path('rp_logout', views.rp_logout, name="rp_logout"),
]
|
#!/usr/bin/env python3
"""Simple example on how to move the robot."""
import json
import sys
import robot_fingers
from rrc_iprl_package.example import move_up_and_down
# Number of actions in one episode (1000 actions per second for two minutes)
episode_length = 2 * 60 * 1000
def main():
# the difficulty level and the goal pose (as JSON string) are passed as
# arguments
difficulty = int(sys.argv[1])
goal_pose_json = sys.argv[2]
goal = json.loads(goal_pose_json)
print(
"Goal: %s/%s (difficulty: %d)"
% (goal["position"], goal["orientation"], difficulty)
)
# create the robot frontend
frontend = robot_fingers.TriFingerPlatformFrontend()
# move the robot
move_up_and_down(frontend, episode_length)
# It is possible to create custom files in "/output"
with open("/output/hello.txt", "w") as fh:
fh.write("Hello there!\n")
if __name__ == "__main__":
main()
|
import os
import re
import pandas as pd
import numpy as np
import zipfile
def strftime_to_re_pattern(strftime_format):
"""infer the regular expression pattern of a strftime format string
Parameters
----------
strftime_format: str
string with the strftime format of the whatsapp file
Returns
-------
re_pattern: str
regular expression pattern
"""
re_pattern = strftime_format.replace('%d', '\\d\\d')
re_pattern = re_pattern.replace('/', '\\/')
re_pattern = re_pattern.replace('[', '\\[')
re_pattern = re_pattern.replace(']', '\\]')
re_pattern = re_pattern.replace('%m', '\\d\\d')
re_pattern = re_pattern.replace('%b', '[A-Z][a-z]{2}')
re_pattern = re_pattern.replace('%Y', '\\d\\d\\d\\d')
re_pattern = re_pattern.replace('%y', '\\d\\d')
re_pattern = re_pattern.replace('%H', '\\d\\d')
re_pattern = re_pattern.replace('%M', '\\d\\d')
re_pattern = re_pattern.replace('%S', '\\d\\d')
re_pattern = '(' + re_pattern + ')'
return re_pattern
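# Worked example (added for illustration, not part of the original module): the default
# format '[%d/%m/%Y, %H:%M:%S]' is turned into the pattern
# '(\[\d\d\/\d\d\/\d\d\d\d, \d\d:\d\d:\d\d\])'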
def read_whatsapp(whatsapp_file, datetime_pattern = None,
user_sep=':', strftime_format = '[%d/%m/%Y, %H:%M:%S]',
encoding="utf8", max_non_message_lines=5):
"""
read a whatsapp data file into a pandas dataframe
Parameters
----------
whatsapp_file: str
path of the exported data from Whatsapp, can be a .zip or .txt file
datetime_pattern: str, optional
regular expression to recognize datetime, if None datetime_pattern is inferred from strftime_format
user_sep: str, optional
the character between the user and the message in the whatsapp file
strftime_format: str, optional
the format of the date in the whatsapp file
encoding: str, optional
encoding of the whatsapp txt file
max_non_message_lines: int, optional
the number of non message lines at the beginning of the file. For example
.. was added to the conversation \n
Returns
-------
time_user_df: pandas.DataFrame
DataFrame with all the data from you whatsapp conversation
"""
#check file type
if whatsapp_file.endswith('txt'):
whatsapp_txt = whatsapp_file
elif whatsapp_file.endswith('.zip'):
whatsapp_txt = unzip_whatsapp_file(whatsapp_file)
else:
raise FileNotFoundError('could not open file: %s'%whatsapp_file)
if datetime_pattern is None:
datetime_pattern = strftime_to_re_pattern(strftime_format)
re_datetime = re.compile(datetime_pattern)
datetimeuser_pattern = datetime_pattern + '(.*?)' + user_sep
re_datetimeuser = re.compile(datetimeuser_pattern)
with open(whatsapp_txt, 'r', encoding=encoding) as fo:
line = fo.readline()
# filter weird intro line (without a user)
intro_line = True
counter = 0
while intro_line and counter < max_non_message_lines:
try:
check_date_pattern(line, re_datetime)
check_date_user_pattern(line, re_datetimeuser)
intro_line = False
except ValueError:
line = fo.readline()
counter += 1
# print an error if the patterns are still not recognized after the allowed number of lines
if counter == max_non_message_lines:
check_date_pattern(line, re_datetime)
check_date_user_pattern(line, re_datetimeuser)
# split the first recognized message line into date, user and message
empty, date, user, message = re_datetimeuser.split(line)
datetime_list = [date]
user_list = [user]
message_list = [message]
# loop over all other messages
for line in fo:
pattern_match = re_datetimeuser.search(line)
if pattern_match:
empty, date, user, message = re_datetimeuser.split(line)
datetime_list.append(date)
user_list.append(user)
message_list.append(message)
else:
message_list[-1] = message_list[-1] + line
time_user_df = pd.DataFrame(data={'datetime': datetime_list,
'user': user_list,
'text': message_list,
'message': [1] * len(user_list)}, )
time_user_df.index = pd.to_datetime(time_user_df.datetime,
format=strftime_format, dayfirst=True)
return time_user_df
def check_date_pattern(line, re_datetime):
""" Checks if the date pattern can be recognized in a string. If not a ValueError is raised.
Parameters
----------
line: str
line to check date pattern in
re_datetime: _sre.SRE_Pattern
datetime pattern as a compiled regular expression
Returns
-------
"""
if re_datetime.search(line):
print('date pattern recognized')
else:
raise ValueError('pattern not recognized')
def check_date_user_pattern(line, re_datetimeuser):
""" Checks if the date user pattern can be recognized in a string. If not a ValueError is raised.
Parameters
----------
line: str
line to check date pattern in
re_datetimeuser: _sre.SRE_Pattern
datetimeuser pattern as a compiled regular expression
Returns
-------
"""
if re_datetimeuser.search(line):
print('user separator recognized')
else:
raise ValueError('pattern not recognized')
def check_anonymized_dataset(time_user_df):
"""
check if a pandas dataframe is an anonymized dataset. If so a KeyError is raised.
Parameters
----------
time_user_df: pandas.DataFrame
dataframe with the whatsapp data
Returns
-------
"""
if 'text' not in time_user_df.columns:
print('Making a wordcloud only works with a dataset in which the text is included.')
print('Therefore it does not work on anonymized datasets.')
raise KeyError('working with anonymised dataset')
def unzip_whatsapp_file(whatsapp_file):
"""
unzips a whatsapp .zip file and returns the path of the _chat.txt file that was extracted from the zip
Parameters
----------
whatsapp_file: str
path to a .zip file with the exported data from Whatsapp.
Returns
-------
str
path to the _chat.txt file that was extracted from the .zip
"""
with zipfile.ZipFile(whatsapp_file, 'r') as zip_ref:
    txt_file = zip_ref.namelist()[0]
    zip_ref.extractall(os.path.split(whatsapp_file)[0])
zip_dir = os.path.split(whatsapp_file)[0]
return os.path.join(zip_dir, txt_file)
|
# Copyright 2022 by Autodesk, Inc.
# Permission to use, copy, modify, and distribute this software in object code form
# for any purpose and without fee is hereby granted, provided that the above copyright
# notice appears in all copies and that both that copyright notice and the limited
# warranty and restricted rights notice below appear in all supporting documentation.
#
# AUTODESK PROVIDES THIS PROGRAM "AS IS" AND WITH ALL FAULTS. AUTODESK SPECIFICALLY
# DISCLAIMS ANY IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE.
# AUTODESK, INC. DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE
# UNINTERRUPTED OR ERROR FREE.
import os
import adsk.core
from .testUtils import run_test, get_test_base_dir
from ... import config
from ...lib import fusion360utils as futil
app = adsk.core.Application.get()
ui = app.userInterface
CMD_NAME = 'Run Test'
CMD_ID = f'{config.COMPANY_NAME}_{config.ADDIN_NAME}_run'
CMD_Description = 'Run a Test Case'
IS_PROMOTED = True
# Global variables by referencing values from /config.py
WORKSPACE_ID = config.design_workspace
TAB_ID = config.design_tab_id
TAB_NAME = config.design_tab_name
PANEL_ID = config.test_panel_id
PANEL_NAME = config.test_panel_name
PANEL_AFTER = config.test_panel_after
# Resource location for command icons, here we assume a sub folder in this directory named "resources".
ICON_FOLDER = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'resources', 'run', '')
# Holds references to event handlers
local_handlers = []
# Executed when add-in is run.
def start():
# ******************************** Create Command Definition ********************************
cmd_def = ui.commandDefinitions.addButtonDefinition(CMD_ID, CMD_NAME, CMD_Description, ICON_FOLDER)
# Add command created handler. The function passed here will be executed when the command is executed.
futil.add_handler(cmd_def.commandCreated, command_created)
# ******************************** Create Command Control ********************************
# Get target workspace for the command.
workspace = ui.workspaces.itemById(WORKSPACE_ID)
# Get target toolbar tab for the command and create the tab if necessary.
toolbar_tab = workspace.toolbarTabs.itemById(TAB_ID)
if toolbar_tab is None:
toolbar_tab = workspace.toolbarTabs.add(TAB_ID, TAB_NAME)
# Get target panel for the command and create the panel if necessary.
panel = toolbar_tab.toolbarPanels.itemById(PANEL_ID)
if panel is None:
panel = toolbar_tab.toolbarPanels.add(PANEL_ID, PANEL_NAME, PANEL_AFTER, False)
# Create the command control, i.e. a button in the UI.
control = panel.controls.addCommand(cmd_def)
# Now you can set various options on the control such as promoting it to always be shown.
control.isPromoted = IS_PROMOTED
# Executed when add-in is stopped.
def stop():
# Get the various UI elements for this command
workspace = ui.workspaces.itemById(WORKSPACE_ID)
panel = workspace.toolbarPanels.itemById(PANEL_ID)
toolbar_tab = workspace.toolbarTabs.itemById(TAB_ID)
command_control = panel.controls.itemById(CMD_ID)
command_definition = ui.commandDefinitions.itemById(CMD_ID)
# Delete the button command control
if command_control:
command_control.deleteMe()
# Delete the command definition
if command_definition:
command_definition.deleteMe()
# Delete the panel if it is empty
if panel.controls.count == 0:
panel.deleteMe()
# Delete the tab if it is empty
if toolbar_tab.toolbarPanels.count == 0:
toolbar_tab.deleteMe()
# Function to be called when a user clicks the corresponding button in the UI.
def command_created(args: adsk.core.CommandCreatedEventArgs):
futil.log(f'{CMD_NAME} Command Created Event')
inputs = args.command.commandInputs
drop_style = adsk.core.DropDownStyles.TextListDropDownStyle
drop_down_input = inputs.addDropDownCommandInput('test_name', 'Test Name', drop_style)
test_base_dir = get_test_base_dir()
last_time = 0
for file_name in os.listdir(test_base_dir):
full_path = os.path.join(test_base_dir, file_name)
if os.path.isdir(full_path):
test_results = os.path.join(full_path, 'results.json')
if os.path.exists(test_results):
list_item = drop_down_input.listItems.add(file_name, False)
this_m_time = os.path.getmtime(test_results)
if this_m_time > last_time:
list_item.isSelected = True
last_time = this_m_time
if drop_down_input.listItems.count == 0:
drop_down_input.listItems.add(f'No Tests Found in: {test_base_dir}', True)
args.command.isOKButtonVisible = False
# Connect to the events that are needed by this command.
futil.add_handler(args.command.execute, command_execute, local_handlers=local_handlers)
futil.add_handler(args.command.destroy, command_destroy, local_handlers=local_handlers)
# This function will be called when the user clicks the OK button in the command dialog.
def command_execute(args: adsk.core.CommandEventArgs):
futil.log(f'{CMD_NAME} Command Execute Event')
inputs = args.command.commandInputs
drop_down_input: adsk.core.DropDownCommandInput = inputs.itemById('test_name')
test_name = drop_down_input.selectedItem.name
run_test(test_name)
# This function will be called when the user completes the command.
def command_destroy(args: adsk.core.CommandEventArgs):
global local_handlers
local_handlers = []
futil.log(f'{CMD_NAME} Command Destroy Event')
|
from flask import Flask
from flask import render_template
from flask_sqlalchemy import SQLAlchemy
from models.DB import db_session
from flask_cors import CORS
from models.objects import SystemUser
app = Flask(__name__)
#TODO- Generate new key ie: print(os.urandom(24))
app.config['SECRET_KEY'] = '\xf2K\xed\xa3\x80r\x03\xd0\xbbv\xbc\x86)7\xbc[#\xf1\xcbT;b\xd6\x82'
db = SQLAlchemy(app)
# CORS
CORS(app)
# API Blueprints
from api.v1.reading import apiReading
app.register_blueprint(apiReading)
from api.v1.system_user import apiSystemUser
app.register_blueprint(apiSystemUser)
@app.route('/test')
def test():
return '<h1>Flask is functioning.</h1>'
@app.route('/testDB')
def testDB():
user = db_session.query(SystemUser).filter(SystemUser.system_user_id == 1).one()
if user.system_user_id is not None:
return '<h1>DB Connected</h1>'
else:
return '<h2>Error connecting to database.</h2>'
|
"""
Copyright 2018 InfAI (CC SES)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
try:
import websockets
from serial_gateway.logger import root_logger
except ImportError as ex:
exit("{} - {}".format(__name__, ex.msg))
from threading import Thread
import asyncio, time
from asyncio.subprocess import PIPE, STDOUT
logger = root_logger.getChild(__name__)
class WebsocketConsole(Thread):
_source = None
def __init__(self, main_loop):
super().__init__()
self._main_loop = main_loop
self.start()
@staticmethod
def setSource(src):
__class__._source = src
async def send(self, websocket, path):
while True:
if __class__._source:
file_to_tail = __class__._source
__class__._source = None
tail_process = await asyncio.create_subprocess_exec('tail', '-F', file_to_tail, stdout=PIPE, stderr=STDOUT, loop=self._event_loop)
while True:
try:
line = await asyncio.wait_for(tail_process.stdout.readline(), timeout=0.4, loop=self._event_loop)
if line and websocket.open:
try:
line = line.decode().replace('\n', '').replace('\r', '')
await websocket.send(line)
except Exception as ex:
logger.error("could not send data - {}".format(ex))
break
except (TimeoutError, asyncio.TimeoutError):
pass
except Exception as ex:
logger.error(ex)
try:
await websocket.ping()
except Exception as ex:
if not any(code in str(ex) for code in ["1000", "1001"]):
logger.error(ex)
break
tail_process.kill()
await tail_process.wait()
break
else:
try:
await websocket.ping()
await asyncio.sleep(1)
except Exception as ex:
if not any(code in str(ex) for code in ["1000", "1001"]):
logger.error(ex)
break
def run(self):
while not self._main_loop.is_running():
time.sleep(1)
try:
self._event_loop = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
logger.debug("no event loop found")
self._event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(self._event_loop)
logger.debug("created new event loop")
try:
server = websockets.serve(self.send, '0.0.0.0', 5678)
self._event_loop.run_until_complete(server)
self._event_loop.run_forever()
except Exception as ex:
logger.error(ex)
logger.error('websocket console exited')
|
import falcon
import json
import jwt
import os
from .dataset_fixtures import *
def test_add_commit_info(client):
ds_id = 'ds000001'
file_data = 'Test annotating requests with user info'
name = 'Test User'
email = 'user@example.com'
user = {
'name': name,
'email': email,
'sub': '123456',
'admin': False
}
jwt_secret = 'shhhhh'
os.environ['JWT_SECRET'] = jwt_secret
access_token = jwt.encode(user, jwt_secret).decode('utf-8')
cookie = 'accessToken={}'.format(access_token)
headers = {
'Cookie': cookie
}
response = client.simulate_post(
'/datasets/{}/files/USER_ADDED_FILE'.format(ds_id), body=file_data, headers=headers)
assert response.status == falcon.HTTP_OK
response = client.simulate_post(
'/datasets/{}/draft'.format(ds_id), body=file_data, headers=headers)
assert response.status == falcon.HTTP_OK
response_content = json.loads(response.content)
assert response_content['name'] == name
assert response_content['email'] == email
def test_is_dirty(client, new_dataset):
ds_id = os.path.basename(new_dataset.path)
# Check if new_dataset is not dirty
response = client.simulate_get(
'/datasets/{}/draft'.format(ds_id))
assert response.status == falcon.HTTP_OK
assert json.loads(response.content)['partial'] == False
# Make the dataset dirty
response = client.simulate_post(
'/datasets/{}/files/NEW_FILE'.format(ds_id), body='some file data')
assert response.status == falcon.HTTP_OK
# Check if partial state is now true
response = client.simulate_get(
'/datasets/{}/draft'.format(ds_id))
assert response.status == falcon.HTTP_OK
assert json.loads(response.content)['partial'] == True
|
import os
import importlib
import pytest
def create_usage_test(dash_duo, filename, dir_name='usage'):
app = importlib.import_module(filename).app
dash_duo.start_server(app)
dash_duo.wait_for_element_by_id("cytoscape", 20)
directory_path = os.path.join(
os.path.dirname(__file__),
'screenshots',
dir_name
)
# Create directory if it doesn't already exist
if not os.path.exists(directory_path):
os.makedirs(directory_path)
dash_duo.driver.save_screenshot(os.path.join(
os.path.dirname(__file__),
'screenshots',
dir_name,
filename + '.png'
))
@pytest.mark.parametrize('name', [
'usage-advanced', 'demos.usage-animated-bfs',
'demos.usage-breadthfirst-layout', 'demos.usage-compound-nodes', 'usage-events',
'usage-elements', 'demos.usage-pie-style', 'usage', 'usage-stylesheet',
'demos.usage-initialisation', 'demos.usage-linkout-example', 'demos.usage-image-export',
'demos.usage-responsive-graph', 'demos.usage-contextmenu', 'demos.usage-leaflet'])
def test_cyug001_usage(name, dash_duo):
create_usage_test(dash_duo, name)
|
from NetWork import NetWork
import time
'''
v0.1.1
'''
class MLManager:
def __init__(self, hp) -> None:
self.host = '127.0.0.1'
self.port = hp
pass
def check(self) -> bool:  # for now, recommended for testing use only
resp = NetWork.pureGet([self.port, "check"])
if resp == "okok":
return True
else:
return False
def push(self, channel, d) -> str:
log = NetWork.loopPost([self.port, "push", channel], {'data': d}, loopFor=NetWork.NETERRorTO)
return log
def get(self, channel) -> str:
resp = NetWork.loopGet([self.port, "get", channel], loopFor=NetWork.NETERRorTO)
return resp[4:]
def waitForServer(self) -> None:
    while not self.check():
        print("Server not started yet")
        time.sleep(0.1)
def waitForSignal(self, sig, value) -> None:
passReturn = "okok" + str(value)
get = NetWork.loopGet([self.port, "getSignal", sig], loopFor=NetWork.ONLY4NETERR)
while get != passReturn:
time.sleep(0.05)
get = NetWork.loopGet([self.port, "getSignal", sig], loopFor=NetWork.ONLY4NETERR)
print("得到一个不行的", get)
def setSignal(self, sig, value) -> str:
log = NetWork.loopPost([self.port, "setSignal", sig], {'data': value}, loopFor=NetWork.ONLY4NETERR)
return log
    # Used to send a GET request (an internal method), not to fetch an element.
    ''' After sending a request there is no response.
    Possible cases: server not started -> print a hint that the server appears
                    not to be started, and wait
                    server not responding -> wait for the response
    '''
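# Illustrative usage sketch (a minimal example, assuming a compatible NetWork
# server is already listening on the given port; "images" is a hypothetical
# channel name):
#
#     manager = MLManager(8000)
#     manager.waitForServer()
#     manager.push("images", some_payload)
#     latest = manager.get("images")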
|
import pandas as pd
import numpy as np
from scipy.stats import entropy
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import seaborn as sn
import mdscaling as mds
# Loading Directed Bipartite Twitter graphs
DG={}
for country in ['chile','france']:
DG[country] = mds.DiBipartite('datasets/twitter_%s.csv'%country)
# Computing Correspondence Analysis embedding
for country in ['chile','france']:
DG[country].CA()
# Plotting embeddings for countries
custom_legend=[Line2D([0], [0], color='red', marker='+', lw=0,alpha=1.0, label='MPs'),
Line2D([0], [0], color='deepskyblue', lw=8,alpha=0.6, label='Followers'),]
for country in ['chile','france']:
g = sn.jointplot(x=1,y=0, data=DG[country].embedding[DG[country].embedding.index.isin(DG[country].bottom_nodes_list)], space=0, color="deepskyblue",kind='hex',ratio=10)
    cbar_ax = g.fig.add_axes([0.15, .4, .05, .5])  # x, y, width, height
plt.colorbar(cax=cbar_ax)
# top
g.ax_joint.plot(DG[country].embedding[DG[country].embedding.index.isin(DG[country].top_nodes_list)][1],
DG[country].embedding[DG[country].embedding.index.isin(DG[country].top_nodes_list)][0],
'+',color='r',mew=1.0,ms=7)
g.ax_joint.legend(handles=custom_legend,loc='lower right',fontsize=12)
g.ax_joint.set_xlabel('PC2')
g.ax_joint.set_ylabel('PC1')
g.ax_joint.set_title('Twitter (%s)'%country,fontsize=14)
g.ax_joint.set_xlim((-3,3))
g.ax_joint.set_ylim((-3,3))
plt.tight_layout()
plt.savefig('datasets/twitter_%s.pdf'%country)
plt.clf()
plt.close()
|
# -*- coding: utf-8 -*-
description = 'common detector devices provided by QMesyDAQ'
group = 'lowlevel'
devices = dict(
timer = device('nicos.devices.generic.VirtualTimer',
description = 'QMesyDAQ timer',
lowlevel = True,
unit = 's',
fmtstr = '%.1f',
),
mon1 = device('nicos.devices.generic.VirtualCounter',
description = 'QMesyDAQ monitor 1',
type = 'monitor',
lowlevel = True,
fmtstr = '%d',
),
# mon2 = device('nicos.devices.generic.VirtualCounter',
# type = 'monitor',
# lowlevel = True,
# fmtstr = '%d',
# ),
det1 = device('nicos.devices.generic.VirtualCounter',
type = 'counter',
lowlevel = True,
fmtstr = '%d',
),
det2 = device('nicos.devices.generic.VirtualCounter',
type = 'counter',
lowlevel = True,
fmtstr = '%d',
),
det3 = device('nicos.devices.generic.VirtualCounter',
type = 'counter',
lowlevel = True,
fmtstr = '%d',
),
# det4 = device('nicos.devices.generic.VirtualCounter',
# type = 'counter',
# lowlevel = True,
# fmtstr = '%d',
# ),
# det5 = device('nicos.devices.generic.VirtualCounter',
# type = 'counter',
# lowlevel = True,
# fmtstr = '%d',
# ),
events = device('nicos.devices.generic.VirtualCounter',
description = 'QMesyDAQ Events channel',
type = 'counter',
lowlevel = True,
fmtstr = '%d',
),
image = device('nicos.devices.generic.VirtualImage',
description = 'QMesyDAQ Image',
fmtstr = '%d',
pollinterval = 86400,
lowlevel = True,
sizes = (1, 5),
),
det = device('nicos.devices.generic.Detector',
# description = 'Puma detector device (5 counters)',
description = 'Puma detector QMesydaq device (3 counters)',
timers = ['timer'],
# monitors = ['mon1', 'mon2'],
monitors = ['mon1'],
# counters = ['det1', 'det2', 'det3', 'det4', 'det5'],
counters = ['det1', 'det2', 'det3'],
images = [],
maxage = 1,
pollinterval = 1,
),
)
startupcode = '''
SetDetectors(det)
'''
|
"""
Application global configuration.
"""
from dataclasses import dataclass
from .interfaces import IStdLibFilter
class AppConfigurationAlreadySetException(Exception):
"""
Exception when you try to set app configuration twice.
"""
@dataclass(frozen=True)
class AppConfiguration:
"""
    Configuration defined at application level (generally in main).
"""
std_lib_filter: IStdLibFilter
class AppConfigurationSingleton:
"""
App configuration singleton
"""
_instance: AppConfiguration
_already_set: bool = False
@classmethod
def define_app_configuration(cls, configuration: AppConfiguration) -> None:
"""
Define the application configuration to use during this run.
Can only be called once.
"""
if cls._already_set:
raise AppConfigurationAlreadySetException(
"app configuration can be set once"
)
cls._instance = configuration
cls._already_set = True
@classmethod
def get_instance(cls) -> AppConfiguration:
"""
Get current app configuration instance.
        Raises AttributeError if the configuration has not been defined.
"""
return cls._instance
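# Illustrative usage sketch (MyStdLibFilter is a hypothetical IStdLibFilter
# implementation, not part of this module):
#
#     AppConfigurationSingleton.define_app_configuration(
#         AppConfiguration(std_lib_filter=MyStdLibFilter()))
#     config = AppConfigurationSingleton.get_instance()
#     # a second define_app_configuration() call raises
#     # AppConfigurationAlreadySetException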
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
# This is the version of this source code.
manual_verstr = "1.9"
auto_build_num = "1"
verstr = manual_verstr + "." + auto_build_num
try:
from pyutil.version_class import Version as pyutil_Version
except (ImportError, ValueError): # pragma NO COVER
# Maybe there is no pyutil installed.
from distutils.version import LooseVersion as distutils_Version
__version__ = distutils_Version(verstr)
else: # pragma NO COVER
__version__ = pyutil_Version(verstr)
|
# Copyright 2019 Matthew Treinish
# Copyright (c) 2009 testtools developers.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import sys
import unittest
import extras
def filter_by_ids(suite_or_case, test_ids):
"""Remove tests from suite_or_case where their id is not in test_ids.
:param suite_or_case: A test suite or test case.
:param test_ids: Something that supports the __contains__ protocol.
    :return: suite_or_case, unless suite_or_case was a case that itself
        fails the predicate, in which case a new empty unittest.TestSuite
        is returned.
For subclasses of TestSuite, filtering is done by:
- attempting to call suite.filter_by_ids(test_ids)
- if there is no method, iterating the suite and identifying tests to
remove, then removing them from _tests, manually recursing into
each entry.
For objects with an id() method - TestCases, filtering is done by:
- attempting to return case.filter_by_ids(test_ids)
- if there is no such method, checking for case.id() in test_ids
and returning case if it is, or TestSuite() if it is not.
For anything else, it is not filtered - it is returned as-is.
To provide compatibility with this routine for a custom TestSuite, just
define a filter_by_ids() method that will return a TestSuite equivalent to
the original minus any tests not in test_ids.
    Similarly, to provide compatibility for a custom TestCase that does
    something unusual, define filter_by_ids to return a new TestCase object
that will only run test_ids that are in the provided container. If none
would run, return an empty TestSuite().
The contract for this function does not require mutation - each filtered
object can choose to return a new object with the filtered tests. However
because existing custom TestSuite classes in the wild do not have this
method, we need a way to copy their state correctly which is tricky:
thus the backwards-compatible code paths attempt to mutate in place rather
than guessing how to reconstruct a new suite.
"""
# Compatible objects
if extras.safe_hasattr(suite_or_case, 'filter_by_ids'):
return suite_or_case.filter_by_ids(test_ids)
# TestCase objects.
if extras.safe_hasattr(suite_or_case, 'id'):
if suite_or_case.id() in test_ids:
return suite_or_case
else:
return unittest.TestSuite()
# Standard TestSuites or derived classes [assumed to be mutable].
if isinstance(suite_or_case, unittest.TestSuite):
filtered = []
for item in suite_or_case:
filtered.append(filter_by_ids(item, test_ids))
suite_or_case._tests[:] = filtered
# Everything else:
return suite_or_case
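# Illustrative sketch of the contract described above (MyTests is a
# hypothetical TestCase class, not part of this module):
#
#     suite = unittest.TestLoader().loadTestsFromTestCase(MyTests)
#     kept = filter_by_ids(suite, {'tests.MyTests.test_a'})
#     # kept now contains only the tests whose id() is in the given set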
def iterate_tests(test_suite_or_case):
"""Iterate through all of the test cases in 'test_suite_or_case'."""
try:
suite = iter(test_suite_or_case)
except TypeError:
yield test_suite_or_case
else:
for test in suite:
for subtest in iterate_tests(test):
yield subtest
def list_test(test):
"""Return the test ids that would be run if test() was run.
When things fail to import they can be represented as well, though
we use an ugly hack (see http://bugs.python.org/issue19746 for details)
to determine that. The difference matters because if a user is
filtering tests to run on the returned ids, a failed import can reduce
the visible tests but it can be impossible to tell that the selected
test would have been one of the imported ones.
:return: A tuple of test ids that would run and error strings
describing things that failed to import.
"""
unittest_import_strs = {
'unittest2.loader.ModuleImportFailure.',
'unittest.loader.ModuleImportFailure.',
'discover.ModuleImportFailure.'
}
test_ids = []
errors = []
for test in iterate_tests(test):
        # Ugly, but this is how import failures are detected (see docstring).
for prefix in unittest_import_strs:
if test.id().startswith(prefix):
errors.append(test.id()[len(prefix):])
break
else:
test_ids.append(test.id())
return test_ids, errors
class TestProgram(unittest.TestProgram):
# defaults for testing
module = None
verbosity = 1
failfast = catchbreak = buffer = progName = None
_discovery_parser = None
def __init__(self, module='__main__', defaultTest=None, argv=None,
testRunner=None, testLoader=unittest.defaultTestLoader,
exit=False, verbosity=1, failfast=None, catchbreak=None,
buffer=None, warnings=None, tb_locals=False):
if isinstance(module, str):
self.module = __import__(module)
for part in module.split('.')[1:]:
self.module = getattr(self.module, part)
else:
self.module = module
if argv is None:
argv = sys.argv
self.exit = exit
self.failfast = failfast
self.catchbreak = catchbreak
self.verbosity = verbosity
self.buffer = buffer
self.tb_locals = tb_locals
if warnings is None and not sys.warnoptions:
# even if DeprecationWarnings are ignored by default
# print them anyway unless other warnings settings are
# specified by the warnings arg or the -W python flag
self.warnings = 'default'
else:
# here self.warnings is set either to the value passed
# to the warnings args or to None.
# If the user didn't pass a value self.warnings will
# be None. This means that the behavior is unchanged
# and depends on the values passed to -W.
self.warnings = warnings
self.defaultTest = defaultTest
# XXX: Local edit (see http://bugs.python.org/issue22860)
self.listtests = False
self.load_list = None
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.parseArgs(argv)
# XXX: Local edit (see http://bugs.python.org/issue22860)
if self.load_list:
# TODO(mtreinish): preserve existing suites (like testresources
# does in OptimisingTestSuite.add, but with a standard protocol).
# This is needed because the load_tests hook allows arbitrary
# suites, even if that is rarely used.
source = open(self.load_list, 'rb')
try:
lines = source.readlines()
finally:
source.close()
test_ids = {line.strip().decode('utf-8') for line in lines}
self.test = filter_by_ids(self.test, test_ids)
# XXX: Local edit (see http://bugs.python.org/issue22860)
if not self.listtests:
self.runTests()
else:
runner = self._get_runner()
if extras.safe_hasattr(runner, 'list'):
try:
runner.list(self.test, loader=self.testLoader)
except TypeError:
runner.list(self.test)
else:
for test in iterate_tests(self.test):
sys.stdout.write('%s\n' % test.id())
def _getParentArgParser(self):
parser = super(TestProgram, self)._getParentArgParser()
# XXX: Local edit (see http://bugs.python.org/issue22860)
parser.add_argument(
'-l', '--list', dest='listtests', default=False,
action='store_true', help='List tests rather than executing them')
parser.add_argument(
'--load-list', dest='load_list', default=None,
help='Specifies a file containing test ids, only tests matching '
'those ids are executed')
return parser
def _get_runner(self):
testRunner = self.testRunner
try:
testRunner = self.testRunner(failfast=self.failfast,
tb_locals=self.tb_locals)
except TypeError:
testRunner = self.testRunner()
        # If for some reason we failed to initialize the runner, initialize
        # it with the defaults.
if isinstance(testRunner, functools.partial):
testRunner = self.testRunner()
return testRunner
def runTests(self):
if self.catchbreak:
unittest.installHandler()
testRunner = self._get_runner()
self.result = testRunner.run(self.test)
|
import requests
import os
switch = {
409: "User is not authorized to access Asset.",
404: "Asset not found."
}
def downloadPlace(placeID, auth):
headers = {
'authority': 'assetdelivery.roblox.com',
'sec-ch-ua': '"(Not(A:Brand";v="8", "Chromium";v="99", "Google Chrome";v="99"',
'accept': 'application/json',
'x-requested-with': 'XMLHttpRequest',
'sec-ch-ua-mobile': '?0',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://assetdelivery.roblox.com/docs',
'accept-language': 'de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7,ru;q=0.6',
'cookie': ".ROBLOSECURITY=" + auth,
'dnt': '1',
'sec-gpc': '1',
}
result = requests.get("https://assetdelivery.roblox.com/v1/assetId/" + placeID, headers=headers)
if result.status_code == 200:
url = result.json()["location"]
result = requests.get(url)
if result.status_code == 200:
print("Downloading place: " + placeID)
return result.content
else:
if result.status_code in switch:
print(str(result.status_code) + " : " + switch[result.status_code])
else:
print("Error: " + result.status_code)
# save to file (skip the write if the download failed and returned None)
data = downloadPlace(os.environ['ASSETID'], os.environ['ROBLOSECURITY'])
if data:
    with open(os.getcwd() + '/.remodel/place.rbxl', 'wb') as file:
        file.write(data)
|
from abc import ABC, abstractmethod
class ByteInterface(ABC):
@abstractmethod
def convert(self):
pass
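# Minimal illustrative subclass (a sketch; the project's concrete converters
# are defined elsewhere):
#
#     class Utf8Bytes(ByteInterface):
#         def __init__(self, text):
#             self.text = text
#         def convert(self):
#             return self.text.encode('utf-8')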
|
# @date 2020-10-03
# @author Frederic Scherma
# @license Copyright (c) 2020 Dream Overflow
# Volume Profile indicator and composite
from strategy.indicator.indicator import Indicator
from strategy.indicator.models import VolumeProfile
from instrument.instrument import Instrument
from database.database import Database
from common.utils import truncate
import numpy as np
# @todo Support of evening session and overnight session.
class BaseVolumeProfileIndicator(Indicator):
"""
Single or multiple Volume Profile indicator base model.
"""
__slots__ = '_length', '_sensibility', '_volume_area', '_size', '_vps', '_current', \
'_session_offset', '_price_precision', '_tick_size', '_range', '_bins'
@classmethod
def indicator_type(cls):
return Indicator.TYPE_VOLUME
@classmethod
def indicator_class(cls):
return Indicator.CLS_INDEX
def __init__(self, name, timeframe, length=10, sensibility=10, volume_area=70):
super().__init__(name, timeframe)
self._compute_at_close = True # only at close
        self._length = length  # number of volume profiles to keep back
self._sensibility = sensibility
self._volume_area = volume_area
self._session_offset = 0.0
self._price_precision = 1
self._tick_size = 1.0
self._range = (1.0, 1.0)
self._bins = tuple()
self._current = None
self._vps = []
def setup(self, instrument):
if instrument is None:
return
self._price_precision = instrument.price_precision or 8
self._tick_size = instrument.tick_price or 0.00000001
self._session_offset = instrument.session_offset
def setup_range(self, instrument, min_price, max_price):
self._range = (min_price, max_price)
self._bins = tuple(instrument.adjust_price(price) for price in np.exp(
np.arange(np.log(min_price), np.log(max_price), self._sensibility * 0.01)))
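    # Note: with sensibility=10 the bin edges are spaced 0.1 apart in
    # log-price, so consecutive bins differ by a constant factor of
    # exp(0.1) ~= 1.105, i.e. roughly 10.5% price steps at any price level.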
@property
def length(self):
return self._length
@length.setter
def length(self, length):
self._length = length
@property
def sensibility(self):
return self._sensibility
@property
def range(self):
return self._range
@property
def bins(self):
return self._bins
@property
def current(self):
return self._current
@property
def vps(self):
return self._vps
def finalize(self, vp):
"""
Finalize the computation of the last VP and push it.
        Done on update when the last trade timestamp opens a new session.
"""
if vp is None:
return
vp.poc = BaseVolumeProfileIndicator.find_poc(vp)
# volumes arranged by price
volumes_by_price = BaseVolumeProfileIndicator.sort_volumes_by_price(vp)
        # find peaks and valleys
vp.peaks, vp.valleys = BaseVolumeProfileIndicator.basic_peaks_and_valleys_detection(
self._bins, self._sensibility, vp)
#
# internal computing
#
def adjust_price(self, price):
"""
Format the price according to the precision.
"""
if price is None:
price = 0.0
        # price rounded to the tick size, then truncated to the price precision
return truncate(round(price / self._tick_size) * self._tick_size, self._price_precision)
def bin_lookup(self, price):
# idx = int(np.log(price) * self._sensibility)
# if 0 <= idx < len(self._bins):
# return self._bins[idx]
# else:
# return None
if price < self._bins[0]:
# underflow
return None
prev = 0.0
for b in self._bins:
if b > price >= prev:
return prev
prev = b
# last bin or overflow
return self._bins[-1]
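    # Design note: this is a linear scan over the sorted bin edges; for a
    # large number of bins a bisection (e.g. numpy.searchsorted) would make
    # each lookup O(log n) instead of O(n).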
@staticmethod
def find_poc(vp):
"""
Detect the price at the max volume.
"""
poc_price = 0.0
poc_vol = 0.0
for b, v in vp.volumes.items():
if v > poc_vol:
poc_vol = v
poc_price = b
return poc_price
@staticmethod
def sort_volumes_by_price(vp):
return sorted([(b, v) for b, v in vp.volumes.items()], key=lambda x: x[0])
@staticmethod
def single_volume_area(vp, volumes_by_price, poc_price, volume_area):
"""
Simplest method to detect the volume area.
        Starting from the POC, go left and right until the inner volume is reached.
        It is not perfect because it can miss peaks that would be important to
        include, and sometimes a centered algorithm might not be the best choice.
"""
if not volumes_by_price or not poc_price:
return 0.0, 0.0
index = -1
for i, bv in enumerate(volumes_by_price):
if bv[0] == poc_price:
index = i
break
if index < 0:
return 0.0, 0.0
        low_price = poc_price
        high_price = poc_price
        sum_vols = sum(vp.volumes.values())
        in_area = sum_vols * volume_area * 0.01
        left = index
        right = index + 1  # start right of the POC so its volume is counted once
        max_index = len(volumes_by_price) - 1
        summed = 0.0
        while summed < in_area:
            if left >= 0:
                summed += volumes_by_price[left][1]
                low_price = volumes_by_price[left][0]
                left -= 1
            if right <= max_index:
                summed += volumes_by_price[right][1]
                high_price = volumes_by_price[right][0]
                right += 1
            if left < 0 and right > max_index:
                break
return low_price, high_price
@staticmethod
def basic_peaks_and_valleys_detection(src_bins, sensibility, vp):
"""
Simplest peaks and valleys detection algorithm.
"""
if not vp or not vp.volumes:
return [], []
peaks = []
valleys = []
bins = np.array(src_bins)
volumes = np.zeros(len(src_bins))
avg = np.average(list(vp.volumes.values()))
for i, b in enumerate(src_bins):
if b in vp.volumes:
volumes[i] = vp.volumes[b]
# @todo high_region, low_region detection
sens = sensibility * 0.01 * 10
last_peak = -1
last_valley = -1
for i in range(1, len(volumes)-1):
v = volumes[i]
vl = v - v * sens
vh = v + v * sens
# peaks
# if volumes[i] > avg and volumes[i-1] < volumes[i] and volumes[i+1] < volumes[i]:
# peaks.append(bins[i])
if volumes[i-1] < vl and volumes[i+1] < vl and i - last_valley > 1 and i - last_peak > 2:
peaks.append(bins[i])
last_peak = i
# valleys
# if volumes[i] < avg and volumes[i-1] > volumes[i] and (volumes[i+1] > volumes[i]):# or volumes[i+1] == 0.0):
# valleys.append(bins[i])
if volumes[i-1] > vh and volumes[i+1] > vh and i - last_peak > 1 and i - last_valley > 2:
valleys.append(bins[i])
last_valley = i
return peaks, valleys
#
# cache management
#
def load(self, strategy_trader, base_timestamp, from_date, to_date=None):
"""
        Load from DB a range of daily volume profiles, inclusive.
"""
self._vps = Database.inst().get_cached_volume_profile(
strategy_trader.trader().name, strategy_trader.instrument.market_id, strategy_trader.strategy.identifier,
self._timeframe, from_date, to_date=to_date,
sensibility=self._sensibility, volume_area=self._volume_area)
if self._vps:
if self._vps[-1].timestamp <= base_timestamp < self._vps[-1].timestamp + self._timeframe:
# current detected
self._current = self._vps.pop()
class VolumeProfileIndicator(BaseVolumeProfileIndicator):
"""
Volume Profile indicator based on OHLCs list update.
"""
def __init__(self, timeframe, length=10, sensibility=10, volume_area=70):
super().__init__("volumeprofile", timeframe, length, sensibility, volume_area)
def compute(self, timestamp, timestamps, highs, lows, closes, volumes):
# only update at close, no overwrite
delta = min(int((timestamp - self._last_timestamp) / self._timeframe) + 1, len(timestamps))
# base index
num = len(timestamps)
for b in range(num-delta, num):
# ignore non closed candles
if timestamp < timestamps[b] + self._timeframe:
break
# for any new candles
if self._current and timestamps[b] >= self._current.timestamp + self._timeframe:
self.finalize(self._current)
self._vps.append(self._current)
self._current = None
if self._current is None:
basetime = Instrument.basetime(self._timeframe, timestamps[b])
self._current = VolumeProfile(basetime, self._timeframe)
# avg price based on HLC3
hlc3 = (highs[b] + lows[b] + closes[b]) / 3
# round price to bin
lbin = self.bin_lookup(hlc3)
if lbin:
if lbin not in self._current.volumes:
# set volume to the bin
self._current.volumes[lbin] = volumes[b]
else:
# or merge
self._current.volumes[lbin] += volumes[b]
self._last_timestamp = timestamp
class TickVolumeProfileIndicator(BaseVolumeProfileIndicator):
"""
Volume Profile indicator based on tick or trade update.
"""
def __init__(self, timeframe, length=10, sensibility=10, volume_area=70):
super().__init__("tickbar-volumeprofile", timeframe, length, sensibility, volume_area)
@classmethod
def indicator_base(cls):
return Indicator.BASE_TICK
def compute(self, timestamp, tick):
# @todo session_offset, evening/overnight session
if self._current and tick[0] >= self._current.timestamp + self._timeframe:
self.finalize(self._current)
self._vps.append(self._current)
self._current = None
if self._current is None:
basetime = Instrument.basetime(self._timeframe, tick[0])
self._current = VolumeProfile(basetime, self._timeframe)
# round price to bin
lbin = self.bin_lookup(tick[3])
if lbin:
if lbin not in self._current.volumes:
# set volume to the bin
self._current.volumes[lbin] = tick[4]
else:
# or merge
self._current.volumes[lbin] += tick[4]
self._last_timestamp = timestamp
class CompositeVolumeProfile(object):
"""
Composite volume profile.
The composite volume profile is managed by a volume profile indicator,
automatically or manually.
    The timeframe is the base timeframe, not the cumulative duration.
    The cumulative duration is then timeframe x length.
"""
__slots__ = '_timeframe', '_length', '_use_current', '_vp', '_volume_profile', \
'_last_timestamp', '_last_base_timestamp'
def __init__(self, timeframe, length, volume_profile, use_current=True):
self._timeframe = timeframe
self._length = length
self._use_current = use_current
self._last_timestamp = 0.0
self._last_base_timestamp = 0.0
self._volume_profile = volume_profile
self._vp = VolumeProfile(0, timeframe)
@property
def vp(self):
return self._vp
def is_update_needed(self, timestamp, partial_update=True):
"""
        Returns True if the close timestamp was reached.
        @param timestamp Current timestamp.
        @param partial_update If True, return True each time an intermediate
            volume profile is completed, else wait until length new volume
            profiles are completed.
"""
if partial_update:
return timestamp >= self._last_base_timestamp + self._timeframe
else:
return timestamp >= self._last_base_timestamp + self._timeframe * self._length
def composite(self, timestamp):
"""
        Build a composite profile of length profiles, optionally including the current volume profile.
"""
if self._volume_profile is None or not self._volume_profile.vps:
return
volume_profile = self._volume_profile
base_index = max(-self._length, -len(volume_profile.vps))
        base_timestamp = volume_profile.vps[base_index].timestamp
cvp = VolumeProfile(Instrument.basetime(self._timeframe, base_timestamp), self._timeframe)
for vp in volume_profile.vps[base_index:]:
for b, v in vp.volumes.items():
if b not in cvp.volumes:
cvp.volumes[b] = v
else:
cvp.volumes[b] += v
self._last_base_timestamp = volume_profile.vps[-1].timestamp
# append current VP
if self._use_current and volume_profile.current:
vp = volume_profile.current
for b, v in vp.volumes.items():
if b not in cvp.volumes:
cvp.volumes[b] = v
else:
cvp.volumes[b] += v
self._last_base_timestamp = volume_profile.current.timestamp
self._finalize(volume_profile, cvp)
self._last_timestamp = timestamp
return cvp
#
# internal methods
#
def _finalize(self, volume_profile, vp):
"""
Finalize the computation of the last VP and push it.
        Done on update when the last trade timestamp opens a new session.
"""
if vp is None:
return
vp.poc = BaseVolumeProfileIndicator.find_poc(vp)
# volumes arranged by price
volumes_by_price = BaseVolumeProfileIndicator.sort_volumes_by_price(vp)
# find peaks and valley
vp.peaks, vp.valleys = BaseVolumeProfileIndicator.basic_peaks_and_valleys_detection(
volume_profile.bins, volume_profile.sensibility, vp)
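# Illustrative usage sketch (assuming timeframes are expressed in seconds and
# vp_indicator is an already-updated VolumeProfileIndicator instance):
#
#     cvp = CompositeVolumeProfile(24*60*60, 5, vp_indicator)
#     if cvp.is_update_needed(timestamp):
#         profile = cvp.composite(timestamp)  # spans up to 5 daily profiles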
|
import unittest
from translator import englishToFrench, frenchToEnglish
class TestTranslateEnToFr(unittest.TestCase):
"""
Class to test the function englishToFrench
"""
def test1(self):
"""
Function to test the function englishToFrench
"""
self.assertIsNone(englishToFrench(None))
self.assertEqual(englishToFrench("Hello"), "Bonjour")
self.assertNotEqual(englishToFrench("Bonjour"), "Hello")
class TestTranslateFrToEn(unittest.TestCase):
"""
Class to test the function frenchToEnglish
"""
def test2(self):
"""
Function to test the function frenchToEnglish
"""
self.assertIsNone(frenchToEnglish(None))
self.assertEqual(frenchToEnglish("Bonjour"), "Hello")
self.assertNotEqual(frenchToEnglish("Hello"), "Bonjour")
if __name__ == '__main__':
    unittest.main()
|
""" Get the county museum lists from wikipedia.
Doing it this way, rather than cutting and pasting, ensures that the same
encoding is used as urllib2 uses."""
__author__ = "Joe Collins"
__copyright__ = "Copyright (c) 2016 Black Radley Limited."
import io
import requests # for getting the pages from Wikipedia
import helpers_list
print '\nGet Wikipedia Data\n---'
COUNTIES_ENGLAND_CEREMONIAL = helpers_list.get_counties_england_ceremonial()
DATA = None
HEADERS = {'User-Agent' : 'ProjectHeathMynd (+http://www.blackradley.com/contact-us/)'}
for county in COUNTIES_ENGLAND_CEREMONIAL:
url = 'https://en.wikipedia.org/wiki/List_of_museums_' + county
print url
response = requests.get(url)
content = response.text
county_museums_file_path = helpers_list.get_canonical_path_for(
'../download/List_of_museums_' + county + '.htm')
county_museums_file = io.open(county_museums_file_path, 'w', encoding='utf-8')
county_museums_file.write(content)
county_museums_file.close()
|
'''
Python Exercise 086: Write a program that declares a 3x3 matrix
and fills it with values read from the keyboard. At the end, display
the matrix on screen with proper formatting.
'''
## GUANABARA'S VERSION:
matriz = [[0,0,0],[0,0,0],[0,0,0]]
for l in range(0,3):
for c in range(0,3):
        matriz[l][c] = int(input(f'Enter a value for [{l}, {c}]: '))
print("=-" * 30)
for l in range(0,3):
for c in range(0,3):
print(f'[{matriz[l][c]:^5}]', end='')
print()
## BRUNO'S VERSION
matriz = []
for i in range(3):
num = []
for c in range(3):
        num.append(int(input(f"Enter a value for [{i}, {c}]: ")))
matriz.append(num)
print("=-" * 30)
for i in range(len(matriz)):
print("")
for c in matriz[i]:
print(f"[{c:^5}]", end=" ")
|