content stringlengths 5 1.05M |
|---|
import unittest
import sys
sys.path.append('..')
import modes
import os
# Pre-generate the PDML fixture that the modes module parses in these tests.
os.system("tshark -r xml/smtp.pcap -T pdml > tmp.pdml")
class TestModeAll(unittest.TestCase):
    """Exercise modes.modeAll() against the pre-generated SMTP PDML capture."""

    def test_expression_false(self):
        # An aggregate over a nonexistent attribute must evaluate to False.
        self.assertEqual(modes.modeAll(['AVG(fake.attr) == 5']), False)

    def test_syntax_error(self):
        # A malformed expression is rejected (False) rather than raising.
        self.assertEqual(modes.modeAll(['fake.attr == 5 =']), False)

    def test_no_values(self):
        # A filter over an attribute absent from the capture yields False.
        self.assertEqual(modes.modeAll(['fake.attr == 5']), False)

    def test_OK(self):
        # A filter that matches the capture evaluates to True.
        self.assertEqual(modes.modeAll(['smtp.rsp == "asd"']), True)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
import csv
import requests
import dateutil.parser
import re
from sys import argv
from bs4 import BeautifulSoup
import scrape_util
# Market- and script-level configuration shared by all helpers below.
default_sale, base_url, prefix = scrape_util.get_market(argv)
report_path = '/category/market-information/'
# Characters trimmed from the edges of scraped cell text.
strip_char = ':;,. \n\t'
def get_sale_head(line):
    """Return the total number of head sold at the sale, as a string.

    If present, the number is usually at the top of the market report
    (second row); returns None when no digits are found there.
    """
    text = line[1].get_text().replace('\n', ' ')
    match = re.search(r'([0-9,]+)', text)
    if not match:
        return None
    return match.group(1).replace(',', '')
def get_sale_date(line):
    """Return the date of the sale, parsed fuzzily from the first row."""
    raw = line[0].get_text().replace('\n', ' ')
    return dateutil.parser.parse(raw, fuzzy=True)
def is_heading(this_line):
    """Determine whether a given line is a section header
    that describes subsequent lines of a report.

    A heading is short (few cells, few words) and mentions a cattle class.
    """
    cell_count = len(this_line.find_all('td'))
    text = this_line.get_text()
    word_count = len(text.split())
    cattle_clue = r'bulls?|steers?|strs?|cows?|heifers?|hfrs?|calf|calves|pairs?|yearlings?'
    return bool(
        cell_count < 3
        and word_count < 10
        and re.search(cattle_clue, text, re.IGNORECASE)
    )
def is_sale(this_line):
    """Determine whether a given line describes a sale of cattle.

    A sale row has more than three populated cells and a decimal price,
    but no 'to' price range and no 'no test' marker.
    """
    cells = this_line.find_all('td')
    is_not_succinct = sum(1 for cell in cells if cell.string) > 3
    has_price = False
    has_range = False
    no_test = False
    for cell in cells:
        text = cell.get_text()
        if re.search(r'[0-9]+\.[0-9]{2}', text):
            has_price = True
        if re.search(r'\bto\b', text, re.IGNORECASE):
            has_range = True
            break
        if re.search('no test', text, re.IGNORECASE):
            no_test = True
            break
    return has_price and is_not_succinct and not has_range and not no_test
def get_sale_location(word):
    """Convert address strings into a list of address components.

    Splits on a comma when present; otherwise tries to peel a trailing
    state abbreviation off the joined string.
    """
    joined = ' '.join(word)
    if ',' in joined:
        return joined.split(',')
    match = re.search(r'(.*?)(' + scrape_util.state + ')', joined)
    if match:
        return [match.group(1), match.group(2)]
    return [joined]
def is_number(string):
    """Test whether a string is number-ish. Ignoring units like 'cwt' and 'hd'."""
    if not string:
        return False
    # Drop currency symbols, separators and unit suffixes before the test.
    stripped = re.sub(r'\$|[,-/]|cwt|he?a?d?', '', string, flags=re.IGNORECASE)
    try:
        float(stripped)
    except ValueError:
        return False
    return True
def get_sale(word, cattle):
    """Convert the input into a dictionary, with keys matching
    the CSV column headers in the scrape_util module.

    `word` is the list of non-empty cell strings for one sale row; `cattle`
    is the most recent heading text (cattle class). Assumes the row contains
    at least three number-ish cells (head count, weight, price) — guaranteed
    upstream by is_sale(); TODO confirm for all markets.
    """
    # Indices of the number-ish cells: [head, weight, price, ...].
    number_word = [idx for idx, val in enumerate(word) if is_number(val)]
    # Everything before the first number is the consignor's address.
    sale_location = get_sale_location(word[:number_word[0]])
    sale = {
        'consignor_city': sale_location.pop(0).strip(strip_char).title(),
    }
    if sale_location:
        sale['consignor_state'] = sale_location.pop().strip(strip_char)
    # Text between head count and weight refines the heading's cattle class.
    cattle_string = cattle + ' ' + ' '.join(word[number_word[0]+1:number_word[1]])
    sale['cattle_cattle'] = cattle_string.strip(strip_char)
    head_string = word[number_word[0]].strip(strip_char).replace(',', '')
    try:
        float(head_string)
        sale['cattle_head'] = head_string
    except ValueError:
        pass
    weight_string = word[number_word[1]].strip(strip_char).replace(',', '')
    try:
        float(weight_string)
        sale['cattle_avg_weight'] = weight_string
    except ValueError:
        pass
    price_string = word[number_word[2]]
    match = False
    if not match:
        # NOTE(review): every character of 'head' is optional here, so this
        # pattern matches ANY number and the cwt fallback below is dead code;
        # all prices end up under 'cattle_price'. Confirm intended behavior
        # before tightening the pattern (it would change the emitted CSV keys).
        match = re.search(r'([0-9,.]+) ?/?he?a?d?', price_string, re.IGNORECASE)
        key = 'cattle_price'
    if not match:
        match = re.search(r'([0-9,.]+) ?/?c?w?t?', price_string, re.IGNORECASE)
        key = 'cattle_price_cwt'
    if match:
        sale[key] = match.group(1).replace(',', '').strip(strip_char)
    # Drop empty values so the CSV only carries populated columns.
    sale = {k:v for k,v in sale.items() if v}
    return sale
def write_sale(line, this_default_sale, writer):
    """Extract sales from a list of report lines and write them to a CSV file.

    Heading rows update the current cattle class; sale rows are merged into
    a copy of the default sale record and written out.
    """
    cattle = ''
    for this_line in line:
        if is_heading(this_line):
            cattle = this_line.get_text().strip(strip_char)
            continue
        if not is_sale(this_line):
            continue
        sale = this_default_sale.copy()
        word = [
            td.get_text().replace('\xa0', '')
            for td in this_line.find_all('td')
            if td.get_text() != ''
        ]
        sale.update(get_sale(word, cattle))
        writer.writerow(sale)
def main():
    """Fetch the market-report page and write one CSV per new sale date."""
    # Collect individual reports into a list
    response = requests.get(
        base_url + report_path,
        headers=scrape_util.url_header,
    )
    soup = BeautifulSoup(response.content, 'lxml')
    report = [soup]
    # Locate existing CSV files
    archive = scrape_util.ArchiveFolder(argv, prefix)
    # Write a CSV file for each report not in the archive
    for this_report in report:
        # NOTE(review): the loop body uses `soup`, not `this_report`; harmless
        # while `report` holds one page, but confirm before adding more pages.
        table = soup.find('table')
        line = [tr for tr in table.find_all('tr')]
        if line[0].get_text().strip():
            sale_date = get_sale_date(line)
        else:
            # Fall back to the page's <time> element when the first row is empty.
            date_string = soup.time.get_text()
            sale_date = dateutil.parser.parse(date_string)
        io_name = archive.new_csv(sale_date)
        # Stop iteration if this report is already archived
        if not io_name:
            continue
        # Initialize the default sale dictionary
        this_default_sale = default_sale.copy()
        sale_head = get_sale_head(line)
        this_default_sale.update({
            'sale_year': sale_date.year,
            'sale_month': sale_date.month,
            'sale_day': sale_date.day,
            'sale_head': sale_head,
        })
        # Open a new CSV file and write each sale
        with io_name.open('w', encoding='utf-8') as io:
            writer = csv.DictWriter(io, scrape_util.header, lineterminator='\n')
            writer.writeheader()
            write_sale(line, this_default_sale, writer)
# Scrape and archive reports when run as a script.
if __name__ == '__main__':
    main()
|
"""
Copyright (c) 2021, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
This script do cross-checking with TF using multiple dense embedding layers.
"""
import argparse
import sys, os
sys.path.append(os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../../../"))) # where to find SOK
import sparse_operation_kit as sok
import tensorflow as tf
import numpy as np
import json
import pickle
import utils
sys.path.append(os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../../../documents/tutorials/DenseDemo")))
from models import SOKDenseModel, TFDenseModel
sys.path.append(os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../../../documents/tutorials")))
import utility
def test_sok_multi_dense_emb(args):
    """Train the SOK (SparseOperationKit) multi-dense-embedding model and
    collect the embedding vectors produced at each iteration, so they can be
    cross-checked against the pure-TensorFlow run.

    Returns a list with one `all_vectors` entry per training iteration,
    gathered on CPU. Assumes one GPU per worker process — TODO confirm.
    """
    # NCCL options shared by the strategy and the manual all-reduces below.
    comm_options = tf.distribute.experimental.CommunicationOptions(
        bytes_per_pack=0,
        timeout_seconds=None,
        implementation=tf.distribute.experimental.CommunicationImplementation.NCCL
    )
    if args.worker_num == 1:
        strategy = tf.distribute.MirroredStrategy()
    else:
        # Multi-process run: build TF_CONFIG so each MPI rank becomes a worker.
        port = 12345
        os.environ["TF_CONFIG"] = json.dumps({
            "cluster": {"worker": ["localhost" + ":" + str(port + i)
                                   for i in range(args.worker_num)]},
            "task": {"type": "worker", "index": args.task_id}
        })
        strategy = tf.distribute.MultiWorkerMirroredStrategy(
            communication_options=comm_options)
    # One replica per worker, so replica batch = global batch / worker count.
    replica_batch_size = args.global_batch_size // (args.worker_num * 1)
    dataset = utility.TFDataset(filename=args.file_prefix + str(args.task_id) + ".file",
                                batchsize=replica_batch_size,
                                as_sparse_tensor=False,
                                repeat=1)
    dataset = dataset.prefetch(tf.data.AUTOTUNE)
    dynamic_input = True if args.dynamic_input == 1 else False
    with strategy.scope():
        sok.Init(global_batch_size=args.global_batch_size)
        model = SOKDenseModel(max_vocabulary_size_per_gpu=args.max_vocabulary_size_per_gpu,
                              embedding_vec_size_list=args.embedding_vec_size_list,
                              slot_num_list=args.slot_num_list,
                              nnz_per_slot_list=[args.nnz_per_slot for _ in range(len(args.slot_num_list))],
                              num_dense_layers=args.num_dense_layers,
                              dynamic_input=dynamic_input)
        emb_opt = utils.get_embedding_optimizer(args.optimizer)(learning_rate=0.1)
        dense_opt = utils.get_dense_optimizer(args.optimizer)(learning_rate=0.1)
        if args.mixed_precision:
            emb_opt = tf.keras.mixed_precision.LossScaleOptimizer(emb_opt, initial_scale=1024)
        # Set initial value (all ones) to the embedding variables so the SOK
        # and TF runs start from identical parameters.
        sok_saver = sok.Saver()
        for i, layer in enumerate(model.embedding_layers):
            init_tensors = utils.get_ones_tensor(max_vocab_size_per_gpu=args.max_vocabulary_size_per_gpu,
                                                 embedding_vec_size=args.embedding_vec_size_list[i],
                                                 num=args.worker_num)
            sok_saver.load_embedding_values(layer.embedding_variable, init_tensors)
    # Reduction.NONE: per-example losses are averaged manually per replica.
    loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)

    def _replica_loss(labels, logits):
        # Average over the GLOBAL batch so replica losses sum correctly.
        loss = loss_fn(labels, logits)
        _dtype = loss.dtype
        loss = tf.cast(loss, tf.float32)
        loss = tf.nn.compute_average_loss(loss, global_batch_size=args.global_batch_size)
        return tf.cast(loss, _dtype)

    @tf.function
    def _train_step(inputs, labels):
        with tf.GradientTape() as tape:
            logit, all_vectors = model(inputs, training=True)
            loss = _replica_loss(labels, logit)
            if args.mixed_precision:
                _loss = emb_opt.get_scaled_loss(loss)
            else:
                _loss = loss
        # SOK embedding variables get their own (non-all-reduced) update path.
        emb_variable, other_variable = sok.split_embedding_variable_from_others(model.trainable_variables)
        grads, emb_grads = tape.gradient(_loss, [other_variable, emb_variable])
        if args.mixed_precision:
            grads = emb_opt.get_unscaled_gradients(grads)
            emb_grads = emb_opt.get_unscaled_gradients(emb_grads)
        # Non-plugin optimizers need OptimizerScope to touch SOK variables.
        if "plugin" not in args.optimizer:
            with sok.OptimizerScope(emb_variable):
                emb_opt.apply_gradients(zip(emb_grads, emb_variable),
                                        experimental_aggregate_gradients=False)
        else:
            emb_opt.apply_gradients(zip(emb_grads, emb_variable),
                                    experimental_aggregate_gradients=False)
        with tf.control_dependencies(emb_grads):
            # Manually all-reduce dense gradients across replicas.
            replica_context = tf.distribute.get_replica_context()
            grads = replica_context.all_reduce("sum", grads,
                                               options=comm_options)
            dense_opt.apply_gradients(zip(grads, other_variable),
                                      experimental_aggregate_gradients=False)
            # Manually all-reduce loss; it is ok because replica_loss has
            # already been used to update local variables.
            loss = replica_context.all_reduce(tf.distribute.ReduceOp.SUM, loss,
                                              options=comm_options)
        return loss, all_vectors

    # Save the per-iteration embedding vectors for the cross-check.
    sok_results = list()
    for i, (inputs, labels) in enumerate(dataset):
        if args.stop_iter >= 0 and i >= args.stop_iter:
            break
        total_loss, all_vectors = strategy.run(_train_step, args=(inputs, labels))
        print("[INFO]: Iteration: {}, loss={}".format(i, total_loss))
        with tf.device("CPU:0"):
            sok_results.append(all_vectors)
    return sok_results
def test_tf_multi_dense_emb(args):
    """Run the pure-TensorFlow reference model over the union of all workers'
    data shards and collect its embedding vectors per iteration.

    Returns a list with one `all_vectors` entry per training iteration,
    gathered on CPU, to be compared with the SOK run.
    """
    dataset_filenames = [args.file_prefix + str(task_id) + ".file"
                         for task_id in range(args.worker_num)]
    # Interleave the workers' shards batch-by-batch so global batch i here
    # contains exactly the samples the distributed run saw at iteration i.
    samples_total = [list() for _ in range(args.dataset_iter_num)]
    labels_total = [list() for _ in range(args.dataset_iter_num)]
    replica_batch_size = args.global_batch_size // args.worker_num
    for worker_id in range(args.worker_num):
        samples, labels = utils.restore_from_file(dataset_filenames[worker_id])
        for i in range(args.dataset_iter_num):
            samples_total[i].extend(samples[i * replica_batch_size : (i + 1) * replica_batch_size])
            labels_total[i].extend(labels[i * replica_batch_size : (i + 1) * replica_batch_size])
    samples_total = np.concatenate(samples_total, axis=0)
    labels_total = np.concatenate(labels_total, axis=0)
    dataset = utils.tf_dataset(samples_total, labels_total,
                               batchsize=args.global_batch_size,
                               to_sparse_tensor=False,
                               repeat=1)
    dataset = dataset.prefetch(tf.data.AUTOTUNE)
    # The single-process model holds the full vocabulary (all GPU shards).
    model = TFDenseModel(vocabulary_size=args.max_vocabulary_size_per_gpu * args.worker_num,
                         embedding_vec_size_list=args.embedding_vec_size_list,
                         slot_num_list=args.slot_num_list,
                         nnz_per_slot_list=[args.nnz_per_slot for _ in range(len(args.slot_num_list))],
                         num_dense_layers=args.num_dense_layers)
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.1)
    if args.mixed_precision:
        optimizer = tf.keras.mixed_precision.LossScaleOptimizer(optimizer, initial_scale=1024)
    # Set initial value (all ones) to embedding variables, mirroring SOK init.
    for i, param in enumerate(model.embedding_params):
        init_tensors = utils.get_ones_tensor(max_vocab_size_per_gpu=args.max_vocabulary_size_per_gpu * args.worker_num,
                                             embedding_vec_size=args.embedding_vec_size_list[i],
                                             num=1)
        param.assign(init_tensors[0])
    loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True)

    @tf.function
    def _train_step(inputs, labels):
        with tf.GradientTape() as tape:
            logit, all_vectors = model(inputs, training=True)
            loss = loss_fn(labels, logit)
            if args.mixed_precision:
                _loss = optimizer.get_scaled_loss(loss)
            else:
                _loss = loss
        grads = tape.gradient(_loss, model.trainable_variables)
        if args.mixed_precision:
            grads = optimizer.get_unscaled_gradients(grads)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        return loss, all_vectors

    # Save the per-iteration embedding vectors for the cross-check.
    tf_results = list()
    for i, (inputs, labels) in enumerate(dataset):
        if args.stop_iter >= 0 and i >= args.stop_iter:
            break
        loss, all_vectors = _train_step(inputs, labels)
        print("[INFO]: Iteration: {}, loss={}".format(i, loss))
        with tf.device("CPU:0"):
            tf_results.append(all_vectors)
    return tf_results
def compare_sok_and_tf(args):
    """Run both the SOK and the pure-TF training loops, then assert that the
    embedding vectors they produce are numerically close at every iteration.

    Each rank dumps its SOK results to a file first; the cross-check then
    re-reads all ranks' files. Presumably all ranks share a filesystem —
    TODO confirm for multi-node launches.
    """
    sok_results = test_sok_multi_dense_emb(args)
    utils.save_to_file("./sok_results_" + str(args.task_id) + ".file", sok_results)
    # only process-0 to do the cross-checking.
    # if args.task_id != 0:
    #     return
    tf_results = test_tf_multi_dense_emb(args)
    all_sok_results_list = list()
    for i in range(args.worker_num):
        sok_results = utils.restore_from_file("./sok_results_" + str(i) + ".file")
        sok_results = tf.concat(sok_results, axis=0) # [iter-num, replica-bs, vectors]
        all_sok_results_list.append(sok_results)
    # Stitch replicas back together along the batch axis, then re-split per
    # iteration so entry i lines up with tf_results[i].
    all_sok_results_list = tf.concat(all_sok_results_list, axis=1)
    all_sok_results_list = tf.split(all_sok_results_list, num_or_size_splits=len(tf_results), axis=0)
    all_sok_results_list = [tf.squeeze(item) for item in all_sok_results_list]
    if len(all_sok_results_list) != len(tf_results):
        raise ValueError("The length of sok results is not equal to that of tensorflow.")
    # Looser tolerances for dynamic_input, where unique/gather reordering
    # changes float accumulation order.
    if args.dynamic_input == 1:
        atol = 1e0
        rtol = 1e-2
    else:
        atol = 1e-4
        rtol = 1e-4
    for i, sok_vector in enumerate(all_sok_results_list):
        tf.debugging.assert_near(tf.reshape(sok_vector,
                                            shape=[-1, tf.shape(sok_vector)[-1]]),
                                 tf_results[i],
                                 atol=atol,
                                 rtol=rtol,
                                 message=("the values is not consistent on Iteration: %d" %i))
    # NOTE(review): len(sok_results) here is the LAST restored worker's list,
    # used as the iteration count — verify it always equals len(tf_results).
    print("\n[INFO]: For multiple dense embedding layer: with MPI + MultiWorkerMirroredStrategy, the embedding"+\
          " vectors obtained from SOK and TF are consistent for %d iterations."
          " With mixed_precision = %s"
          %(len(sok_results), args.mixed_precision))
# CLI entry point: parse hyper-parameters, derive the MPI topology from the
# launcher's environment, pin one GPU per rank, and run the cross-check.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="run DNN model with SparseOperationKit")
    parser.add_argument("--file_prefix", type=str,
                        help="the file_prefix for each GPU.", required=True)
    parser.add_argument("--global_batch_size", type=int,
                        required=True)
    parser.add_argument("--max_vocabulary_size_per_gpu", type=int,
                        required=True)
    parser.add_argument("--slot_num_list", type=int, nargs="+", required=True,
                        help="the number of feature fields")
    parser.add_argument("--nnz_per_slot", type=int, required=True,
                        help="the number of keys in each slot")
    parser.add_argument("--num_dense_layers", type=int, required=True,
                        help="the number of fully connected layers in this DNN model")
    parser.add_argument("--embedding_vec_size_list", type=int, nargs="+", required=True,
                        help="the dimension of embedding vectors")
    parser.add_argument('--optimizer', type=str,
                        help="use what optimizer",
                        required=False, default='plugin_adam',
                        choices=['plugin_adam', 'adam', 'sgd'])
    parser.add_argument("--dataset_iter_num", type=int, required=True,
                        help="the iter num for MPI + SOK")
    parser.add_argument("--stop_iter", type=int, required=False, default=-1,
                        help="early stop at which iteration.")
    parser.add_argument("--dynamic_input", type=int, required=False, default=0, choices=[0, 1],
                        help="whether to use unique before dense_fprop. 1 means dynamic_input,"+\
                             "0 means static_input.")
    parser.add_argument("--mixed_precision", type=int, choices=[0, 1], default=0)
    args = parser.parse_args()
    args.mixed_precision = True if 1 == args.mixed_precision else False
    if args.mixed_precision:
        # Must be set before any model/optimizer is built.
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
    # Worker topology comes from the OpenMPI launcher's environment.
    size = os.getenv("OMPI_COMM_WORLD_SIZE")
    if size is None:
        raise RuntimeError("This app must be launched with mpi.")
    size = int(size)
    args.worker_num = size
    task_id = int(os.getenv("OMPI_COMM_WORLD_RANK"))
    args.task_id = task_id
    # Pin each rank to its own GPU.
    gpus = tf.config.list_physical_devices("GPU")
    tf.config.set_visible_devices(gpus[task_id], "GPU")
    compare_sok_and_tf(args)
    # use these as a barrier
    from mpi4py import MPI
    MPI.COMM_WORLD.Barrier()
|
"""
RRT_2D
@author: huiming zhou
Modified by David Filliat
"""
class Env:
    """Static 50x30 map for the 2D RRT planner.

    Obstacles are [x, y, w, h] rectangles and [x, y, r] circles.
    """

    def __init__(self):
        self.x_range = (0, 50)
        self.y_range = (0, 30)
        # Each call resolves to the class-level staticmethod; the instance
        # attribute then shadows it with the returned obstacle list.
        self.obs_boundary = self.obs_boundary()
        self.obs_circle = self.obs_circle()
        self.obs_rectangle = self.obs_rectangle()

    @staticmethod
    def obs_boundary():
        """Return the four boundary walls as [x, y, w, h] rectangles."""
        return [
            [0, 0, 1, 30],
            [0, 30, 50, 1],
            [1, 0, 50, 1],
            [50, 1, 1, 30],
        ]

    @staticmethod
    def obs_rectangle():
        """Return the interior rectangular obstacles."""
        return [
            [14, 12, 8, 2],
            [18, 22, 8, 3],
            [26, 7, 2, 12],
            [32, 14, 10, 2],
        ]

    @staticmethod
    def obs_circle():
        """Return the circular obstacles as [x, y, radius]."""
        return [
            [7, 12, 3],
            [46, 20, 2],
            [15, 5, 2],
            [37, 7, 3],
            [37, 23, 3],
        ]
class Env2:
    """Alternative 50x30 map: two columns of stacked rectangles, no circles."""

    def __init__(self):
        self.x_range = (0, 50)
        self.y_range = (0, 30)
        # Each call resolves to the class-level staticmethod; the instance
        # attribute then shadows it with the returned obstacle list.
        self.obs_boundary = self.obs_boundary()
        self.obs_circle = self.obs_circle()
        self.obs_rectangle = self.obs_rectangle()

    @staticmethod
    def obs_boundary():
        """Return the four boundary walls as [x, y, w, h] rectangles."""
        return [
            [0, 0, 1, 30],
            [0, 30, 50, 1],
            [1, 0, 50, 1],
            [50, 1, 1, 30],
        ]

    @staticmethod
    def obs_rectangle():
        """Return the interior rectangular obstacles."""
        return [
            [19, 1, 7, 6],
            [19, 7, 7, 6],
            [19, 13, 7, 6],
            [19, 19, 7, 6],
            [30, 7, 7, 6],
            [30, 12, 7, 6],
            [30, 18, 7, 6],
            [30, 24, 7, 6],
        ]

    @staticmethod
    def obs_circle():
        """Return the circular obstacles (none in this environment)."""
        return []
|
"""
Performance tests for the stats api.
"""
import os
import random
import tempfile
import time
import yappi
from dogapi import DogStatsApi
class NullReporter(object):
    """
    A DogAPI reporter to nowhere: announces each flush and drops the metrics.
    """

    def flush(self, metrics):
        # Deliberately discard `metrics`; only log that a flush happened.
        print('flushing metrics')
class NullDogStatsApi(DogStatsApi):
    """
    A DogStats API whose every operation is a no-op, for comparing the
    effects of including DogApi in your program.
    """

    def start(self, *args, **kwargs):
        return None

    def stop(self, *args, **kwargs):
        return None

    def gauge(self, *args, **kwargs):
        return None

    def increment(self, *args, **kwargs):
        return None

    def histogram(self, *args, **kwargs):
        return None
class CPUBoundProgram(object):
    """A CPU-heavy workload that emits metrics through the given stats api."""

    def __init__(self, dog_stats_api):
        self.dog_stats_api = dog_stats_api

    def run(self):
        """Spin through a quadratic busy-loop, reporting metrics as it goes."""
        stats = self.dog_stats_api
        for i in range(100000):
            stats.gauge('current.number', i)
            stats.increment('numbers.checked')
            counter = 0
            start = time.time()
            while counter < i:
                counter += 1
            stats.histogram('number.check.time', time.time() - start)
def profile_cpu_bound_program():
    """Profile the CPU-bound workload twice — once with a real (but
    non-reporting) DogStatsApi and once with the no-op stub — so the two
    yappi profiles can be compared to estimate instrumentation overhead."""
    real_dog = DogStatsApi()
    # Swap in a reporter that drops metrics so no network traffic occurs.
    real_dog.reporter = NullReporter()
    fake_dog = NullDogStatsApi()
    for type_, dog in [('real', real_dog), ('fake', fake_dog)]:
        print('\n\n\nTESTING %s\n\n' % type_)
        dog.start()
        program = CPUBoundProgram(dog)
        yappi.start()
        program.run()
        yappi.print_stats(sort_type=yappi.SORTTYPE_TSUB, sort_order=yappi.SORTORDER_DESC)
        # Reset the profiler between the two runs so stats don't mix.
        yappi.stop()
        yappi.clear_stats()
def measure_thousands_of_metrics():
    """Hammer a live DogStatsApi with ~400k metric calls (gauge, counter,
    histogram and a timed function over 100 cycling metric names) and print
    the yappi profile. NOTE(review): uses a hard-coded staging api_key/host."""
    dog = DogStatsApi()
    dog.start(api_key='apikey_3', api_host="https://app.datad0g.com")
    yappi.start()

    @dog.timed('timed')
    def timed():
        pass

    for i in range(100):
        for j in range(1000):
            # Cycle through 100 distinct metric names.
            name = j % 100
            dog.gauge('gauge.%s' % name, j)
            dog.increment('counter.%s' % name, j)
            dog.histogram('histogram.%s' % name, j)
            timed()
        print('run %s' % i)
    yappi.print_stats(sort_type=yappi.SORTTYPE_TSUB, sort_order=yappi.SORTORDER_DESC)
# Default entry point: profile the high-volume metrics scenario.
if __name__ == '__main__':
    measure_thousands_of_metrics()
|
def get_book_by_osis_id(id):
    """
    Retrieves book meta data
    :param id: the osis book id (matched case-insensitively)
    :return: the book metadata dict, or None when the id is unknown
    """
    return get_book_by_sort(find_key(id, osis_ids))
def get_book_by_sort(sort):
    """
    Retrieves book metadata
    :param sort: the sort order of the book to look up
    :return: dict with osis/usfm ids, English name and sort key, or None
    """
    known = sort in osis_ids and sort in usfm_ids and sort in en_names
    if not known:
        return None
    return {
        'osis_id': osis_ids[sort],
        'usfm_id': usfm_ids[sort],
        'en_name': en_names[sort],
        'sort': sort
    }
def find_key(value, dict):
    """
    Looks up the key for the case insensitive value
    :param value: the value to search for (matched case-insensitively)
    :param dict: the mapping to search (note: shadows the builtin name,
        kept for interface compatibility)
    :return: the matching key, or None when no value matches
    """
    # .items() works on both Python 2 and 3; the original .iteritems()
    # is Python 2 only and breaks on Python 3.
    for k, v in dict.items():
        if v.lower() == value.lower():
            return k
    return None
# Canonical sort key ('01'..'67') -> OSIS book identifier.
osis_ids = {
    # OT
    '01':'Gen',
    '02':'Exod',
    '03':'Lev',
    '04':'Num',
    '05':'Deut',
    '06':'Josh',
    '07':'Judg',
    '08':'Ruth',
    '09':'1Sam',
    '10':'2Sam',
    '11':'1Kgs',
    '12':'2Kgs',
    '13':'1Chr',
    '14':'2Chr',
    '15':'Ezra',
    '16':'Neh',
    '17':'Esth',
    '18':'Job',
    '19':'Ps',
    '20':'Prov',
    '21':'Eccl',
    '22':'Song',
    '23':'Isa',
    '24':'Jer',
    '25':'Lam',
    '26':'Ezek',
    '27':'Dan',
    '28':'Hos',
    '29':'Joel',
    '30':'Amos',
    '31':'Obad',
    '32':'Jonah',
    '33':'Mic',
    '34':'Nah',
    '35':'Hab',
    '36':'Zeph',
    '37':'Hag',
    '38':'Zech',
    '39':'Mal',
    # NT
    '41':'Matt',
    '42':'Mark',
    '43':'Luke',
    '44':'John',
    '45':'Acts',
    '46':'Rom',
    '47':'1Cor',
    '48':'2Cor',
    '49':'Gal',
    '50':'Eph',
    '51':'Phil',
    '52':'Col',
    '53':'1Thess',
    '54':'2Thess',
    '55':'1Tim',
    '56':'2Tim',
    '57':'Titus',
    '58':'Phlm',
    '59':'Heb',
    '60':'Jas',
    '61':'1Pet',
    '62':'2Pet',
    '63':'1John',
    '64':'2John',
    '65':'3John',
    '66':'Jude',
    '67':'Rev'
}
# Sort key -> USFM book identifier; also covers apocrypha/deuterocanon
# ('68'..'87', 'A4'..'C3') and peripheral matter ('A0'..'B1').
usfm_ids = {
    # OT
    '01': 'GEN',
    '02': 'EXO',
    '03': 'LEV',
    '04': 'NUM',
    '05': 'DEU',
    '06': 'JOS',
    '07': 'JDG',
    '08': 'RUT',
    '09': '1SA',
    '10': '2SA',
    '11': '1KI',
    '12': '2KI',
    '13': '1CH',
    '14': '2CH',
    '15': 'EZR',
    '16': 'NEH',
    '17': 'EST',
    '18': 'JOB',
    '19': 'PSA',
    '20': 'PRO',
    '21': 'ECC',
    '22': 'SNG',
    '23': 'ISA',
    '24': 'JER',
    '25': 'LAM',
    '26': 'EZK',
    '27': 'DAN',
    '28': 'HOS',
    '29': 'JOL',
    '30': 'AMO',
    '31': 'OBA',
    '32': 'JON',
    '33': 'MIC',
    '34': 'NAM',
    '35': 'HAB',
    '36': 'ZEP',
    '37': 'HAG',
    '38': 'ZEC',
    '39': 'MAL',
    # NT
    '41': 'MAT',
    '42': 'MRK',
    '43': 'LUK',
    '44': 'JHN',
    '45': 'ACT',
    '46': 'ROM',
    '47': '1CO',
    '48': '2CO',
    '49': 'GAL',
    '50': 'EPH',
    '51': 'PHP',
    '52': 'COL',
    '53': '1TH',
    '54': '2TH',
    '55': '1TI',
    '56': '2TI',
    '57': 'TIT',
    '58': 'PHM',
    '59': 'HEB',
    '60': 'JAS',
    '61': '1PE',
    '62': '2PE',
    '63': '1JN',
    '64': '2JN',
    '65': '3JN',
    '66': 'JUD',
    '67': 'REV',
    # APO/DEUT
    '68': 'TOB',
    '69': 'JDT',
    '70': 'ESG',
    '71': 'WIS',
    '72': 'SIR',
    '73': 'BAR',
    '74': 'LJE',
    '75': 'S3Y',
    '76': 'SUS',
    '77': 'BEL',
    '78': '1MA',
    '79': '2MA',
    '80': '3MA',
    '81': '4MA',
    '82': '1ES',
    '83': '2ES',
    '84': 'MAN',
    '85': 'PS2',
    '86': 'ODA',
    '87': 'PSS',
    'A4': 'EZA',
    'A5': '5EZ',
    'A6': '6EZ',
    'B2': 'DAG',
    'B3': 'PS3',
    'B4': '2BA',
    'B5': 'LBA',
    'B6': 'JUB',
    'B7': 'ENO',
    'B8': '1MQ',
    'B9': '2MQ',
    'C0': '3MQ',
    'C1': 'REP',
    'C2': '4BA',
    'C3': 'LAO',
    'A0': 'FRT',
    'A1': 'BAK',
    'A2': 'OTH',
    'A7': 'INT',
    'A8': 'CNC',
    'A9': 'GLO',
    'B0': 'TDX',
    'B1': 'NDX'
}
# Sort key -> English book name, covering canon, apocrypha/deuterocanon and
# peripheral matter. (A stray dataset artifact '|' after the closing brace
# made the original statement a syntax error; it has been removed.)
en_names = {
    '01':'Genesis',
    '02':'Exodus',
    '03':'Leviticus',
    '04':'Numbers',
    '05':'Deuteronomy',
    '06':'Joshua',
    '07':'Judges',
    '08':'Ruth',
    '09':'1 Samuel',
    '10':'2 Samuel',
    '11':'1 Kings',
    '12':'2 Kings',
    '13':'1 Chronicles',
    '14':'2 Chronicles',
    '15':'Ezra',
    '16':'Nehemiah',
    '17':'Esther (Hebrew)',
    '18':'Job',
    '19':'Psalms',
    '20':'Proverbs',
    '21':'Ecclesiastes',
    '22':'Song of Songs',
    '23':'Isaiah',
    '24':'Jeremiah',
    '25':'Lamentations',
    '26':'Ezekiel',
    '27':'Daniel (Hebrew)',
    '28':'Hosea',
    '29':'Joel',
    '30':'Amos',
    '31':'Obadiah',
    '32':'Jonah',
    '33':'Micah',
    '34':'Nahum',
    '35':'Habakkuk',
    '36':'Zephaniah',
    '37':'Haggai',
    '38':'Zechariah',
    '39':'Malachi',
    '41':'Matthew',
    '42':'Mark',
    '43':'Luke',
    '44':'John',
    '45':'Acts',
    '46':'Romans',
    '47':'1 Corinthians',
    '48':'2 Corinthians',
    '49':'Galatians',
    '50':'Ephesians',
    '51':'Philippians',
    '52':'Colossians',
    '53':'1 Thessalonians',
    '54':'2 Thessalonians',
    '55':'1 Timothy',
    '56':'2 Timothy',
    '57':'Titus',
    '58':'Philemon',
    '59':'Hebrews',
    '60':'James',
    '61':'1 Peter',
    '62':'2 Peter',
    '63':'1 John',
    '64':'2 John',
    '65':'3 John',
    '66':'Jude',
    '67':'Revelation',
    '68':'Tobit',
    '69':'Judith',
    '70':'Esther Greek',
    '71':'Wisdom of Solomon',
    '72':'Sirach',
    '73':'Baruch',
    '74':'Letter of Jeremiah',
    '75':'Song of the 3 Young Men',
    '76':'Susanna',
    '77':'Bel and the Dragon',
    '78':'1 Maccabees',
    '79':'2 Maccabees',
    '80':'3 Maccabees',
    '81':'4 Maccabees',
    '82':'1 Esdras (Greek)',
    '83':'2 Esdras (Latin)',
    '84':'Prayer of Manasseh',
    '85':'Psalm 151',
    '86':'Odae/Odes',
    '87':'Psalms of Solomon',
    'A4':'Ezra Apocalypse',
    'A5':'5 Ezra',
    'A6':'6 Ezra',
    'B2':'Daniel Greek',
    'B3':'Psalms 152-155',
    'B4':'2 Baruch (Apocalypse)',
    'B5':'Letter of Baruch',
    'B6':'Jubilees',
    'B7':'Enoch',
    'B8':'1 Meqabyan/Mekabis',
    'B9':'2 Meqabyan/Mekabis',
    'C0':'3 Meqabyan/Mekabis',
    'C1':'Reproof',
    'C2':'4 Baruch',
    'C3':'Letter to the Laodiceans',
    'A0':'Front Matter',
    'A1':'Back Matter',
    'A2':'Other Matter',
    'A7':'Introduction Matter',
    'A8':'Concordance',
    'A9':'Glossary / Wordlist',
    'B0':'Topical Index',
    'B1':'Names Index'
}
#!/usr/bin/env python
'''
Align text-bound stand-off annotations to noun phrases.
Author: Pontus Stenetorp <pontus stenetorp se>
Version: 2011-11-02
'''
# XXX: This script is a mess, should be refactored:
# * Handle ALL annotations at once, currently we don't do interactions
# between annotations from a1 and a2
# XXX: We are currently ignoring coordination!
# TODO: Minimal NP;s, maximal NP;s heuristics
# TODO: Expand spans only
# TODO: Shrink spans only
# Default is to fit onto the NP
# TODO: fname is actually fpath
from argparse import ArgumentParser, FileType
from itertools import chain
from re import compile as re_compile
from string import whitespace
from sys import maxint
from sys import stderr
### Constants
# Command-line interface: the text, parse and stand-off inputs plus the
# alignment heuristics/flags described in the module docstring.
ARGPARSER = ArgumentParser()
ARGPARSER.add_argument('text_file', type=FileType('r'))
ARGPARSER.add_argument('ptb_file', type=FileType('r'))
ARGPARSER.add_argument('stand_off_file', nargs='+', type=FileType('r'))
ARGPARSER.add_argument('-a', '--non-alpha-heuristic', action='store_true', help="if an np head is covered by annotations of the same type apart fron it's non-alpha characters create an annotation (example: \"p16(INK4a)\")")
ARGPARSER.add_argument('-n', '--no-warn', action='store_true')
ARGPARSER.add_argument('-g', '--generate', action='store_true',
        help=('generate additional annotations by eliminating determiners '
            '(DT), pronouns (PRP and PRP$) and cardinal numbers (CD) from '
            'the beginning of the annotation'))
ARGPARSER.add_argument('-m', '--merge', action='store_true')
ARGPARSER.add_argument('-d', '--debug', action='store_true')
ARGPARSER.add_argument('-r', '--dry-run', action='store_true')
ARGPARSER.add_argument('-v', '--verbose', action='store_true')
# Pre-compiled patterns for pulling tags and tokens out of PTB S-expressions.
PTB_TAGS_REGEX = re_compile(r'\((?P<tag>[^ ]+)')
PTB_TOKENS_REGEX = re_compile(r'(?P<token>[^ )]+?)\)')
WHITESPACE_CHARS = set(whitespace)
###
def _ptb_token_gen(ptb):
    """Yield each token string found in a PTB S-expression."""
    return (m.group('token') for m in PTB_TOKENS_REGEX.finditer(ptb))
def _ptb_tag_gen(ptb):
    """Yield each tag string found in a PTB S-expression."""
    return (m.group('tag') for m in PTB_TAGS_REGEX.finditer(ptb))
class Span(object):
    """A [start, end) region of text with optional type, text and id."""

    def __init__(self, start, end, type_=None, text=None, id_=None):
        self.start = start
        self.end = end
        self.type_ = type_
        self.text = text
        self.id_ = id_

    def __contains__(self, other):
        # A Span is contained when fully covered; a plain offset when it
        # falls inside the half-open interval.
        if isinstance(other, Span):
            return other.start >= self.start and other.end <= self.end
        return self.start <= other < self.end

    def __repr__(self):
        return str(self)

    def __hash__(self):
        # Note: We are leaving out id and text here
        return hash(hash(self.start) + hash(self.end) + hash(self.type_))

    def __cmp__(self, other):
        # Python 2 ordering hook: sorts by descending start offset.
        return other.start - self.start

    def __str__(self):
        type_part = self.type_ + ', ' if self.type_ is not None else ''
        text_part = ', ' + self.text if self.text is not None else ''
        id_part = ', ' + self.id_ if self.id_ is not None else ''
        return 'Span({}{}, {}{}{})'.format(
            type_part, self.start, self.end, text_part, id_part)
# NOTE: O(n), could be optimised if necessary
class Spans(object):
    """A flat collection of Span objects with linear-scan lookups."""

    def __init__(self, it=None):
        self.spans = list(it) if it is not None else []

    def add(self, span):
        self.spans.append(span)

    # TODO: Find all spans
    def find_all(self, other):
        """Yield every contained span that covers `other`."""
        return (span for span in self.spans if other in span)

    def __iter__(self):
        return iter(self.spans)

    def __contains__(self, other):
        return any(other in span for span in self.spans)

    def __repr__(self):
        return str(self)

    def __str__(self):
        return 'Spans([' + ', '.join(str(s) for s in self.spans) + '])'
# TODO: function extracted from main
# Does not include quoting
# PTB bracket escapes: literal character -> Penn Treebank token.
PTB_ESCAPES = {
    '(': '-LRB-',
    ')': '-RRB-',
    '[': '-LSB-',
    ']': '-RSB-',
    '{': '-LCB-',
    '}': '-RCB-',
}
# Repairs for quote tokens that were mangled inside raw S-expressions.
PTB_SEXP_QUOTE_ESCAPES = {
    '(`` ")': '(`` ``)',
    "('' \")": "('' '')",
    # XXX: Exception? Probably a failed parse...
    '(POS ")': "(POS '')",
    '(NN ")': "(NN '')",
}
def _unescape(s):
    """Replace PTB bracket escapes (-LRB- etc.) with their literal characters."""
    # .items() keeps this compatible with both Python 2 and 3; the original
    # .iteritems() is Python 2 only.
    for _from, to in PTB_ESCAPES.items():
        s = s.replace(to, _from)
    return s
def _sexp_quotes_unescape(s):
    """Restore `` and '' quote tokens that were mangled by PTB escaping."""
    # .items() keeps this compatible with both Python 2 and 3; the original
    # .iteritems() is Python 2 only. Commented-out debug prints removed.
    for _from, to in PTB_SEXP_QUOTE_ESCAPES.items():
        s = s.replace(to, _from)
    return s
def _token_i_mapping(tokens, text, text_start=0):
    """Align each token onto `text` starting at `text_start`.

    Walks the text character-by-character, skipping whitespace between
    tokens, and returns (dict mapping token index -> Span of character
    offsets, final text position). Exits the process when a token cannot
    be aligned.
    """
    token_i_to_offsets = {}
    text_pos = text_start
    for token_i, token in enumerate(tokens):
        token_start_txt_pos = text_pos
        token_pos = 0
        while token_pos < len(token):
            token_char = token[token_pos]
            text_char = text[text_pos]
            if token_char == text_char:
                token_pos += 1
                text_pos += 1
            elif text_char in WHITESPACE_CHARS:
                text_pos += 1
                # If we are yet to begin matching the token it should not
                # start with whitespace
                if token_pos == 0:
                    token_start_txt_pos += 1
            else:
                # Really nasty corner-case where the type of parentheses may
                # have gone lost through PTB escaping etc.
                if (token_char == '"'
                        and text[text_pos:text_pos + 2] in ('``', "''", )):
                    # Skip ahead and assign this single token character as two
                    # in the text
                    token_pos += 1
                    text_pos += 2
                else:
                    print >> stderr, 'ERROR: Failed to align token'
                    exit(-1) # XXX: Rude exit
        token_start = token_start_txt_pos
        token_end = text_pos
        token_i_to_offsets[token_i] = Span(token_start, token_end, text=token)
        # Token length sanity check, XXX: Can't be used with the corner-case above
        #assert len(token) == token_end - token_start, '"{}" {} {} {}'.format(
        #    token, token_start, token_end, token_end - token_start)
    return token_i_to_offsets, text_pos
def _paren_range(s):
    """Return the length of the first balanced parenthesis group in `s`.

    `s` is expected to begin at an opening parenthesis; the return value is
    one past the index of the matching close. Exits the process when the
    parentheses never balance.
    """
    depth = 0
    for char_i, char in enumerate(s):
        if char == '(':
            depth += 1
        elif char == ')':
            depth -= 1
            if depth == 0:
                break
    else:
        # for-else: the loop ran to completion without balancing.
        print >> stderr, (
            'ERROR: Unmatched parentheses in S-expression')
        exit(-1) # XXX: Nasty exit
    return char_i + 1
def _create_token_spans(ptb_sexp, txt_data, token_i_to_offsets):
    """For every tag in the S-expression, build a Span over the text it covers.

    Counts the tokens appearing before each tag and the tokens inside its
    balanced parenthesis group, then maps those token indices to character
    offsets via `token_i_to_offsets`. Each returned Span also carries the
    covered S-expression segment (monkey-patched attributes).
    """
    tag_spans = []
    for match in PTB_TAGS_REGEX.finditer(ptb_sexp):
        tag = match.groupdict()['tag']
        # How many tokens precede this tag?
        ptb_sexp_prior = ptb_sexp[:match.start()]
        tokens_prior = len(PTB_TOKENS_REGEX.findall(ptb_sexp_prior))
        # Find the S-expression span covered by the tag
        paren_end = _paren_range(ptb_sexp[match.start():])
        tag_start = match.start()
        tag_end = tag_start + (paren_end - 1)
        # Use the span to calculate the number of tokens covered
        ptb_sexp_segment = ptb_sexp[tag_start:tag_end + 1]
        tokens_contained = len(PTB_TOKENS_REGEX.findall(ptb_sexp_segment))
        tag_start_token_i = tokens_prior
        tag_end_token_i = tag_start_token_i + tokens_contained - 1
        tag_txt_start = token_i_to_offsets[tag_start_token_i].start
        tag_txt_end = token_i_to_offsets[tag_end_token_i].end
        tag_txt = txt_data[tag_txt_start:tag_txt_end]
        tag_span = Span(tag_txt_start, tag_txt_end, text=tag_txt, type_=tag)
        # Monkey patch the S-expression details in there
        tag_span.sexp = ptb_sexp_segment
        tag_span.sexp_start = tag_start
        tag_span.sexp_end = tag_end
        tag_spans.append(tag_span)
    return tag_spans
def _parse_tbs(stand_off_files):
    """Parse brat-style stand-off files into lists of textbound Spans.

    Returns a dict mapping each file's name to its list of Spans.
    Only textbound lines (id starting with 'T') are read.
    """
    spans_by_fname = {}
    for ann_file in stand_off_files:
        spans = []
        for raw_line in ann_file:
            line = raw_line.rstrip('\n')
            # Skip non-textbounds
            if not line.startswith('T'):
                continue
            # NOTE: Fails if no text is present
            ann_id, type_offsets, text = line.split('\t')
            span_type, span_start, span_end = type_offsets.split(' ')
            spans.append(Span(int(span_start), int(span_end),
                    type_=span_type, text=text, id_=ann_id))
        spans_by_fname[ann_file.name] = spans
    return spans_by_fname
# PoS tags dropped from the front of generated spans (pronouns,
# determiners and cardinal numbers do not carry the NP head's meaning)
PRE_DROPS = set(('PRP', 'PRP$', 'DT', 'CD', ))
def main(args):
    """Expand textbound annotations to cover full NPs using PTB parses.

    For every parsed sentence (one S-expression per line of the PTB
    file), find each NP, determine its head token and, when exactly one
    textbound annotation covers the head, emit a new annotation spanning
    the whole NP.  New stand-off lines are appended to
    ``<fname>.expanded`` (or ``<fname>`` itself with --merge).

    Returns 0 on success; exits the process on token alignment failure.
    Note: this module is Python 2 code (print >>, itervalues, xrange).
    """
    argp = ARGPARSER.parse_args(args[1:])
    txt_data = argp.text_file.read()
    txt_pos = 0
    # Plural although a collection, oh dear... "stand_off_files"
    tb_spans_by_fname = _parse_tbs(argp.stand_off_file)
    from collections import defaultdict
    new_tb_spans_by_fname = defaultdict(list)
    expanded_ids_by_fname = defaultdict(set)
    # Continue numbering new textbounds after the highest existing id
    tb_nums = (int(tb.id_[1:]) for tb in chain(*tb_spans_by_fname.itervalues()))
    next_tb_id = (i for i in xrange(max(chain((1, ), tb_nums)) + 1, maxint))
    # Restore the PTB quotes at the S-Expression stage or `` and '' fails
    for ptb_sexp_i, ptb_sexp in enumerate((l.rstrip('\n')
            for l in argp.ptb_file), start=1):
        if argp.debug:
            print >> stderr, ('Processing S-expression ({}): {}'
                    ).format(ptb_sexp_i, ptb_sexp)
        #print ptb_sexp
        quotes_unescaped_ptb_sexp = _sexp_quotes_unescape(ptb_sexp)
        #print >> stderr, quotes_unescaped_ptb_sexp
        #continue
        unescaped_tokens = [_unescape(t) for t in _ptb_token_gen(
            quotes_unescaped_ptb_sexp)]
        #print >> stderr, 'txt_data:', txt_data
        # Being unescaped the tokens should now align with the text
        token_i_to_offsets, txt_pos = _token_i_mapping(unescaped_tokens,
                txt_data, text_start=txt_pos)
        # Now calculate which tags covers which tokens
        tag_spans = _create_token_spans(quotes_unescaped_ptb_sexp, txt_data,
                token_i_to_offsets)
        sentence_tag = tag_spans[0]
        assert sentence_tag.type_ == 'S1'
        # XXX: How did we handle co-ordination?
        # Head-finding algorithm (similar to Bunescu & Mooney (2004)):
        #   * Strip PP and VP from the right
        #   * Once no further PP and/or VP can be stripped, the last noun;ish
        #       thing of the current NP is the head
        for fname in tb_spans_by_fname:
            # Discard textbound annotations outside the sentence
            tb_spans = [tb for tb in tb_spans_by_fname[fname]
                    if tb in sentence_tag]
            if argp.debug:
                print >> stderr, 'Enchancing: {}'.format(fname)
                print >> stderr, tb_spans
            for np in (s for s in tag_spans if s.type_ == 'NP'):
                # Get the relevant PoS tags for this NP
                tags = [s for s in tag_spans if s in np]
                # Co-ordinated NPs are ambiguous; skip them outright
                if any(s for s in tags if s.type_ == 'CC'):
                    if not argp.no_warn:
                        #print >> stderr, tags
                        print >> stderr, ('WARNING: {}: Skipping NP due to CC'
                                ).format(fname)
                    #raw_input()
                    continue
                #print tags
                # Find the span of the NP head
                #print >> stderr, 'before_head:', tags
                while any(s for s in tags if s.type_ in ('PP', 'VP', )):
                    # Find the first PP or VP from the right
                    for span_i, span in enumerate(tags[::-1],
                            start=len(tags) - 1):
                        if span.type_ in ('PP', 'VP', ):
                            break
                    else:
                        assert False, 'can not happen (tm)'
                    to_strip = span
                    # Now remove all sub-spans what we removed
                    tags = [s for s in tags
                            if s not in to_strip and s != to_strip]
                #print >> stderr, 'during_head:', tags
                for span in tags[::-1]:
                    if span.type_ in set(('NN', 'NNS', 'NNP', 'NNPS', )):
                        np_head = span
                        break
                else:
                    # No nouns present, choose the right-most token
                    np_head = tags[-1]
                # Check all annotations contained in the NP
                tb_candidates = []
                in_np_head = [s for s in tb_spans if s in np_head]
                for tb_span in in_np_head:
                    # Restrict the debug output slightly, only in same NP
                    if argp.debug and tb_span in np:
                        print >> stderr, np.text
                        print >> stderr, '{}|{}|'.format(
                                ' ' * (np_head.start - np.start),
                                ' ' * (np_head.end - np_head.start - 2))
                        print >> stderr, '{}|{}|'.format(
                                ' ' * (tb_span.start - np.start),
                                ' ' * (tb_span.end - tb_span.start - 2))
                    # If they cover the head, they can potentially be used
                    if np_head in tb_span:
                        tb_candidates.append(tb_span)
                # Remove duplicate canditates (some annotations aren't uniqued)
                tb_candidates = [s for s in set(tb_candidates)]
                if len(tb_candidates) > 1:
                    if not argp.no_warn: #XXX:
                        print >> stderr, ('WARNING: {}: Skipping NP due to '
                                'multiple TB;s ({}) overlapping the NP head '
                                '({})').format(fname, ', '.join(s.type_
                                    for s in tb_candidates), np_head.text)
                    continue
                if argp.non_alpha_heuristic and any(c for c in np_head.text
                        if not (c.isalpha() or c.isdigit())):
                    alphanum_char_pos = set(i for i, c in enumerate(np_head.text)
                            if c.isalpha() or c.isdigit())
                    # If we have non-alphanum characters and not varying type
                    # among the annotations for the np head
                    if alphanum_char_pos and len(set(s.type_ for s in in_np_head)) == 1:
                        for tb_span in in_np_head:
                            # Remove any overlapping character
                            alphanum_char_pos = alphanum_char_pos - set(
                                    xrange(tb_span.start - np_head.start,
                                        tb_span.end - np_head.start))
                        # If there are no characters left, we covered the head
                        if not alphanum_char_pos:
                            start = in_np_head[0].start
                            end = in_np_head[-1].end
                            # BUG FIX: the slice end was 'end - np.end',
                            # which is non-positive and yielded an empty
                            # or garbled text; both offsets must be
                            # relative to the NP start.
                            text = np.text[start - np.start:end - np.start]
                            tb_candidates.append(Span(start, end,
                                type_=in_np_head[0].type_,
                                text=text,
                                id_='T{}'.format(next(next_tb_id))))
                if tb_candidates:
                    new_tb_spans = [
                            Span(np.start, np.end,
                                type_=tb_span.type_,
                                text=np.text,
                                id_='T{}'.format(next(next_tb_id)))
                            for tb_span in tb_candidates]
                    if argp.generate:
                        # Generate new spans, dropping pre spans
                        gen_spans = []
                        for new_tb_span in new_tb_spans:
                            tags = [s for s in tag_spans if s in new_tb_span]
                            # Drop the initial np
                            tags = tags[1:]
                            gen_tags = tags
                            while (len(gen_tags) > 1
                                    and gen_tags[0].type_ in PRE_DROPS):
                                gen_tags = gen_tags[1:]
                            first = gen_tags[0]
                            # NOTE(review): 'tb_span' here is the stale
                            # loop variable left over from building
                            # new_tb_spans; 'new_tb_span.type_' may have
                            # been intended -- verify before changing.
                            gen_spans.append(Span(first.start, np.end,
                                type_=tb_span.type_, text=np.text[first.start - np.start:],
                                id_='T{}'.format(next(next_tb_id))))
                        #print >> stderr, 'GENERATED:', gen_spans
                        for s in gen_spans:
                            new_tb_spans.append(s)
                    ann_lines = []
                    for new_tb_span in new_tb_spans:
                        ann_line = '{}\t{} {} {}\t{}'.format(new_tb_span.id_,
                                new_tb_span.type_, new_tb_span.start,
                                new_tb_span.end, new_tb_span.text)
                        ann_lines.append(ann_line)
                        if argp.verbose or argp.debug:
                            #if argp.debug:
                            #    print >> stderr, 'np.text:', np.text
                            print >> stderr, ann_line
                    if argp.debug:
                        raw_input()
                    if not argp.dry_run:
                        out_fname = fname
                        if not argp.merge:
                            out_fname += '.expanded'
                        # Append so repeated sentences accumulate
                        with open(out_fname, 'a') as expanded_file:
                            expanded_file.write('\n'.join(ann_lines) + '\n')
    return 0
if __name__ == '__main__':
    # CLI entry point; the process exit status is main()'s return value
    from sys import argv
    exit(main(argv))
|
# Script to translate text into proper images
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from PIL import Image
import time
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Function to show image to screen
# Function to show image to screen
def show_img(s,t):
    """Display the image file ``s`` for ``t`` seconds, then close it."""
    frame = mpimg.imread(s)
    plt.imshow(frame)
    # Non-blocking show so plt.pause controls the display duration
    plt.show(block=False)
    plt.pause(t)
    plt.close()
# Create dictionary
word_dict = dict()
stem_dict = dict()
# Create stemmer
stemmer = PorterStemmer()
# Read in the dictionary file
dict_dir = "../img/"
dict_filename = dict_dir + "dict.txt"
dict_file = open(dict_filename, "r")
dict_lines = dict_file.read().splitlines()
# Make map of word and word stem to file
# Each dict.txt line is "<word> <image file>"; the first mapping for a
# word (or stem) wins, duplicates are ignored.
for line in dict_lines:
    split = line.split()
    word = split[0]
    file = dict_dir + split[1]
    if word not in word_dict.keys():
        word_dict[word] = file
    stem = stemmer.stem(word)
    if stem not in stem_dict.keys():
        stem_dict[stem] = file
# Words that do not need a sign
non_words = ["is", "are", "be"]
alpha = "abcdefghijklmnopqrstuvwxyz"
# Translate the sentences
sentences_file = open("./tests/sentences.txt", "r")
sentences = sentences_file.read().splitlines()
for s in sentences:
    tokens = word_tokenize(s)
    for t in tokens:
        t = t.lower()
        # Skip words that do not need a sign
        if t in non_words:
            continue
        # Exact word match first, then stemmed match; otherwise fall back
        # to fingerspelling the token letter by letter.
        if t in word_dict.keys():
            show_img(word_dict[t],2)
        elif stemmer.stem(t) in stem_dict.keys():
            show_img(stem_dict[stemmer.stem(t)],2)
        else:
            chars = list(t)
            for c in chars:
                # Only lowercase letters have letter images
                if c not in alpha:
                    continue
                path = "../img/letters/{}.png".format(c)
                show_img(path,0.5)
        time.sleep(1)
# Quick test to make sure files exist
# for line in dict_lines:
#     split = line.split()
#     file = dict_dir + split[1]
#     try:
#         open(file, "r")
#     except:
#         print("{} DOES NOT EXIST".format(file))
|
from django.contrib.auth.models import User
from django.db.models import Q
from tasktrack.models import Collaborators
def get_username_from_userid(user_id):
    """Return the username for the User with primary key ``user_id``."""
    user = User.objects.get(id=user_id)
    return user.username
def get_user_notifications(user_id):
    """Return pending collaboration notifications for a user.

    Selects Collaborators rows with status=0 that involve the user on
    either side but were not actioned by the user themselves.  Each
    returned dict carries the row id and the *other* user's username.
    """
    current_user = User.objects.get(id=user_id)
    raw_notifications = Collaborators.objects.filter(Q(user_id_1=current_user)|Q(user_id_2=current_user), ~Q(action_user_id=current_user),status=0).values()
    raw_notifications = list(raw_notifications)
    username_cache = {}
    notifications = []
    for notif in raw_notifications:
        tmp = {}
        tmp['id'] = notif['id']
        # Whichever of the two collaborator ids is not ours is the peer
        other_user_id = list(filter(lambda x: int(x)!=int(user_id) , [notif.get('user_id_1_id'),notif.get('user_id_2_id')]))[0]
        if other_user_id not in username_cache:
            other_username = get_username_from_userid(other_user_id)
            # BUG FIX: the cache was never populated, so every
            # notification triggered a fresh username query.
            username_cache[other_user_id] = other_username
        else:
            other_username = username_cache[other_user_id]
        tmp['username'] = other_username
        notifications.append(tmp)
    return notifications
|
from pathlib import Path
import zipfile
import io
import numpy as np
import holoviews as hv
from holoviews import opts
from holoviews import streams
import panel as pn
from PIL import Image
import cv2
hv.extension('bokeh')
def get_image_from_in_memory_zipfile(image_file_path, zipped_file):
    """Read one archived image and decode it as a grayscale array."""
    raw_bytes = zipped_file.read(image_file_path)
    pixel_buffer = np.frombuffer(raw_bytes, np.uint8)
    return cv2.imdecode(pixel_buffer, cv2.IMREAD_GRAYSCALE)
# Global variables
# zipped_file / image_file_names are (re)assigned by the FileInput
# callback below; the lambdas and _images_col read them.
zipped_file = None
image_file_names = []
num_image_rows = 9
max_num_images_to_display = 9
# Simplifying functions
file = lambda s: f"slices/{s}"
image = lambda f: get_image_from_in_memory_zipfile(file(f), zipped_file)
# Set up options for hv.Images objects
ymax, xmax = 1600, 2560
scale_factor = 8
bounds=(0, 0, xmax, ymax) # Coordinate system: (left, bottom, right, top)
options = {'cmap': 'gray',
           'clim': (0, 255),
           'aspect': 'equal',
           'frame_width': int(xmax/scale_factor),
           'frame_height': int(ymax/scale_factor),
          }
# Create and organize basic elements in layout
file_input = pn.widgets.FileInput()
item_selector = pn.widgets.MultiSelect()
controls = pn.Column(file_input, "")
layout = pn.Row(controls, pn.Column(""))
# print(layout)
# Action when zip file is selected
@pn.depends(file_contents=file_input, watch=True)
def _image_selector(file_contents):
    """Open the uploaded zip in memory and populate the image selector.

    Collects the names of all PNGs under 'slices/', pre-selects the
    first two and drops the MultiSelect into the layout.
    """
    global zipped_file
    zipped_file = zipfile.ZipFile(io.BytesIO(file_contents))
    global image_file_names
    # BUG FIX: str.strip('slices/') removes any leading/trailing run of
    # the characters {s,l,i,c,e,/} -- mangling names such as
    # 'slices/slice.png' (-> '.png').  Slice off the literal prefix.
    prefix = 'slices/'
    image_file_names = sorted([
        f.filename[len(prefix):] for f in zipped_file.filelist \
        if f.filename.startswith(prefix) and f.filename.endswith('.png')
    ])
    initially_selected_files = image_file_names[:2].copy()
    # Set up selector object
    item_selector.options = image_file_names
    item_selector.value = initially_selected_files
    item_selector.size = 25
    item_selector.width = 120
    # Place selector object in proper position in layout
    layout[0][1] = item_selector
# Re-make Panel object containing images when different files are selected
@pn.depends(selected_file_names=item_selector, watch=True)
def _images_col(selected_file_names):
    """Rebuild the image grid whenever the file selection changes.

    Lays out up to max_num_images_to_display images in columns of
    num_image_rows, then swaps the grid into the layout.
    """
    n_tot = min(len(selected_file_names), max_num_images_to_display)
    n_rows = num_image_rows
    n_cols, remainder = divmod(n_tot, n_rows)
    if remainder != 0:
        n_cols += 1
    # Set up Row and Columns with images
    row = pn.Row()
    for col_i in range(n_cols):
        col = pn.Column()
        for row_i in range(n_rows):
            idx = col_i * n_rows + row_i
            if idx == n_tot:
                break
            name = selected_file_names[idx]
            col.append(
                hv.Image(image(name), bounds=bounds).opts(title=name, **options)
                # f"{name}"
            )
        row.append(col)
    # Replace appropriate spot in layout with new pn.Row object containing selected images
    layout[1] = row
# layout.show()
layout.servable()
|
import os
import sys
import shlex
import six
# Wire encoding; overridable via the MOCKET_ENCODING environment variable
encoding = os.getenv("MOCKET_ENCODING", 'utf-8')
# Interpreter-appropriate text/bytes type aliases (via six)
text_type = six.text_type
byte_type = six.binary_type
basestring = six.string_types
PY2 = sys.version_info[0] == 2
if PY2:
    from BaseHTTPServer import BaseHTTPRequestHandler
    from urlparse import urlsplit, parse_qs, unquote
    def unquote_utf8(qs):
        """Python 2 unquote that accepts text or bytes and returns text."""
        if isinstance(qs, text_type):
            qs = qs.encode(encoding)
        s = unquote(qs)
        if isinstance(s, byte_type):
            return s.decode(encoding)
        else:
            return s
    FileNotFoundError = IOError
else:
    from http.server import BaseHTTPRequestHandler
    from urllib.parse import urlsplit, parse_qs, unquote as unquote_utf8
    # Re-bind the builtin so both branches export the same module names
    FileNotFoundError = FileNotFoundError
try:
    from json.decoder import JSONDecodeError
except ImportError:
    # Python < 3.5: the json module raises plain ValueError
    JSONDecodeError = ValueError
def encode_to_bytes(s, charset=encoding):
    """Return ``s`` as bytes, encoding text with ``charset`` first."""
    if isinstance(s, text_type):
        return byte_type(s.encode(charset))
    return byte_type(s)
def decode_from_bytes(s, charset=encoding):
    """Return ``s`` as text, decoding bytes with ``charset`` first."""
    if isinstance(s, byte_type):
        return text_type(s.decode(charset))
    return text_type(s)
def shsplit(s):
    """shlex.split on ``s`` normalised to the native str type.

    shlex on Python 2 wants bytes, on Python 3 it wants text.
    """
    normalised = encode_to_bytes(s) if PY2 else decode_from_bytes(s)
    return shlex.split(normalised)
|
# SPDX-FileCopyrightText: 2022 Dan Halbert for Adafruit Industries
#
# SPDX-License-Identifier: MIT
# DOESN'T WORK
import time
import board
import digitalio
def blink(pin, interval, count):
    """Blink an LED on ``pin`` ``count`` times, ``interval`` s per phase.

    Blocks until all blinks are done; the pin is released on exit via
    the context manager.
    """
    with digitalio.DigitalInOut(pin) as led:
        led.switch_to_output(value=False)
        for _ in range(count):
            for state in (True, False):
                led.value = state
                time.sleep(interval)
def main():
    """Blink two LEDs sequentially (demonstrates blocking behaviour)."""
    blink(board.D1, 0.25, 10)
    # DOESN'T WORK
    # Second LED blinks only after the first one is finished.
    blink(board.D2, 0.1, 20)
main()
|
import numpy as np
import pandas as pd
import random as rnd
from dsbase.KFoldDSBase import KFoldDSBase
def evaluateParams(X, y, k_fold, model_class, model_prefix_name, params_list, num_tries):
    """Run num_tries k-fold evaluations, one parameter set per try.

    Returns a dict mapping the try index to a tuple of
    (mean train score, mean test score, best model).
    """
    results = {}
    for attempt in range(num_tries):
        folds = KFoldDSBase(X, y, k_fold, model_class, model_prefix_name, params_list[attempt])
        folds.train()
        train_score, test_score = folds.getMeanScore()
        print('****** Result try',attempt,':',train_score,' / ',test_score)
        results[attempt] = (train_score, test_score, folds.getBestModel())
    return results
def randomElement(vector):
    """Return a uniformly random element of ``vector``."""
    index = rnd.randrange(0, len(vector))
    return vector[index]
def showSearchOptimumHyperParametersReport(tries):
    """Print train/test scores and the test/train ratio for each try."""
    for key in tries:
        train_score = tries[key][0]
        test_score = tries[key][1]
        print(key,':',train_score,'/',test_score,'(',test_score/train_score,')')
def getColumnsWithLessValue(df_columns, feature_importance_vector, level):
    """Select the columns whose importance is among the smallest values.

    Returns (value counts sorted by importance value, the columns whose
    importance falls within the (level+1) smallest distinct values,
    ordered by ascending importance).
    """
    counts = pd.Series(feature_importance_vector).value_counts().sort_index()
    # Total number of columns covered by the level+1 smallest values
    n_selected = sum(counts.iloc[i] for i in range(level + 1))
    ordered_columns = df_columns[feature_importance_vector.argsort()]
    return counts, ordered_columns[0:n_selected]
from django.template import Context
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from html2text import html2text
from tardis.tardis_portal.models import ExperimentParameter, ExperimentParameterSet, ParameterName, Schema
from tardis.tardis_portal.ParameterSetManager import ParameterSetManager
from tardis.apps.ands_register.publishing import PublishHandler
import rifcsprovider
class SchemaRifCsProvider(rifcsprovider.RifCsProvider):
    """RIF-CS metadata provider backed by MyTardis experiment parameters.

    Values (beamline, EPN, licensing, annotations, related info) are
    pulled out of experiment parameter sets identified by schema
    namespaces configured on the instance.
    """
    def __init__(self):
        self.namespace = None
        self.sample_desc_schema_ns = None
        self.related_info_schema_ns = settings.RELATED_INFO_SCHEMA_NAMESPACE
        self.creative_commons_schema_ns = 'http://www.tardis.edu.au/schemas/creative_commons/2011/05/17'
        self.annotation_schema_ns = 'http://www.tardis.edu.au/schemas/experiment/annotation/2011/07/07'
    def can_publish(self, experiment):
        """An experiment is publishable iff it has any public access."""
        return experiment.public_access != experiment.PUBLIC_ACCESS_NONE
    def is_schema_valid(self, experiment):
        """Always True for now (validation temporarily disabled)."""
        # NOTE: this queryset is lazy and never evaluated while the
        # early return below is in place.
        eps = ExperimentParameter.objects.filter(
            parameterset__experiment = experiment,
            name__schema__namespace = self.namespace)
        return True # TJD: temporary
        # if len(eps) > 0:
        #     schema = Schema.objects.get(namespace = self.namespace)
        #     return True
        # return False
    def get_beamlines(self, experiment):
        """Return the 'beamline' parameter values for the experiment."""
        # sch = Schema.objects.get(namespace=self.namespace)
        # param = ParameterName.objects.get(schema=sch, name='beamline')
        # res = ExperimentParameter.objects.get(parameterset__experiment = experiment, name=param)
        # return res.string_value
        return self._get_params('beamline', self.namespace, experiment)
    def get_proposal_id(self, experiment):
        """Return the EPN (proposal id); raises if absent."""
        sch = Schema.objects.get(namespace=self.namespace)
        param = ParameterName.objects.get(schema=sch, name='EPN')
        res = ExperimentParameter.objects.get(parameterset__experiment = experiment, name=param)
        return res.string_value
    def get_description(self, experiment):
        """Custom publish-time description, else the experiment's own."""
        phandler = PublishHandler(experiment.id)
        desc = phandler.custom_description()
        if not desc:
            desc = experiment.description
        return self.format_desc(desc)
    def format_desc(self, desc):
        """Convert HTML descriptions to plain text and strip whitespace."""
        formatted_desc = desc
        if self._is_html_formatted(desc):
            formatted_desc = html2text(desc)
        return formatted_desc.strip()
    def get_authors(self, experiment):
        """Custom author list as markdown bullets, else the investigators."""
        phandler = PublishHandler(experiment.id)
        authors = phandler.custom_authors()
        if authors:
            return "* " + "\n* ".join(authors)
        else:
            return self.get_investigator_list(experiment)
    def get_url(self, experiment, server_url):
        """Only public experiments can show the direct link to the experiment
        in the rif-cs"""
        if experiment.public_access != experiment.PUBLIC_ACCESS_NONE:
            return "%s/experiment/view/%s/" % (server_url, experiment.id)
    def get_investigator_list(self, experiment):
        """All experiment authors as a markdown bullet list."""
        authors = [a.author for a in experiment.experimentauthor_set.all()]
        return "* " + "\n* ".join(authors)
    def get_sample_description_list(self, experiment):
        """All SampleDescription values joined by newlines."""
        sch = Schema.objects.get(namespace=self.sample_desc_schema_ns)
        params = ParameterName.objects.get(schema=sch, name='SampleDescription')
        descriptions = [x.string_value for x in
                        ExperimentParameter.objects.filter(
                            parameterset__experiment=experiment, name=params)]
        return "\n".join(descriptions)
    def get_anzsrcfor_subjectcodes(self, experiment):
        """ANZSRC FoR subject codes from the annotation schema."""
        return self._get_params("anzsrcfor_codes", self.annotation_schema_ns, experiment)
    def get_local_subjectcodes(self, experiment):
        """Local subject codes from the annotation schema."""
        return self._get_params("local_subject_codes", self.annotation_schema_ns, experiment)
    def get_notes(self, experiment):
        """Free-text experiment notes from the annotation schema."""
        return self._get_params("exp_notes", self.annotation_schema_ns, experiment)
    def get_address(self, experiment):
        """Experiment address from the annotation schema (or None)."""
        return self._get_param("exp_address", self.annotation_schema_ns, experiment)
    def get_license_uri(self, experiment):
        """Creative Commons license URI (or None)."""
        return self._get_param("license_uri", self.creative_commons_schema_ns, experiment)
    def get_license_title(self, experiment):
        """Creative Commons license name (or None)."""
        return self._get_param("license_name", self.creative_commons_schema_ns, experiment)
    def get_related_info_list(self, experiment):
        """One dict per related-info parameter set (title/notes/url)."""
        related_info_dicts = []
        # Get all the titles, notes and urls belonging to that experiment
        sch = Schema.objects.get(namespace=self.related_info_schema_ns)
        exp_params = ExperimentParameter.objects.filter(name__schema=sch, parameterset__experiment=experiment)
        selected_values = exp_params.values('parameterset__id', 'string_value', 'name__name')
        # Get the list of unique parameterset ids in the param set
        ids = [x['parameterset__id'] for x in selected_values]
        uniq_ids = list(set(ids))
        for id in uniq_ids:
            # Get the title, notes and url belonging to a specific parameter set
            related_info_params = [x for x in selected_values if x['parameterset__id'] == id]
            related_info_dicts.append(self._create_related_info_dict(related_info_params))
        return related_info_dicts
    def get_group(self):
        return settings.RIFCS_GROUP
    def get_located_in(self):
        return settings.RIFCS_MYTARDIS_KEY
    def _create_related_info_dict(self, related_info_params):
        """Collapse (name, value) parameter rows into a single dict."""
        # Local renamed from 'dict', which shadowed the builtin
        info = {}
        for x in related_info_params:
            info[x['name__name']] = x['string_value']
        return info
    def get_rifcs_context(self, experiment):
        """Assemble the template Context used to render the RIF-CS record.

        Schema.DoesNotExist from optional schemas is swallowed so that
        experiments without those parameter sets still render.
        """
        c = Context({})
        beamlines = self.get_beamlines(experiment)
        c['blnoun'] = 'beamline'
        c['experiment'] = experiment
        c['beamlines'] = beamlines
        try:
            c['sample_description_list'] = self.get_sample_description_list(experiment)
        except Schema.DoesNotExist:
            pass
        c['investigator_list'] = self.get_authors(experiment)
        # NOTE: license_title/license_uri used to be assigned twice with
        # identical values; the duplicates were removed.
        c['license_title'] = self.get_license_title(experiment)
        c['license_uri'] = self.get_license_uri(experiment)
        c['description'] = self.get_description(experiment)
        c['anzsrcfor'] = self.get_anzsrcfor_subjectcodes(experiment)
        c['localcodes'] = self.get_local_subjectcodes(experiment)
        c['address'] = self.get_address(experiment)
        try:
            c['related_info_list'] = self.get_related_info_list(experiment)
        except Schema.DoesNotExist:
            pass
        c['group'] = self.get_group()
        try:
            c['proposal_id'] = self.get_proposal_id(experiment)
        except Schema.DoesNotExist:
            pass
        try:
            c['located_in'] = self.get_located_in()
        except AttributeError:
            pass
        c['rights'] = []
        c['access_rights'] = []
        return c
    def _get_param(self, key, namespace, experiment):
        """Single value of *key* in *namespace* for the experiment.

        Returns a list when the key occurs multiple times, None when
        the parameter does not exist or there is no parameter set.
        """
        parameterset = ExperimentParameterSet.objects.filter(
            schema__namespace=namespace,
            experiment__id=experiment.id)
        if len(parameterset) > 0:
            psm = ParameterSetManager(parameterset=parameterset[0])
            try:
                return psm.get_param(key, True)
            except MultipleObjectsReturned:
                return psm.get_params(key, True)
            except ObjectDoesNotExist:
                return None
    def _get_params(self, key, namespace, experiment):
        """All values of *key* in *namespace*; [] if no parameter set."""
        parameterset = ExperimentParameterSet.objects.filter(
            schema__namespace=namespace,
            experiment__id=experiment.id)
        if len(parameterset) > 0:
            psm = ParameterSetManager(parameterset=parameterset[0])
            return psm.get_params(key, True)
        else:
            return []
|
def sol():
    """Read n from stdin and print the n-th triangular number.

    Uses integer floor division so the result stays exact for very
    large n; the previous int(n * (n + 1) / 2) went through float
    division and lost precision once n*(n+1) exceeded 2**53.
    """
    n = int(input())
    print(n * (n + 1) // 2)
if __name__ == "__main__":
    sol()
|
from __future__ import annotations
import random
from typing import Dict, List, Tuple, Union, Iterator, Any, TYPE_CHECKING
import tcod
import numpy as np #type: ignore
import entity_factories as ef
from game_map import GameMap
import tile_types
if TYPE_CHECKING:
from engine import Engine
from entity import Entity
# Item distribution ======================================================
# Define weights for items/item families, difficulty-dependant
# Heuristic: 100 is "typical" weight, so 50 is half as common etc.
# TODO: Make a distribution of individual commands, rather than amulets,
# so that it can be re-used for spellbooks, scrolls, etc.
# TODO Should be based on item-assigned "rarity" or "value," rather than
# these vague classes
# Maps minimum difficulty -> weighted (entity-or-family, weight) pairs.
item_chances: Dict[int,List[Tuple[Union[Entity,ef.Family],int]]] = {
    0: [(ef.moderate_item,100),
        (ef.good_item,20),
       ],
    2: [(ef.good_item,50)],
    5: [(ef.good_item,100),
        (ef.great_item,30),
       ],
    7: [(ef.great_item,50),
        (ef.moderate_item,50)
       ],
    10: [(ef.amazing_item,50),
        ],
}
# Same shape as item_chances (annotation fixed: entries are
# (entity-or-family, weight) pairs, so the Tuple needs the int).
enemy_chances: Dict[int,List[Tuple[Union[Entity,ef.Family],int]]] = {
    0:[(ef.nano,100)],
    1:[(ef.ed,100)],
    2:[(ef.sed,100),(ef.ed,50)],
    3:[(ef.gedit,100),(ef.ed,10),(ef.nano,10)],
    4:[(ef.needle,50),(ef.vimic,5)],
    5:[(ef.needle,100)],
    6:[(ef.vimic,30)],
    8:[(ef.emacs,50),(ef.vimic,50)],
    10:[(ef.vimpire,50)],
    12:[(ef.emax,200)],
}
def sample_from_dist(
    dist:Dict[int,List[Tuple[Union[Any,ef.Family],int]]],
    k:int, difficulty:int) -> List[Any]:
    """ Return k items sampled from the given distribution.

    Keys are processed in ascending order, stopping at the first key
    above *difficulty*; higher-key entries override earlier weights for
    the same item.  Iterating sorted keys (rather than insertion order,
    as before) makes the result independent of how the dict literal was
    written; for the ascending dicts used in this module the behaviour
    is unchanged.
    """
    weights = {}
    for key in sorted(dist):
        if key > difficulty:
            break
        weights.update({item: weight for item, weight in dist[key]})
    items = list(weights.keys())
    weight_list = list(weights.values())
    return random.choices(items,weights=weight_list,k=k)
# Number of enemies, items, and landmines
# Each table maps a difficulty threshold to a value; get_level_val()
# picks the entry with the highest threshold <= current level.
enemies_per_level = [
        (0,6),
        (2,7),
        (4,8), # After this point, placing more enemies is probably excessive
]
items_per_level = [
        (0,3),
        (5,4),
]
traps_per_level = [
        (0,0),
        (1,1),
        (3,2),
        (6,3),
        (9,4),
]
# Probability that a placed item also gets a landmine under it
landmine_chance_per_level = [
        (0,0),
        (1,0.05),
        (2,0.1),
        (5,0.15),
        (7,0.2)
]
# Helper for interpreting these lists:
def get_level_val(stat_list:List[Tuple[int,int]],level:int):
    """Value of the entry with the highest threshold <= *level*.

    *stat_list* is assumed sorted by ascending threshold; returns 0 when
    every threshold exceeds *level*.
    """
    result = 0
    for threshold, value in stat_list:
        if threshold > level:
            break
        result = value
    return result
# Map generation ========================================================
class RectangularRoom:
    """Axis-aligned room; (x1, y1) inclusive corner, (x2, y2) = corner
    plus size.  The outermost ring of tiles is treated as wall."""
    def __init__(self, x:int, y:int, width:int, height:int):
        self.x1 = x
        self.y1 = y
        self.x2 = x + width
        self.y2 = y + height
    @property
    def center(self) -> Tuple[int,int]:
        """Integer centre coordinate of the room."""
        return (self.x1 + self.x2)//2, (self.y1 + self.y2)//2
    @property
    def inner(self) -> Tuple[slice,slice]:
        """2D slice index of the walkable interior (walls excluded)."""
        return slice(self.x1+1,self.x2), slice(self.y1+1,self.y2)
    @property
    def inner_list(self) -> List[Tuple[int,int]]:
        """ Inner, but as a list of tuples."""
        coords = []
        for col in range(self.x1+1,self.x2):
            for row in range(self.y1+1,self.y2):
                coords.append((col,row))
        return coords
    def intersects(self, other: RectangularRoom) -> bool:
        """ Return true if this room overlaps with other room. """
        x_overlap = self.x1 <= other.x2 and self.x2 >= other.x1
        y_overlap = self.y1 <= other.y2 and self.y2 >= other.y1
        return x_overlap and y_overlap
def tunnel_between(start:Tuple[int,int],
        end:Tuple[int,int],diagonal=False) -> Iterator[Tuple[int,int]]:
    """ Give a path between two rooms, either L-shaped, or stright-line,
    depending on `diagonal`.
    """
    (x1,y1),(x2,y2) = start,end
    if diagonal:
        # Direct line, plus the same line shifted one column right
        # (presumably to widen the tunnel -- TODO confirm)
        yield from tcod.los.bresenham(start,end)
        yield from tcod.los.bresenham((x1+1,y1),(x2+1,y2))
        return
    # L-shape: pick one of the two possible corner tiles at random
    if random.random() < 0.5:
        corner = (x2,y1)
    else:
        corner = (x1,y2)
    yield from tcod.los.bresenham(start,corner)
    yield from tcod.los.bresenham(corner,end)
# Base class for generating levels
class LevelGenerator:
    """Abstract base for level generators.

    Subclasses implement room_mask() (or override generate() entirely);
    the default generate() pipeline carves the floor from the mask, then
    places the player, stairs, items, enemies and traps in that order.
    """
    def __init__(self,name:str):
        self.name=name
        self.difficulty=1 # Should be set by "set difficulty" function at time of generation
        # TODO Either set num_items etc. here, or remove this function from
        # the class entirely and just pass difficulty in to the various
        # sub-functions manually.
    def room_mask(self,shape:Tuple[int,int]) -> np.ndarray:
        """ Should return a boolean array that is True for
        walkable tiles and False otherwise.
        Using a boolean mask rather than directly returning a gamemap makes
        it easier to isolate the procedural generation into a library that
        could be used for other things, maybe?"""
        raise NotImplementedError()
    def place_items(self,dungeon:GameMap) -> None:
        """ Place items in the dungeon."""
        # TODO possible landmine under item at higher levels?
        num_items = get_level_val(items_per_level,self.difficulty)
        landmine_chance = get_level_val(landmine_chance_per_level,self.difficulty)
        for item in sample_from_dist(item_chances,k=num_items,
                difficulty=self.difficulty):
            # Families are sampled down to a concrete entity first
            if isinstance(item,ef.Family):
                item = item.sample()
            location = dungeon.place_randomly(item,spawn=True)
            # Occasionally boobytrap the item's tile
            if random.random() < landmine_chance:
                ef.landmine.spawn(dungeon,*location)
    def place_enemies(self,dungeon:GameMap) -> None:
        """ Place enemies in the dungeon."""
        # TODO Avoid duplicated code between this and place_items
        #   (Sampling from families should happen in sample_from_dist.)
        num_enemies = get_level_val(enemies_per_level,self.difficulty)
        for enemy in sample_from_dist(enemy_chances,k=num_enemies,
                difficulty=self.difficulty):
            if isinstance(enemy,ef.Family):
                enemy = enemy.sample()
            # Keep spawns away from the player's starting position
            dungeon.place_randomly(enemy,spawn=True,
                    stay_away_center=dungeon.engine.player.pos,
                    stay_away_radius=9)
    def place_traps(self,dungeon:GameMap) -> None:
        # Trap count scales with difficulty via traps_per_level
        num_traps = get_level_val(traps_per_level,self.difficulty)
        for i in range(num_traps):
            dungeon.place_randomly(ef.landmine,spawn=True)
    def place_player(self,dungeon:GameMap,upstairs:bool=True) -> None:
        # The player's start tile doubles as the up staircase (or, on the
        # top level, a victory altar)
        dungeon.place_randomly(dungeon.engine.player,spawn=False)
        if upstairs:
            location = dungeon.engine.player.pos
            dungeon.upstairs_location = location
            dungeon.tiles[location] = tile_types.up_stairs
        elif self.difficulty == 0:
            # Top level, place a victory altar
            location = dungeon.engine.player.pos
            ef.altar.spawn(dungeon,*location)
    def place_stairs(self,dungeon:GameMap) -> None:
        # Down stairs are kept at least 15 tiles from the player
        location = dungeon.get_random_navigable(dungeon.engine.player,
                stay_away_center=dungeon.engine.player.pos,
                stay_away_radius=15)
        dungeon.downstairs_location = location
        dungeon.tiles[location] = tile_types.down_stairs
    def generate(self,shape:Tuple[int,int],
            engine:Engine,difficulty:int,upstairs:bool=True) -> GameMap:
        """Build and populate a GameMap of the given shape."""
        map_width, map_height = shape
        # Set difficulty first, as other functions may use it
        self.difficulty = difficulty
        player = engine.player
        dungeon = GameMap(engine,map_width, map_height,entities=[player],
                name=f"{self.name} (L{self.difficulty})")
        mask = self.room_mask(shape)
        dungeon.tiles[mask] = tile_types.floor # Set floor based on mask
        # Place player first, so that we can (maybe) ensure you don't start
        # right next to monsters or the exit.
        self.place_player(dungeon,upstairs=upstairs)
        self.place_stairs(dungeon)
        self.place_items(dungeon)
        self.place_enemies(dungeon)
        self.place_traps(dungeon)
        return dungeon
class BasicDungeon(LevelGenerator):
    """Classic rooms-and-tunnels generator built from random rectangles."""
    rooms:List[RectangularRoom]
    # NOTE: annotation fixed -- the default is a pair of (min, max) pairs,
    # not a flat 4-tuple.
    def __init__(self,*args,
            room_size_range:Tuple[Tuple[int,int],Tuple[int,int]]=((8,12),(8,12)), # min_w, max_w, min_h, max_h,
            max_rooms:int=20,
            diagonal=False,
            invert=False,
            do_tunnels=True,
            allow_overlap=False):
        super().__init__(*args)
        self.room_size_range=room_size_range
        self.max_rooms=max_rooms
        # TODO Later, will set the following based on difficulty
        self.num_items_range=(2,4)
        self.num_enemies_range=(2,4)
        self.allow_overlap = allow_overlap
        self.diagonal = diagonal # Whether to generate diagonal tunnels
        self.invert = invert # Invert floor and all
        self.do_tunnels = do_tunnels
    def room_mask(self,shape) -> np.ndarray:
        """ Should return a boolean array that is True for
        walkable tiles and False otherwise."""
        map_width,map_height = shape
        mask = np.full(shape,False)
        # Add some rooms
        rooms = []
        for r in range(self.max_rooms):
            (min_w,max_w),(min_h,max_h) = self.room_size_range
            room_width = random.randint(min_w,max_w)
            room_height = random.randint(min_h,max_h)
            x = random.randint(0,map_width - room_width - 1)
            y = random.randint(0,map_height - room_height - 1)
            new_room = RectangularRoom(x,y,room_width,room_height)
            # Overlapping candidates are simply discarded (fewer rooms),
            # unless overlap is explicitly allowed
            if not self.allow_overlap:
                if any(new_room.intersects(other_room) for other_room in rooms):
                    continue
            mask[new_room.inner] = True
            if len(rooms) > 0:
                if self.do_tunnels:
                    # Dig tunnel to previous room.
                    for x,y in tunnel_between(rooms[-1].center,new_room.center,
                            diagonal=self.diagonal):
                        mask[x,y] = True
            rooms.append(new_room)
        self.rooms = rooms
        if self.invert:
            mask = np.logical_not(mask)
        return mask
class TestDungeon(LevelGenerator):
    """ A manually created dungeon, for testing."""
    def generate(self,shape:Tuple[int,int],engine:Engine,
            difficulty:int):
        # Fixed two-room layout with a hand-picked set of entities;
        # commented spawns are kept for quick ad-hoc testing.
        map_width,map_height = shape
        player = engine.player
        dungeon = GameMap(engine,map_width, map_height,entities=[player])
        player.place((40,26),dungeon)
        ef.nano.spawn(dungeon,29,19)
        ef.ed.spawn(dungeon,28,19)
        ef.gedit.spawn(dungeon,29,21)
        #ef.sed.spawn(dungeon,28,20)
        #ef.needle.spawn(dungeon,28,20)
        #ef.vimic.spawn(dungeon,28,20)
        #ef.vimpire.spawn(dungeon,28,20)
        #ef.emacs.spawn(dungeon,28,20)
        #ef.emax.spawn(dungeon,28,20)
        ef.amulet_of_yendor.spawn(dungeon,39,24)
        ef.magnet.spawn(dungeon,38,22)
        ef.scrolls[1].spawn(dungeon,39,23)
        ef.scrolls[2].spawn(dungeon,39,23)
        ef.scrolls[5].spawn(dungeon,39,23)
        ef.scrolls[0].spawn(dungeon,39,23)
        ef.bat_ears.spawn(dungeon,41,20)
        ef.landmine.spawn(dungeon,42,25)
        ef.altar.spawn(dungeon,37,25)
        # Two rooms joined by a straight corridor
        room_1 = RectangularRoom(x=10,y=10,width=20,height=15)
        room_2 = RectangularRoom(x=35,y=15,width=10,height=15)
        dungeon.tiles[room_1.inner] = tile_types.floor
        dungeon.tiles[room_2.inner] = tile_types.floor
        dungeon.tiles[15:36,18] = tile_types.floor
        return dungeon
class TutorialDungeon(LevelGenerator):
    """A manually created dungeon that teaches basic commands room by room."""
    def generate(self,shape:Tuple[int,int],engine:Engine,
            difficulty:int,upstairs=False):
        """Build and return the tutorial map.

        Neither ``difficulty`` nor ``upstairs`` is used by this generator;
        they are accepted for interface compatibility.
        """
        map_width,map_height = shape
        player = engine.player
        dungeon = GameMap(engine,map_width, map_height,entities=[player])
        player.place((3,3),dungeon)
        # Fixed room layout; each room teaches one mechanic (see trailing comments).
        rooms = []
        rooms.append(RectangularRoom(x=1,y=1,width=20,height=8)) # move
        rooms.append(RectangularRoom(x=23,y=1,width=10,height=6)) # attack
        rooms.append(RectangularRoom(x=35,y=1,width=10,height=6)) # attack
        rooms.append(RectangularRoom(x=35,y=13,width=10,height=6)) # yank
        rooms.append(RectangularRoom(x=35,y=24,width=10,height=6)) # help
        rooms.append(RectangularRoom(x=20,y=16,width=10,height=6)) # middle
        rooms.append(RectangularRoom(x=20,y=10,width=10,height=6)) # observe
        rooms.append(RectangularRoom(x=1,y=10,width=14,height=10)) # fight
        rooms.append(RectangularRoom(x=3,y=22,width=10,height=6)) # yank corpse
        rooms.append(RectangularRoom(x=3,y=30,width=10,height=6)) # eat corpse
        rooms.append(RectangularRoom(x=15,y=30,width=10,height=6)) # end
        # TODO Add messages somehow
        # Dig out rooms
        for room in rooms:
            dungeon.tiles[room.inner] = tile_types.floor
        # Tunnels between consecutive rooms.
        for i, (room1, room2) in enumerate(zip(rooms[:-1],rooms[1:])):
            if i == 4:
                # Skip tunnel for demonstrating power of M
                continue
            for x,y in tunnel_between(room1.center,room2.center):
                dungeon.tiles[x,y] = tile_types.floor
        # Room-sized messages: (room index, message text) — every inner tile
        # of the indexed room gets a copy of the message entity.
        messages:List[Tuple[int,str]] = [
            (1,"Use d + movement to delete (attack) the dummy"),
            (2,"Figure out how to attack this dummy"),
            (3,"Use y + movement to yank (pick up) the amulet"),
            (4,"Type :help M (and hit <enter>) to learn about M"),
            (5,"Many more commands await!"),
            (6,"Look at something (learn how using :help look)"),
            (7,"Fight the ed!"),
            (8,"Yank the ed corpse."),
            (9,"Eat the ed corpse (learn how using :help eat)."),
            (10,"You are ready. Use :new to start a new game."),
        ]
        for i, s in messages:
            message = ef.tutorial_message(s)
            for x,y in rooms[i].inner_list:
                message.spawn(dungeon,x,y)
        # Room 1 water and walls
        dungeon.tiles[8:10,2:9] = tile_types.water
        dungeon.tiles[15:16,2:7] = tile_types.wall
        dungeon.tiles[16:21,6] = tile_types.water
        # Per-tile movement hints for the first room.
        m1 = ef.tutorial_message("Use hjkl to move")
        m2 = ef.tutorial_message("Type 5l to move right by 5")
        m3 = ef.tutorial_message("Figure out how to cross this water")
        for j in range(2,9):
            for i in range(2,7):
                m1.spawn(dungeon,i,j)
            m2.spawn(dungeon,7,j)
        for i in range(15,21):
            for j in range(7,9):
                m3.spawn(dungeon,i,j)
        # Room 2 dummies
        ef.dummy.spawn(dungeon,28,4)
        # Room 3 island and dummy
        dungeon.tiles[39:42,3:6] = tile_types.water
        dungeon.tiles[40,4] = tile_types.floor
        ef.dummy.spawn(dungeon,40,4)
        # Room 4 item
        ef.amulet["M"].spawn(dungeon,*rooms[3].center)
        # Room 6 items
        ef.amulet["$"].spawn(dungeon,25,18)
        ef.amulet["f"].spawn(dungeon,23,18)
        ef.amulet["u"].spawn(dungeon,27,18)
        # Room 8 ed
        new_ed = ef.ed.spawn(dungeon,4,15)
        new_ed.corpse_drop_chance = 1 # Force the ed to drop a corpse
        new_ed.wandering = False # For convenience
        return dungeon
|
import sys
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import os
import numpy as np
import glob
sys.path.append("/home/smp16fm/forked_amrvac/amrvac/tools/python")
from amrvac_pytools.datfiles.reading import amrvac_reader
from amrvac_pytools.vtkfiles import read, amrplot
def subplot_animation(path2files, save_dir, dummy_name='', refiner=None,
                      text_x_pos=0.85, text_y_pos=0.01, time_start=0,
                      time_end=None, start_frame=0, fps=1, in_extension='png',
                      out_extension='avi'):
    '''
    For making movies with subplotting using polyplot for vtk files.
    Inputs:
        path2files - (str) give path to files (use * to select multple files).
        save_dir - (str) save location of the images and movies.
        dummy_name - (str) useful for picking out particular file names.
        refiner - (str) any path containing this substring is skipped.
        text_x_pos and text_y_pos - (float) location of time on plots.
        time_start - (int) starting point for reading vtk files.
        time_end - (int or None) last frame index; None means "all files".
        start_frame - (int) first frame for ffmpeg.
                      NOTE(review): the active ffmpeg command below does not
                      use start_frame — confirm whether it should.
        fps - (int) frames per second.
        in_extension - (str) expects input to png.
        out_extension - (str) decides files type the movie is.
    '''
    var_names = ["rho",
                 "v1",
                 "v2",
                 "p",
                 "b1",
                 "b2",
                 "trp1",
                 "T",
                 "Cs",
                 "beta",
                 "sch",
                 "e"]
    # One accessor per variable, aligned index-for-index with var_names.
    function = [
                lambda x: x.rho,
                lambda x: x.v1,
                lambda x: x.v2,
                lambda x: x.p,
                lambda x: x.b1,
                lambda x: x.b2,
                lambda x: x.trp1,
                lambda x: x.T,
                lambda x: x.Cs,
                lambda x: x.beta,
                lambda x: x.sch,
                lambda x: x.en,
                ]
    cmaps_colours = ['gist_heat',
                     'seismic',
                     'seismic',
                     'BuGn',
                     'seismic',
                     'hot',
                     'inferno',
                     'coolwarm',
                     'copper',
                     'bone',
                     'binary',
                     'BuGn']
    list_of_names = []
    list_of_paths = []
    list_of_full_dummy_paths = glob.glob((path2files+dummy_name+'*0000.vtu'))
    # Drop any paths containing the refiner term. (The previous
    # delete-while-shifting-indices loop was fragile; a filter is equivalent.)
    if refiner is not None:
        list_of_full_dummy_paths = [
            item for item in list_of_full_dummy_paths if refiner not in item
        ]
    for item in list_of_full_dummy_paths:
        list_of_names.append(item.split('/')[-1])
        dummy_path = item.split('/')[0:-1]
        list_of_paths.append(os.path.join(*dummy_path)+'/')
    for j in range(len(list_of_names)):
        name = list_of_names[j][0:-8]  # -8 clips trailing "0000.vtu"
        path2save_images = save_dir+'/'+name+"/images"
        path2save_movies = save_dir+'/'+name+"/movies"
        # Leading '/' restores the absolute path dropped by os.path.join above.
        filename = '/'+list_of_paths[j] + name
        # exist_ok avoids the earlier partial-failure bug where an existing
        # images/ directory raised FileExistsError before movies/ was created.
        os.makedirs(save_dir, exist_ok=True)
        os.makedirs(path2save_images, exist_ok=True)
        os.makedirs(path2save_movies, exist_ok=True)
        # Load one snapshot (index 5) and the initial state (index 0) to fix
        # the colour ranges for the whole animation.
        ds = read.load_vtkfile(5, file=filename, type='vtu')
        ds0 = read.load_vtkfile(0, file=filename, type='vtu')
        rho_range = [min(ds.rho), max(ds.rho)]
        maxv1 = np.max(abs(ds.v1))
        maxv1 -= 0.2*maxv1
        maxv2 = np.max(abs(ds.v2))
        maxv2 += 0.5*maxv2
        v1_range = [-maxv1, maxv1]  # [-6.5e6, 6.5e6] #
        v2_range = [-maxv2, maxv2]  # [-8e6, 8e6] #
        p_range = [min(ds.p), max(ds.p)]
        maxb1 = 0.05*np.max(abs(ds.b1))
        maxb2 = np.max(abs(ds0.b2))
        b1_range = [-maxb1, maxb1]  # [-60, 60] #
        b2_range = [-0.5*maxb2, maxb2]  # [-60, 60] #
        trp1_range = [0, 100]
        T_range = [8e3, 2e6]
        Cs_range = [min(ds.Cs), max(ds.Cs)]
        beta_range = [min(ds.beta), max(ds.beta)]
        sch_range = [min(ds.sch), max(ds.sch)]
        en_range = [min(ds.en), max(ds.en)]
        cmap_ranges = [
                       rho_range,
                       v1_range,
                       v2_range,
                       p_range,
                       b1_range,
                       b2_range,
                       trp1_range,
                       T_range,
                       Cs_range,
                       beta_range,
                       sch_range,
                       en_range,
                       ]
        if time_end is None:
            number_of_files = len(glob.glob(list_of_full_dummy_paths[j][0:-7]+'*.vtu'))
        else:
            number_of_files = time_end
        for k in range(time_start, number_of_files):
            ds = read.load_vtkfile(k, file=filename, type='vtu')
            fig, ((ax11, ax12, ax13, ax14), (ax21, ax22, ax23, ax24), (ax31, ax32, ax33, ax34)) = plt.subplots(nrows=3, ncols=4, figsize=(18, 10))
            axis_list = [ax11, ax12, ax13, ax14,
                         ax21, ax22, ax23, ax24,
                         ax31, ax32, ax33, ax34]
            for i in range(len(axis_list)):
                p1 = amrplot.polyplot(function[i](ds),
                                      ds,
                                      clear=False,
                                      fig=fig,
                                      axis=axis_list[i],
                                      min=cmap_ranges[i][0],
                                      max=cmap_ranges[i][-1],
                                      orientation="vertical",
                                      function=function[i],
                                      cmap=cmaps_colours[i],
                                      title=var_names[i],
                                      yrange=[0, 1e9],
                                      xrange=[-6e8, 6e8],
                                      log_info=False
                                      )
            spacer = 0.4
            plt.subplots_adjust(wspace=spacer, hspace=spacer)
            time = ds.time
            time_text = 'Time: '+str(round(time, 2))+' s'
            fig.text(text_x_pos, text_y_pos, time_text, size=14)
            plt.savefig(path2save_images+'/'+name+str(k).zfill(4)+'.png')
            # close (not just clf) — a new figure is created every frame, so
            # clf() alone leaked one open figure per frame.
            plt.close(fig)
        # NOTE(review): os.system with a concatenated string is fragile if
        # paths contain spaces; consider subprocess.run([...]) if that can occur.
        image_2_video = 'ffmpeg -framerate '+str(fps)+' -i \
        '+os.path.join(path2save_images, name+'%4d.'+in_extension)+' -y \
        '+os.path.join(path2save_movies, name+'.'+out_extension)
        print(image_2_video)
        os.system(image_2_video)
|
"""
ID: tony_hu1
PROG: pprime
LANG: PYTHON3
"""
import math
def generate_primes(smallest, largest):
    """Return a newline-terminated string of palindromic primes in
    [smallest, largest], ascending, found by scanning the whole range.

    Fixes over the original:
    - `if not i in odd` tested the whole number against {0,2,4,5,6,8},
      not its last digit, so the intended skip never fired for i >= 10;
    - 2 was always skipped and 5 was only emitted when smallest == 5;
    - the old helper reported 1 as prime.
    """
    def _is_prime(n):
        # Trial division — inputs stay small enough for this to be fast.
        if n < 2:
            return False
        if n < 4:
            return True
        if n % 2 == 0:
            return False
        d = 3
        while d * d <= n:
            if n % d == 0:
                return False
            d += 2
        return True

    found = []
    for i in range(smallest, largest + 1):
        digits = str(i)
        if digits == digits[::-1] and _is_prime(i):
            found.append(digits)
    return ''.join(p + '\n' for p in found)
def generate_pals(smallest, largest):
    """Return a newline-terminated string of all palindromic primes in
    [smallest, largest], in ascending order.

    Palindromes are built by mirroring a numeric "half" instead of the
    original per-digit-count copy-paste blocks. Like the original this
    covers palindromes up to 7 digits, which suffices for the USACO
    bound of 100,000,000: every even-length palindrome is divisible by
    11, so 11 is the only even-length palindromic prime and there are
    no 8-digit ones.

    Fixes over the original:
    - '7' was emitted whenever smallest <= 7 even if largest < 7;
    - 2 and 3 were never emitted, and 5 only when smallest == 5 exactly.
    """
    def _is_prime(n):
        # Trial division — candidates are at most 7 digits.
        if n < 2:
            return False
        if n < 4:
            return True
        if n % 2 == 0:
            return False
        d = 3
        while d * d <= n:
            if n % d == 0:
                return False
            d += 2
        return True

    found = []
    # One-digit palindromic primes.
    for p in (2, 3, 5, 7):
        if smallest <= p <= largest:
            found.append(p)
    # 11 is the only even-length palindromic prime (see docstring).
    if smallest <= 11 <= largest:
        found.append(11)
    # Odd-length palindromes (3, 5, 7 digits): mirror the leading half.
    for half_digits in (2, 3, 4):
        for half in range(10 ** (half_digits - 1), 10 ** half_digits):
            s = str(half)
            pal = int(s + s[-2::-1])
            if pal > largest:
                break  # within one width, palindromes grow with the half
            if pal >= smallest and _is_prime(pal):
                found.append(pal)
    return ''.join(str(p) + '\n' for p in found)
def is_prime(num):
    """Return True if num is a prime number.

    Fixes the original, which returned True for 0 and 1 (the loop body
    never ran) and crashed on negative input via math.sqrt.
    """
    if num < 2:
        return False
    upper = int(math.sqrt(num))
    for i in range(2, upper + 1):
        if num % i == 0:
            return False
    return True
def is_pal(num):
    """Return True if the decimal representation of num is a palindrome."""
    digits = str(num)
    return digits == digits[::-1]
# Read the query range from pprime.in and write all palindromic primes in
# that range to pprime.out (USACO I/O convention).
a = []
with open('pprime.in') as infile:
    for line in infile:
        a.append(line.rstrip())
smallest = int(a[0].split(' ')[0])
largest = int(a[0].split(' ')[1])
outstring = generate_pals(smallest, largest)
print(outstring)
# Context manager guarantees the output file is flushed and closed
# (the original left fout open).
with open('pprime.out', 'w') as fout:
    fout.write(outstring)
|
# Ivan Carvalho
# Solution to https://www.urionlinejudge.com.br/judge/problems/view/1387
#!/usr/bin/env python2.7
# encoding : utf-8
# NOTE: Python 2 source — uses raw_input() and the print statement, so it
# will not run under Python 3 without porting.
# Read space-separated integer pairs from stdin and print their sum,
# stopping at the "0 0" sentinel line.
while True:
	a,b = [int(i) for i in raw_input().split(" ")]
	if a== 0 and b == 0:
		break
	else:
		print a+b
|
# 1514. Path with Maximum Probability
# User Accepted:1828
# User Tried:3791
# Total Accepted:1908
# Total Submissions:9912
# Difficulty:Medium
# You are given an undirected weighted graph of n nodes (0-indexed),
# represented by an edge list where edges[i] = [a, b] is an undirected edge connecting
# the nodes a and b with a probability of success of traversing that edge succProb[i].
# Given two nodes start and end,
# find the path with the maximum probability of success to go from start to end
# and return its success probability.
# If there is no path from start to end, return 0.
# Your answer will be accepted if it differs from the correct answer by at most 1e-5.
# Example 1:
# Input: n = 3, edges = [[0,1],[1,2],[0,2]], succProb = [0.5,0.5,0.2], start = 0, end = 2
# Output: 0.25000
# Explanation: There are two paths from start to end,
# one having a probability of success = 0.2 and the other has 0.5 * 0.5 = 0.25.
# Example 2:
# Input: n = 3, edges = [[0,1],[1,2],[0,2]], succProb = [0.5,0.5,0.3], start = 0, end = 2
# Output: 0.30000
# Example 3:
# Input: n = 3, edges = [[0,1]], succProb = [0.5], start = 0, end = 2
# Output: 0.00000
# Explanation: There is no path between 0 and 2.
# Constraints:
# 2 <= n <= 10^4
# 0 <= start, end < n
# start != end
# 0 <= a, b < n
# a != b
# 0 <= succProb.length == edges.length <= 2*10^4
# 0 <= succProb[i] <= 1
# There is at most one edge between every two nodes.
from collections import defaultdict
from heapq import heappush, heappop
from typing import List
class Solution:
    """LeetCode 1514: maximum-probability path via Dijkstra's algorithm."""

    def maxProbability(self, n: int, edges: List[List[int]], succProb: List[float], start: int, end: int) -> float:
        """Return the maximum success probability of any start->end path.

        Parameters
        ----------
        n : number of nodes (0-indexed).
        edges : undirected edge list; edges[i] = [a, b].
        succProb : succProb[i] is the traversal probability of edges[i].
        start, end : endpoints of the sought path.

        Returns 0.0 when end is unreachable from start.
        """
        graph = defaultdict(set)
        for (x, y), z in zip(edges, succProb):
            graph[x].add((y, z))
            graph[y].add((x, z))
        prob = [0.0] * n
        prob[start] = 1.0
        # Heap entries are (-probability, node) so the most probable frontier
        # node pops first. The original used (node, -prob), which ordered the
        # heap by node id and degraded Dijkstra to slow label-correcting.
        heap = [(-1.0, start)]
        while heap:
            neg_prob, node = heappop(heap)
            curr = -neg_prob
            if curr < prob[node]:
                continue  # stale entry; a better probability was already found
            for nei, p in graph[node]:
                cand = curr * p
                if cand > prob[nei]:
                    prob[nei] = cand
                    heappush(heap, (-cand, nei))
        return prob[end]
|
#!/usr/bin/env python
#
# Jiao Lin
"""
compute multiple scattering
"""
def sqe(mpsqe, Ei):
    """Given a multiphonon S(Q,E) histogram, estimate the multiple
    scattering S(Q,E).

    Multiple scattering is assumed uniform along Q, so the multiphonon
    intensity is averaged over Q inside the dynamical range and that
    average is broadcast back along Q.

    Parameters
    ----------
    mpsqe :
        multiphonon S(Q,E) histogram; assumed to expose an intensity
        array ``I`` with Q on axis 0 and a ``copy()`` method — TODO
        confirm axis order against callers.
    Ei :
        incident energy, used to compute the dynamical-range mask.

    Note
    ----
    Mutates ``mpsqe.I`` in place (out-of-range bins are zeroed).
    """
    # multiple scattering (MS) is uniform along Q
    # so we want to compute the average S from multi-phonon
    # scattering and assign the value to MS result
    # first compute the mask
    from .sqe import dynamical_range_mask
    import numpy as np
    mask = dynamical_range_mask(mpsqe, Ei)
    # set outside to zero
    mpsqe.I[mask] = 0
    # average over axis 0. NOTE(review): if an entire slice is masked the
    # denominator is zero here (divide-by-zero warning / NaN) — confirm
    # inputs always keep part of each slice inside the dynamical range.
    aveS = mpsqe.I.sum(0)/np.logical_not(mask).sum(0)
    # build the result: broadcast the average along Q, NaN outside range
    mssqe = mpsqe.copy()
    mssqe.I[:] = aveS[np.newaxis, :]
    mssqe.I[mask] = np.nan
    return mssqe
|
# Copyright 2018
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import exceptions as nexception
from oslo_log import log as logging
from networking_ipvs._i18n import _
LOG = logging.getLogger(__name__)
class RevisionCannotWork(nexception.NeutronException):
    """Raised when the revision sub-system cannot work and a full data fetch is required."""
    message = _("Revision sub-system cannot work. Need to fetch all data.")
class ResourceNotFound(nexception.NotFound):
    """Raised when a resource of a given kind/id does not exist."""
    message = _("%(resource_kind)s kind of resource with id %(id)s not found.")
class VirtualServerEntityExists(nexception.Conflict):
    """Raised when a virtualserver with the same ip:port already exists on the network."""
    message = _("A virtualserver entity with %(listen_ip)s:%(listen_port)s on "
                "%(neutron_network_id)s already exists.")
class RealServerEntityExists(nexception.Conflict):
    """Raised when a realserver with the same ip:port already exists on the virtualserver."""
    message = _("A realserver entity with %(server_ip)s:%(server_port)s on "
                "%(ipvs_virtualserver_id)s already exists.")
class ResourceInUse(nexception.InUse):
    """Raised when a resource cannot be modified or deleted because it is in use."""
    message = _("Resource %(resource)s with id %(id)s is in use.")
class AdminStateUpCannotUpdateWithOtherAttr(nexception.BadRequest):
    """Raised when admin_state_up is updated together with other attributes."""
    message = _("Admin_state_up cannot be updated with other attributes. "
                "Will update admin_state_up, but ignore other attrs.")
class OnlyAdminCanSetOtherTenantQuota(nexception.BadRequest):
    """Raised when a non-admin tries to set another tenant's quota."""
    message = _("Only admin can set other tenant quota.")
class QuotaExceed(nexception.Conflict):
    """Raised when an operation would exceed the tenant's quota."""
    message = _("Quota exceed.")
|
import sys
from ..converter import Converter, ConversionError, ValidationError
class Ginkgo(Converter):
    """Convert Transcriptic samples.json to sample-set schema"""
    VERSION = '0.0.2'
    FILENAME = 'ginkgo_samples'

    def convert(self, input_fp, output_fp=None, verbose=True, config=None, enforce_validation=True):
        """Do the conversion by running a method in runner.py.

        Parameters
        ----------
        input_fp : input file pointer/path passed to convert_ginkgo.
        output_fp : optional output file target.
        verbose : forwarded to convert_ginkgo.
        config : dict or None; conversion options. Falls back to
            ``self.options`` when empty or None. (Replaces the previous
            mutable ``config={}`` default argument, an anti-pattern.)
        enforce_validation : forwarded to convert_ginkgo.
        """
        from .runner import convert_ginkgo
        # Empty dict and None are both treated as "not supplied",
        # matching the original `config != {}` check.
        passed_config = config if config else self.options
        return convert_ginkgo(self.targetschema, self.encoding, input_fp,
                              verbose=verbose,
                              config=passed_config,
                              output_file=output_fp,
                              enforce_validation=enforce_validation)
|
import csv

# Lokasi file CSV yang akan diperbarui.
file_path = 'D:/Programming/web/Python/Python-Basic-02/141-Memperbarui File CSV/myfile2.csv'
fieldnames = ['no','nama','profesi']

# Baca seluruh isi file CSV ke dalam list of dict.
with open(file_path, mode='r', newline='\n') as my_file_csv:
    tmp_file = list(csv.DictReader(my_file_csv))

# Tampilkan isi data sebelum diubah.
print('\nno \t nama \t\t profesi')
for data in tmp_file: print(f"{data['no']} \t {data['nama']} \t {data['profesi']}")

# Nilai pengganti untuk setiap kolom.
col = 'nama'
no = '1'
nama = 'diky indra h'
profesi = 'manager'
replacements = {'no': no, 'nama': nama, 'profesi': profesi}

# Ubah seluruh nilai (yang tidak kosong) pada kolom terpilih.
print(f"\nmengubah seluruh data pada kolom {col}")
for row in tmp_file:
    if row[col]:
        if col in replacements:
            row[col] = replacements[col]
        else:
            print('data tidak diketahui.')

# Tulis kembali data yang sudah diubah ke file.
with open(file_path, mode='w', newline='\n') as my_file_csv:
    writer = csv.DictWriter(my_file_csv, fieldnames)
    writer.writeheader()
    writer.writerows(tmp_file)
import datetime
from services.db.database import DbSingleton, Round, RoundImport
from services.rounds import Rounds
def test_round_import_with_lead_investor():
    """End-to-end check: a RoundImport row should be turned into a Round
    carrying the expected lead investor and amount."""
    db = DbSingleton.get_instance()
    db.prep_test()
    # create mock objects
    RoundImport.create(
        uuid='asdf',
        name='Test Name',
        type='person',
        permalink='test-name',
        cb_url='https://hi.com',
        rank=100,
        created_at=datetime.date.today(),
        updated_at=datetime.date.today(),
        country_code='US',
        # TODO: other properties
    )
    # TODO: create other mock objects
    # call the service you'll write
    Rounds().create_new_rounds()
    # NOTE: the type checker complains unless database=None is passed to
    # Peewee queries.
    cb_round = Round.select().first(database=None)
    assert cb_round.Lead_Investor__c == 'Nick Weber'
    assert cb_round.Amount__c > 500000
def test_round_import_with_unqualified_rounds():
    """Placeholder test (not yet implemented)."""
    # TODO: write a test to show that given two RoundImports, one with a qualifying `Amount___c` and another without
    # that the application properly skips the unqualified round import.
    pass
|
#!/usr/bin/python
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import distutils.spawn
import os
import re
import shutil
import subprocess
import pytest
from utils.common import (
build_image,
latest_build_artifact,
reboot,
run_after_connect,
determine_active_passive_part,
make_tempdir,
)
from utils.helpers import Helpers
@pytest.mark.commercial
@pytest.mark.min_mender_version("2.1.0")
class TestDeltaUpdateModule:
    """Integration tests for the commercial mender-binary-delta update module
    built via Yocto/bitbake."""

    @pytest.mark.only_with_image("ext4")
    def test_build_and_run_module(
        self, request, bitbake_variables, prepared_test_build, bitbake_image
    ):
        """Build an image with mender-binary-delta installed and verify the
        update module ends up in /usr/share/mender/modules/v3."""
        build_image(
            prepared_test_build["build_dir"],
            prepared_test_build["bitbake_corebase"],
            bitbake_image,
            ['IMAGE_INSTALL_append = " mender-binary-delta"'],
            [
                'BBLAYERS_append = " %s/../meta-mender-commercial"'
                % bitbake_variables["LAYERDIR_MENDER"]
            ],
        )
        image = latest_build_artifact(
            request, prepared_test_build["build_dir"], "core-image*.ext4"
        )
        # Inspect the module directory inside the ext4 image without mounting.
        output = subprocess.check_output(
            ["debugfs", "-R", "ls -p /usr/share/mender/modules/v3", image]
        ).decode()
        # Debugfs has output like this:
        # /3018/100755/0/0/mender-binary-delta/142672/
        # /3015/100755/0/0/rootfs-image-v2/1606/
        assert "mender-binary-delta" in [
            line.split("/")[5] for line in output.split("\n") if line.startswith("/")
        ]
    def do_install_mender_binary_delta(
        self,
        request,
        prepared_test_build,
        bitbake_variables,
        bitbake_image,
        connection,
        http_server,
        board_type,
        use_s3,
        s3_address,
    ):
        """Helper: build an image with mender-binary-delta, install it on the
        board, reboot and commit; return the installed .mender artifact path."""
        build_image(
            prepared_test_build["build_dir"],
            prepared_test_build["bitbake_corebase"],
            bitbake_image,
            ['IMAGE_INSTALL_append = " mender-binary-delta"'],
            [
                'BBLAYERS_append = " %s/../meta-mender-commercial"'
                % bitbake_variables["LAYERDIR_MENDER"]
            ],
        )
        image = latest_build_artifact(
            request, prepared_test_build["build_dir"], "core-image*.mender"
        )
        Helpers.install_update(
            image, connection, http_server, board_type, use_s3, s3_address
        )
        reboot(connection)
        run_after_connect("true", connection)
        connection.run("mender commit")
        return image
    @pytest.mark.only_with_image("ext4")
    def test_runtime_checksum(
        self,
        request,
        setup_board,
        prepared_test_build,
        bitbake_variables,
        bitbake_image,
        connection,
        http_server,
        board_type,
        use_s3,
        s3_address,
    ):
        """Check that the checksum of the running root filesystem is what we
        expect. This is important in order for it to match when applying a delta
        update.
        """
        if (
            "read-only-rootfs"
            not in bitbake_variables["IMAGE_FEATURES"].strip().split()
        ):
            pytest.skip("Only works when using read-only-rootfs IMAGE_FEATURE")
        image = self.do_install_mender_binary_delta(
            request,
            prepared_test_build,
            bitbake_variables,
            bitbake_image,
            connection,
            http_server,
            board_type,
            use_s3,
            s3_address,
        )
        # Check that checksum of the currently mounted rootfs matches that
        # of the artifact which we just updated to.
        (active, _) = determine_active_passive_part(bitbake_variables, connection)
        output = connection.run("sha256sum %s" % active)
        rootfs_sum = output.stdout.split()[0]
        output = subprocess.check_output(
            "mender-artifact read %s" % image, shell=True
        ).decode()
        match = re.search("checksum: *([0-9a-f]+)", output)
        assert match is not None, (
            "Could not find checksum in mender-artifact output: %s" % output
        )
        artifact_sum = match.group(1)
        assert rootfs_sum == artifact_sum
    # Not testable on QEMU/ARM combination currently. See MEN-4297.
    @pytest.mark.not_for_machine("vexpress-qemu")
    # mender-binary-delta 1.2.0 requires mender-artifact 3.5.0
    @pytest.mark.min_mender_version("2.5.0")
    @pytest.mark.only_with_image("ext4")
    def test_perform_update(
        self,
        request,
        setup_board,
        prepared_test_build,
        bitbake_variables,
        bitbake_image,
        connection,
        http_server,
        board_type,
        use_s3,
        s3_address,
    ):
        """Perform a delta update.
        """
        if (
            "read-only-rootfs"
            not in bitbake_variables["IMAGE_FEATURES"].strip().split()
        ):
            pytest.skip("Only works when using read-only-rootfs IMAGE_FEATURE")
        if distutils.spawn.find_executable("mender-binary-delta-generator") is None:
            pytest.fail("mender-binary-delta-generator not found in PATH")
        built_artifact = self.do_install_mender_binary_delta(
            request,
            prepared_test_build,
            bitbake_variables,
            bitbake_image,
            connection,
            http_server,
            board_type,
            use_s3,
            s3_address,
        )
        with make_tempdir() as tmpdir:
            # Copy previous build
            artifact_from = os.path.join(tmpdir, "artifact_from.mender")
            shutil.copyfile(built_artifact, artifact_from)
            # Create new image installing some extra software
            build_image(
                prepared_test_build["build_dir"],
                prepared_test_build["bitbake_corebase"],
                bitbake_image,
                ['IMAGE_INSTALL_append = " nano"'],
            )
            built_artifact = latest_build_artifact(
                request, prepared_test_build["build_dir"], "core-image*.mender"
            )
            artifact_to = os.path.join(tmpdir, "artifact_to.mender")
            shutil.copyfile(built_artifact, artifact_to)
            # Create delta Artifact using mender-binary-delta-generator
            artifact_delta = os.path.join(tmpdir, "artifact_delta.mender")
            subprocess.check_call(
                f"mender-binary-delta-generator -n v2.0-deltafrom-v1.0 {artifact_from} {artifact_to} -o {artifact_delta}",
                shell=True,
            )
            # Verbose provides/depends of the different Artifacts and the client (when supported)
            connection.run("mender show-provides", warn=True)
            subprocess.check_call(
                "mender-artifact read %s" % artifact_from, shell=True,
            )
            subprocess.check_call(
                "mender-artifact read %s" % artifact_to, shell=True,
            )
            subprocess.check_call(
                "mender-artifact read %s" % artifact_delta, shell=True,
            )
            # Install Artifact, verify partitions and commit
            (active, passive) = determine_active_passive_part(
                bitbake_variables, connection
            )
            Helpers.install_update(
                artifact_delta, connection, http_server, board_type, use_s3, s3_address
            )
            reboot(connection)
            run_after_connect("true", connection)
            # After a successful update the board boots the former passive part.
            (new_active, new_passive) = determine_active_passive_part(
                bitbake_variables, connection
            )
            assert new_active == passive
            assert new_passive == active
            connection.run("mender commit")
|
"""Message packet."""
import re
from collections import namedtuple
from .packet import Packet
class MessageComponent(namedtuple("Component", ("type", "data", "text"))):
    """Single component of a :obj:`MessagePacket`.

    Valid Types:

    ========= ===================================== ===================
    Type      Description                           Sample Data
    ========= ===================================== ===================
    text      Plaintext of any length.              Hello, world.
    emoji     Single emoji.                         🌵
    tag       Single user tag or mention.           Username
    url       URL.                                  https://google.com
    variable  Key to be replaced with live values.  %ARGS%
    ========= ===================================== ===================

    Parameters
    ----------
    type : :obj:`str`
        Component type (one of the table above).
    data : :obj:`str`
        Component data.
    text : :obj:`str`
        Text representation of the component.
    """
class MessagePacket(Packet):
"""Packet to store messages.
Parameters
----------
message : :obj:`dict`, :obj:`tuple`, :obj:`str`, or :obj:`MessageComponent`
Message content components.
:obj:`dict` should contain ``"type"``, ``"data"``, and ``"text"`` keys.
:obj:`tuple` will be interpreted as ``(type, data, text)``. If not
supplied, ``text`` will be equivalent to ``data``.
:obj:`str` will be interpreted as a component with ``type`` text.
user : :obj:`str`
The sender of the MessagePacket.
role : :obj:`int`
The role ID of the sender.
action : :obj:`bool`
Whether or not the message was sent in action form.
target : :obj:`str` or :obj:`None`
The single user target of the message.
"""
def __init__(self, *message, user="", role=1, action=False, target=None):
super().__init__()
message = list(message)
for index, chunk in enumerate(message):
if isinstance(chunk, dict):
message[index] = MessageComponent(**chunk)
elif isinstance(chunk, tuple):
if len(chunk) == 2:
chunk = chunk + (chunk[1],)
message[index] = MessageComponent(*chunk)
elif isinstance(chunk, str):
message[index] = MessageComponent("text", chunk, chunk)
self.message = message
self._condense()
self.user = user
self.role = role
self.action = action
self.target = target
def __str__(self):
return "<Message: {} - \"{}\">".format(self.user, self.text)
def __len__(self):
return len(''.join(
chunk.text for chunk in self.message
if chunk.type == "text"
))
    def __getitem__(self, key):
        """Index into the packet's plain text.

        An :obj:`int` key returns the character at that offset of the
        joined text of all "text" components. A :obj:`slice` key of the
        form ``[start:]`` returns a copy of the packet with the first
        ``start`` text characters (and any earlier non-text components)
        removed; ``stop`` and ``step`` are not implemented.
        """
        if isinstance(key, int):
            return ''.join(
                chunk.text for chunk in self.message
                if chunk.type == "text"
            )[key]
        elif isinstance(key, slice):
            if key.stop is not None or key.step is not None:
                raise NotImplementedError  # TODO
            count = key.start or 0
            message = self.message.copy()
            # Walk a snapshot of the components, consuming `count` text
            # characters from the front of the working list.
            # NOTE(review): `index` refers to the snapshot, while pop(0)
            # shrinks `message`, so `message[index]` can point at a
            # different element once earlier items were removed — confirm
            # this is the intended behavior for mixed component lists.
            for index, component in enumerate(message.copy()):
                if component.type == "text":
                    if len(component.text) <= count:
                        count -= len(component.text)
                        message.pop(0)
                    else:
                        # Drop one leading character per remaining count.
                        while count > 0:
                            new_text = component.text[1:]
                            component = message[index] = component._replace(
                                text=new_text, data=new_text)
                            count -= 1
                else:
                    message.pop(0)
                if count == 0:
                    return self.copy(*message)
            return self.copy(*message)
        raise TypeError
def __contains__(self, item):
for chunk in self.message:
if chunk.type == "text" and item in chunk.text:
return True
return False
def __iter__(self):
return self.message.__iter__()
def __add__(self, other):
return MessagePacket(
*(self.message + other.message),
user=self.user or other.user,
role=self.role or other.role,
action=self.action or other.action,
target=self.target or other.target
)
def _condense(self):
message = [self.message[0]]
for component in self.message[1:]:
if message[-1].type == component.type == "text":
new_text = message[-1].text + component.text
message[-1] = message[-1]._replace(
data=new_text, text=new_text)
else:
message.append(component)
self.message = message
return self
@property
def text(self):
"""Pure text representation of the packet.
Returns
-------
:obj:`str`
Joined ``text`` of every component.
Examples
--------
>>> MessagePacket("Hello, world! ", ("emoji", "😃")).text
'Hello, world! 😃'
"""
return ''.join(chunk.text for chunk in self.message)
@property
def json(self):
"""JSON representation of the packet.
Returns
-------
:obj:`dict`
Object attributes, in a JSON-compatible format.
Examples
--------
>>> import pprint
>>> pprint.pprint(MessagePacket("Hello, world! ", ("emoji", "😃")).json)
{'action': False,
'message': [{'data': 'Hello, world! ',
'text': 'Hello, world! ',
'type': 'text'},
{'data': '😃', 'text': '😃', 'type': 'emoji'}],
'role': 1,
'target': None,
'user': ''}
"""
return {
"message": [
dict(component._asdict()) for component in self.message
],
"user": self.user,
"role": self.role,
"action": self.action,
"target": self.target
}
@classmethod
def from_json(cls, json):
"""Convert :obj:`MessagePacket` JSON into an object.
Parameters
----------
json : :obj:`dict`
The JSON dictionary to convert.
Returns
-------
:obj:`MessagePacket`
Examples
--------
>>> MessagePacket.from_json({
... 'action': False,
... 'message': [{'type': 'text',
... 'data': 'Hello, world! ',
... 'text': 'Hello, world! '},
... {'data': '😃', 'text': '😃', 'type': 'emoji'}],
... 'role': 1,
... 'target': None,
... 'user': ''
... }).text
'Hello, world! 😃'
"""
return cls(*json.pop("message"), **json)
def copy(self, *args, **kwargs):
"""Return a copy of :obj:`self`.
Parameters
----------
*args
If any are provided, will entirely override :attr:`self.message`.
**kwargs
Each will override class attributes provided in :func:`__init__`.
Returns
-------
:obj:`MessagePacket`
Copy of :obj:`self`, with replaced attributes as specified in
``args`` and ``kwargs``.
"""
_args = args or self.message
_kwargs = {
"user": self.user,
"role": self.role,
"action": self.action,
"target": self.target
}
_kwargs.update(kwargs)
return MessagePacket(*_args, **_kwargs)
def replace(self, **values):
"""Replace text in packet.
Parameters
----------
values : :obj:`dict`
The text to replace.
Returns
-------
:obj:`MessagePacket`
:obj:`self`, with replaced text.
Note
----
Modifies the object itself. Does *not* return a copy.
Examples
--------
>>> packet = MessagePacket("Hello, world!")
>>> packet.replace(world="universe").text
'Hello, universe!'
>>> packet = MessagePacket("Hello, world!")
>>> packet.replace(**{
... "Hello": "Goodbye",
... "world": "Python 2"
... }).text
'Goodbye, Python 2!'
"""
for index, chunk in enumerate(self.message):
for old, new in values.items():
if new is not None:
new_text = chunk.text.replace(old, new)
new_data = new_text if chunk.type == "text" else chunk.data
self.message[index] = self.message[index]._replace(
data=new_data, text=new_text)
chunk = self.message[index]
return self
def sub(self, pattern, repl):
"""Perform regex substitution on packet.
Parameters
----------
pattern : :obj:`str`
Regular expression to match.
repl
The replacement for the `pattern`.
Accepts the same argument types as :func:`re.sub`.
Returns
-------
:obj:`MessagePacket`
:obj:`self`, with replaced patterns.
Note
----
Modifies the object itself. Does *not* return a copy.
Examples
--------
>>> packet = MessagePacket("I would like 3 ", ("emoji", "😃"), "s.")
>>> packet.sub(r"\\d+", "<number>").text
'I would like <number> 😃s.'
"""
for index, chunk in enumerate(self.message):
if chunk.type in ("text", "url"):
self.message[index] = self.message[index]._replace(
text=re.sub(pattern, repl, chunk.text))
return self
    def split(self, separator=' ', maximum=None):
        """Split into multiple MessagePackets, based on a separator.

        Parameters
        ----------
        separator : :obj:`str`, default `' '`
            The characters to split the string with.
        maximum : :obj:`int` or :obj:`None`
            The maximum number of splits to perform.
            If less than the total number of potential splits, will result in a
            list of length `maximum + 1`.
            Otherwise, will perform all splits.
            If :obj:`None`, will perform all splits.

        Returns
        -------
        :obj:`list` of :obj:`MessagePacket`s

        Note
        ----
        NOTE(review): the inner loop compares one *character* at a time
        against ``separator``, so a multi-character separator passes the
        ``in`` pre-check but never actually splits — confirm whether
        single-character separators are the intended contract.

        Examples
        --------
        >>> packet = MessagePacket("0 1 2 3 4 5 6 7")
        >>> [component.text for component in packet.split()]
        ['0', '1', '2', '3', '4', '5', '6', '7']
        >>> packet = MessagePacket("0 1 2 3 4 5 6 7")
        >>> [component.text for component in packet.split("2")]
        ['0 1 ', ' 3 4 5 6 7']
        >>> packet = MessagePacket("0 1 2 3 4 5 6 7")
        >>> [component.text for component in packet.split(maximum=3)]
        ['0', '1', '2', '3 4 5 6 7']
        """
        # result:     list of finished messages (each a list of components).
        # components: the message currently being accumulated.
        result = []
        components = []
        if maximum is None:
            # float('inf') lets the len(result) == maximum checks below
            # stay uniform whether or not a cap was requested.
            maximum = float('inf')
        for component in self:
            if len(result) == maximum:
                # Cap reached: everything remaining goes into the last message.
                components.append(component)
                continue
            is_text = component.type == "text"
            if not is_text or separator not in component.text:
                # Non-text components (emoji, tags, ...) are never split.
                components.append(component)
                continue
            # Rebuild this text component character by character, emitting a
            # finished message each time the separator is hit.
            new = MessageComponent("text", "", "")
            for index, character in enumerate(component.text):
                if len(result) == maximum:
                    # Cap hit mid-component: keep the rest verbatim.
                    new_text = new.text + component.text[index:]
                    new = new._replace(data=new_text, text=new_text)
                    break
                if character == separator:
                    # Close the current fragment and start a new message.
                    components.append(new._replace())
                    result.append(components.copy())
                    components.clear()
                    new = new._replace(data="", text="")
                else:
                    new_text = new.text + character
                    new = new._replace(data=new_text, text=new_text)
            components.append(new)
        result.append(components)
        # Drop empty fragments; messages that end up entirely empty are
        # removed outright. The lazy ``filter`` objects are consumed by
        # ``self.copy(*message)`` below.
        result = [
            filter(lambda component: component.text, message)
            for message in result
            if any(component.text for component in message)
        ]
        return [self.copy(*message) for message in result]
@classmethod
def join(cls, *packets, separator=''):
"""Join multiple message packets together.
Parameters
----------
*packets : :obj:`MessagePacket`
The packets to join.
separator : str
The string to place between every packet.
Returns
-------
:obj:`MessagePacket`
Packet containing joined contents.
Examples
--------
>>> MessagePacket.join(MessagePacket("a"), MessagePacket("b"), Message\
Packet("c")).text
'abc'
>>> MessagePacket.join(MessagePacket("a"), MessagePacket("b"), Message\
Packet("c"), separator='-').text
'a-b-c'
"""
if not packets:
return MessagePacket("")
result = packets[0]
for packet in packets[1:]:
result += MessagePacket(separator)
result += packet
return result
|
from __future__ import print_function
import sys
import mxnet as mx
import numpy as np
import random
import datetime
import multiprocessing
import cv2
from mxnet.executor_manager import _split_input_slice
from rcnn.config import config
from rcnn.io.image import tensor_vstack
from rcnn.io.rpn import get_rpn_testbatch, get_rpn_batch, assign_anchor_fpn, get_crop_batch, AA
class CropLoader(mx.io.DataIter):
    """Training iterator feeding cropped images plus FPN anchor targets.

    For each batch it loads images with ``get_crop_batch`` and builds the
    per-stride label/bbox(/landmark) targets listed in ``self.label_name``
    via ``AA.assign_anchor_fpn``.
    """

    def __init__(self, feat_sym, roidb, batch_size=1, shuffle=False, ctx=None, work_load_list=None,
                 aspect_grouping=False):
        """
        This Iter will provide roi data to Fast R-CNN network
        :param feat_sym: to infer shape of assign_output
        :param roidb: must be preprocessed
        :param batch_size: must divide BATCH_SIZE(128)
        :param shuffle: bool
        :param ctx: list of contexts
        :param work_load_list: list of work load
        :param aspect_grouping: group images with similar aspects
        :return: AnchorLoader
        """
        super(CropLoader, self).__init__()
        # save parameters as properties
        self.feat_sym = feat_sym
        self.roidb = roidb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        self.aspect_grouping = aspect_grouping
        self.feat_stride = config.RPN_FEAT_STRIDE
        # infer properties from roidb
        self.size = len(roidb)
        self.index = np.arange(self.size)
        # Decide data and label names. One label entry per (prefix, kind,
        # stride) combination, e.g. "face_label_stride8".
        self.data_name = ['data']
        self.label_name = []
        prefixes = ['face']
        if config.HEAD_BOX:
            prefixes.append('head')
        names = []
        for prefix in prefixes:
            names += [prefix+'_label', prefix+'_bbox_target', prefix+'_bbox_weight']
            if prefix=='face' and config.FACE_LANDMARK:
                names += [prefix+'_landmark_target', prefix+'_landmark_weight']
        for stride in self.feat_stride:
            for n in names:
                k = "%s_stride%d"%(n,stride)
                self.label_name.append(k)
        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None
        # Infer feature-map shapes once, at the maximum input size, so the
        # anchor assigner can precompute its anchor grids.
        feat_shape_list = []
        _data_shape = [('data', (1, 3, max([v[1] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]
        _data_shape = dict(_data_shape)
        for i in range(len(self.feat_stride)):
            _, feat_shape, _ = self.feat_sym[i].infer_shape(**_data_shape)
            feat_shape = [int(i) for i in feat_shape[0]]
            feat_shape_list.append(feat_shape)
        self.aa = AA(feat_shape_list)
        # Debug switches: when enabled, get_batch dumps annotated crops.
        self._debug = False
        self._debug_id = 0
        self._times = [0.0, 0.0, 0.0, 0.0]
        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch()

    @property
    def provide_data(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data)]

    @property
    def provide_label(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label)]

    def reset(self):
        self.cur = 0
        if self.shuffle:
            np.random.shuffle(self.index)

    def iter_next(self):
        return self.cur + self.batch_size <= self.size

    def next(self):
        if self.iter_next():
            self.get_batch()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration

    def getindex(self):
        # Fix: use floor division. Plain ``/`` yields a float batch index
        # under Python 3 (this file targets both 2 and 3 — see the
        # __future__ import), while DataBatch expects an integral index.
        return self.cur // self.batch_size

    def getpad(self):
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0

    def infer_shape(self, max_data_shape=None, max_label_shape=None):
        """ Return maximum data and label shape for single gpu """
        if max_data_shape is None:
            max_data_shape = []
        if max_label_shape is None:
            max_label_shape = []
        max_shapes = dict(max_data_shape + max_label_shape)
        input_batch_size = max_shapes['data'][0]
        # Run the anchor assigner on an empty ground-truth set just to learn
        # the per-stride label tensor shapes.
        dummy_boxes = np.zeros((0, 5))
        dummy_info = [ [max_shapes['data'][2], max_shapes['data'][3], 1.0] ]
        dummy_label = {'gt_boxes' : dummy_boxes}
        dummy_blur = np.zeros((0,))
        dummy_label['gt_blur'] = dummy_blur
        label_dict = {}
        if config.HEAD_BOX:
            head_label_dict = self.aa.assign_anchor_fpn(dummy_label, dummy_info, False, prefix='head')
            label_dict.update(head_label_dict)
        if config.FACE_LANDMARK:
            dummy_landmarks = np.zeros( (0,5,3) )
            dummy_label['gt_landmarks'] = dummy_landmarks
        face_label_dict = self.aa.assign_anchor_fpn(dummy_label, dummy_info, config.FACE_LANDMARK, prefix='face')
        label_dict.update(face_label_dict)
        label_list = []
        for k in self.label_name:
            label_list.append(label_dict[k])
        label_shape = [(k, tuple([input_batch_size] + list(v.shape[1:]))) for k, v in zip(self.label_name, label_list)]
        return max_data_shape, label_shape

    def get_batch(self):
        # slice roidb
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        # NOTE(review): this asserts full batches only; a trailing partial
        # batch would abort here — confirm size % batch_size == 0 upstream.
        assert cur_to==cur_from+self.batch_size
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi device slice
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        # Load the per-device image crops and raw labels.
        data_list = []
        label_list = []
        for islice in slices:
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            data, label = get_crop_batch(iroidb)
            data_list += data
            label_list += label
        # Optionally restrict anchor assignment to one random stride.
        select_stride = 0
        if config.RANDOM_FEAT_STRIDE:
            select_stride = random.choice(config.RPN_FEAT_STRIDE)
        for data, label in zip(data_list, label_list):
            data_shape = {k: v.shape for k, v in data.items()}
            del data_shape['im_info']
            feat_shape_list = []
            for s in range(len(self.feat_stride)):
                _, feat_shape, _ = self.feat_sym[s].infer_shape(**data_shape)
                feat_shape = [int(i) for i in feat_shape[0]]
                feat_shape_list.append(feat_shape)
            im_info = data['im_info']
            gt_boxes = label['gt_boxes']
            gt_label = {'gt_boxes':gt_boxes}
            if config.USE_BLUR:
                gt_blur = label['gt_blur']
                gt_label['gt_blur'] = gt_blur
            if self._debug:
                # Dump the first ground-truth box drawn over the crop.
                img = data['data'].copy()[0].transpose( (1,2,0) )[:,:,::-1].copy()
                print('DEBUG SHAPE', data['data'].shape, label['gt_boxes'].shape)
                box = label['gt_boxes'].copy()[0][0:4].astype(np.int)
                cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
                filename = './debugout/%d.png' % (self._debug_id)
                print('debug write', filename)
                cv2.imwrite(filename, img)
                self._debug_id+=1
            # Build the per-stride anchor targets for this image.
            label_dict = {}
            if config.HEAD_BOX:
                head_label_dict = self.aa.assign_anchor_fpn(gt_label, im_info, False, prefix='head', select_stride = select_stride)
                label_dict.update(head_label_dict)
            if config.FACE_LANDMARK:
                gt_landmarks = label['gt_landmarks']
                gt_label['gt_landmarks'] = gt_landmarks
            face_label_dict = self.aa.assign_anchor_fpn(gt_label, im_info, config.FACE_LANDMARK, prefix='face', select_stride = select_stride)
            label_dict.update(face_label_dict)
            for k in self.label_name:
                label[k] = label_dict[k]
        # Stack the per-image tensors into batch arrays. Labels are padded
        # with -1 (ignore) except bbox targets, which are padded with 0.
        all_data = dict()
        for key in self.data_name:
            all_data[key] = tensor_vstack([batch[key] for batch in data_list])
        all_label = dict()
        for key in self.label_name:
            pad = 0 if key.startswith('bbox_') else -1
            all_label[key] = tensor_vstack([batch[key] for batch in label_list], pad=pad)
        self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
        self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
class CropLoader2(mx.io.DataIter):
    """Multiprocessing variant of CropLoader.

    One producer process shuffles the roidb and deals mini-batch roidbs to
    ``config.NUM_CPU`` input queues; one worker per queue runs
    ``get_crop_batch`` and pushes (data, label) onto a shared output queue,
    which ``get_batch`` consumes. Anchor assignment happens in the consumer.

    NOTE(review): worker processes are never joined/terminated and the
    ``None`` poison pill checked in gen_worker is never sent — confirm
    shutdown is handled by the caller (or process exit).
    """

    def __init__(self, feat_sym, roidb, batch_size=1, shuffle=False, ctx=None, work_load_list=None,
                 aspect_grouping=False):
        """
        This Iter will provide roi data to Fast R-CNN network
        :param feat_sym: to infer shape of assign_output
        :param roidb: must be preprocessed
        :param batch_size: must divide BATCH_SIZE(128)
        :param shuffle: bool
        :param ctx: list of contexts
        :param work_load_list: list of work load
        :param aspect_grouping: group images with similar aspects
        :return: AnchorLoader
        """
        super(CropLoader2, self).__init__()
        # save parameters as properties
        self.feat_sym = feat_sym
        self.roidb = roidb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        self.aspect_grouping = aspect_grouping
        self.feat_stride = config.RPN_FEAT_STRIDE
        # infer properties from roidb
        self.size = len(roidb)
        # Decide data and label names: one entry per (prefix, kind, stride).
        self.data_name = ['data']
        self.label_name = []
        prefixes = ['face']
        if config.HEAD_BOX:
            prefixes.append('head')
        names = []
        for prefix in prefixes:
            names += [prefix+'_label', prefix+'_bbox_target', prefix+'_bbox_weight']
            if prefix=='face' and config.FACE_LANDMARK:
                names += [prefix+'_landmark_target', prefix+'_landmark_weight']
        for stride in self.feat_stride:
            for n in names:
                k = "%s_stride%d"%(n,stride)
                self.label_name.append(k)
        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None
        # Spin up the producer/worker processes, then pull one batch so
        # provide_data/provide_label are populated.
        self.reset()
        self.q_in = [multiprocessing.Queue(1024) for i in range(config.NUM_CPU)]
        self.q_out = multiprocessing.Queue(1024)
        self.start()
        self.get_batch()

    @property
    def provide_data(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data)]

    @property
    def provide_label(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label)]

    def reset(self):
        # No-op: shuffling/epoch handling lives in input_worker, which
        # reshuffles and restarts whenever it exhausts the roidb.
        pass

    @staticmethod
    def input_worker(q_in, roidb, batch_size):
        """Producer: endlessly deal shuffled mini-batch roidbs to q_in."""
        index = np.arange(len(roidb))
        np.random.shuffle(index)
        cur_from = 0
        while True:
            cur_to = cur_from + batch_size
            if cur_to>len(roidb):
                # Epoch exhausted: reshuffle and start over (drops the tail).
                np.random.shuffle(index)
                cur_from = 0
                continue
            _roidb = [roidb[index[i]] for i in range(cur_from, cur_to)]
            # Route to a queue chosen by the first sample's index so work
            # spreads across the NUM_CPU workers.
            istart = index[cur_from]
            q_in[istart%len(q_in)].put(_roidb)
            cur_from = cur_to

    @staticmethod
    def gen_worker(q_in, q_out):
        """Worker: load each queued roidb into (data, label) and emit it."""
        while True:
            deq = q_in.get()
            if deq is None:
                # Poison pill: shut this worker down.
                break
            _roidb = deq
            data, label = get_crop_batch(_roidb)
            print('generated')
            q_out.put( (data, label) )

    def start(self):
        """Launch the producer process and the NUM_CPU loader workers."""
        input_process = multiprocessing.Process(target=CropLoader2.input_worker, args=(self.q_in, self.roidb, self.batch_size))
        gen_process = [multiprocessing.Process(target=CropLoader2.gen_worker, args=(self.q_in[i], self.q_out)) \
            for i in range(config.NUM_CPU)]
        input_process.start()
        for p in gen_process:
            p.start()

    def next(self):
        # Blocks on q_out inside get_batch; never raises StopIteration,
        # so this iterator is effectively infinite.
        self.get_batch()
        return mx.io.DataBatch(data=self.data, label=self.label,
                               provide_data=self.provide_data, provide_label=self.provide_label)

    def infer_shape(self, max_data_shape=None, max_label_shape=None):
        """ Return maximum data and label shape for single gpu """
        if max_data_shape is None:
            max_data_shape = []
        if max_label_shape is None:
            max_label_shape = []
        max_shapes = dict(max_data_shape + max_label_shape)
        input_batch_size = max_shapes['data'][0]
        # Run the anchor assigner on an empty ground-truth set just to
        # learn the per-stride label tensor shapes.
        dummy_boxes = np.zeros((0, 5))
        dummy_info = [ [max_shapes['data'][2], max_shapes['data'][3], 1.0] ]
        dummy_label = {'gt_boxes' : dummy_boxes}
        # infer shape
        feat_shape_list = []
        for i in range(len(self.feat_stride)):
            _, feat_shape, _ = self.feat_sym[i].infer_shape(**max_shapes)
            feat_shape = [int(i) for i in feat_shape[0]]
            feat_shape_list.append(feat_shape)
        label_dict = {}
        if config.HEAD_BOX:
            head_label_dict = assign_anchor_fpn(feat_shape_list, dummy_label, dummy_info, False, prefix='head')
            label_dict.update(head_label_dict)
        if config.FACE_LANDMARK:
            dummy_landmarks = np.zeros( (0,11) )
            dummy_label['gt_landmarks'] = dummy_landmarks
        face_label_dict = assign_anchor_fpn(feat_shape_list, dummy_label, dummy_info, config.FACE_LANDMARK, prefix='face')
        label_dict.update(face_label_dict)
        label_list = []
        for k in self.label_name:
            label_list.append(label_dict[k])
        label_shape = [(k, tuple([input_batch_size] + list(v.shape[1:]))) for k, v in zip(self.label_name, label_list)]
        return max_data_shape, label_shape

    def get_batch(self):
        # Block until a worker delivers a loaded (data, label) pair.
        deq = self.q_out.get()
        print('q_out got')
        data_list, label_list = deq
        for data, label in zip(data_list, label_list):
            data_shape = {k: v.shape for k, v in data.items()}
            del data_shape['im_info']
            feat_shape_list = []
            for s in range(len(self.feat_stride)):
                _, feat_shape, _ = self.feat_sym[s].infer_shape(**data_shape)
                feat_shape = [int(i) for i in feat_shape[0]]
                feat_shape_list.append(feat_shape)
            im_info = data['im_info']
            gt_boxes = label['gt_boxes']
            gt_label = {'gt_boxes':gt_boxes}
            # Build per-stride anchor targets in the consumer process.
            label_dict = {}
            head_label_dict = assign_anchor_fpn(feat_shape_list, gt_label, im_info, False, prefix='head')
            label_dict.update(head_label_dict)
            if config.FACE_LANDMARK:
                gt_landmarks = label['gt_landmarks']
                gt_label['gt_landmarks'] = gt_landmarks
            face_label_dict = assign_anchor_fpn(feat_shape_list, gt_label, im_info, config.FACE_LANDMARK, prefix='face')
            label_dict.update(face_label_dict)
            for k in self.label_name:
                label[k] = label_dict[k]
        # Stack per-image tensors into batch arrays; labels pad with -1
        # (ignore) except bbox targets, which pad with 0.
        all_data = dict()
        for key in self.data_name:
            all_data[key] = tensor_vstack([batch[key] for batch in data_list])
        all_label = dict()
        for key in self.label_name:
            pad = 0 if key.startswith('bbox_') else -1
            all_label[key] = tensor_vstack([batch[key] for batch in label_list], pad=pad)
        self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
        self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
|
# -*- coding: utf-8 -*-
# @Time : 2020-01-10 15:38
# @Author : yingyuankai
# @Email : yingyuankai@aliyun.com
# @File : tnew_transformer.py
import os
from tqdm import tqdm
import json
import logging
import numpy as np
# import hanlp
import pickle
from random import random, randrange
from pathlib import Path
from .base_transformer import BaseTransformer
from aispace.datasets import BaseTokenizer
from aispace.utils.io_utils import json_dumps
from aispace.utils.file_utils import default_download_dir, maybe_create_dir
from aispace.utils.io_utils import maybe_download, load_from_file
__all__ = [
"GovTitleTriggerTransformer",
"GovTitleRoleTransformer"
]
logger = logging.getLogger(__name__)
@BaseTransformer.register("gov_title/trigger")
class GovTitleTriggerTransformer(BaseTransformer):
    """Converts title-annotated text into BIO-tagged token features for
    government-title trigger detection."""

    def __init__(self, hparams, **kwargs):
        super(GovTitleTriggerTransformer, self).__init__(hparams, **kwargs)
        # Tokenizer is resolved by name from the registry and configured
        # from the dataset hyper-parameters.
        self.tokenizer = \
            BaseTokenizer. \
                by_name(self._hparams.dataset.tokenizer.name) \
                (self._hparams.dataset.tokenizer)

    def transform(self, data_path, split="train"):
        """Yield one feature dict per JSON line of *data_path*.

        Each line must contain 'context' (the raw text) and 'titles'
        (spans with 'span_start'/'span_end'/'text'). Tokens inside a title
        span get B-TITLE/I-TITLE labels, everything else 'O'.
        """
        limit = 1000000  # hard cap on the number of lines consumed
        with open(data_path, "r") as inf:
            for idx, line in enumerate(inf):
                if idx >= limit:
                    break
                json_obj = json.loads(line)
                context = json_obj['context']
                titles: list = json_obj['titles']
                # Sort spans so the context can be consumed left to right.
                titles.sort(key=lambda s: s['span_start'])
                tokens = []
                labels = []
                pre_idx = 0
                for title in titles:
                    ss, se = title['span_start'], title['span_end']
                    # Text between the previous span and this one: all 'O'.
                    pre_str = context[pre_idx: ss]
                    pre_tokens = self.tokenizer.tokenize(pre_str)
                    tokens.extend(pre_tokens)
                    labels.extend(["O"] * len(pre_tokens))
                    # The title span itself: B-TITLE on the first token.
                    cur_str = title['text']
                    cur_tokens = self.tokenizer.tokenize(cur_str)
                    tokens.extend(cur_tokens)
                    labels.extend(["B-TITLE"] + ["I-TITLE"] * (len(cur_tokens) - 1))
                    pre_idx = se
                # Trailing text after the last span.
                pre_str = context[pre_idx:]
                pre_tokens = self.tokenizer.tokenize(pre_str)
                tokens.extend(pre_tokens)
                labels.extend(["O"] * len(pre_tokens))
                output = self.tokenizer.encode(tokens)
                if self._hparams.dataset.tokenizer.name != "gpt_tokenizer":
                    # Assumes encode() adds [CLS]/[SEP]: prepend an 'O' for
                    # [CLS], reserve 2 slots, pad to max_len — TODO confirm
                    # against the tokenizer implementation.
                    labels = ["O"] + labels[: self.tokenizer.max_len - 2]
                    labels = labels + ['O'] * (self.tokenizer.max_len - len(labels))
                else:
                    # GPT tokenizer: no special tokens, just truncate + pad.
                    labels = labels[: self.tokenizer.max_len]
                    labels = labels + ['O'] * (self.tokenizer.max_len - len(labels))
                feature = {
                    "input_ids": output['input_ids'],
                    "token_type_ids": output['segment_ids'],
                    "attention_mask": output['input_mask'],
                    "label": labels,
                }
                if idx == 0:
                    # Print the first example for a quick sanity check.
                    print(feature)
                    print(len(feature['label']))
                yield feature
@BaseTransformer.register("gov_title/role")
class GovTitleRoleTransformer(BaseTransformer):
    """Converts role-annotated text into BIO-tagged token features for
    role extraction around a known title trigger, with window-based
    data augmentation."""

    def __init__(self, hparams, **kwargs):
        super(GovTitleRoleTransformer, self).__init__(hparams, **kwargs)
        # Tokenizer is resolved by name from the registry and configured
        # from the dataset hyper-parameters.
        self.tokenizer = \
            BaseTokenizer. \
                by_name(self._hparams.dataset.tokenizer.name) \
                (self._hparams.dataset.tokenizer)

    def transform(self, data_path, split="train"):
        """Yield one feature dict per (example, context window) pair.

        Each JSON line carries 'trigger' (the title span), 'roles'
        (argument spans), 'status' and 'context'. Several context windows
        centred on the trigger are cut out of the text; each distinct
        window yields one training feature.
        """
        with open(data_path, "r") as inf:
            for idx, line in enumerate(inf):
                json_obj = json.loads(line)
                trigger = json_obj['trigger']
                roles: list = json_obj['roles']
                status = json_obj['status']
                context = json_obj['context']
                # Treat the trigger like a role span so it is consumed in
                # document order with the rest.
                roles.append(trigger)
                roles.sort(key=lambda s: s['span_start'])
                # (chars before trigger, chars after trigger) window sizes;
                # (10000, 10000) effectively keeps the whole context.
                windows = [(5, 0), (10, 0), (20, 0), (40, 0), (80, 0), (160, 0), (320, 0), (10000, 10000)] + \
                          [(5, 5), (10, 10), (20, 20), (40, 40), (80, 50), (160, 60), (320, 80)]
                context_span_visited = set()
                # Cut windows of varying size around the title trigger --
                # data augmentation via multiple views of the same example.
                for pre_offset, post_offset in windows:
                    context_s, \
                    context_e = \
                        max(0, trigger['span_start'] - pre_offset), \
                        min(len(context), trigger['span_end'] + post_offset)
                    # Skip windows that clip to an already-emitted span.
                    if (context_s, context_e) in context_span_visited:
                        continue
                    context_span_visited.add((context_s, context_e))
                    tokens = []
                    labels = []
                    pre_idx = context_s
                    trigger_span_start, trigger_span_end = 1, 1
                    for role in roles:
                        ss, se = role['span_start'], role['span_end']
                        # Ignore spans falling outside this window.
                        if ss < context_s or se > context_e:
                            continue
                        entity_type = role['entity_type']
                        # Text between the previous span and this one: 'O'.
                        pre_str = context[pre_idx: ss]
                        pre_tokens = self.tokenizer.tokenize(pre_str)
                        tokens.extend(pre_tokens)
                        labels.extend(["O"] * len(pre_tokens))
                        cur_str = role['text']
                        cur_tokens = self.tokenizer.tokenize(cur_str)
                        tokens.extend(cur_tokens)
                        if role['entity_type'] != "TITLE":
                            labels.extend([f"B-{entity_type}"] + [f"I-{entity_type}"] * (len(cur_tokens) - 1))
                        else:
                            # The trigger itself is labelled 'O' but its token
                            # span is remembered for the position-id trick below.
                            trigger_span_start = len(labels)
                            labels.extend(["O"] * len(cur_tokens))
                            trigger_span_end = len(labels)
                        pre_idx = se
                    # Trailing window text after the last span.
                    pre_str = context[pre_idx: context_e]
                    pre_tokens = self.tokenizer.tokenize(pre_str)
                    tokens.extend(pre_tokens)
                    labels.extend(["O"] * len(pre_tokens))
                    # Second segment: "<status><trigger text>" as a query.
                    query = f"{status}{trigger['text']}"
                    query_tokens = self.tokenizer.tokenize(query)
                    # Drop windows where the trigger would be truncated away.
                    if trigger_span_end > self.tokenizer.max_len - 3 - len(query_tokens):
                        continue
                    tokens = tokens[: self.tokenizer.max_len - 3 - len(query_tokens)]
                    labels = labels[: self.tokenizer.max_len - 3 - len(query_tokens)]
                    labels = ["O"] + labels + ['O'] * (self.tokenizer.max_len - len(labels) - 1)
                    # NOTE(review): the query tokens appear to reuse the
                    # trigger's position ids so the model ties them to the
                    # in-context trigger span; assumes encode() adds 3
                    # special tokens — confirm against the tokenizer.
                    position_ids = list(range(0, 1 + len(tokens) + 1 + 2)) + \
                                   list(range(trigger_span_start, trigger_span_end)) + list(range(len(tokens) + len(query_tokens) + 2, self.tokenizer.max_len))
                    output = self.tokenizer.encode(tokens, query_tokens)
                    feature = {
                        "input_ids": output['input_ids'],
                        "token_type_ids": output['segment_ids'],
                        "attention_mask": output['input_mask'],
                        "position_ids": position_ids,
                        "label": labels,
                    }
                    if idx == 0:
                        # Print the first example for a quick sanity check.
                        print(feature)
                    yield feature
# -*- coding: utf-8 -*-
"""
Create ORS programs for newly defined programs in Aasaan
"""
from datetime import date
import json
from collections import Counter
from django.core.management.base import BaseCommand
from schedulemaster.models import ProgramSchedule, ProgramScheduleCounts, \
ProgramCountMaster, ProgramReceiptAmounts
from config.ors.ors import ORSInterface
from config.models import get_configuration
from django.conf import settings
from utils.datedeux import DateDeux
def _return_category(category_name, category_set):
for each_category in category_set:
if each_category.category.count_category == category_name:
return each_category
return None
def _create_or_update(fields, values, schedule):
    """Upsert one ProgramReceiptAmounts row per category in *fields*.

    values is a (counts, amounts) pair of mappings keyed by category
    name; an existing row for the schedule is updated when present,
    otherwise a fresh one is created.
    """
    counts, amounts = values
    existing_rows = schedule.programreceiptamounts_set.all()
    for category_name in fields:
        master = ProgramCountMaster.objects.get(count_category=category_name)
        # Reuse the schedule's row for this category if one exists.
        row = _return_category(category_name, existing_rows) or ProgramReceiptAmounts()
        row.program = schedule
        row.category = master
        row.receipt_count = counts[category_name]
        row.receipt_amount = amounts[category_name]
        row.save()
class Command(BaseCommand):
    """Management command that re-aggregates e-receipt counts/amounts
    per payment mode for recent program schedules."""
    help = "Sync ereceipts amounts"

    def add_arguments(self, parser):
        # Optional ISO start date; defaults to "50 days ago" in handle().
        parser.add_argument('start_date', nargs='?', type=str)

    def handle(self, *args, **options):
        _fields_to_update = ["Online Receipts", "Cash Receipts", "Cheque Receipts", "Creditcard Receipts"]
        # Pick the schedules to refresh: explicit start date, or last 50 days.
        if options['start_date']:
            _start_date = DateDeux.fromisodate(options['start_date'])
            print('Using start date of ', _start_date)
            _schedules = ProgramSchedule.objects.filter(start_date__gte=_start_date)
        else:
            reference_date = DateDeux.today() - 50
            print('Using start date of ', reference_date)
            _schedules = ProgramSchedule.objects.filter(start_date__gte=reference_date)
        for each_schedule in _schedules:
            _counts = Counter()
            _amounts = Counter()
            print(each_schedule.id, each_schedule)
            try:
                _receipts = json.loads(each_schedule.receipts)
            except (TypeError, ValueError):
                # Was a bare ``except:`` — narrowed: json.loads raises
                # ValueError (JSONDecodeError) for malformed text and
                # TypeError for None/non-string; anything else should
                # propagate instead of being silently swallowed.
                _receipts = []
            if not _receipts:
                print("No receipts found")
                continue
            # Tally receipt count and amount per payment mode, e.g.
            # "online" -> "Online Receipts".
            for receipt in _receipts:
                _mode = receipt.get("mode", "Unknown")
                _mode = _mode.title() + " Receipts"
                _amount = receipt.get("amount", 0)
                _counts[_mode] += 1
                _amounts[_mode] += _amount
            print(_counts, _amounts)
            _create_or_update(_fields_to_update, (_counts, _amounts),
                              each_schedule)
|
import numpy as np
from model.constant_variables import Z_field, Z_lab
def set_up_model_geometry(geom="FieldScale0.5m"):
    """
    Set-up model geometry

    Arguments
    ------------------------------
    geom     Flag for geometry of choice

    Results
    ---------------------------
    nz       number of nodes
    Z        total Snow Height [m]
    dz       node distance cell size [m]
    coord    snow height coordinates [m]
    """
    valid_options = ("FieldScale0.5m", "LabScale0.02m", "layer_based0.5m_2Layer")
    if geom not in valid_options:
        raise TypeError(
            "The option for geom can only be: FieldScale0.5m, LabScale0.02m, layer_based0.5m_2Layer"
        )
    number_of_nodes, total_height, coordinates = choose_geometry(geom)
    cell_sizes = node_distance(coordinates, number_of_nodes)
    return number_of_nodes, cell_sizes, total_height, coordinates
def choose_geometry(geom):
    """
    Select geometry of the test cases at initiation

    Arguments
    ----------------------------
    geom    'FieldScale0.5m' - 101 nodes,
            'LabScale0.02m' - represents the lab scale,
            'layer_based0.5m_2Layer' - 3 computational nodes to reflect
            layer-based schemes

    Results
    -----------------------------
    nz      number of computational nodes
    Z       initial height of the snowpack
    coord   initial z-coordinates
    """
    allowed = ("FieldScale0.5m", "LabScale0.02m", "layer_based0.5m_2Layer")
    if geom not in allowed:
        raise TypeError(
            "The option for geom can only be: FieldScale0.5m, LabScale0.02m, layer_based0.5m_2Layer"
        )
    if geom == "FieldScale0.5m":
        height = 0.5  # Z_field # height[m]
        cells = 100  # or 250
        nodes = cells + 1
        z_coordinates = np.linspace(0, height, nodes)  # [m]
    elif geom == "LabScale0.02m":
        height = Z_lab  # height [m]
        cells = 50
        nodes = cells + 1
        z_coordinates = np.linspace(0, height, nodes)  # [m]
    elif geom == "layer_based0.5m_2Layer":
        # Do not combine with Module I and Module II!
        height = 0.5  # [m]
        cells = 2
        nodes = cells + 1
        z_coordinates = np.array((0, 0.25, height))
    else:
        # Unreachable given the membership check above; kept defensively.
        raise ValueError("Requested geometry not available")
    return nodes, height, z_coordinates
def node_distance(coord, nz):
    """
    Computation of the node distance based on the node coordinates

    Arguments:
    ------------
    coord      mesh coordinates [m]; any array-like (list, tuple, ndarray)
    nz         number of computational nodes

    Results:
    -------------
    dz         node distance [m], array of length nz - 1

    Raises TypeError for unsupported inputs and ValueError when the
    number of coordinates disagrees with nz.
    """
    if type(coord) not in [list, tuple, float, int, np.ndarray]:
        raise TypeError("coord array has to be an array")
    if type(nz) not in [int]:
        raise TypeError("nz has to be an integer")
    # Generalization: previously a plain list/tuple passed the type check
    # but crashed on the slicing subtraction below; normalize to ndarray.
    coord = np.asarray(coord)
    if int(len(coord)) != int(nz):
        raise ValueError(
            "number of coordinates does not fit with number of computational nodes"
        )
    # (The original pre-allocated np.zeros(nz - 1) here and immediately
    # overwrote it — dead allocation removed.)
    dz = coord[1:] - coord[:-1]
    return dz
|
import os
import operator
class grid_cell:
    """One cell of the grid: distance to the nearest input coordinate
    and that coordinate's 1-based id (0 means tied / unset)."""

    def __init__(self, distance=0, coord=0):
        # distance: Manhattan distance to the closest coordinate so far
        # coord: identifier of that coordinate
        self.distance = distance
        self.coord = coord
# Parse "x, y" coordinate pairs from input.txt.
# NOTE: shadows the builtin ``input``; also assumes every line ends with a
# newline (the [-1] slice drops it) — a file without a trailing newline
# would lose the last digit. TODO confirm input format.
input = open('input.txt', 'r')
coords = []
for row in input:
    coords.append((int(row[:row.find(',')]),int(row[row.find(' ')+1:-1])))
input.close()
#create an empty grid (500x500 cells, distance initialised to "infinity")
grid = []
for i in range(500):
    row = []
    for j in range(500):
        cell = grid_cell(999999, 0)
        row.append(cell)
    grid.append(row)
#populate grid. each cell has a distance to and a closest coordinate
# (coord stays 0 on an exact tie, marking the cell as owned by nobody)
for y, row in enumerate(grid):
    for x, cell in enumerate(row):
        for c, coord in enumerate(coords):
            manhattan = abs(coord[0] - x) + abs(coord[1] - y)
            if manhattan == cell.distance:
                cell.coord = 0
            elif manhattan < cell.distance:
                cell.distance = manhattan
                cell.coord = c + 1
#add all coords closest to the edge to a blacklist, as they will be infinite
blacklist = set()
for cell in grid[0]:
    blacklist.add(cell.coord)
for row in grid:
    blacklist.add(row[0].coord)
    blacklist.add(row[-1].coord)
for cell in grid[-1]:
    blacklist.add(cell.coord)
#find area of each Coordinate (ties, coord 0, land in the blacklist too)
areas = {}
for row in grid:
    for cell in row:
        if cell.coord not in blacklist:
            if cell.coord in areas:
                areas[cell.coord] += 1
            else:
                areas[cell.coord] = 1
#print maximum area among the finite regions
print(max(areas.items(), key = operator.itemgetter(1))[1])
|
'''Defines the `JavaCompilationInfo` provider.
'''
# Provider carrying everything needed to describe one javac invocation;
# populated by the Java rules and consumed (e.g.) by the lint aspects.
JavaCompilationInfo = provider(
    doc = "Describes the inputs, outputs, and options of a Java compiler invocation for a particular Java target.",
    fields = {
        "srcs": "A depset of the Java source `File`s directly included in a Java target. (This does not include either generated or transitive Java sources). This may be `None`. (E.g., a `java_import` target may not have any source files.).",
        "srcs_args_file": "A `File` listing the Java source files in the `srcs` field. Each line of this `File` is the `File.path` of one of the `srcs` `File`s. These lines are in no particular order. (This field exists so the various lint aspects don't need to re-create a list of a target's Java sources. They can just use this file generated by the original Java target.)",
        "class_path_jars": "A depset of JAR `File`s to be included on the class-path during this compilation.",
        "class_files_output_jar": "A `File` pointing to the JAR of class files generated by this Java compilation action.",
        "main_class": "Either a `string` or `None`. If non-null, this string is used as the output JAR's `Main-Class` manifest attribute.",
        "additional_jar_manifest_attributes": "A list of strings to be included in the manifest of the generated JAR.",
        "java_compiler_toolchain_info": "The `JavaCompilerToolchainInfo` which should be used to compile this Java target.",
        "resources": "A dict from a `File` to be included in the output JAR to its in-JAR path.",
        "javac_flags": "A list of strings to be included in the `javac` invocation."
        # TODO(dwtj): Consider supporting compiler plugins
        # TODO(dwtj): Consider supporting "generated_sources_output_jar".
        # TODO(dwtj): Consider supporting "native_headers_archive" (i.e. `javac -h <directory>).
        # TODO(dwtj): Consider supporting Java source code version checks.
    },
)
|
"""A script which helps users create their nengo_spinnaker config file."""
import argparse
from nengo_spinnaker.utils.paths import nengo_spinnaker_rc
from rig import wizard
import os
CONFIG_TEMPLATE = """\
### SpiNNaker system configuration.
# File automatically generated by nengo_spinnaker_setup.
[spinnaker_machine]
hostname: {hostname}
"""


def generate_config_file(filename, ip_address):
    """Write a fresh nengo_spinnaker config file at *filename* whose
    ``hostname`` entry is *ip_address*, creating parent directories as
    needed.
    """
    parent = os.path.dirname(filename)
    try:
        os.makedirs(parent)
    except (IOError, OSError):
        # Parent directory already exists (or cannot be created); the
        # open() below will surface any genuine problem.
        pass
    with open(filename, "w") as config_file:
        config_file.write(CONFIG_TEMPLATE.format(hostname=ip_address))
def main(args=None):
    """Interactive entry point: pick a config location, run the IP-address
    wizard, and write the config file. Returns a process exit code
    (0 on success, 1 on refusal/failure)."""
    parser = argparse.ArgumentParser(
        description="Interactive tool for creating a nengo_spinnaker "
                    "config file.")
    # --user and --project are mutually exclusive; both just select which
    # path from nengo_spinnaker_rc to write to.
    file_group = parser.add_mutually_exclusive_group()
    file_group.add_argument("--user", "-u", dest="file",
                            const="user", action="store_const",
                            help="Create a user-specific config file "
                                 "(default).")
    file_group.add_argument("--project", "-p", dest="file",
                            const="project", action="store_const",
                            help="Create a project-specific config file in "
                                 "the current directory.")
    file_group.set_defaults(file="user")
    parser.add_argument("--force", "-f", action="store_true",
                        help="Overwrite an existing config file.")
    args = parser.parse_args(args)
    filename = nengo_spinnaker_rc[args.file]
    # Refuse to clobber an existing file unless --force was given.
    if not args.force and os.path.isfile(filename):
        print("Config file {} already exists. Use --force to "
              "overwrite.".format(filename))
        return 1
    # Interactive wizard; returns None if the user aborts.
    resp = wizard.cli_wrapper(wizard.ip_address_wizard())
    if resp is None:
        return 1
    else:
        # resp is expected to carry the generate_config_file kwargs
        # (ip_address) — presumably guaranteed by the wizard; verify.
        generate_config_file(filename, **resp)
        print("Successfully created config file in {}".format(filename))
        return 0


# Script entry point: exit with main()'s return code.
if __name__ == "__main__":  # pragma: no cover
    import sys
    sys.exit(main())
|
#Types.
from typing import Dict, List, Any, Optional

#Meros class.
from python_tests.Meros.Meros import Meros

#NodeError Exception.
from python_tests.Tests.Errors import NodeError

#JSON standard lib.
import json

#Socket standard lib.
import socket
class RPC:
    """Minimal JSON-RPC 2.0 client for a running Meros node.

    Opens a TCP socket to the node's RPC port on localhost and reads each
    response back byte-by-byte until the JSON payload is balanced.
    """

    #Constructor.
    def __init__(
        self,
        meros: Meros
    ) -> None:
        self.meros: Meros = meros
        self.socket: socket.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect(("127.0.0.1", meros.rpc))

    #Call an RPC method.
    def call(
        self,
        module: str,
        method: str,
        args: Optional[List[Any]] = None
    ) -> Any:
        """Invoke ``module_method`` over JSON-RPC and return its result.

        Raises NodeError when the connection dies, and Exception when the
        node returns a JSON-RPC error object.
        """
        #A None sentinel replaces the original mutable default ([]), which
        #would have been shared between every call site.
        if args is None:
            args = []

        #Send the call.
        self.socket.send(
            bytes(
                json.dumps(
                    {
                        "jsonrpc": "2.0",
                        "id": 0,
                        "method": module + "_" + method,
                        "params": args
                    }
                ),
                "utf-8"
            )
        )

        #Read the response one byte at a time until the top-level JSON value
        #is balanced.
        #NOTE(review): this assumes the payload starts with an opening
        #bracket and that no brace/bracket bytes occur inside string values
        #-- confirm against the node's output format.
        response: bytes = bytes()
        next: bytes = bytes()
        counter: int = 0
        while True:
            try:
                next = self.socket.recv(1)
            except Exception:
                raise NodeError()
            if len(next) == 0:
                raise NodeError()

            response += next
            #A byte equal to the opening byte increases nesting depth.
            if response[-1] == response[0]:
                counter += 1
            elif (chr(response[-1]) == ']') and (chr(response[0]) == '['):
                counter -= 1
            elif (chr(response[-1]) == '}') and (chr(response[0]) == '{'):
                counter -= 1

            if counter == 0:
                break

        #Raise an exception on error.
        result: Dict[str, Any] = json.loads(response)
        if "error" in result:
            raise Exception(result["error"]["message"])
        return result["result"]

    #Quit Meros.
    def quit(
        self
    ) -> None:
        """Ask the node to shut down via the system_quit RPC."""
        self.call(
            "system",
            "quit"
        )
|
import sys
f = open(sys.argv[1], "r")
class container:
    """A typed container tracked by two state flags: open and burnt."""

    def __init__(self, _type):
        self._type = _type
        # Newly found containers always start closed and unburnt.
        self.open = False
        self.burnt = False
prntStack = []
burnables = []
curCont = container(None)
usedTypes = []
lineNum = 0
pages = {}

# Interpret the input file line by line.  Commands: find/open/write/toss/
# burn/close.  A container must be found, opened, burnt, then closed, in
# that order; "write" stores text on a named page and "toss" queues a
# page's text to be printed when the container is burnt.
for line in f.readlines():
    parts = line.split()
    parts.append('')  # guard so parts[0]/parts[1] never IndexError on blank lines
    lineNum += 1
    if parts[0].lower() == "find":
        if curCont.open == False:
            target = " ".join(parts[1:]).strip(" ")
            if target not in usedTypes:
                curCont = container(target)
                usedTypes.append(target)
            else:
                print(f"Error line: {lineNum}, You already burnt this container")
                exit()
        else:
            print(f"Error line: {lineNum}, cannot find a new container without closing current container")
            exit()
    elif parts[0].lower() == "open":
        # BUGFIX: the original elif also required `curCont.open == False`,
        # which made the "cannot open an open container" error below
        # unreachable -- an invalid `open` was silently ignored instead.
        if curCont.open == False:
            if " ".join(parts[1:]).strip(" ") == curCont._type:
                curCont.open = True
            else:
                print(f"Error line: {lineNum}, specified container must be what you are in")
                exit()
        else:
            print(f"Error line: {lineNum}, cannot open an open container")
            exit()
    elif parts[0].lower() == "write":
        pages[parts[1]] = " ".join(parts[2:]).strip(" ")
    elif parts[0].lower() == "toss":
        if curCont.open == True:
            if curCont.burnt == False:
                if " ".join(parts[2:]).strip(" ") == curCont._type:
                    prntStack.append(pages[parts[1]])
                    pages[parts[1]] = ""
                else:
                    print(f"Error line: {lineNum}, specified container must be what you are in")
                    exit()
            else:
                print(f"Error line: {lineNum}, cannot toss into a burnt container")
                exit()
        else:
            print(f"Error line: {lineNum}, cannot toss into a closed container")
            exit()
    elif parts[0].lower() == "burn":
        if curCont.open == True:
            if " ".join(parts[1:]).strip(" ") == curCont._type:
                # Burning flushes all tossed pages, tagged with the type.
                for i in prntStack:
                    print(f"[{curCont._type}]: {i}")
                prntStack = []
                curCont.burnt = True
            else:
                print(f"Error line: {lineNum}, specified container must be what you are in")
                exit()
        else:
            print(f"Error line: {lineNum}, cannot burn a closed container")
            exit()
    elif parts[0].lower() == "close":
        if curCont.open == True:
            if curCont.burnt == True:
                if " ".join(parts[1:]).strip(" ") == curCont._type:
                    curCont = container(None)
                else:
                    print(f"Error line: {lineNum}, specified container must be what you are in")
                    exit()
            else:
                print(f"Error line: {lineNum}, cannot close an un-burnt container")
                exit()
        else:
            print(f"Error line: {lineNum}, cannot close a closed container")
            exit()
|
# Fit a k-nearest-neighbours classifier (k=5) on split A and predict split B.
# NOTE(review): XA, yA and XB are assumed to be defined earlier in the
# notebook/script (training features/labels and test features) -- confirm.
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(XA,yA)
yP = knn.predict(XB)
"""
Unit tests for examples/*.
"""
import pytest
from pathlib import Path
# this is a workaround to print text, which is printed during import time, before tests are run
# to guarantee independence of test run order
from abcvoting.output import WARNING
from abcvoting.output import output
from test_abcrules import remove_solver_output
import re
def remove_algorithm_info(out):
    """Strip algorithm-specific (system-dependent or randomized) text from output."""
    patterns = [
        "Algorithm: .*\n",
        (
            "----------------------\nRandom Serial Dictator\n----------------------"
            "\n\n1 winning committee:\n {., ., ., .}\n\n"
        ),
    ]
    for pattern in patterns:
        out = re.sub(pattern, "", out)
    # The randomized rule must never leak into the compared output.
    assert "Random Serial Dictator" not in out
    return out
@pytest.fixture
def check_output(capfd, request):
    """
    Pytest fixture to compare output (stdout) with stored text file.
    Output might depend on
    installed packages, might need to be adjusted to make test work on all platforms.
    If a test fails, the actual output is copied to a file called <testname>.new, so it should
    be easy to accept changes by `mv expected_output/<testname>.new expected_output/<testname>`.
    """
    # reset verbosity, because might have been modified, this is just paranoia
    output.set_verbosity(WARNING)

    yield  # the decorated test body runs here; capfd captures its stdout

    # reset verbosity, examples modify the verbosity level
    output.set_verbosity(WARNING)

    stdout = capfd.readouterr().out
    test_name = request.node.name
    # One expected-output file per test name, stored next to the tests.
    fname = Path(__file__).parent / "expected_output" / test_name
    try:
        with open(fname, encoding="utf8") as file:
            expected_output = file.read()
        expected_output = remove_algorithm_info(str(expected_output))
    except FileNotFoundError:
        # No stored expectation yet: the comparison below fails and writes
        # the actual output to <testname>.new for manual review.
        expected_output = None
    # Normalize away solver- and algorithm-specific noise before comparing.
    stdout = remove_solver_output(str(stdout))
    stdout = remove_algorithm_info(stdout)
    if expected_output != stdout:
        with open(f"{fname}.new", "w", encoding="utf8") as file:
            file.write(stdout)
    assert expected_output == stdout, f"Unexpected output, output written to {fname}.new"
# Each test below imports one example module; importing runs the example's
# code, and the `check_output` fixture (requested as an argument) compares
# the captured stdout against tests/expected_output/<test_name> afterwards.
# noinspection PyUnresolvedReferences
def test_abcbook_example01_py(check_output):
    from examples.abcbook import example201  # noqa: F401


# noinspection PyUnresolvedReferences
def test_abcbook_example02_py(check_output):
    from examples.abcbook import example202  # noqa: F401


# noinspection PyUnresolvedReferences
def test_abcbook_example03_py(check_output):
    from examples.abcbook import example203  # noqa: F401


# noinspection PyUnresolvedReferences
def test_abcbook_example04_py(check_output):
    from examples.abcbook import example204  # noqa: F401


# noinspection PyUnresolvedReferences
def test_abcbook_example05_py(check_output):
    from examples.abcbook import example205  # noqa: F401


# noinspection PyUnresolvedReferences
def test_abcbook_example06_py(check_output):
    from examples.abcbook import example206  # noqa: F401


# noinspection PyUnresolvedReferences
def test_abcbook_example07_py(check_output):
    from examples.abcbook import example207  # noqa: F401


# noinspection PyUnresolvedReferences
def test_abcbook_example08_py(check_output):
    from examples.abcbook import example208  # noqa: F401


# noinspection PyUnresolvedReferences
def test_abcbook_example09_py(check_output):
    from examples.abcbook import example209  # noqa: F401


# noinspection PyUnresolvedReferences
def test_abcbook_example10_py(check_output):
    from examples.abcbook import example210  # noqa: F401


# noinspection PyUnresolvedReferences
def test_abcbook_example11_py(check_output):
    from examples.abcbook import example211  # noqa: F401


# noinspection PyUnresolvedReferences
def test_abcbook_example12_py(check_output):
    from examples.abcbook import example212  # noqa: F401


# noinspection PyUnresolvedReferences
def test_abcbook_example13_py(check_output):
    from examples.abcbook import example213  # noqa: F401


# noinspection PyUnresolvedReferences
def test_abcbook_example14_py(check_output):
    from examples.abcbook import example214  # noqa: F401


# noinspection PyUnresolvedReferences
def test_abcbook_remark02_py(check_output):
    from examples.abcbook import remark02  # noqa: F401


# noinspection PyUnresolvedReferences
def test_abcbook_propositionA2_py(check_output):
    from examples.abcbook import propositionA2  # noqa: F401


# noinspection PyUnresolvedReferences
def test_abcbook_propositionA3_py(check_output):
    from examples.abcbook import propositionA3  # noqa: F401


# Requires the Gurobi solver backend.
@pytest.mark.gurobipy
def test_abcbook_propositionA4_py(check_output):
    from examples.abcbook import propositionA4  # noqa: F401


# noinspection PyUnresolvedReferences
def test_simple_py(check_output):
    from examples import simple  # noqa: F401


@pytest.mark.gurobipy
def test_allrules_py(check_output):
    from examples import allrules  # noqa: F401


# noinspection PyUnresolvedReferences
def test_handling_preflib_files_py(check_output):
    from examples import handling_preflib_files  # noqa: F401


# noinspection PyUnresolvedReferences
@pytest.mark.slow
def test_generating_random_profiles_py(check_output):
    from examples import generating_random_profiles  # noqa: F401
|
import argparse

# Default locations of the GloVe vocabulary and vector files.
DEFAULT_VOCAB_FILE = "glove-6b-zip/glove.vocab.txt"
DEFAULT_VECTOR_FILE = "glove-6b-zip/glove.6B.300d.txt"

# Command-line interface: both paths can be overridden by the caller.
parser = argparse.ArgumentParser()
for flag, default_path in (
    ('--vocab_file', DEFAULT_VOCAB_FILE),
    ('--vectors_file', DEFAULT_VECTOR_FILE),
):
    parser.add_argument(flag, default=default_path, type=str)
|
import os
import py # NOQA
import pytest
import re
import shutil
import subprocess
import tempfile
from types import TracebackType # NOQA
from typing import Any # NOQA
from typing import IO # NOQA
from typing import List # NOQA
from typing import Optional # NOQA
from typing import Tuple # NOQA
from typing import Type # NOQA
import optuna
from optuna.cli import Studies
from optuna.storages.base import DEFAULT_STUDY_NAME_PREFIX
from optuna.storages import RDBStorage
from optuna.structs import CLIUsageError
from optuna.trial import Trial # NOQA
TEST_CONFIG_TEMPLATE = 'default_storage: sqlite:///{default_storage}\n'
class StorageConfigSupplier(object):
    """Context manager yielding a temporary (storage URL, config path) pair.

    The config file is rendered from `config_template` with the temporary
    SQLite file substituted for `{default_storage}`.
    """

    def __init__(self, config_template):
        # type: (str) -> None
        self.tempfile_storage = None  # type: Optional[IO[Any]]
        self.tempfile_config = None  # type: Optional[IO[Any]]
        self.config_template = config_template

    def __enter__(self):
        # type: () -> Tuple[str, str]
        self.tempfile_storage = tempfile.NamedTemporaryFile()
        self.tempfile_config = tempfile.NamedTemporaryFile()
        rendered = self.config_template.format(
            default_storage=self.tempfile_storage.name)
        with open(self.tempfile_config.name, 'w') as fw:
            fw.write(rendered)
        storage_url = 'sqlite:///{}'.format(self.tempfile_storage.name)
        return storage_url, self.tempfile_config.name

    def __exit__(self, exc_type, exc_val, exc_tb):
        # type: (Type[BaseException], BaseException, TracebackType) -> None
        # Closing a NamedTemporaryFile also deletes the backing file.
        for handle in (self.tempfile_storage, self.tempfile_config):
            if handle:
                handle.close()
def _add_option(base_command, key, value, condition):
# type: (List[str], str, str, bool) -> List[str]
if condition:
return base_command + [key, value]
else:
return base_command
@pytest.mark.parametrize('options', [['storage'], ['config'], ['storage', 'config']])
def test_create_study_command(options):
    # type: (List[str]) -> None
    """`optuna create-study` should print a fresh auto-generated study name."""
    with StorageConfigSupplier(TEST_CONFIG_TEMPLATE) as (storage_url, config_path):
        storage = RDBStorage(storage_url)

        # Create study.
        command = ['optuna', 'create-study']
        command = _add_option(command, '--storage', storage_url, 'storage' in options)
        command = _add_option(command, '--config', config_path, 'config' in options)
        subprocess.check_call(command)

        # Command output should be in name string format (no-name + UUID).
        study_name = str(subprocess.check_output(command).decode().strip())
        name_re = r'^no-name-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$'
        assert re.match(name_re, study_name) is not None

        # study_name should be stored in storage.  The command ran twice
        # (check_call then check_output), so the captured name belongs to
        # the second study created.
        study_id = storage.get_study_id_from_name(study_name)
        assert study_id == 2


def test_create_study_command_with_study_name():
    # type: () -> None
    """A --study-name given on the command line should be echoed and stored."""
    with StorageConfigSupplier(TEST_CONFIG_TEMPLATE) as (storage_url, config_path):
        storage = RDBStorage(storage_url)

        study_name = 'test_study'

        # Create study with name.
        command = ['optuna', 'create-study', '--storage', storage_url, '--study-name', study_name]
        study_name = str(subprocess.check_output(command).decode().strip())

        # Check if study_name is stored in the storage.
        study_id = storage.get_study_id_from_name(study_name)
        assert storage.get_study_name_from_id(study_id) == study_name
def test_create_study_command_without_storage_url():
    # type: () -> None
    """`optuna create-study` without a storage URL should print usage and fail."""
    dummy_home = tempfile.mkdtemp()
    # BUGFIX: copy the environment instead of aliasing it.  The original
    # `env = os.environ` mutated the live process environment, so HOME was
    # overwritten for the rest of the test session.
    env = os.environ.copy()
    env['HOME'] = dummy_home
    with pytest.raises(subprocess.CalledProcessError) as err:
        subprocess.check_output(['optuna', 'create-study'], env=env)
    usage = err.value.output.decode()
    assert usage.startswith('usage:')
    shutil.rmtree(dummy_home)
def test_create_study_command_with_direction():
    # type: () -> None
    """Only 'minimize' is an accepted --direction; other values must fail."""
    with StorageConfigSupplier(TEST_CONFIG_TEMPLATE) as (storage_url, config_path):
        storage = RDBStorage(storage_url)

        command = ['optuna', 'create-study', '--storage', storage_url, '--direction', 'minimize']
        study_name = str(subprocess.check_output(command).decode().strip())
        study_id = storage.get_study_id_from_name(study_name)
        assert storage.get_study_direction(study_id) == optuna.structs.StudyDirection.MINIMIZE

        command = ['optuna', 'create-study', '--storage', storage_url, '--direction', 'maximize']
        # Currently, 'maximize' is not implemented.
        with pytest.raises(subprocess.CalledProcessError):
            subprocess.check_call(command)

        command = ['optuna', 'create-study', '--storage', storage_url, '--direction', 'test']
        # --direction should be either 'minimize' or 'maximize'.
        with pytest.raises(subprocess.CalledProcessError):
            subprocess.check_call(command)


@pytest.mark.parametrize('options', [['storage'], ['config'], ['storage', 'config']])
def test_study_set_user_attr_command(options):
    # type: (List[str]) -> None
    """`optuna study set-user-attr` should persist key/value pairs in storage."""
    with StorageConfigSupplier(TEST_CONFIG_TEMPLATE) as (storage_url, config_path):
        storage = RDBStorage(storage_url)

        # Create study.
        study_name = storage.get_study_name_from_id(storage.create_new_study_id())

        base_command = ['optuna', 'study', 'set-user-attr', '--study', study_name]
        base_command = _add_option(base_command, '--storage', storage_url, 'storage' in options)
        base_command = _add_option(base_command, '--config', config_path, 'config' in options)

        example_attrs = {'architecture': 'ResNet', 'baselen_score': '0.002'}
        for key, value in example_attrs.items():
            subprocess.check_call(base_command + ['--key', key, '--value', value])

        # Attrs should be stored in storage.
        study_id = storage.get_study_id_from_name(study_name)
        study_user_attrs = storage.get_study_user_attrs(study_id)
        assert len(study_user_attrs) == 2
        assert all([study_user_attrs[k] == v for k, v in example_attrs.items()])
@pytest.mark.parametrize('options', [['storage'], ['config'], ['storage', 'config']])
def test_studies_command(options):
    # type: (List[str]) -> None
    """`optuna studies` should render one table row per study with trial counts."""
    with StorageConfigSupplier(TEST_CONFIG_TEMPLATE) as (storage_url, config_path):
        storage = RDBStorage(storage_url)

        # First study.
        study_1 = optuna.create_study(storage)

        # Second study.
        study_2 = optuna.create_study(storage, study_name='study_2')
        study_2.optimize(objective_func, n_trials=10)

        # Run command.
        command = ['optuna', 'studies']
        command = _add_option(command, '--storage', storage_url, 'storage' in options)
        command = _add_option(command, '--config', config_path, 'config' in options)
        output = str(subprocess.check_output(command).decode().strip())
        rows = output.split('\n')

        def get_row_elements(row_index):
            # type: (int) -> List[str]
            # Split an ASCII table row on '|' and drop the empty edge cells.
            return [r.strip() for r in rows[row_index].split('|')[1: -1]]

        # 2 studies -> 6 rows: 3 border lines, 1 header row, 2 data rows.
        assert len(rows) == 6
        assert tuple(get_row_elements(1)) == Studies._study_list_header

        # Check study_name and n_trials for the first study.
        elms = get_row_elements(3)
        assert elms[0] == study_1.study_name
        assert elms[2] == '0'

        # Check study_name and n_trials for the second study.
        elms = get_row_elements(4)
        assert elms[0] == study_2.study_name
        assert elms[2] == '10'
def test_create_study_command_with_skip_if_exists():
    # type: () -> None
    """--skip-if-exists should reuse an existing study instead of failing."""
    with StorageConfigSupplier(TEST_CONFIG_TEMPLATE) as (storage_url, config_path):
        storage = RDBStorage(storage_url)
        study_name = 'test_study'

        # Create study with name.
        command = ['optuna', 'create-study', '--storage', storage_url, '--study-name', study_name]
        study_name = str(subprocess.check_output(command).decode().strip())

        # Check if study_name is stored in the storage.
        study_id = storage.get_study_id_from_name(study_name)
        assert storage.get_study_name_from_id(study_id) == study_name

        # Try to create the same name study without `--skip-if-exists` flag (error).
        command = ['optuna', 'create-study', '--storage', storage_url, '--study-name', study_name]
        with pytest.raises(subprocess.CalledProcessError):
            subprocess.check_output(command)

        # Try to create the same name study with `--skip-if-exists` flag (OK).
        command = ['optuna', 'create-study', '--storage',
                   storage_url, '--study-name', study_name, '--skip-if-exists']
        study_name = str(subprocess.check_output(command).decode().strip())
        new_study_id = storage.get_study_id_from_name(study_name)
        assert study_id == new_study_id  # The existing study instance is reused.


@pytest.mark.parametrize('options', [['storage'], ['config'], ['storage', 'config']])
def test_dashboard_command(options):
    # type: (List[str]) -> None
    """`optuna dashboard --out` should write a Bokeh HTML report."""
    with \
            StorageConfigSupplier(TEST_CONFIG_TEMPLATE) as (storage_url, config_path), \
            tempfile.NamedTemporaryFile('r') as tf_report:

        storage = RDBStorage(storage_url)
        study_name = storage.get_study_name_from_id(storage.create_new_study_id())

        command = ['optuna', 'dashboard', '--study', study_name, '--out', tf_report.name]
        command = _add_option(command, '--storage', storage_url, 'storage' in options)
        command = _add_option(command, '--config', config_path, 'config' in options)
        subprocess.check_call(command)

        html = tf_report.read()
        assert '<body>' in html
        assert 'bokeh' in html
# An example of objective functions for testing study optimize command
def objective_func(trial):
    # type: (Trial) -> float
    """Quadratic example objective whose minimum (0.0) lies at x = -5."""
    sampled_x = trial.suggest_uniform('x', -10, 10)
    return (sampled_x + 5) ** 2
@pytest.mark.parametrize('options', [['storage'], ['config'], ['storage', 'config']])
def test_study_optimize_command(options):
    # type: (List[str]) -> None
    """`optuna study optimize` should run the objective and record 10 trials."""
    with StorageConfigSupplier(TEST_CONFIG_TEMPLATE) as (storage_url, config_path):
        storage = RDBStorage(storage_url)

        study_name = storage.get_study_name_from_id(storage.create_new_study_id())
        # This test file itself supplies the objective function to the CLI.
        command = ['optuna', 'study', 'optimize', '--study', study_name, '--n-trials', '10',
                   __file__, 'objective_func']
        command = _add_option(command, '--storage', storage_url, 'storage' in options)
        command = _add_option(command, '--config', config_path, 'config' in options)
        subprocess.check_call(command)

        study = optuna.Study(storage=storage_url, study_name=study_name)
        assert len(study.trials) == 10
        assert 'x' in study.best_params

        # Check if a default value of study_name is stored in the storage.
        assert storage.get_study_name_from_id(study.study_id).startswith(DEFAULT_STUDY_NAME_PREFIX)


def test_study_optimize_command_inconsistent_args():
    # type: () -> None
    """Omitting --study must make `optuna study optimize` fail."""
    with tempfile.NamedTemporaryFile() as tf:
        db_url = 'sqlite:///{}'.format(tf.name)

        # --study argument is missing.
        with pytest.raises(subprocess.CalledProcessError):
            subprocess.check_call(['optuna', 'study', 'optimize', '--storage', db_url,
                                   '--n-trials', '10', __file__, 'objective_func'])


def test_empty_argv():
    # type: () -> None
    """Running `optuna` with no arguments should behave like `optuna help`."""
    command_empty = ['optuna']
    command_empty_output = str(subprocess.check_output(command_empty))

    command_help = ['optuna', 'help']
    command_help_output = str(subprocess.check_output(command_help))

    assert command_empty_output == command_help_output


def test_get_storage_url(tmpdir):
    # type: (py.path.local) -> None
    """get_storage_url: CLI argument wins over config; neither present is an error."""
    storage_in_args = 'sqlite:///args.db'
    storage_in_config = 'sqlite:///config.db'
    sample_config_file = tmpdir.join('optuna.yml')
    sample_config_file.write('default_storage: {}'.format(storage_in_config))
    sample_config = optuna.config.load_optuna_config(str(sample_config_file))
    default_config = optuna.config.load_optuna_config(None)

    # storage_url has priority over config_path.
    assert storage_in_args == optuna.cli.get_storage_url(storage_in_args, sample_config)
    assert storage_in_args == optuna.cli.get_storage_url(storage_in_args, default_config)
    assert storage_in_config == optuna.cli.get_storage_url(None, sample_config)

    # Config file does not have default_storage key.
    empty_config_file = tmpdir.join('empty.yml')
    empty_config_file.write('')
    empty_config = optuna.config.load_optuna_config(str(empty_config_file))
    with pytest.raises(CLIUsageError):
        optuna.cli.get_storage_url(None, empty_config)
|
from pynput.keyboard import Key, Controller, Listener
import pynput
import keyboard
import time
class MyKeyboard:
    """Records key press/release events via pynput and can replay them."""

    def __init__(self):
        # BUGFIX: these were mutable class attributes, so every MyKeyboard
        # instance shared a single event list and controller.  They are now
        # per-instance state; the external API (controller/geral/listen/
        # playit) is unchanged.
        self.controller = Controller()
        self.geral = []

    def on_press(self, key):
        print('{0} pressed'.format(
            key))
        self.geral.append({"pressed": key})

    def on_release(self, key):
        self.geral.append({"released": key})
        if key == Key.esc:
            # Returning False stops the pynput listener.
            return False
        print('{0} release'.format(
            key))

    def listen(self):
        """Block, recording key events until Esc is released."""
        with Listener(self.on_press, self.on_release) as listener:
            listener.join()
            try:
                listener.wait()
            finally:
                listener.stop()

    def playit(self):
        """Replay the recorded press/release events in order."""
        print('*REPLAYING*')
        for dict_key in self.geral:
            tipo, tecla = list(dict_key.items())[0]
            if tipo == 'released':
                self.controller.release(tecla)
            elif tipo == 'pressed':
                self.controller.press(tecla)
# Record keystrokes until Esc is released, then replay them.
dale = MyKeyboard()
dale.listen()
dale.playit()
|
# Copyright 2017 Alethea Katherine Flowers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
def parametrize_decorator(arg_names, arg_values_list):
    """Parametrize a session.

    Registers additional invocations of the decorated session function,
    one per entry of ``arg_values_list``, bound to ``arg_names``.
    Parametrization happens during session discovery; each invocation shows
    up as its own session in nox.

    Args:
        arg_names (Sequence[str]): Argument names, given either as a
            sequence or as a comma-separated string ('arg' / 'arg,arg2').
        arg_values_list (Sequence[Union[Any, Tuple]]): One entry per
            invocation.  For a single argument name this is a plain list of
            values (``[1, 2, 3]``); for N names it is a list of N-tuples
            (``[(1, 'a'), (2, 'b')]``).
    """
    # Normalize arg_names: accept 'arg', 'arg,arg2' or an iterable of names.
    if not isinstance(arg_names, (list, tuple)):
        arg_names = [name.strip() for name in arg_names.split(",") if name.strip()]

    # With a single name, each value may be a bare item rather than a tuple;
    # wrap everything so the zip below always sees sequences.
    if len(arg_names) == 1:
        if not isinstance(arg_values_list, (list, tuple)):
            arg_values_list = [arg_values_list]
        arg_values_list = [[value] for value in arg_values_list]

    # One call spec ({name: value, ...}) per requested invocation.
    call_specs = [dict(zip(arg_names, values)) for values in arg_values_list]

    def inner(f):
        # Stacked @parametrize decorators combine multiplicatively.
        f.parametrize = update_call_specs(getattr(f, "parametrize", None), call_specs)
        return f

    return inner
def update_call_specs(call_specs, new_specs):
    """Cross-combine existing call specs with new ones (cartesian merge)."""
    # An empty/None base acts as a single empty spec, so new_specs pass through.
    base = call_specs or [{}]
    combined = []
    for addition in new_specs:
        for existing in base:
            merged = dict(existing)
            merged.update(addition)
            combined.append(merged)
    return combined
def generate_session_signature(func, call_spec):
    """Render a call spec as a deterministic '(k=v, ...)' signature string."""
    # Sorting the keys makes the signature independent of dict ordering.
    rendered = ("{}={}".format(key, repr(call_spec[key])) for key in sorted(call_spec))
    return "({})".format(", ".join(rendered))
def generate_calls(func, call_specs):
    """Create one wrapper per call spec, each forcing that spec's kwargs."""

    def _bind(spec):
        # A dedicated factory scope so each wrapper captures its own spec
        # (avoids the classic late-binding closure bug).
        @functools.wraps(func)
        def call_wrapper(*args, **kwargs):
            kwargs.update(spec)
            return func(*args, **kwargs)

        return call_wrapper

    calls = []
    for spec in call_specs:
        wrapper = _bind(spec)
        wrapper.session_signature = generate_session_signature(func, spec)
        wrapper.call_spec = spec
        calls.append(wrapper)
    return calls
|
"""
Definitions of the tier 6 food items: Melon, Mushroom, Pizza, and Steak.
"""
__all__ = ['Melon', 'Mushroom', 'Pizza', 'Steak']
# Local application imports
from gym_snape.game.food import Food
# Third party imports
import numpy as np
class Melon(Food):
    """Tier-6 food applying the melon armor effect to one deck pet."""

    def __init__(self):
        super().__init__()
        self._name = 'MELON'

    def on_use(self, index):
        """Gives a deck pet the melon armor effect."""
        pet = self._deck[index]
        if pet:
            pet.effect = 'Mln'
            self._last_op_success = True
        else:
            # No pet in that slot: nothing to apply the effect to.
            self._last_op_success = False
class Mushroom(Food):
    """Tier-6 food granting one deck pet the extra-life effect."""

    def __init__(self):
        super().__init__()
        self._name = 'MUSHROOM'

    def on_use(self, index):
        """Gives a deck pet the extra life effect."""
        pet = self._deck[index]
        if pet:
            pet.effect = '1up'
            self._last_op_success = True
        else:
            # No pet in that slot: nothing to apply the effect to.
            self._last_op_success = False
class Pizza(Food):
    """Tier-6 food buffing up to two random deck pets by +2/+2."""

    def __init__(self):
        super().__init__()
        self._name = 'PIZZA'
        # Stat boost applied to each chosen pet.
        self.attack = 2
        self.health = 2

    def on_use(self, *args, **kwargs):
        """Give 2 random animals +2/+2."""
        candidates = [pet for pet in self._deck if pet]
        count = min(len(candidates), 2)
        if count < 1:
            # Empty deck: nothing to buff.
            self._last_op_success = False
            return
        for pet in np.random.choice(candidates, count, replace=False):
            pet.attack += self.attack
            pet.health += self.health
        self._last_op_success = True
class Steak(Food):
    """Tier-6 food granting one deck pet the Steak Attack effect."""

    def __init__(self):
        super().__init__()
        self._name = 'STEAK'

    def on_use(self, index):
        """Give an animal the Steak Attack effect."""
        pet = self._deck[index]
        if pet:
            pet.effect = 'Stk'
            self._last_op_success = True
        else:
            # No pet in that slot: nothing to apply the effect to.
            self._last_op_success = False
|
from setuptools import setup
from buildconf import BuildUnrarCommand
from distutils.command.build import build
# The PyPI long description is taken verbatim from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()


class BuildUnrarBeforeBuild(build):
    """`build` command variant that compiles the bundled unrar library first."""

    def run(self):
        # Run the custom build_unrar command, then the standard build steps.
        self.run_command('build_unrar')
        build.run(self)
setup(
    cmdclass={
        # Hook the native unrar build in front of the standard build step.
        'build': BuildUnrarBeforeBuild,
        'build_unrar': BuildUnrarCommand
    },
    name='unrar-cffi',
    license='apache-2.0',
    description='Read RAR file from python -- cffi edition',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Davide Romanini",
    author_email="davide.romanini@gmail.com",
    url="https://github.com/davide-romanini/unrar-cffi",
    keywords=["rar", "unrar", "archive", "cffi"],
    # Version is derived from git tags via setuptools_scm.
    use_scm_version=True,
    packages=(
        'unrar.cffi',
    ),
    install_requires=[
        "cffi"
    ],
    setup_requires=[
        "cffi",
        "pytest-runner",
        "wheel",
        "setuptools_scm"
    ],
    tests_require=[
        "pytest"
    ],
    package_dir={
        'unrar.cffi': 'unrar/cffi'
    },
    # Ship prebuilt Windows DLLs alongside the package.
    package_data={
        'unrar.cffi': ['*.dll']
    },
    include_package_data=True,
    # cffi extension modules are declared in buildconf.py.
    cffi_modules=["buildconf.py:create_builder"],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7'
    ],
    python_requires='>=3.4'
)
import os
import sys

# Quick environment diagnostics: which interpreter is running, from where,
# and what the module search path contains (printed twice, the second time
# one entry per line for readability).
print("--- SOME DIAGNOSTIC VALUES: ---")
print("\nsys.executable: ", sys.executable)
print("\ncwd (Current Working Directory): ", os.getcwd())
print("\nsys.path: ", sys.path)
print("\nsys.path again, this time on separate lines: ")
for p in sys.path:
    print("   ", p)
|
#!/usr/bin/env python
from mil_msgs.msg import ObjectInImage
from std_msgs.msg import Header
import cv2
import numpy as np
from mil_msgs.msg import Point2D
class Overlay:
    """Draw a mil_msgs ObjectInImage annotation onto an OpenCV image.

    The geometry drawn depends on how many points the object carries:
    0 -> label only, 1 -> point, 2 -> rectangle, >2 -> closed polygon.
    """

    def __init__(self, header,object_in_image, color = (0,255,0), brush = 3, font = cv2.FONT_HERSHEY_SIMPLEX, font_scale=1, *args, **kwargs):
        self.header = header
        self.object = object_in_image
        self.shape = ""
        self.color = color  # drawing color; presumably BGR -- confirm caller convention
        self.brush = brush  # line thickness (and point radius)
        self.font = font
        self.font_scale = font_scale
        # Classify the annotation by its point count.
        if len(self.object.points)==0:
            self.shape = "none"
        elif len(self.object.points)==1:
            self.shape = "point"
        elif len(self.object.points)==2:
            self.shape = "rectangle"
        elif len(self.object.points)>2:
            self.shape = "polygon"

    def draw_on(self, img):
        """Draw the object's geometry plus its name label onto `img` and return it."""
        if self.shape == "none":
            # No geometry: inject a synthetic anchor point so the label
            # below has somewhere to be drawn (top-left corner).  Note this
            # mutates the wrapped message's points list.
            self.object.points = [None]*1
            self.object.points[0] = Point2D()
            self.object.points[0].x = 0
            self.object.points[0].y = 40
            img = self.draw_none(img)
        elif self.shape == "point":
            img = self.draw_point(img)
        elif self.shape == "rectangle":
            img = self.draw_rectangle(img)
        elif self.shape == "polygon":
            img = self.draw_polygon(img)
        #TODO: make sure text wont write over other text
        # The label is anchored at the first point of the geometry.
        p0 = (int(self.object.points[0].x),int(self.object.points[0].y))
        cv2.putText(img,self.object.name,p0, self.font, self.font_scale ,self.color,self.brush,cv2.LINE_AA)
        return img

    def draw_none(self,img):
        # Nothing to draw; only the label is rendered by draw_on().
        return img

    def draw_point(self,img):
        # Filled circle (thickness -1) at the single annotated point.
        p = (int(self.object.points[0].x),int(self.object.points[0].y))
        img = cv2.circle(img, p, self.brush, self.color,-1)
        return img

    def draw_rectangle(self,img):
        # The two points are opposite corners of the rectangle.
        p0 = (int(self.object.points[0].x),int(self.object.points[0].y))
        p1 = (int(self.object.points[1].x),int(self.object.points[1].y))
        img = cv2.rectangle(img, p0,p1,self.color,self.brush)
        return img

    def draw_polygon(self,img):
        # Closed polyline through all annotated points.
        p = []
        for i in self.object.points:
            p.append([int(i.x),int(i.y)])
        pts = np.array(p, np.int32)
        pts = pts.reshape((-1,1,2))
        img = cv2.polylines(img,[pts],True,self.color,self.brush)
        return img
|
#!/usr/bin/env python
import argparse
import logging
from dataclasses import dataclass, field
from typing import List
from rl_rpsr import bsr, pomdp, psr, rpsr
from rl_rpsr.policy import ModelPolicy, Policy, RandomPolicy
from rl_rpsr.serializer import IntentsSerializer, TestsSerializer, VF_Serializer
def make_policy(models, pomdp_model, args) -> Policy:
    """Build the evaluation policy selected by ``args.policy``.

    'random' yields a RandomPolicy over the POMDP; 'bsr'/'psr'/'rpsr' load
    the corresponding pre-computed value function and wrap the matching
    model in a ModelPolicy.
    """
    if args.policy == 'random':
        return RandomPolicy(pomdp_model)

    # Model-based policies need a previously-computed value function file.
    vf_paths = {
        'bsr': args.load_vf_bsr,
        'psr': args.load_vf_psr,
        'rpsr': args.load_vf_rpsr,
    }
    vf = VF_Serializer().load(vf_paths[args.policy])
    return ModelPolicy(models[args.policy], vf)
def return_(rewards, discount):
    """Compute the discounted return: sum of rewards[i] * discount**i."""
    total = 0.0
    weight = 1.0
    for reward in rewards:
        total += reward * weight
        # Each step's reward is weighted by one more factor of the discount.
        weight *= discount
    return total
@dataclass
class Simulation:
    """Parallel per-step logs (actions, rewards, observations) of one rollout."""

    actions: List[int] = field(default_factory=list)
    rewards: List[int] = field(default_factory=list)
    observations: List[int] = field(default_factory=list)

    def append(self, action, reward, observation):
        """Record one (action, reward, observation) step in all three logs."""
        for log, value in (
            (self.actions, action),
            (self.rewards, reward),
            (self.observations, observation),
        ):
            log.append(value)
def simulate(
    env, policy, num_steps
) -> Simulation:  # pylint: disable=too-many-locals
    """Roll out `policy` in `env` for num_steps - 1 steps, logging each step.

    NOTE(review): assumes env.step() returns a gym-style 4-tuple whose info
    dict carries the discrete 'observation' index, and that `reward` is a
    0-d array/tensor (hence .item() below) -- confirm against the rl_rpsr
    env implementations.
    """
    logger = logging.getLogger(__name__)

    sim = Simulation()

    env.reset()
    action = policy.reset()
    for _ in range(num_steps - 1):
        _, reward, _, info = env.step(action)
        observation = info['observation']
        logger.info(
            'action %s observation %s reward %f',
            env.model.actions[action],
            env.model.observations[observation],
            reward,
        )
        sim.append(action, reward.item(), observation)
        # The policy conditions its next action on what just happened.
        action = policy.step(action, observation)

    return sim
def model_rewards(model, actions, observations):
    """Replay an action/observation trace through `model`, collecting the
    expected reward at each step."""
    state = model.start.copy()
    collected = []
    for act, obs in zip(actions, observations):
        # Reward is expected *before* the state transitions on (act, obs).
        collected.append(model.expected_reward(state, act))
        state = model.dynamics(state, act, obs)
    return collected
def main_eval(args):
    """Run the evaluation: build the models, simulate, and report returns.

    Each simulated trace is re-scored under every loaded model (bsr always;
    psr/rpsr only when their cores were supplied), and the discounted
    return per model is logged and printed.
    """
    logger = logging.getLogger(__name__)
    logger.info('rl-psr-eval with args %s', args)
    pomdp_model = pomdp.POMDP_Model.make(args.pomdp)
    models = {}
    # The belief-state model needs no extra data; PSR/RPSR require their
    # serialized core tests / intents respectively.
    models['bsr'] = bsr.BSR_Model(pomdp_model)
    if args.load_core_psr is not None:
        Q = TestsSerializer().load(args.load_core_psr)
        models['psr'] = psr.PSR_Model(pomdp_model, Q)
    if args.load_core_rpsr is not None:
        I = IntentsSerializer().load(args.load_core_rpsr)
        models['rpsr'] = rpsr.RPSR_Model(pomdp_model, I)
    # main() has already verified that the chosen env's model is loaded.
    if args.env == 'bsr':
        env = bsr.BSR(models['bsr'])
    elif args.env == 'psr':
        env = psr.PSR(models['psr'])
    elif args.env == 'rpsr':
        env = rpsr.RPSR(models['rpsr'])
    policy = make_policy(models, pomdp_model, args)
    logger.info('pomdp %s env %s policy %s', args.pomdp, args.env, args.policy)
    for i in range(args.num_simulations):
        logger.info('simulation %d / %d', i, args.num_simulations)
        sim = simulate(env, policy, num_steps=args.num_steps)
        for key, model in models.items():
            rewards = model_rewards(model, sim.actions, sim.observations)
            s = f'{args.env} {args.policy} {key} {return_(rewards, env.discount)}'
            logger.info(s)
            print(s)
def main():
    """CLI entry point: parse arguments, validate flag combinations, run eval."""
    parser = argparse.ArgumentParser()
    parser.add_argument('pomdp')
    parser.add_argument('env', choices=['bsr', 'psr', 'rpsr'])
    # A required positional never uses a default, so the original
    # default='random' was dead and has been removed.
    parser.add_argument('policy', choices=['random', 'bsr', 'psr', 'rpsr'])
    parser.add_argument('--load-core-psr', default=None)
    parser.add_argument('--load-core-rpsr', default=None)
    parser.add_argument('--load-vf-bsr', default=None)
    parser.add_argument('--load-vf-psr', default=None)
    parser.add_argument('--load-vf-rpsr', default=None)
    parser.add_argument('--num-steps', type=int, default=1000)
    parser.add_argument('--num-simulations', type=int, default=1)
    parser.add_argument('--log-filename', default=None)
    parser.add_argument(
        '--log-level',
        choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'],
        default='INFO',
    )
    args = parser.parse_args()

    # psr/rpsr envs and policies are only usable when their cores were
    # loaded, and any model-based policy also needs its value function.
    if args.env == 'psr' and args.load_core_psr is None:
        parser.error(
            'argument --env: invalid choice: \'psr\' (psr model not loaded)'
        )
    if args.env == 'rpsr' and args.load_core_rpsr is None:
        parser.error(
            'argument --env: invalid choice: \'rpsr\' (rpsr model not loaded)'
        )
    if args.policy == 'psr' and args.load_core_psr is None:
        parser.error(
            'argument --policy: invalid choice: \'psr\' (psr model not loaded)'
        )
    if args.policy == 'rpsr' and args.load_core_rpsr is None:
        parser.error(
            'argument --policy: invalid choice: \'rpsr\' (rpsr model not loaded)'
        )
    if args.policy == 'bsr' and args.load_vf_bsr is None:
        parser.error(
            'argument --policy: invalid choice: \'bsr\' (bsr vf not loaded)'
        )
    if args.policy == 'psr' and args.load_vf_psr is None:
        parser.error(
            'argument --policy: invalid choice: \'psr\' (psr vf not loaded)'
        )
    if args.policy == 'rpsr' and args.load_vf_rpsr is None:
        parser.error(
            'argument --policy: invalid choice: \'rpsr\' (rpsr vf not loaded)'
        )
    # Logging only goes to a file when requested; otherwise the default
    # (unconfigured) logging behavior is kept, as in the original.
    if args.log_filename is not None:
        logging.basicConfig(
            filename=args.log_filename,
            datefmt='%Y/%m/%d %H:%M:%S',
            format='%(asctime)s %(relativeCreated)d %(levelname)-8s %(name)-12s %(funcName)s - %(message)s',
            level=getattr(logging, args.log_level),
        )
    try:
        main_eval(args)
    # Explicit spelling of the original bare `except:`; identical behavior
    # (logs every exception, including KeyboardInterrupt, then re-raises).
    except BaseException:
        logger = logging.getLogger(__name__)
        logger.exception('The program raised an uncaught exception')
        raise


if __name__ == '__main__':
    main()
|
# coding: utf-8
# # The Basal Ganglia
#
# The basal ganglia
# according to [Stewart 2010](http://compneuro.uwaterloo.ca/files/publications/stewart.2010.pdf)
# is an action selector
# that chooses whatever action has the best "salience" or "goodness".
# Its really interesting behaviour manifests itself
# when it interacts with the thalamus and other components of the brain,
# but in this example we will only show the basal ganglia's basic behaviour.
# It will choose between three actions
# that we'll pretend are "eating", "sleeping" and "playing".
# In[ ]:
import numpy as np
import matplotlib.pyplot as plt
import nengo
# ## Step 1: Create the Network
# Here we create the basal ganglia and the action input node.
# In[ ]:
model = nengo.Network(label='Basal Ganglia')
with model:
    # Basal ganglia network that selects among 3 candidate actions.
    basal_ganglia = nengo.networks.BasalGanglia(dimensions=3)
class ActionIterator(object):
    """Produce a utility vector in which one action dominates, cycling each second."""

    def __init__(self, dimensions):
        # Every action starts at the low baseline utility.
        self.actions = np.full(dimensions, 0.1)

    def step(self, t):
        # Index of the currently dominant action (cycles 0, 1, 2 per second).
        winner = int(t % 3)
        self.actions.fill(0.1)
        self.actions[winner] = 0.8
        return self.actions
action_iterator = ActionIterator(dimensions=3)
with model:
    # Node that feeds the time-varying action utilities into the model.
    actions = nengo.Node(action_iterator.step, label="actions")
# ## Step 2: Connect the Network
# Connect the input to the basal ganglia and connect the probes
# In[ ]:
with model:
    nengo.Connection(actions, basal_ganglia.input, synapse=None)
    # Probe the filtered input utilities and the basal ganglia output.
    selected_action = nengo.Probe(basal_ganglia.output, synapse=0.01)
    input_actions = nengo.Probe(actions, synapse=0.01)
# ## Step 3: Simulate the Network and Plot the Results
# In[ ]:
with nengo.Simulator(model) as sim:
    # This will take a while
    sim.run(6)
# In[ ]:
# Top plot: which action the raw input favours; bottom plot: which one the
# basal ganglia actually selected. The two should track each other.
plt.subplot(2, 1, 1)
plt.plot(sim.trange(), sim.data[input_actions].argmax(axis=1))
plt.ylim(-0.1, 2.1)
plt.xlabel('time [s]')
plt.title("Index of actual max value")
plt.subplot(2, 1, 2)
plt.plot(sim.trange(), sim.data[selected_action].argmax(axis=1))
plt.ylim(-0.1, 2.1)
plt.xlabel('time [s]')
plt.title("Basal ganglia selected max value")
plt.tight_layout()
# As expected, the maximum index
# is found at 0, then 1, then 2
# or "eating", "sleeping", then "playing".
# Note that if you zoom in enough on the basal ganglia values,
# you'll be able to see a bit of a delay between finding max values.
# If you read the aforementioned paper,
# you'll see that this is expected and matches previous experiments.
|
import xmltodict
# Module-level cache of the parsed settings.xml content; populated by load_settings().
settings = dict()
def load_settings():
    """Parse Data/settings.xml into the module-level `settings` dict and return it."""
    global settings
    with open('Data/settings.xml') as config_file:
        settings = xmltodict.parse(config_file.read())['settings']
    return settings
def check_min_setup():
    """Exit the program unless every mandatory bot setting is present."""
    required = [
        (get_app_token, 'app_token'),
        (get_client_id, 'client_id'),
        (get_client_secret, 'client_secret'),
        (get_bot_account, 'bot_account'),
        (get_channel, 'channel'),
    ]
    param_missing = False
    for getter, name in required:
        if not getter():
            param_missing = True
            print(name + ' not defined')
    if param_missing:
        #input('\nPress ENTER to exit')
        quit()
def _bot_setup():
    """Shortcut to the 'bot_setup' section of the loaded settings."""
    return settings['bot_setup']


def get_app_token():
    return _bot_setup()['app_token']


def get_client_id():
    return _bot_setup()['client_id']


def get_client_secret():
    return _bot_setup()['client_secret']


def get_bot_account():
    return _bot_setup()['bot_account']


def get_prefix():
    return _bot_setup()['command_prefix']


def get_channel():
    return _bot_setup()['channel']


def get_logging():
    return _bot_setup()['logging']


def get_random_tf_id():
    return _bot_setup()['custom_rewards']['random_tf']


def get_direct_tf_id():
    return _bot_setup()['custom_rewards']['direct_tf']


def get_periodic_messages():
    return _bot_setup()['scheduled_messages']['messages']
def configure_periodic_messages():
    """Re-key the raw XML 'message' entries into a numbered 'messages' dict."""
    scheduled = settings['bot_setup']['scheduled_messages']
    msg_list = scheduled['message']
    scheduled.pop('message')
    scheduled.update({'messages': {}})
    if msg_list:
        # xmltodict yields a plain string for a single entry, a list otherwise.
        if isinstance(msg_list, str):
            scheduled['messages'].update({'1': msg_list})
        else:
            for msg in msg_list:
                if msg:
                    next_no = len(scheduled['messages']) + 1
                    scheduled['messages'].update({str(next_no): msg})
    return
def add_periodic_message(msg):
    """Store `msg` under the next free numeric key."""
    next_no = len(get_periodic_messages()) + 1
    settings['bot_setup']['scheduled_messages']['messages'].update({str(next_no): msg})
    return


def remove_periodic_message(msg_no):
    """Delete message `msg_no`; return True on success, False if absent."""
    if msg_no not in get_periodic_messages():
        return False
    settings['bot_setup']['scheduled_messages']['messages'].pop(msg_no)
    return True
def get_periodic_timer():
    """Interval in minutes between scheduled messages, defaulting to 15 when unset."""
    scheduled = settings['bot_setup']['scheduled_messages']
    if not scheduled['message_interval_minutes']:
        scheduled.update({'message_interval_minutes': 15})
    return scheduled['message_interval_minutes']


def set_periodic_timer(time):
    """Override the scheduled-message interval."""
    settings['bot_setup']['scheduled_messages']['message_interval_minutes'] = time
def _module_setting(module, key):
    """Raw value of settings['modules'][module][key]."""
    return settings['modules'][module][key]


def _module_flag(module, key='enable'):
    """True when the module setting equals the string 'true' (case-insensitive)."""
    return _module_setting(module, key).lower() == 'true'


def basics_enabled():
    return _module_flag('basics')


def get_twitter():
    return _module_setting('basics', 'twitter')


def get_discord():
    return _module_setting('basics', 'discord')


def emotes_enabled():
    return _module_flag('emotes')


def get_oof():
    return _module_setting('emotes', 'haurbuOof')


def get_heart():
    return _module_setting('emotes', 'haurbuHeart')


def raffle_enabled():
    return _module_flag('raffle')


def get_raffle_reminder_interval():
    return _module_setting('raffle', 'reminder_interval_minutes')


def quotes_enabled():
    return _module_flag('quotes')


def vip_quotes_allowed():
    return _module_flag('quotes', 'allow_vip')


def tf_enabled():
    return _module_flag('tf')


def rimworld_enabled():
    return _module_flag('rimworld')


def get_toolkit_path():
    return _module_setting('rimworld', 'toolkit_path')


def get_rimworld_mods():
    return _module_setting('rimworld', 'mods')


def avorion_enabled():
    return _module_flag('avorion')


def get_avorion_link():
    return _module_setting('avorion', 'profile_link')


def counter_enabled():
    return _module_flag('counter')


def vip_counter_allowed():
    return _module_flag('counter', 'allow_vip')
import pandas as pd
from datetime import datetime, date, timedelta
import numpy as np
import os
from pathlib import Path
from pandas.core.frame import DataFrame
from ETL import DataLoader
import logging, coloredlogs
import dicts_and_lists as dal
pd.options.mode.chained_assignment = None
# ------ Logger ------- #
logger = logging.getLogger('DataTransformer.py')
coloredlogs.install(level='DEBUG')
class Transformation():
    """
    Transformation represents the second module in the ETL pipeline.
    Data passed in this method is polished, arranged and organized in specific columns.
    """

    # Box-score columns converted to int / float; order within each list is
    # irrelevant because every column owns its own list in dal.data_dict.
    _INT_STATS = ['MP', 'FG', 'FGA', '3P', '3PA', 'FT', 'FTA', 'ORB', 'DRB',
                  'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS']
    _FLOAT_STATS = ['FG%', '3P%', 'FT%']
    # Column order used when writing the accumulated stats into a DataFrame
    # (matches the original hand-written assignment order).
    _COLUMN_ORDER = ['Team', 'MP', 'FG', 'FGA', 'FG%', '3P', '3PA', '3P%',
                     'FT', 'FTA', 'FT%', 'ORB', 'DRB', 'TRB', 'AST', 'STL',
                     'BLK', 'TOV', 'PF', 'PTS', '+/-']

    def __init__(self, folder) -> None:
        # Folder (relative to the CWD) where the CSV artifacts live.
        self.folder = folder

    def polish_df_month(self, df_month: DataFrame, current_month: str):
        """Normalize one month's schedule DataFrame.

        Renames the scraped columns, drops uninteresting ones and removes
        games that have not been played yet (missing scores). Returns the
        polished DataFrame and the CSV path it should be saved to.
        """
        df_month = df_month.rename(columns=
            {
                'Visitor/Neutral': 'AwayTeam',
                'Home/Neutral': 'HomeTeam',
                'PTS': 'AwayPoints',
                'PTS.1': 'HomePoints'
            }
        )
        df_month = df_month.drop(['Unnamed: 6', 'Unnamed: 7', 'Attend.', 'Notes'], axis=1)  # Remove non interesting columns
        df_month = df_month.dropna(subset=['AwayPoints', 'HomePoints'])  # Remove rows containing games not yet played
        csv_path = Path(os.getcwd() + '/' + self.folder + current_month + '_data.csv')
        return df_month, csv_path

    def append_stats_per_game(self, df: DataFrame, team: str):
        """Append the last row (team totals) of a box-score DataFrame to dal.data_dict.

        `df` is expected to carry a ('Basic Box Score Stats', <stat>)
        MultiIndex on its columns — TODO confirm against the extractor.
        The per-stat loops replace the original 20 copy-pasted append lines.
        """
        last = df.index[-1]
        dal.data_dict['Team'].append(team)
        for stat in self._INT_STATS:
            dal.data_dict[stat].append(int(df.loc[last, ('Basic Box Score Stats', stat)]))
        for stat in self._FLOAT_STATS:
            dal.data_dict[stat].append(float(df.loc[last, ('Basic Box Score Stats', stat)]))
        # '+/-' is intentionally not cast (original behavior).
        dal.data_dict['+/-'].append(df.loc[last, ('Basic Box Score Stats', '+/-')])

    def assign_teams_data_to_df(self, df: DataFrame):
        """Copy the accumulated per-team stats into `df`, preserving column order."""
        for col in self._COLUMN_ORDER:
            df[col] = dal.data_dict[col]

    def split_stats_per_game(self):
        """
        Starting from stats_per_game.csv, create a file containing on each row the stats from the two teams.
        """
        df = pd.read_csv(self.folder + 'stats_per_game.csv', index_col=False)
        # Rows alternate away/home for the same game.
        spg_away: DataFrame = df.iloc[::2]
        spg_home: DataFrame = df.iloc[1::2]
        spg_away = spg_away.drop(['+/-'], axis=1)
        spg_home = spg_home.drop(['+/-'], axis=1)
        spg_away = spg_away.rename(dal.spg_away, axis=1)
        spg_home = spg_home.rename(dal.spg_home, axis=1)
        spg_away.reset_index(drop=True, inplace=True)
        spg_home.reset_index(drop=True, inplace=True)
        df = pd.concat([spg_away, spg_home], axis=1)
        # Assign a column containing the winner: 0 = Home, 1 = Away
        df = df.assign(Winner=0)  # Set the winner as the Home Team
        df['Winner'].loc[df['PTS_away'] > df['PTS_home']] = 1  # Change to Away if PTS_away > PTS_home
        # Assign the date per single game based on past season DataFrame
        season_df = pd.read_csv(self.folder + '2021_2022_season.csv', index_col=False)
        df.insert(loc=0, column='Date', value=season_df['Date'])
        Loading = DataLoader.Loading(self.folder)
        Loading.save_split_stats_per_game(df)
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ResponsePolicyRuleArgs', 'ResponsePolicyRule']
# NOTE(review): this class is emitted by the Pulumi SDK Generator (see the
# file header) — prefer regenerating over hand-editing.
@pulumi.input_type
class ResponsePolicyRuleArgs:
    def __init__(__self__, *,
                 response_policy: pulumi.Input[str],
                 behavior: Optional[pulumi.Input['ResponsePolicyRuleBehavior']] = None,
                 client_operation_id: Optional[pulumi.Input[str]] = None,
                 dns_name: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 local_data: Optional[pulumi.Input['ResponsePolicyRuleLocalDataArgs']] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 rule_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a ResponsePolicyRule resource.
        :param pulumi.Input['ResponsePolicyRuleBehavior'] behavior: Answer this query with a behavior rather than DNS data.
        :param pulumi.Input[str] dns_name: The DNS name (wildcard or exact) to apply this rule to. Must be unique within the Response Policy Rule.
        :param pulumi.Input['ResponsePolicyRuleLocalDataArgs'] local_data: Answer this query directly with DNS data. These ResourceRecordSets override any other DNS behavior for the matched name; in particular they override private zones, the public internet, and GCP internal DNS. No SOA nor NS types are allowed.
        :param pulumi.Input[str] rule_name: An identifier for this rule. Must be unique with the ResponsePolicy.
        """
        pulumi.set(__self__, "response_policy", response_policy)
        # Optional inputs are only set when provided, so Pulumi can tell
        # "unset" apart from an explicit value.
        if behavior is not None:
            pulumi.set(__self__, "behavior", behavior)
        if client_operation_id is not None:
            pulumi.set(__self__, "client_operation_id", client_operation_id)
        if dns_name is not None:
            pulumi.set(__self__, "dns_name", dns_name)
        if kind is not None:
            pulumi.set(__self__, "kind", kind)
        if local_data is not None:
            pulumi.set(__self__, "local_data", local_data)
        if project is not None:
            pulumi.set(__self__, "project", project)
        if rule_name is not None:
            pulumi.set(__self__, "rule_name", rule_name)

    @property
    @pulumi.getter(name="responsePolicy")
    def response_policy(self) -> pulumi.Input[str]:
        return pulumi.get(self, "response_policy")

    @response_policy.setter
    def response_policy(self, value: pulumi.Input[str]):
        pulumi.set(self, "response_policy", value)

    @property
    @pulumi.getter
    def behavior(self) -> Optional[pulumi.Input['ResponsePolicyRuleBehavior']]:
        """
        Answer this query with a behavior rather than DNS data.
        """
        return pulumi.get(self, "behavior")

    @behavior.setter
    def behavior(self, value: Optional[pulumi.Input['ResponsePolicyRuleBehavior']]):
        pulumi.set(self, "behavior", value)

    @property
    @pulumi.getter(name="clientOperationId")
    def client_operation_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "client_operation_id")

    @client_operation_id.setter
    def client_operation_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_operation_id", value)

    @property
    @pulumi.getter(name="dnsName")
    def dns_name(self) -> Optional[pulumi.Input[str]]:
        """
        The DNS name (wildcard or exact) to apply this rule to. Must be unique within the Response Policy Rule.
        """
        return pulumi.get(self, "dns_name")

    @dns_name.setter
    def dns_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "dns_name", value)

    @property
    @pulumi.getter
    def kind(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "kind")

    @kind.setter
    def kind(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kind", value)

    @property
    @pulumi.getter(name="localData")
    def local_data(self) -> Optional[pulumi.Input['ResponsePolicyRuleLocalDataArgs']]:
        """
        Answer this query directly with DNS data. These ResourceRecordSets override any other DNS behavior for the matched name; in particular they override private zones, the public internet, and GCP internal DNS. No SOA nor NS types are allowed.
        """
        return pulumi.get(self, "local_data")

    @local_data.setter
    def local_data(self, value: Optional[pulumi.Input['ResponsePolicyRuleLocalDataArgs']]):
        pulumi.set(self, "local_data", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter(name="ruleName")
    def rule_name(self) -> Optional[pulumi.Input[str]]:
        """
        An identifier for this rule. Must be unique with the ResponsePolicy.
        """
        return pulumi.get(self, "rule_name")

    @rule_name.setter
    def rule_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "rule_name", value)
# NOTE(review): generated by the Pulumi SDK Generator — regenerate rather
# than hand-editing.
class ResponsePolicyRule(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 behavior: Optional[pulumi.Input['ResponsePolicyRuleBehavior']] = None,
                 client_operation_id: Optional[pulumi.Input[str]] = None,
                 dns_name: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 local_data: Optional[pulumi.Input[pulumi.InputType['ResponsePolicyRuleLocalDataArgs']]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 response_policy: Optional[pulumi.Input[str]] = None,
                 rule_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Creates a new Response Policy Rule.
        Auto-naming is currently not supported for this resource.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input['ResponsePolicyRuleBehavior'] behavior: Answer this query with a behavior rather than DNS data.
        :param pulumi.Input[str] dns_name: The DNS name (wildcard or exact) to apply this rule to. Must be unique within the Response Policy Rule.
        :param pulumi.Input[pulumi.InputType['ResponsePolicyRuleLocalDataArgs']] local_data: Answer this query directly with DNS data. These ResourceRecordSets override any other DNS behavior for the matched name; in particular they override private zones, the public internet, and GCP internal DNS. No SOA nor NS types are allowed.
        :param pulumi.Input[str] rule_name: An identifier for this rule. Must be unique with the ResponsePolicy.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ResponsePolicyRuleArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Creates a new Response Policy Rule.
        Auto-naming is currently not supported for this resource.
        :param str resource_name: The name of the resource.
        :param ResponsePolicyRuleArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatches between the two overloads above: an Args object or
        # individual keyword properties.
        resource_args, opts = _utilities.get_resource_args_opts(ResponsePolicyRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 behavior: Optional[pulumi.Input['ResponsePolicyRuleBehavior']] = None,
                 client_operation_id: Optional[pulumi.Input[str]] = None,
                 dns_name: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 local_data: Optional[pulumi.Input[pulumi.InputType['ResponsePolicyRuleLocalDataArgs']]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 response_policy: Optional[pulumi.Input[str]] = None,
                 rule_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ResponsePolicyRuleArgs.__new__(ResponsePolicyRuleArgs)

            __props__.__dict__["behavior"] = behavior
            __props__.__dict__["client_operation_id"] = client_operation_id
            __props__.__dict__["dns_name"] = dns_name
            __props__.__dict__["kind"] = kind
            __props__.__dict__["local_data"] = local_data
            __props__.__dict__["project"] = project
            if response_policy is None and not opts.urn:
                raise TypeError("Missing required property 'response_policy'")
            __props__.__dict__["response_policy"] = response_policy
            __props__.__dict__["rule_name"] = rule_name
        super(ResponsePolicyRule, __self__).__init__(
            'google-native:dns/v1beta2:ResponsePolicyRule',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'ResponsePolicyRule':
        """
        Get an existing ResponsePolicyRule resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = ResponsePolicyRuleArgs.__new__(ResponsePolicyRuleArgs)

        # Properties start as None; the engine fills them from provider state.
        __props__.__dict__["behavior"] = None
        __props__.__dict__["dns_name"] = None
        __props__.__dict__["kind"] = None
        __props__.__dict__["local_data"] = None
        __props__.__dict__["rule_name"] = None
        return ResponsePolicyRule(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def behavior(self) -> pulumi.Output[str]:
        """
        Answer this query with a behavior rather than DNS data.
        """
        return pulumi.get(self, "behavior")

    @property
    @pulumi.getter(name="dnsName")
    def dns_name(self) -> pulumi.Output[str]:
        """
        The DNS name (wildcard or exact) to apply this rule to. Must be unique within the Response Policy Rule.
        """
        return pulumi.get(self, "dns_name")

    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[str]:
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter(name="localData")
    def local_data(self) -> pulumi.Output['outputs.ResponsePolicyRuleLocalDataResponse']:
        """
        Answer this query directly with DNS data. These ResourceRecordSets override any other DNS behavior for the matched name; in particular they override private zones, the public internet, and GCP internal DNS. No SOA nor NS types are allowed.
        """
        return pulumi.get(self, "local_data")

    @property
    @pulumi.getter(name="ruleName")
    def rule_name(self) -> pulumi.Output[str]:
        """
        An identifier for this rule. Must be unique with the ResponsePolicy.
        """
        return pulumi.get(self, "rule_name")
|
"""trojsten_judge_client - Client for Trojsten Judge System."""
__author__ = 'Michal Hozza <mhozza@gmail.com>'
__all__ = []
|
from django import forms
from newExam.expense_app.models import Expense
class ExpenseForm(forms.ModelForm):
    """ModelForm exposing every field of Expense, with a larger description box."""
    class Meta:
        model = Expense
        fields = '__all__'
        widgets = {'description': forms.Textarea(attrs={'rows': 10, 'columns': 40})}
class DeleteExpenseForm(ExpenseForm):
    """ExpenseForm variant with every field's widget rendered disabled."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for field in self.fields.values():
            field.widget.attrs['disabled'] = 'disabled'
|
import sys
from pathlib import Path
DAVIS16_DIR = Path('/home/achald/research/misc/datasets/davis/davis-2016/')
DAVIS17_DIR = Path('/home/achald/research/misc/datasets/davis/davis-2017/')


def _check_repo(repo_dir, name):
    """Raise ValueError if a DAVIS checkout is missing from its expected path."""
    if not repo_dir.exists():
        # Bug fix: the original passed logging-style '%s' extra arguments to
        # ValueError, which never interpolates them into the message.
        raise ValueError(
            'Could not find %s repo at %s. Please edit the path in %s.'
            % (name, repo_dir, __file__))


_check_repo(DAVIS16_DIR, 'DAVIS 2016')
_check_repo(DAVIS17_DIR, 'DAVIS 2017')


def add_davis16_to_sys_path():
    """Make the DAVIS 2016 python library importable."""
    sys.path.insert(0, str(DAVIS16_DIR / 'python' / 'lib'))


def add_davis17_to_sys_path():
    """Make the DAVIS 2017 python library importable."""
    sys.path.insert(0, str(DAVIS17_DIR / 'python' / 'lib'))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import sys
# Refuse to install under Python 2.
if sys.version_info[0] == 2:
    raise Exception('python3 required.')

# Pinned runtime dependencies.
install_requirements = [
    'sanic==18.12.0',
    'Jinja2==2.10'
]

setup(
    name='Sanic_Jinja',
    version='0.0.1',
    url='https://github.com/htwenning/sanic-jinja',
    license='MIT',
    author='wenning',
    author_email='ht.wenning@foxmail.com',
    description='simple jinja2 template renderer for sanic',
    packages=['sanic_jinja'],
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    install_requires=install_requirements,
)
# stdlib
import socket
# 3p
from nose.plugins.attrib import attr
# project
from tests.checks.common import AgentCheckTest
@attr(requires='riak')
@attr(requires='riak')
class RiakTestCase(AgentCheckTest):
    """Integration tests for the Datadog `riak` check.

    Runs the check against a local Riak dev node and asserts that every
    expected gauge is reported exactly once, plus service-check behaviour
    for good and bad configurations.
    """
    CHECK_NAME = 'riak'
    # Plain gauges the check must emit once per run.
    CHECK_GAUGES = [
        'riak.node_gets',
        'riak.node_gets_total',
        'riak.node_puts',
        'riak.node_puts_total',
        'riak.node_gets_counter',
        'riak.node_gets_counter_total',
        'riak.node_gets_set',
        'riak.node_gets_set_total',
        'riak.node_gets_map',
        'riak.node_gets_map_total',
        'riak.node_puts_counter',
        'riak.node_puts_counter_total',
        'riak.node_puts_set',
        'riak.node_puts_set_total',
        'riak.node_puts_map',
        'riak.node_puts_map_total',
        'riak.object_merge',
        'riak.object_merge_total',
        'riak.object_counter_merge',
        'riak.object_counter_merge_total',
        'riak.object_set_merge',
        'riak.object_set_merge_total',
        'riak.object_map_merge',
        'riak.object_map_merge_total',
        'riak.pbc_active',
        'riak.pbc_connects',
        'riak.pbc_connects_total',
        'riak.read_repairs',
        'riak.read_repairs_total',
        'riak.skipped_read_repairs',
        'riak.skipped_read_repairs_total',
        'riak.read_repairs_counter',
        'riak.read_repairs_counter_total',
        'riak.read_repairs_set',
        'riak.read_repairs_set_total',
        'riak.read_repairs_map',
        'riak.read_repairs_map_total',
        'riak.node_get_fsm_active',
        'riak.node_get_fsm_active_60s',
        'riak.node_get_fsm_in_rate',
        'riak.node_get_fsm_out_rate',
        'riak.node_get_fsm_rejected',
        'riak.node_get_fsm_rejected_60s',
        'riak.node_get_fsm_rejected_total',
        'riak.node_get_fsm_errors',
        'riak.node_get_fsm_errors_total',
        'riak.node_put_fsm_active',
        'riak.node_put_fsm_active_60s',
        'riak.node_put_fsm_in_rate',
        'riak.node_put_fsm_out_rate',
        'riak.node_put_fsm_rejected',
        'riak.node_put_fsm_rejected_60s',
        'riak.node_put_fsm_rejected_total',
        'riak.riak_kv_vnodes_running',
        'riak.vnode_gets',
        'riak.vnode_gets_total',
        'riak.vnode_puts',
        'riak.vnode_puts_total',
        'riak.vnode_counter_update',
        'riak.vnode_counter_update_total',
        'riak.vnode_set_update',
        'riak.vnode_set_update_total',
        'riak.vnode_map_update',
        'riak.vnode_map_update_total',
        'riak.vnode_index_deletes',
        'riak.vnode_index_deletes_postings',
        'riak.vnode_index_deletes_postings_total',
        'riak.vnode_index_deletes_total',
        'riak.vnode_index_reads',
        'riak.vnode_index_reads_total',
        'riak.vnode_index_refreshes',
        'riak.vnode_index_refreshes_total',
        'riak.vnode_index_writes',
        'riak.vnode_index_writes_postings',
        'riak.vnode_index_writes_postings_total',
        'riak.vnode_index_writes_total',
        'riak.dropped_vnode_requests_total',
        'riak.search_index_fail_one',
        'riak.search_index_fail_count',
        'riak.search_index_throughput_one',
        'riak.search_index_throughput_count',
        'riak.search_query_fail_one',
        'riak.search_query_fail_count',
        'riak.search_query_throughput_one',
        'riak.search_query_throughput_count',
        'riak.list_fsm_active',
        'riak.list_fsm_create',
        'riak.list_fsm_create_total',
        'riak.list_fsm_create_error',
        'riak.list_fsm_create_error_total',
        'riak.index_fsm_active',
        'riak.index_fsm_create',
        'riak.index_fsm_create_error',
        'riak.riak_pipe_vnodes_running',
        'riak.executing_mappers',
        'riak.pipeline_active',
        'riak.pipeline_create_count',
        'riak.pipeline_create_error_count',
        'riak.pipeline_create_error_one',
        'riak.pipeline_create_one',
        'riak.rings_reconciled',
        'riak.rings_reconciled_total',
        'riak.converge_delay_last',
        'riak.converge_delay_max',
        'riak.converge_delay_mean',
        'riak.converge_delay_min',
        'riak.rebalance_delay_last',
        'riak.rebalance_delay_max',
        'riak.rebalance_delay_mean',
        'riak.rebalance_delay_min',
        'riak.rejected_handoffs',
        'riak.handoff_timeouts',
        'riak.coord_redirs_total',
        'riak.gossip_received',
        'riak.ignored_gossip_total',
        'riak.mem_allocated',
        'riak.mem_total',
        'riak.memory_atom',
        'riak.memory_atom_used',
        'riak.memory_binary',
        'riak.memory_code',
        'riak.memory_ets',
        'riak.memory_processes',
        'riak.memory_processes_used',
        'riak.memory_system',
        'riak.memory_total',
        'riak.sys_monitor_count',
        'riak.sys_port_count',
        'riak.sys_process_count',
        'riak.late_put_fsm_coordinator_ack',
        'riak.postcommit_fail',
        'riak.precommit_fail',
    ]
    # Statistical gauges (mean/median/percentile families).
    CHECK_GAUGES_STATS = [
        'riak.node_get_fsm_counter_time_mean',
        'riak.node_get_fsm_counter_time_median',
        'riak.node_get_fsm_counter_time_95',
        'riak.node_get_fsm_counter_time_99',
        'riak.node_get_fsm_counter_time_100',
        'riak.node_put_fsm_counter_time_mean',
        'riak.node_put_fsm_counter_time_median',
        'riak.node_put_fsm_counter_time_95',
        'riak.node_put_fsm_counter_time_99',
        'riak.node_put_fsm_counter_time_100',
        'riak.node_get_fsm_set_time_mean',
        'riak.node_get_fsm_set_time_median',
        'riak.node_get_fsm_set_time_95',
        'riak.node_get_fsm_set_time_99',
        'riak.node_get_fsm_set_time_100',
        'riak.node_put_fsm_set_time_mean',
        'riak.node_put_fsm_set_time_median',
        'riak.node_put_fsm_set_time_95',
        'riak.node_put_fsm_set_time_99',
        'riak.node_put_fsm_set_time_100',
        'riak.node_get_fsm_map_time_mean',
        'riak.node_get_fsm_map_time_median',
        'riak.node_get_fsm_map_time_95',
        'riak.node_get_fsm_map_time_99',
        'riak.node_get_fsm_map_time_100',
        'riak.node_put_fsm_map_time_mean',
        'riak.node_put_fsm_map_time_median',
        'riak.node_put_fsm_map_time_95',
        'riak.node_put_fsm_map_time_99',
        'riak.node_put_fsm_map_time_100',
        'riak.node_get_fsm_counter_objsize_mean',
        'riak.node_get_fsm_counter_objsize_median',
        'riak.node_get_fsm_counter_objsize_95',
        'riak.node_get_fsm_counter_objsize_99',
        'riak.node_get_fsm_counter_objsize_100',
        'riak.node_get_fsm_set_objsize_mean',
        'riak.node_get_fsm_set_objsize_median',
        'riak.node_get_fsm_set_objsize_95',
        'riak.node_get_fsm_set_objsize_99',
        'riak.node_get_fsm_set_objsize_100',
        'riak.node_get_fsm_map_objsize_mean',
        'riak.node_get_fsm_map_objsize_median',
        'riak.node_get_fsm_map_objsize_95',
        'riak.node_get_fsm_map_objsize_99',
        'riak.node_get_fsm_map_objsize_100',
        'riak.node_get_fsm_counter_siblings_mean',
        'riak.node_get_fsm_counter_siblings_median',
        'riak.node_get_fsm_counter_siblings_95',
        'riak.node_get_fsm_counter_siblings_99',
        'riak.node_get_fsm_counter_siblings_100',
        'riak.node_get_fsm_set_siblings_mean',
        'riak.node_get_fsm_set_siblings_median',
        'riak.node_get_fsm_set_siblings_95',
        'riak.node_get_fsm_set_siblings_99',
        'riak.node_get_fsm_set_siblings_100',
        'riak.node_get_fsm_map_siblings_mean',
        'riak.node_get_fsm_map_siblings_median',
        'riak.node_get_fsm_map_siblings_95',
        'riak.node_get_fsm_map_siblings_99',
        'riak.node_get_fsm_map_siblings_100',
        'riak.object_merge_time_mean',
        'riak.object_merge_time_median',
        'riak.object_merge_time_95',
        'riak.object_merge_time_99',
        'riak.object_merge_time_100',
        'riak.object_counter_merge_time_mean',
        'riak.object_counter_merge_time_median',
        'riak.object_counter_merge_time_95',
        'riak.object_counter_merge_time_99',
        'riak.object_counter_merge_time_100',
        'riak.object_set_merge_time_mean',
        'riak.object_set_merge_time_median',
        'riak.object_set_merge_time_95',
        'riak.object_set_merge_time_99',
        'riak.object_set_merge_time_100',
        'riak.object_map_merge_time_mean',
        'riak.object_map_merge_time_median',
        'riak.object_map_merge_time_95',
        'riak.object_map_merge_time_99',
        'riak.object_map_merge_time_100',
        'riak.counter_actor_counts_mean',
        'riak.counter_actor_counts_median',
        'riak.counter_actor_counts_95',
        'riak.counter_actor_counts_99',
        'riak.counter_actor_counts_100',
        'riak.set_actor_counts_mean',
        'riak.set_actor_counts_median',
        'riak.set_actor_counts_95',
        'riak.set_actor_counts_99',
        'riak.set_actor_counts_100',
        'riak.map_actor_counts_mean',
        'riak.map_actor_counts_median',
        'riak.map_actor_counts_95',
        'riak.map_actor_counts_99',
        'riak.map_actor_counts_100',
        'riak.vnode_get_fsm_time_mean',
        'riak.vnode_get_fsm_time_median',
        'riak.vnode_get_fsm_time_95',
        'riak.vnode_get_fsm_time_99',
        'riak.vnode_get_fsm_time_100',
        'riak.vnode_put_fsm_time_mean',
        'riak.vnode_put_fsm_time_median',
        'riak.vnode_put_fsm_time_95',
        'riak.vnode_put_fsm_time_99',
        'riak.vnode_put_fsm_time_100',
        'riak.vnode_counter_update_time_mean',
        'riak.vnode_counter_update_time_median',
        'riak.vnode_counter_update_time_95',
        'riak.vnode_counter_update_time_99',
        'riak.vnode_counter_update_time_100',
        'riak.vnode_set_update_time_mean',
        'riak.vnode_set_update_time_median',
        'riak.vnode_set_update_time_95',
        'riak.vnode_set_update_time_99',
        'riak.vnode_set_update_time_100',
        'riak.vnode_map_update_time_mean',
        'riak.vnode_map_update_time_median',
        'riak.vnode_map_update_time_95',
        'riak.vnode_map_update_time_99',
        'riak.vnode_map_update_time_100',
        'riak.riak_kv_vnodeq_mean',
        'riak.riak_kv_vnodeq_min',
        'riak.riak_kv_vnodeq_max',
        'riak.riak_kv_vnodeq_median',
        'riak.riak_kv_vnodeq_total',
        'riak.riak_pipe_vnodeq_mean',
        'riak.riak_pipe_vnodeq_min',
        'riak.riak_pipe_vnodeq_max',
        'riak.riak_pipe_vnodeq_median',
        'riak.riak_pipe_vnodeq_total',
    ]
    # The below metrics for leveldb and read repair
    # appear when they have no values, however they
    # are displayed as "undefined". The search metrics
    # do not appear if search is off.
    CHECK_NOT_TESTED = [
        'riak.leveldb_read_block_error',
        'riak.read_repairs_primary_notfound_one',
        'riak.read_repairs_primary_notfound_count',
        'riak.read_repairs_primary_outofdate_one',
        'riak.read_repairs_primary_outofdate_count',
        'riak.read_repairs_fallback_notfound_one',
        'riak.read_repairs_fallback_notfound_count',
        'riak.read_repairs_fallback_outofdate_one',
        'riak.read_repairs_fallback_outofdate_count',
        'riak.search_query_latency_mean',
        'riak.search_query_latency_min',
        'riak.search_query_latency_median',
        'riak.search_query_latency_95',
        'riak.search_query_latency_99',
        'riak.search_query_latency_999',
        'riak.search_query_latency_max',
        'riak.search_index_latency_mean',
        'riak.search_index_latency_min',
        'riak.search_index_latency_median',
        'riak.search_index_latency_95',
        'riak.search_index_latency_99',
        'riak.search_index_latency_999',
        'riak.search_index_latency_max',
    ]
    SERVICE_CHECK_NAME = 'riak.can_connect'

    def test_riak(self):
        """Run the check against a local dev node; every gauge must appear
        exactly once with the configured tag, and the service check is OK."""
        config_dev1 = {
            "instances": [{
                "url": "http://localhost:10018/stats",
                "tags": ["my_tag"]
            }]
        }
        self.run_check(config_dev1)
        tags = ['my_tag']
        # Service checks additionally carry the instance URL as a tag.
        sc_tags = tags + ['url:' + config_dev1['instances'][0]['url']]
        for gauge in self.CHECK_GAUGES + self.CHECK_GAUGES_STATS:
            self.assertMetric(gauge, count=1, tags=tags)
        self.assertServiceCheckOK(self.SERVICE_CHECK_NAME,
                                  tags=sc_tags,
                                  count=1)
        self.coverage_report()

    def test_bad_config(self):
        """A refused connection must raise socket.error and emit a CRITICAL
        service check tagged with the failing URL."""
        self.assertRaises(
            socket.error,
            lambda: self.run_check({"instances": [{"url": "http://localhost:5985"}]})
        )
        sc_tags = ['url:http://localhost:5985']
        self.assertServiceCheckCritical(self.SERVICE_CHECK_NAME,
                                        tags=sc_tags,
                                        count=1)
        self.coverage_report()
|
# RUN: %PYTHON %s
# TODO: Numpy compiler has bitrotted.
# XFAIL: *
from npcomp.compiler.numpy.backend import iree
from npcomp.compiler.numpy.frontend import *
from npcomp.compiler.numpy import test_config
from npcomp.compiler.numpy.target import *
from npcomp.compiler.utils import logging
# TODO: This should all exist in a high level API somewhere.
from _npcomp import mlir
logging.enable()
def compile_function(f):
    """Compile *f* through the numpy frontend + IREE backend and return the
    loaded, callable compiled version (usable as a decorator)."""
    frontend = ImportFrontend(config=test_config.create_test_config(
        target_factory=GenericTarget32))
    frontend.import_global_function(f)
    backend = iree.CompilerBackend()
    blob = backend.compile(frontend.ir_module)
    loaded = backend.load(blob)
    return loaded[f.__name__]
# End-to-end smoke tests: tiny functions are compiled through the numpy
# frontend + IREE and executed.  NOTE: these function bodies are the
# compiler's *input* — do not restructure them.
@compile_function
def int_add(a: int, b: int):
    return a + b

result = int_add(5, 6)
assert result == 11

@compile_function
def simple_control_flow(a: int, b: int):
    # Exercises short-circuit `and` lowering: yields (a*b) when falsy,
    # otherwise (a-b).
    return (a * b) and (a - b)

assert simple_control_flow(5, 6) == -1
assert simple_control_flow(-1, 0) == 0
|
#!/usr/bin/env python
# Copyright (c) 2014 v3aqb
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import socket
from repoze.lru import lru_cache
try:
from ipaddress import ip_address as ip_address
except ImportError:
from ipaddr import IPAddress as ip_address
@lru_cache(4096, timeout=90)
def getaddrinfo(host, port=None, family=0, socktype=0, proto=0, flags=0):
    """Cached wrapper around socket.getaddrinfo (results kept for 90s).

    Returns the usual list of (family, socktype, proto, canonname, sockaddr)
    tuples, e.g.:

    >>> socket.getaddrinfo("www.python.org", 80, 0, 0, socket.SOL_TCP)
    [(2, 1, 6, '', ('82.94.164.162', 80)),
    (10, 1, 6, '', ('2001:888:2000:d::a2', 80, 0, 0))]"""
    return socket.getaddrinfo(host, port, family, socktype, proto, flags)
# Sentinel meaning "timeout argument was not supplied".  It must be created
# once at module level: the original used `timeout=object()` and compared
# `timeout is not object()`, which builds a *new* object at each comparison,
# so the test was always True and settimeout() was called with the sentinel
# (a TypeError) whenever the caller omitted the timeout.
_GLOBAL_DEFAULT_TIMEOUT = object()


def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None):
    """Connect to *address* and return the socket object.
    Convenience function. Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object. Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect. If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used. If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.
    """
    host, port = address
    err = None
    for res in getaddrinfo(host, port):
        af, socktype, proto, canonname, sa = res
        sock = None
        if af == 10:
            # Skip IPv6 results (AF_INET6 == 10 on Linux).
            continue
        try:
            sock = socket.socket(af, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock
        except socket.error as _:
            err = _
            if sock is not None:
                sock.close()
    # Re-raise the last connection error; if resolution yielded nothing
    # usable, raise explicitly.
    if err is not None:
        raise err
    else:
        raise socket.error("getaddrinfo returns an empty list")
@lru_cache(1024, timeout=900)
def get_ip_address(host):
    """Return *host* as an ip_address object, resolving hostnames.

    Falls back to 0.0.0.0 on any resolution failure instead of raising.
    """
    try:
        return ip_address(host)
    except ValueError:
        # Not a literal IP — resolve it.  sockaddr is (ip, port, ...): the
        # address is element [0]; the original read [1] (the port number),
        # which ip_address() silently accepted as an integer address.
        try:
            return ip_address(getaddrinfo(host)[0][4][0])
        except (socket.error, IndexError, ValueError):
            return ip_address('0.0.0.0')
def parse_hostport(host, default_port=80):
    """Split a host[:port] string into (host, int(port)).

    Handles bracketed IPv6 literals ("[::1]:443", "[::1]") and bare IPv6
    literals ("::1").  The original regex mis-parsed a bare IPv6 address,
    e.g. '::1' -> (':', 1); such inputs now fall through to *default_port*.
    """
    # Bracketed IPv6 literal, with or without a trailing port.
    m = re.match(r'\[([^\]]+)\](?::(\d+))?$', host)
    if m:
        return m.group(1), int(m.group(2)) if m.group(2) else default_port
    # "host:port" where the host part contains no colon (hostname or IPv4).
    m = re.match(r'([^:]+):(\d+)$', host)
    if m:
        return m.group(1), int(m.group(2))
    # No usable port part: bare hostname, IPv4, or unbracketed IPv6.
    return host, default_port
if __name__ == "__main__":
    # Ad-hoc smoke test: compare the stdlib resolver with the cached wrapper
    # and print the first resolved IPv4/IPv6 address.
    t = socket.getaddrinfo('www.baidu.com', 80)
    r = getaddrinfo('www.baidu.com')
    print(t)
    print(r)
    print(r[0][4][0])
|
# Generated by Django 3.2.5 on 2021-08-05 10:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add IntegerField `count` (default 1) to
    the `site_results` model in the `db` app."""

    dependencies = [
        ('db', '0005_auto_20210728_0612'),
    ]
    operations = [
        migrations.AddField(
            model_name='site_results',
            name='count',
            field=models.IntegerField(default=1),
        ),
    ]
|
import maya.cmds as cmds
import maya.OpenMaya as OpenMaya
import maya.OpenMayaUI as OpenMayaUI
import maya.OpenMayaRender as OpenMayaRender
from PySide import QtCore, QtGui
from shiboken import wrapInstance
import math
class KeyboardEvents(QtCore.QObject):
    """Qt event filter mirroring the state of a few monitored keys.

    The boolean attributes (K_Ctrl, K_Shift, K_Esc, K_Alt, K_Enter) track
    press/release, and the 3D view is refreshed on each change so overlay
    drawing can react immediately.  Events are never consumed.
    """

    # NOTE: the default view3D is evaluated once at import time (mutable
    # default argument); kept as-is for backward compatibility.
    def __init__(self, view3D = OpenMayaUI.M3dView.active3dView()):
        super(KeyboardEvents, self).__init__()
        self.view = view3D
        self.K_Ctrl = False
        self.K_Esc = False
        self.K_Shift = False
        self.K_Alt = False
        self.K_Enter = False

    def eventFilter(self, obj, event):
        """Flip the matching flag on KeyPress/KeyRelease and refresh."""
        flag_by_key = {
            QtCore.Qt.Key_Control: 'K_Ctrl',
            QtCore.Qt.Key_Shift: 'K_Shift',
            QtCore.Qt.Key_Escape: 'K_Esc',
            QtCore.Qt.Key_Alt: 'K_Alt',
            QtCore.Qt.Key_Return: 'K_Enter',
        }
        etype = event.type()
        if etype == QtCore.QEvent.Type.KeyPress:
            attr = flag_by_key.get(event.key())
            if attr is not None:
                setattr(self, attr, True)
                self.view.refresh(True, True)
        if etype == QtCore.QEvent.Type.KeyRelease:
            attr = flag_by_key.get(event.key())
            if attr is not None:
                setattr(self, attr, False)
                self.view.refresh(True, True)
class MouseEvents(QtCore.QObject):
    """Qt event filter recording mouse-button state and cursor position.

    While `editMode` is True the filter consumes every event it sees
    (returns True), blocking normal viewport interaction.
    """

    def __init__(self, view3D = OpenMayaUI.M3dView.active3dView()):
        super(MouseEvents, self).__init__()
        self.view = view3D
        self.M_Button_Left = False
        self.M_Button_Right = False
        self.M_Move = False
        self.M_posX = 0
        self.M_posY = 0
        self.editMode = False

    def eventFilter(self, obj, event):
        etype = event.type()
        if etype == QtCore.QEvent.Type.MouseButtonPress:
            button = event.button()
            if button == 1 or button == 2:
                # Record the press position, then the pressed button.
                self.M_posX = event.pos().x()
                self.M_posY = event.pos().y()
                if button == 1:
                    self.M_Button_Left = True
                else:
                    self.M_Button_Right = True
                self.view.refresh(True, True)
        if etype == QtCore.QEvent.Type.MouseButtonRelease:
            button = event.button()
            if button == 1:
                self.M_Button_Left = False
                self.view.refresh(True, True)
            if button == 2:
                self.M_Button_Right = False
                self.view.refresh(True, True)
        if etype == QtCore.QEvent.Type.MouseMove:
            self.M_posX = event.pos().x()
            self.M_posY = event.pos().y()
            self.M_Move = True
            self.view.refresh(True, True)
        # Swallow everything while in edit mode.
        if self.editMode:
            return True
class ViewportPainter(object):
def __init__ (self):
self.callback = None
self.currentModelPanel = None
self.unit = 1.0
self.glFT = None
self.qt_Active_View = None
self.qt_Maya_Window = None
self.view3D = OpenMayaUI.M3dView.active3dView()
self.userKeyboardEvents = KeyboardEvents(self.view3D)
self.userMouseEvents = MouseEvents(self.view3D)
self.initializeGL()
self.initializeCallback()
def initializeGL(self):
#scene measure units
unit = cmds.currentUnit(q=1, linear=1)
if unit == "m":
self.unit = float(self.unit) * 100.0
self.glFT = OpenMayaRender.MHardwareRenderer.theRenderer().glFunctionTable()
def initializeCallback(self):
#get current model panel
self.currentModelPanel = cmds.getPanel(wf = 1)
if "modelPanel" not in self.currentModelPanel:
self.currentModelPanel = cmds.getPanel(vis = 1)
for i in self.currentModelPanel:
if "modelPanel" in i:
self.currentModelPanel = i
#try removing old callbacks from memory
try:
OpenMayaUI.MUiMessage.removeCallback(self.callBack)
except:
pass
#create a callback that is registered after a frame is drawn with a 3D content but before 2D content
self.callback = OpenMayaUI.MUiMessage.add3dViewPostRenderMsgCallback(self.currentModelPanel, self.update)
self.view3D.refresh(True, True)
#create QT maya window event filter
main_window_ptr = OpenMayaUI.MQtUtil.mainWindow()
self.qt_Maya_Window = wrapInstance(long(main_window_ptr), QtCore.QObject)
self.qt_Maya_Window.installEventFilter(self.userKeyboardEvents)
#create viewport event filter
active_view_ptr = self.view3D.widget()
self.qt_Active_View = wrapInstance(long(active_view_ptr), QtCore.QObject)
self.qt_Active_View.installEventFilter(self.userMouseEvents)
cmds.inViewMessage( amg='<hl>Tool:</hl> Use <hl>"Esc"</hl> to cancel the tool', pos='botLeft', fade=True )
print "Initialized..."
def uninitializeCallback(self):
OpenMayaUI.MUiMessage.removeCallback(self.callback) #remove 3dView Render Callback
self.qt_Maya_Window.removeEventFilter(self.userKeyboardEvents) #remove QT Callback
self.qt_Active_View.removeEventFilter(self.userMouseEvents) #remove QT Callback
OpenMayaUI.M3dView.active3dView().scheduleRefresh()
print "Uninitialized..."
def getMouseIntersect(self):
sourcePnt = OpenMaya.MPoint(0,0,0)
rayDir = OpenMaya.MVector(0,0,0)
maximumDistance = 9999999999
viewHeight = self.view3D.portHeight()
hitNormal = OpenMaya.MVector()
intersectedObject = None
intersectedPoint = OpenMaya.MFloatPoint()
intersectedFace = 0
hitFace = OpenMaya.MScriptUtil()
hitFace.createFromInt(0)
hitFacePtr = hitFace.asIntPtr()
hitDistance = OpenMaya.MScriptUtil(0.0)
hitDistancePtr = hitDistance.asFloatPtr()
self.view3D.viewToWorld(int(self.userMouseEvents.M_posX), int(viewHeight - self.userMouseEvents.M_posY), sourcePnt, rayDir)
direction = OpenMaya.MFloatVector(rayDir.x, rayDir.y, rayDir.z).normal()
iter = OpenMaya.MItDependencyNodes(OpenMaya.MFn.kMesh)
while not iter.isDone():
node =iter.thisNode()
dagPath = OpenMaya.MDagPath.getAPathTo(node)
hitPoint = OpenMaya.MFloatPoint()
source = OpenMaya.MFloatPoint(sourcePnt.x, sourcePnt.y, sourcePnt.z)
direction = OpenMaya.MFloatVector(direction.x,direction.y,direction.z)
if dagPath.isVisible():
mesh = OpenMaya.MFnMesh(dagPath)
intersected = mesh.closestIntersection(source, direction, None, None, False, OpenMaya.MSpace.kWorld, 9999999999, True, None, hitPoint, hitDistancePtr, hitFacePtr, None, None, None, 0.0001)
if intersected:
intersectionDistance = hitDistance.getFloat(hitDistancePtr)
if intersectionDistance < maximumDistance:
maximumDistance = intersectionDistance
intersectedPoint = hitPoint
intersectedFace = OpenMaya.MScriptUtil(hitFacePtr).asInt()
mesh.getClosestNormal(OpenMaya.MPoint(intersectedPoint),hitNormal,OpenMaya.MSpace.kWorld)
intersectedObject = dagPath.fullPathName()
iter.next()
if intersectedPoint.x + intersectedPoint.y + intersectedPoint.z == 0:
return None, None, None
else:
return intersectedPoint, intersectedFace, intersectedObject
def update(self, *args):
pass
|
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from . import views
# Authentication / account-management routes.
urlpatterns = [
    # NOTE(review): auth_views.login is the old function-based view, removed
    # in modern Django (use auth_views.LoginView.as_view()); confirm the
    # project's Django version before upgrading.
    url(r'^login/$', auth_views.login, name='login'),
    url(r'^logout/$', views.logout, name='logout'),
    url(r'^register/$', views.user_create, name='user_create'),
    url(r'^verify/$', views.verify, name='verify'),
    url(r'^verify/token/$', views.get_verify_token, name='get_verify_token'),
    url(r'^account/$', views.user_update, name='user_update'),
    url(r'^account/password/$', views.password_change, name='password_change'),
]
|
import typing
from string import punctuation
def clear_punctuation(text: str) -> str:
    """Remove ASCII punctuation characters from *text*, keeping spaces."""
    return ''.join(ch for ch in text if ch not in punctuation)
def lower_and_split(text: str) -> typing.List[str]:
    """
    Lower-case *text* and split it on single spaces.

    Note: splitting on ' ' (rather than str.split()) keeps empty strings
    for consecutive spaces and does not split on tabs or newlines.

    :param text: input string
    :return: list of lower-cased tokens
    """
    return text.lower().split(' ')
|
from flask_wtf import FlaskForm
from wtforms.fields.html5 import IntegerField, DecimalField
from flask_security.forms import ConfirmRegisterForm
from wtforms.validators import (
InputRequired, DataRequired, EqualTo, URL, NumberRange,
Length,
)
from wtforms import (
BooleanField, StringField, PasswordField, SubmitField,
SelectField,
)
from ..util import StateCodes
class StripeKeysForm(FlaskForm):
    """Collects the vendor's Stripe API key pair; both keys required."""
    stripe_sk = StringField('Stripe Secret Key',
                            [InputRequired()])
    stripe_pk = StringField('Stripe Public Key',
                            [InputRequired()])
class RedirectUrlForm(FlaskForm):
    """Single-field form: the post-checkout redirect URL (must be a URL)."""
    redirect_url = StringField('Redirect URL',
                               [URL(message='Sorry this is not a valid URL')])
class DetailsForm(FlaskForm):
    """Vendor company and mailing-address details."""
    company_name = StringField('Company Name', [InputRequired()])
    street = StringField('Street Address', [InputRequired()])
    city = StringField('City', [InputRequired()])
    # Two-letter state code chosen from the StateCodes list.
    # NOTE(review): the message "State codes is invalid" is user-facing text
    # with a grammar slip; left untouched here.
    state_code = SelectField('State', [
        InputRequired(),
        Length(message="State codes is invalid", max=2)
    ],
        choices=StateCodes)
    zip_code = StringField('Zip Code', [InputRequired()])
    subscribe = BooleanField('Subscribe for the Gradient newsletter', [])
class VendorConfirmRegisterForm(ConfirmRegisterForm):
    """Flask-Security registration form extended with names and a
    password-confirmation field."""
    first_name = StringField('First Name', [InputRequired()])
    last_name = StringField('Last Name', [InputRequired()])
    password_confirm = PasswordField('Confirm Password',
                                     [EqualTo('password', message='Passwords must match')])
class VendorRegisterForm(DetailsForm, VendorConfirmRegisterForm):
    """Full vendor sign-up form: company details + registration fields."""
    pass
|
import math
import torch
import numpy as np
import torch.nn.functional as F
from src.utils.utils import l2_normalize
class MoCo(object):
    """MoCo-style InfoNCE contrastive loss.

    Two augmented views of a batch are L2-normalized; each sample's second
    view is the positive, every row of the (detached) queue is a negative,
    and a temperature-scaled cross-entropy is taken with the positive in
    column 0.
    """

    def __init__(self, outputs1, outputs2, queue, t=0.07):
        super().__init__()
        self.outputs1 = l2_normalize(outputs1, dim=1)
        self.outputs2 = l2_normalize(outputs2, dim=1)
        # Negatives never receive gradient.
        self.queue = queue.detach()
        self.t = t
        self.k = queue.size(0)
        self.device = self.outputs1.device

    def get_loss(self):
        # Positive logit: per-sample agreement between the two views.
        pos = (self.outputs1 * self.outputs2).sum(dim=1, keepdim=True)
        # Negative logits: view-1 similarity against every queue entry.
        neg = self.outputs1 @ self.queue.T
        # Shape: batch_size x (k + 1); positive occupies column 0.
        logits = torch.cat((pos, neg), dim=1) / self.t
        target = torch.zeros(logits.size(0), dtype=torch.long, device=self.device)
        return F.cross_entropy(logits, target)
|
import tensorflow as tf
from .logger import (StepLoggerHook,
LossLoggerHook,
AccuracyLoggerHook,
TimeLoggerHook,
EolLoggerHook)
def hooks(display_step, last_step, batch_size, loss, accuracy):
    """Build the standard list of tf.train session hooks for a training run.

    Args:
        display_step: interval (in steps) between log lines.
        last_step: global step at which training stops.
        batch_size: examples per step, used for throughput logging.
        loss: loss tensor, monitored for NaNs and logged.
        accuracy: accuracy tensor to log.

    Returns:
        List of SessionRunHook instances, in registration order.
    """
    return [
        tf.train.StopAtStepHook(last_step=last_step),
        tf.train.NanTensorHook(loss),
        StepLoggerHook(display_step, last_step),
        LossLoggerHook(display_step, loss),
        AccuracyLoggerHook(display_step, accuracy),
        TimeLoggerHook(display_step, batch_size, last_step),
        EolLoggerHook(display_step),
    ]
|
import typing
from TorchTSA.simulate.ARMASim import ARMASim
class ARSim(ARMASim):
    """Pure autoregressive AR(p) simulator: an ARMASim with no MA terms.

    :param _phi_arr: AR coefficients; a bare number is accepted for AR(1).
    :param _mu: process mean.
    :param _sigma: innovation standard deviation.
    """

    def __init__(
            self,
            _phi_arr: typing.Union[float, typing.Sequence[float]],
            _mu: float = 0.0, _sigma: float = 1.0,
    ):
        # Normalize a scalar coefficient to a 1-tuple (isinstance tuple
        # form instead of the original chained `or`).
        if isinstance(_phi_arr, (float, int)):
            _phi_arr = (_phi_arr,)
        super().__init__(
            _phi_arr=_phi_arr, _mu=_mu, _sigma=_sigma
        )
|
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from datetime import datetime
import stripe
stripe.api_key = settings.STRIPE_SECRET_KEY

# (stored value, label) pairs for Membership.membership_type.
# NOTE(review): the long name is stored and the short code is the label —
# the reverse of the usual Django convention — but default='Free' matches
# the stored value, so the pairs are self-consistent; confirm intent
# before reversing.
MEMBERSHIP_CHOICES = (
    ('Enterprise', 'ent'),
    ('Professional', 'pro'),
    ('Free', 'free')
)
# Create your models here.
class Membership(models.Model):
    """A purchasable membership tier, linked to a Stripe billing plan."""
    slug = models.SlugField()
    membership_type = models.CharField(
        choices=MEMBERSHIP_CHOICES,
        default='Free',
        max_length=30)
    # Price in whole currency units; 15 is the default tier price.
    price = models.IntegerField(default=15)
    stripe_plan_id = models.CharField(max_length=40)

    def __str__(self):
        return self.membership_type
class UserMembership(models.Model):
    """One-to-one link between a user, their Stripe customer record, and
    the membership tier they currently hold."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    stripe_customer_id = models.CharField(max_length=40)
    # SET_NULL keeps the user record if the tier itself is deleted.
    membership = models.ForeignKey(Membership, on_delete=models.SET_NULL, null=True)

    def __str__(self):
        return self.user.username
def post_save_usermembership_create(sender, instance, created, *args, **kwargs):
    """post_save receiver for the user model.

    Ensures every newly created user gets a UserMembership row with a
    Stripe customer attached.  Bug fix: the original called get_or_create
    twice in a row — the first call's result was discarded and the second
    shadowed the `created` signal argument.
    """
    if created:
        user_membership, _ = UserMembership.objects.get_or_create(user=instance)
        if user_membership.stripe_customer_id is None or user_membership.stripe_customer_id == '':
            # Create the Stripe customer lazily and persist its id.
            new_customer_id = stripe.Customer.create(email=instance.email)
            user_membership.stripe_customer_id = new_customer_id['id']
            user_membership.save()

post_save.connect(post_save_usermembership_create, sender=settings.AUTH_USER_MODEL)
class Subscription(models.Model):
    """A Stripe subscription attached to a user's membership."""
    user_membership = models.ForeignKey(UserMembership, on_delete=models.CASCADE)
    stripe_subscription_id = models.CharField(max_length=40)
    active = models.BooleanField(default=True)

    def __str__(self):
        return self.user_membership.user.username

    @property
    def get_created_date(self):
        # Fetched live from the Stripe API on every access (network call).
        subscription = stripe.Subscription.retrieve(self.stripe_subscription_id)
        return datetime.fromtimestamp(subscription.created)

    @property
    def get_next_billing_date(self):
        # NOTE(review): returns the raw epoch timestamp, unlike
        # get_created_date which converts to datetime — confirm intended.
        subscription = stripe.Subscription.retrieve(self.stripe_subscription_id)
        return subscription.current_period_end
import unittest, time
from src.tic_toc import tic, toc, Timer
class TestTicToc(unittest.TestCase):
    """Timing sanity checks for the tic/toc helpers (each sleeps ~2s)."""

    def test_tic_toc(self):
        delay = 2
        start = tic()
        time.sleep(delay)
        self.assertAlmostEqual(toc(start), delay, places=1)

    def test_helper_class(self):
        delay = 2
        timer = Timer('foo_stuff')
        with timer:
            time.sleep(delay)
        # NOTE(review): assumes Timer.tend holds elapsed seconds — confirm.
        self.assertAlmostEqual(timer.tend, delay, places=1)
|
# -*- coding: utf-8 -*-
"""
Created on Mon July 9 22:20:12 2018
@author: Adam
"""
import os
import sqlite3
import numpy as np
import pandas as pd
from datetime import datetime
from emonitor.core import TABLE, DATA_DIRE
from emonitor.tools import db_path, db_init, db_check, db_describe, db_insert
from emonitor.data import EmonitorData
from emonitor import history
# constants
COLUMNS = ('A', 'B', 'C')   # data columns created in the test table
TCOL = 'TIMESTAMP'          # timestamp column name
NAME = '__pytest__.db'      # throwaway database file name
DB = db_path(NAME)
# Start from a clean slate: remove any leftover db from a previous run.
if os.path.isfile(DB):
    os.remove(DB)
CONN = sqlite3.connect(DB)
# (timestamp, A, B, C) rows inserted by test_insert, read back by test_history.
DATA = [('2016-12-09 09:08:13', 1, 34.8, 3),
        ('2018-12-10 09:08:13', 2, 12, 3),
        ('2018-12-10 09:10:13', 3, 6.7, 3)]
def test_datadire_exists():
    """The emonitor data directory should exist on disk."""
    assert os.path.exists(DATA_DIRE)
def test_new_db():
    """Initialise the test table and verify its schema.

    Note: these tests mutate shared module-level state (CONN/DB) and rely
    on running in file order.
    """
    db_init(CONN, TABLE, COLUMNS)
    db_check(CONN, TABLE, COLUMNS)
def test_desc():
    """db_describe should report the expected sqlite column layout."""
    DESC = ("[(0, 'TIMESTAMP', 'timestamp', 1, 'CURRENT_TIMESTAMP', 0),"
            " (1, 'A', 'DOUBLE', 0, 'NULL', 0),"
            " (2, 'B', 'DOUBLE', 0, 'NULL', 0),"
            " (3, 'C', 'DOUBLE', 0, 'NULL', 0)]")
    assert str(db_describe(CONN, TABLE)) == DESC
def test_insert():
    """Insert the fixture rows into the test table."""
    columns = (TCOL,) + COLUMNS
    for row in DATA:
        db_insert(CONN, TABLE, columns, row)
def test_history():
    """history() should return exactly the inserted values in range."""
    start = datetime(2015, 12, 9, 9, 8, 13)
    end = datetime(2018, 12, 11, 9, 8, 13)
    frame = history(CONN, start, end)
    # Expected values: the fixture rows minus their timestamp column.
    expected = np.array([row[1:] for row in DATA])
    assert np.array_equal(frame.values, expected)
def test_clean():
    """Close the connection and delete the throwaway database file."""
    CONN.close()
    os.remove(DB)
def test_emonitordata():
    """EmonitorData can create, list, and destroy a named database."""
    db_name = NAME
    data = EmonitorData(DATA_DIRE)
    data.create(db_name, columns=[1, 2, 3], quiet=True)
    listing = data.show()
    assert isinstance(listing, list)
    assert db_name in listing
    data.destroy(db_name, force=True)
    listing = data.show()
    assert db_name not in listing
from django.urls import path
from . import views
# Blog routes: class-based list view, function-based detail view.
urlpatterns = [
    # Earlier function/class-based alternatives kept for reference:
    # path('', views.list, name='blog'),
    # path('<int:id>/', views.post, name='post'),
    path('', views.PostListView.as_view(), name='blog'),
    # path('<int:pk>/', views.PostDetailView.as_view(), name='post'),
    path('<int:pk>/', views.post, name='post'),
]
from __future__ import unicode_literals
import argparse
import logging
from dvc.pkg import PkgManager
from dvc.exceptions import DvcException
from .base import CmdBase, CmdBaseNoRepo, fix_subparsers, append_doc_link
logger = logging.getLogger(__name__)
class CmdPkgInstall(CmdBase):
    """`dvc pkg install` — install a package from a URL."""

    def run(self):
        url = self.args.url
        try:
            self.repo.pkg.install(
                url,
                version=self.args.version,
                name=self.args.name,
                force=self.args.force,
            )
        except DvcException:
            logger.exception(
                "failed to install package '{}'".format(url)
            )
            return 1
        return 0
class CmdPkgUninstall(CmdBase):
    """`dvc pkg uninstall` — uninstall one or more packages.

    Keeps going after individual failures; exit code is 1 if any
    uninstall failed, 0 otherwise.
    """

    def run(self):
        failed = False
        for target in self.args.targets:
            try:
                self.repo.pkg.uninstall(target)
            except DvcException:
                logger.exception(
                    "failed to uninstall package '{}'".format(target)
                )
                failed = True
        return 1 if failed else 0
class CmdPkgImport(CmdBase):
    """`dvc pkg import` — import data out of an installed package."""

    def run(self):
        args = self.args
        try:
            self.repo.pkg.imp(
                args.name,
                args.src,
                out=args.out,
                version=args.version,
            )
        except DvcException:
            logger.exception(
                "failed to import '{}' from package '{}'".format(
                    args.src, args.name
                )
            )
            return 1
        return 0
class CmdPkgGet(CmdBaseNoRepo):
    """`dvc pkg get` — fetch data from a package by URL, without a repo."""

    def run(self):
        try:
            PkgManager.get(
                self.args.url,
                self.args.src,
                out=self.args.out,
                version=self.args.version,
            )
            return 0
        except DvcException:
            # Bug fix: this command works off `url` (see the call above),
            # not `name`; referencing self.args.name here raised
            # AttributeError inside the error handler itself.
            logger.exception(
                "failed to get '{}' from package '{}'".format(
                    self.args.src, self.args.url
                )
            )
            return 1
def add_parser(subparsers, parent_parser):
    """Register the ``pkg`` command and its subcommands on `subparsers`.

    Wires up ``install``, ``uninstall``, ``import`` and ``get`` subparsers,
    each with its handler class attached via ``set_defaults(func=...)``.
    """
    PKG_HELP = "Manage DVC packages."
    pkg_parser = subparsers.add_parser(
        "pkg",
        parents=[parent_parser],
        description=append_doc_link(PKG_HELP, "pkg"),
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False,
    )
    pkg_subparsers = pkg_parser.add_subparsers(
        dest="cmd", help="Use dvc pkg CMD --help for command-specific help."
    )
    fix_subparsers(pkg_subparsers)
    # --- pkg install ---
    PKG_INSTALL_HELP = "Install package."
    pkg_install_parser = pkg_subparsers.add_parser(
        "install",
        parents=[parent_parser],
        description=append_doc_link(PKG_INSTALL_HELP, "pkg-install"),
        help=PKG_INSTALL_HELP,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    pkg_install_parser.add_argument("url", help="Package URL.")
    pkg_install_parser.add_argument(
        "--version", nargs="?", help="Package version."
    )
    pkg_install_parser.add_argument(
        "--name",
        nargs="?",
        help=(
            "Package alias. If not specified, the name will be determined "
            "from URL."
        ),
    )
    pkg_install_parser.add_argument(
        "-f",
        "--force",
        action="store_true",
        default=False,
        help="Reinstall package if it is already installed.",
    )
    pkg_install_parser.set_defaults(func=CmdPkgInstall)
    # --- pkg uninstall ---
    PKG_UNINSTALL_HELP = "Uninstall package(s)."
    pkg_uninstall_parser = pkg_subparsers.add_parser(
        "uninstall",
        parents=[parent_parser],
        description=append_doc_link(PKG_UNINSTALL_HELP, "pkg-uninstall"),
        help=PKG_UNINSTALL_HELP,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    pkg_uninstall_parser.add_argument(
        "targets", nargs="*", default=[None], help="Package name."
    )
    pkg_uninstall_parser.set_defaults(func=CmdPkgUninstall)
    # --- pkg import ---
    PKG_IMPORT_HELP = "Import data from package."
    pkg_import_parser = pkg_subparsers.add_parser(
        "import",
        parents=[parent_parser],
        description=append_doc_link(PKG_IMPORT_HELP, "pkg-import"),
        help=PKG_IMPORT_HELP,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    pkg_import_parser.add_argument("name", help="Package name or url.")
    pkg_import_parser.add_argument("src", help="Path to data in the package.")
    pkg_import_parser.add_argument(
        "-o", "--out", nargs="?", help="Destination path to put data to."
    )
    pkg_import_parser.add_argument(
        "--version", nargs="?", help="Package version."
    )
    pkg_import_parser.set_defaults(func=CmdPkgImport)
    # --- pkg get ---
    PKG_GET_HELP = "Download data from the package."
    pkg_get_parser = pkg_subparsers.add_parser(
        "get",
        parents=[parent_parser],
        description=append_doc_link(PKG_GET_HELP, "pkg-get"),
        help=PKG_GET_HELP,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    pkg_get_parser.add_argument("url", help="Package url.")
    pkg_get_parser.add_argument("src", help="Path to data in the package.")
    pkg_get_parser.add_argument(
        "-o", "--out", nargs="?", help="Destination path to put data to."
    )
    pkg_get_parser.add_argument(
        "--version", nargs="?", help="Package version."
    )
    pkg_get_parser.set_defaults(func=CmdPkgGet)
|
#!/usr/bin/env python3
from enum import IntEnum
import time
import rospy
from threading import Thread
from dorna_enums import HumanResponse, DornaMovement
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from moveit_msgs.msg import RobotState, RobotTrajectory, DisplayTrajectory, CollisionObject
from dorna_ros.msg import DornaPos, DornaJoint, DornaLimits, DornaHomed, DornaToolHead, DornaSpeed, DornaJerk
from pix_2_world.msg import ButtonsPix, ButtonsWorld, Pix, World
from std_msgs.msg import Float64, Int32, Bool, Header, String, Empty
from dorna_api.srv import DornaConnect, DornaDisconnect, DornaHome, DornaSet, DornaPlay, DornaPause
from dorna_api.srv import DornaHalt, DornaCalibrate, DornaJog, DornaErase, DornaView
from dorna_api.srv import DornaPlanCmd, DornaXYZ, DornaXYZCheck, DornaXYZtoJoint, TfTransform
from dorna_api.srv import DornaMoveJoints, DornaMoveXYZ, DornaOutputPin
from pix_2_world.srv import Pix2World, NNService
class HumanRos:
    """Interactive console front end for driving a Dorna arm through ROS.

    Registers service proxies, subscribers and a publisher under the
    ``dorna_real`` namespace and runs a blocking text-menu loop
    (:meth:`run`) that lets a human operator connect, home, jog and
    command the robot.
    """

    def __init__(self):
        """Set up all service proxies, subscribers and publishers."""
        self._ns = 'dorna_real'
        self._joint_names = ['j0', 'j1', 'j2', 'j3', 'j4']
        self._axis_names = ['x', 'y', 'z', 'a', 'b']
        self.trajectory = None  # last planned joint trajectory, if any
        self._button_data = None  # latest ButtonsWorld message, if any
        self.connection_status = False
        ##############################################
        #### Human Service Proxies #####
        ##############################################
        robot_srv_topics = ['connect', 'disconnect',
                            'home', 'set_joint', 'play',
                            'pause', 'halt', 'calibrate',
                            'jog', 'xyz_check', 'xyz_to_joint',
                            'move_joints', 'move_xyzab', 'digital_out']
        robot_srv_msg_types = [DornaConnect, DornaDisconnect,
                               DornaHome, DornaSet, DornaPlay,
                               DornaPause, DornaHalt, DornaCalibrate,
                               DornaJog, DornaXYZCheck, DornaXYZtoJoint,
                               DornaMoveJoints, DornaMoveXYZ, DornaOutputPin]
        self._srv_prx_list = {}
        for topic, msg_type in zip(robot_srv_topics, robot_srv_msg_types):
            name_space = self._ns+'/robot_cmd/'+topic
            # rospy.wait_for_service(name_space)
            self._srv_prx_list[topic] = rospy.ServiceProxy(name_space, msg_type)
        moveit_srv_topics = ['view','move_home',
                             'move_extend', 'move_heart',
                             'move_rand', 'move_button',
                             'move_straight']
        moveit_srv_msg_types = [DornaView, DornaPlanCmd,
                                DornaPlanCmd, DornaPlanCmd,
                                DornaPlanCmd, DornaXYZ,
                                DornaPlanCmd]
        for topic, msg_type in zip(moveit_srv_topics, moveit_srv_msg_types):
            name_space = self._ns+'/moveit_cmd/'+topic
            # rospy.wait_for_service(name_space)
            self._srv_prx_list[topic] = rospy.ServiceProxy(name_space, msg_type)
        # rospy.wait_for_service('pix_2_world')
        self._srv_prx_list['pix_2_world'] = rospy.ServiceProxy('pix_2_world', Pix2World)
        self._srv_prx_list['tf_transform'] = rospy.ServiceProxy('get_tf_transform', TfTransform)
        ##############################################
        #### Human Subscribers #####
        ##############################################
        self._sub_topics = ['cartesian_position', 'joint_angles']
        pub_msg_types = [DornaPos, DornaJoint]
        self._subscriber_list = {}
        for topic, msg_type in zip(self._sub_topics, pub_msg_types):
            # Callback methods are looked up by naming convention:
            # '<topic>_callback' must exist on this class.
            callback = getattr(self, topic+"_callback", None)
            self._subscriber_list[topic] = rospy.Subscriber(self._ns+'/robot_info/'+topic, msg_type, callback)
        callback = getattr(self, "sent_pic_callback", None)
        self._subscriber_list['sent_pic'] = rospy.Subscriber('/realsense/info/sent_pic', Empty, callback)
        callback = getattr(self, "buttons_world_callback", None)
        self._subscriber_list['buttons_world'] = rospy.Subscriber('buttons_world', ButtonsWorld, callback)
        # self._package_subscriber = rospy.Subscriber('/tag_detections', AprilTagDetectionArray, self.package_callback)
        ##############################################
        #### Human Publishers #####
        ##############################################
        self._take_pic_pub = rospy.Publisher('/realsense/cmd/take_pic', Empty, queue_size=10)

    def init(self):
        """Interactively connect, home and calibrate; retries until connected.

        NOTE(review): on a failed connection this recurses into itself with
        no retry limit — a persistent failure recurses indefinitely.
        """
        self.connect()
        if self.connection_status:
            self.home()
            self.calibrate()
            try:
                response = input("Move to Home Pose enter 1: ")
                if response == '1':
                    self.home_pose()
                else:
                    pass
            except rospy.ROSException:
                pass
        else:
            self.init()

    def connect(self):
        """Prompt the operator and connect to the robot over USB serial."""
        rospy.wait_for_service('/dorna_real/robot_cmd/connect')
        response = input("Press Enter to Connect to Robot: ")
        if response == '':
            usb = "/dev/ttyACM0"  # assumes the robot enumerates here — TODO confirm
            response = self._srv_prx_list['connect'](usb)
            rospy.loginfo(response)
            if not response.connected:
                rospy.logerr('Error [Human_Ros]: Did not get proper response when connecting to robot')
            self.connection_status = response.connected
        else:
            self.connection_status = False

    def home(self):
        """Home the motors (input '1') or just set them to the home position."""
        response = input("NOTE: Please check robot surroundings before homing!\n\rTo home the robot motors input 1, else the robot will set the motors to the home position\n\r\n\rHere: ")
        if response == "1":
            srv_response = self._srv_prx_list['home']()
        else:
            srv_response = self._srv_prx_list['set_joint']()
        if srv_response.homed:
            return
        else:
            rospy.logerr('Error [Human_Ros]: Did not get proper response when homing to robot')

    def calibrate(self):
        """Optionally jog each joint so the operator can calibrate the motors."""
        response = input("Do you want to calibrate the motors? Default in NO, Enter y to override\n\rHere: ")
        if response == 'y':
            for i in range(len(self._joint_names)):
                jog_response = input("Do you want to jog joint{}? y/n ".format(i))
                if jog_response == 'n':
                    pass
                elif jog_response == 'y' or jog_response == '':
                    self.jog_motor(i)
            # response = self._srv_prx_list['calibrate']()
        else:
            pass

    def home_pose(self):
        """Move the arm to the hard-coded home joint configuration."""
        self.service_check('/dorna_real/robot_cmd/move_joints')
        response = self._srv_prx_list['move_joints']("joint", DornaMovement.GLOBAL.value, DornaMovement.DEFAULT_JOINT_SPEED.value, [0, 145, -90, 0, 0])
        if response.response:
            rospy.loginfo("Arrived at home location")
        else:
            rospy.logwarn("Did not get good response")
        # trajectory = response.trajectory.joint_trajectory
        # print(trajectory)
        # response = self._srv_prx_list['play'](trajectory)

    def tuck_away(self):
        """Move the arm to a tucked-away joint configuration (out of camera view)."""
        self.service_check('/dorna_real/robot_cmd/move_joints')
        response = self._srv_prx_list['move_joints']('joint', DornaMovement.GLOBAL.value, DornaMovement.DEFAULT_XYZ_SPEED.value, [-90., 170., 0., 0., 0.])
        if response.response:
            rospy.loginfo("Arrived at tuck_away location")
        else:
            rospy.logwarn("Did not get good response")

    def move_forward_test(self):
        """Move 30 mm forward along +x, relative to the current pose, at half speed."""
        self.service_check('/dorna_real/robot_cmd/move_xyzab')
        ## TODO:Fix this function
        speed = int(DornaMovement.DEFAULT_XYZ_SPEED.value/2)
        response = self._srv_prx_list['move_xyzab']("line", DornaMovement.RELATIVE.value, speed, [30, 0, 0, 0, 0])
        if response.response:
            rospy.loginfo("Arrived at desired location")
        else:
            rospy.logwarn("Did not get good response")

    def move_xyz_test(self):
        """Move to a fixed global Cartesian test position."""
        self.service_check('/dorna_real/robot_cmd/move_xyzab')
        response = self._srv_prx_list['move_xyzab']("line", DornaMovement.GLOBAL.value, DornaMovement.DEFAULT_XYZ_SPEED.value, [300, 0, 500, 0, 0])
        if response.response:
            rospy.loginfo("Arrived at tuck_away location")
        else:
            rospy.logwarn("Did not get good response")

    #######################################
    #### Button Commands ####
    #######################################
    def get_button_dict(self, response):
        """Return a list of 'BUTTON_<id>' labels from a ButtonsWorld response."""
        availible_buttons = []
        for i in range(len(response.msg)):
            availible_buttons.append("BUTTON_{}".format(response.msg[i].id))
            # availible_buttons[response[i].id] = self.find_transform("dorna_base", child_frame)
        return availible_buttons

    def show_availible_buttons(self, availible_buttons):
        """Log each visible button label for the operator."""
        rospy.loginfo("These are the buttons we can see: ")
        for button in availible_buttons:
            rospy.loginfo(button)

    def button_input(self, availible_buttons):
        """Ask the operator to pick a reachable button; returns its label.

        NOTE(review): on an invalid/unreachable choice this recurses, but the
        recursive call's return value is not returned — the outer call then
        returns None. Verify callers tolerate that.
        """
        self.show_availible_buttons(availible_buttons)
        button_id = input("Which button do you want to move to?\n\rEnter Here: ")
        try:
            if button_id in availible_buttons:
                if self.can_reach_button(button_id):
                    return button_id
                else:
                    rospy.logerr("Robot cant reach that button safely")
                    self.button_input(availible_buttons)
            else:
                rospy.logerr("[Error]: Response was not found in availible buttons. Try again.")
                self.button_input(availible_buttons)
        except KeyboardInterrupt:
            raise

    def get_xyz(self, button_id):
        """Return the button's position in the robot base frame, in millimeters."""
        child_frame = button_id
        parent_frame = 'dorna_base'
        self.service_check('get_tf_transform')
        xyz_transform_meters = self._srv_prx_list['tf_transform']([child_frame, parent_frame])
        xyz_transform_mm = []
        for val in xyz_transform_meters.transform:
            xyz_transform_mm.append(val*1000.)  # meters -> millimeters
        # xyz_transform_mm = [300, 0, 360]
        return xyz_transform_mm

    def can_reach_button(self, button_id):
        """Return True if the kinematics service says the button xyz is reachable."""
        xyz = self.get_xyz(button_id)
        print(xyz)
        response = self._srv_prx_list['xyz_check'](xyz)
        print(response.can_reach)
        if response.can_reach:
            rospy.loginfo("Can reach button location")
            return True
        else:
            rospy.logerr("Cannot reach button location")
            return False

    def pick_button(self, response):
        """Let the operator choose a button; return its xyz in the robot frame."""
        availible_buttons = self.get_button_dict(response)
        button_id = self.button_input(availible_buttons)
        xyz = self.get_xyz(button_id)
        return xyz

    def xyz_to_joint(self, xyz, x_offset = None):
        """Convert a Cartesian target to joint angles, optionally backing off in x.

        NOTE(review): mutates the caller's `xyz` list in place when
        `x_offset` is given.
        """
        if x_offset:
            xyz[0] -= x_offset
        response = self._srv_prx_list['xyz_to_joint'](xyz)
        return response.joint_angles

    #######################################
    #### Jog Commands ####
    #######################################
    def jog_cmd(self, axis, axis_value, step_size, max_step, min_step, units):
        """Prompt for one jog action on `axis`; return True to keep jogging.

        NOTE(review): after the 'e' (change step size) branch recurses, its
        result is discarded and execution falls through to `return True`.
        """
        rospy.loginfo("Axis {} current value [{}]: {}".format(axis, units, axis_value))
        rospy.loginfo("Current jog step size is {} [{}]".format(step_size, units))
        command = input("Enter w to jog up\n\rEnter s to jog down\n\rEnter e to change step size\n\rEnter y when ok\n\rHere: ")
        if command == 'w':
            response = self._srv_prx_list['jog']("joint", axis, step_size)
            return True
        elif command == 's':
            response = self._srv_prx_list['jog']("joint", axis, -step_size)
            return True
        elif command == 'e':
            step_size = int(input("Enter value for step size in {} (Max is {}, Min is {}): ".format(units, max_step, min_step)))
            step_size = min(max(step_size, min_step), max_step)  # clamp to [min_step, max_step]
            self.jog_cmd(axis, axis_value, step_size, max_step, min_step, units)
        elif command == 'y':
            return False
        else:
            rospy.logerr('Invalid Entry')
            pass
        return True

    def jog_axis(self, axis):
        """Jog one Cartesian axis until the operator confirms with 'y'."""
        not_ok = True
        units = rospy.get_param('/dorna_real/units')
        index = self._axis_names.index(axis)
        while not_ok:
            axis_value = self.robot_xyz_pos[index]
            if axis == "a" or axis == "b":
                step_size = 5
                not_ok = self.jog_cmd(axis, axis_value, step_size, 10, 1, "degrees")
            elif units == "mm":
                step_size = 25
                not_ok = self.jog_cmd(axis, axis_value, step_size, 50, 10, "millimeters")
            elif units == "inch":
                step_size = 1
                not_ok = self.jog_cmd(axis, axis_value, step_size, 2, 0.4, "inches")

    def jog_motor(self, motor):
        """Jog one joint motor (by index) until the operator confirms with 'y'."""
        not_ok = True
        step_size = 5 # [deg]
        name = self._joint_names[motor]
        while not_ok:
            joint_value = self.robot_joint_angles[motor]
            not_ok = self.jog_cmd(name, joint_value, step_size, 10, 1, "degrees")

    def jog_input(self):
        """Top-level jog menu: choose motor-space or Cartesian-space jogging."""
        jogging = True
        while jogging:
            mode = int(input("To move each motor enter 0\n\rTo move end effector enter 1\n\rOr 2 to quit\n\rHere: "))
            if mode == 1:
                axis = input("Enter Axis [x, y, z, pitch, roll] or q to quit jogging: ")
                if axis not in self._axis_names and axis != "q":
                    rospy.logerr("Invalid Entry")
                    pass
                elif axis == "q":
                    jogging = False
                else:
                    self.jog_axis(axis)
            elif mode == 0:
                motor = input("Enter Motor number (0-4) or q to quit jogging: ")
                if motor not in ['0','1','2','3','4','q']:
                    rospy.logerr("Invalid Entry")
                    pass
                elif motor == 'q':
                    jogging = False
                else:
                    self.jog_motor(int(motor))
            else:
                jogging = False

    ###########################################
    #### Robot Info Callback ####
    ###########################################
    def cartesian_position_callback(self, pos):
        """Cache the latest Cartesian pose [x, y, z, a, b] from the robot."""
        self.robot_xyz_pos = [pos.x, pos.y, pos.z, pos.a, pos.b]

    def joint_angles_callback(self, joints):
        """Cache the latest joint angles [j0..j4] from the robot."""
        self.robot_joint_angles = [joints.j0, joints.j1, joints.j2, joints.j3, joints.j4]

    ###########################################
    #### Thread ####
    ###########################################
    def service_check(self, name):
        """Wait up to 5 s for a service; on timeout log and re-enter run().

        NOTE(review): falling back to self.run() from here can nest the main
        loop recursively — confirm this is the intended recovery behavior.
        """
        try:
            rospy.wait_for_service(name, timeout=5.0)
        except rospy.ROSException:
            rospy.logerr('{} service is not responding'.format(name))
            self.run()

    def run(self):
        """Main operator menu loop; blocks until shutdown or 'q'."""
        try:
            while not rospy.is_shutdown():
                if self.trajectory is not None:
                    # A planned trajectory is pending: view, play or erase it.
                    play = input("Press s to view path again in Rvis\n\rPress p to play current trajectory\n\r Press e to erase current trajectory\n\r Here: ")
                    if play == 'p':
                        response = self._srv_prx_list['play'](self.trajectory)
                    elif play == 'e':
                        self.trajectory = None
                    elif play == 's':
                        response = self._srv_prx_list['view']()
                    else:
                        rospy.logerr("You did not pick one of the options try again.")
                        pass
                elif self._button_data is not None:
                    # Button-press demo: pick a visible button and drive to it.
                    button_xyz = self.pick_button(self._button_data) #Returns selected button xyz in robot frame (only returns if robot can reach)
                    joint_angles = self.xyz_to_joint(button_xyz, x_offset=30.0) #Returns joint_angles at end position with 10 mm offset
                    # name = 'move_button'
                    # self.service_check(self._ns+'/moveit_cmd/'+name)
                    # response = self._srv_prx_list[name](list(joint_angles)) #Returns trajectory for button press
                    # self.trajectory = response.trajectory.joint_trajectory
                    # rospy.loginfo("Sending trajectory to Robot")
                    # response = self._srv_prx_list['play'](self.trajectory)
                    self.service_check('/dorna_real/robot_cmd/move_joints')
                    response = self._srv_prx_list['move_joints']('joint', DornaMovement.GLOBAL.value, DornaMovement.DEFAULT_XYZ_SPEED.value, list(joint_angles))
                    if response.response:
                        rospy.loginfo("Arrived at button location")
                    else:
                        rospy.logwarn("Did not get good response")
                    response = input("Does robot hand seem to be aligned correctly? ")
                    if response == '' or response == 'y':
                        self.move_forward_test()
                    else:
                        rospy.logerr("Hand does not seem to be aligned correctly.")
                        response = input("Would you like to jog the robot into the correct place? ")
                        if response == '' or response == 'y':
                            self.jog_input()
                            response = input("Would you like to return robot to home position? ")
                            if response == '' or response == 'y':
                                self.home_pose()
                    response = input("Was demo successful, ready to quit and disconnect? ")
                    if response == '' or response == 'y':
                        self.home_pose()
                        rospy.loginfo("CONGRATS")
                    elif response == 'n':
                        self.button_demo()
                else:
                    # No pending trajectory or button data: show the main menu.
                    path = input("[Note] These are commands for Moveit Node make sure dorna_real_moveit.py has started.\n\rEnter h to move to Home position\n\rEnter e to move to Extended position\n\rEnter d to draw heart\n\rEnter r to move to random valid position\n\rEnter s to move in a straight line\n\rEnter b for buttons \n\rEnter j to jog joints\n\rEnter q to quit\n\rHere: ")
                    if path == "h":
                        name = "move_home"
                        self.service_check(self._ns+'/moveit_cmd/'+name)
                        response = self._srv_prx_list[name]()
                        self.trajectory = response.trajectory.joint_trajectory
                    elif path == "e":
                        name = 'move_extend'
                        self.service_check(self._ns+'/moveit_cmd/'+name)
                        response = self._srv_prx_list[name]()
                        self.trajectory = response.trajectory.joint_trajectory
                    elif path == "d":
                        name = 'move_heart'
                        self.service_check(self._ns+'/moveit_cmd/'+name)
                        response = self._srv_prx_list[name]()
                        self.trajectory = response.trajectory.joint_trajectory
                    elif path == "r":
                        name = 'move_rand'
                        self.service_check(self._ns+'/moveit_cmd/'+name)
                        response = self._srv_prx_list[name]()
                        self.trajectory = response.trajectory.joint_trajectory
                    elif path == "b":
                        self.button_demo()
                    elif path == 'j':
                        self.jog_input()
                    elif path == 't':
                        self.tuck_away()
                    elif path == 'f':
                        self.move_forward_test()
                    elif path == 'x':
                        self.move_xyz_test()
                    elif path == 'p':
                        self.pick_and_place(300, 0, -250, 0)
                    elif path == 'q':
                        rospy.loginfo("Quiting Program")
                        if self.connection_status:
                            # NOTE(review): 'quit' is never registered in
                            # _srv_prx_list, so this branch raises KeyError —
                            # confirm the intended service ('disconnect'?).
                            response = self._srv_prx_list['quit']()
                        rospy.signal_shutdown('Quit')
        except KeyboardInterrupt:
            pass

    def sent_pic_callback(self, data):
        """After the camera reports a sent picture, return to the home pose."""
        rospy.loginfo("Picture was sent to nn.")
        self.home_pose()

    def buttons_world_callback(self, data):
        """Cache the latest detected-buttons message for the demo loop."""
        self._button_data = data

    def button_demo(self):
        """Tuck the arm out of view, then ask the camera to take a picture."""
        self.tuck_away()
        time.sleep(5) ##TODO: Fix this to be dependant on robot state
        # name = 'pix_2_world'
        # self.service_check(name)
        # response = self._srv_prx_list[name]() #Starts button demo response is dictionary of buttons and xyz in camera frame
        self._take_pic_pub.publish(Empty())
        rospy.loginfo("Take_picture was requested.")
        # self.home_pose()

    def pick_and_place(self, xi, yi, xf, yf):
        """Scripted pick-and-place demo using a tag-tracked package and a suction pin.

        Moves above (xi, yi), locates 'tag_2' in the base frame, grips via
        digital output `pin`, carries the package, and releases at the
        (xf-offset) drop location before homing.
        """
        tool_head = 0  # [mm]
        package_height = 150  # [mm] — assumed package size, TODO confirm
        safety = 50  # [mm] clearance margin
        above = package_height+tool_head+safety
        child_frame = 'tag_2'
        parent_frame = 'dorna_base'
        pin = 2  # digital output driving the gripper/suction
        response = self._srv_prx_list['digital_out'](pin, False)
        response = self._srv_prx_list['move_xyzab']("joint", DornaMovement.GLOBAL.value, DornaMovement.DEFAULT_XYZ_SPEED.value, [xi,yi, 300, -90, 0])
        time.sleep(2)
        response = self._srv_prx_list['tf_transform']([child_frame, parent_frame])
        location = response.transform
        x = location[0]*1000.  # meters -> millimeters
        y = location[1]*1000.
        z = round(location[2]*1000., 1)
        response = self._srv_prx_list['move_xyzab']("joint", DornaMovement.GLOBAL.value, DornaMovement.DEFAULT_XYZ_SPEED.value, [x+30, y, safety, -90, 0])
        response = self._srv_prx_list['digital_out'](pin, True)
        response = self._srv_prx_list['move_xyzab']("line", DornaMovement.RELATIVE.value, DornaMovement.DEFAULT_XYZ_SPEED.value, [0, 0, -60, 0, 0])
        response = self._srv_prx_list['move_xyzab']("line", DornaMovement.RELATIVE.value, DornaMovement.DEFAULT_XYZ_SPEED.value, [0,0, 100, 0, 0])
        response = self._srv_prx_list['move_xyzab']("joint", DornaMovement.GLOBAL.value, DornaMovement.DEFAULT_XYZ_SPEED.value, [0, -300, 100, -90, 0])
        response = self._srv_prx_list['move_xyzab']("joint", DornaMovement.RELATIVE.value, DornaMovement.DEFAULT_XYZ_SPEED.value, [xf, 300, 0, 0, 0])
        response = self._srv_prx_list['move_xyzab']("line", DornaMovement.RELATIVE.value, DornaMovement.DEFAULT_XYZ_SPEED.value, [0, 0, -100, 0, 0])
        response = self._srv_prx_list['digital_out'](pin, False)
        time.sleep(2)
        response = self._srv_prx_list['move_xyzab']("line", DornaMovement.RELATIVE.value, DornaMovement.DEFAULT_XYZ_SPEED.value, [0, 0, safety, 0, 0])
        response = self.home_pose()
if __name__=="__main__":
    # Entry point: start the node, run the interactive connect/home sequence,
    # then run the operator menu in a worker thread while the main thread
    # services ROS callbacks via spin().
    rospy.init_node('human_ros')
    rospy.loginfo('Starting node "human_ros"')
    human = HumanRos()
    try:
        human.init()
    except (rospy.service.ServiceException, KeyboardInterrupt):
        rospy.logerr('Check if the robot node has started')
        raise
    try:
        runner = Thread(target=human.run)
        runner.start()
        rospy.spin()
        runner.join()
    except (rospy.ROSInterruptException, KeyboardInterrupt):
        raise
    # finally:
    #     human.quit
|
#------------------
# Script to calculate EKE from ERA-interim files (currently 6-hourly)
# Taken from ipython notebook
#
# Written by Rachel White (rachel.white@cantab.net) on 15-Dec-2016
#-------------------
import os, errno
import netCDF4
import numpy as np
import datetime as dt
import pandas as pd
import xray as xr
#import Ngl
#import math
from scipy import stats
from rhwhitepackages.readwrite import shiftlons
from rhwhitepackages.readwrite import xrayOpen
from rhwhitepackages.stats import regressmaps
from rhwhitepackages.readwrite import getdenfilename
# plotting
import xray.plot as xplt
uvindir = '/home/disk/eos4/rachel/Obs/ERAI/uv'
startyr = 1998
endyr = 2015  # NOTE: range(startyr, endyr) below excludes this final year
# Pass 1: one file per year -> monthly-summed kinetic energy.
for iyear in range(startyr,endyr):
    uvfile = xr.open_mfdataset(uvindir + '/interim_daily_' + str(iyear) +
                               '*.grb')
    ulev, vlev = uvfile['u'], uvfile['v']
    # Zonal anomalies (deviation from the zonal mean).
    udash = ulev - ulev.mean(dim='longitude')
    vdash = vlev - vlev.mean(dim='longitude')
    # NOTE(review): despite computing udash/vdash above, this uses the FULL
    # u and v fields, so EKEall here is total KE, not eddy KE — confirm
    # whether udash/vdash were intended (the monthly loop below uses them).
    EKEall = 0.5 * ((ulev * ulev) + (vlev * vlev))
    # Sum within each calendar month, then rename dims to the conventions
    # used downstream (time/lat/lon).
    EKEyears = EKEall.groupby('time.month').sum(dim='time')
    EKEyears = EKEyears.rename({'month':'time'})
    EKEyears = EKEyears.rename({'latitude':'lat'})
    EKEyears = EKEyears.rename({'longitude':'lon'})
    EKEds = xr.Dataset({'EKE':EKEyears})
    EKEds.to_netcdf(uvindir + '/EKE_' + str(iyear) + '.nc',mode='w')
# Pass 2: one file per year-month -> monthly-mean EKE from time anomalies.
uvindir = '/home/disk/eos4/rachel/Obs/ERAI/uv'
startyr = 1998
endyr = 2015
for iyear in range(startyr,endyr):
    for imonth in range(0,12):
        uvfile = xr.open_mfdataset(uvindir + '/interim_daily_' + str(iyear) +
                                   '{:02d}'.format(imonth+1) + '.grb')
        ulev, vlev = uvfile['u'], uvfile['v']
        # Anomalies from the monthly time mean.
        udash = ulev - ulev.mean(dim='time')
        vdash = vlev - vlev.mean(dim='time')
        EKEall = 0.5 * ((udash * udash) + (vdash * vdash))
        EKEmonth = EKEall.mean(dim='time')
        EKEmonth = EKEmonth.rename({'latitude':'lat'})
        EKEmonth = EKEmonth.rename({'longitude':'lon'})
        # Stamp the output with the mean timestamp of the month's samples.
        EKEtime = ulev.time
        s = pd.Series(EKEtime)
        smin = pd.to_datetime(s.min(),unit='ms')
        averagetime = (s-s.min()).astype('m8[ms]').mean()
        averagedate = smin + dt.timedelta(milliseconds=averagetime)
        EKEds = xr.Dataset({'EKE':EKEmonth},{'time':averagedate})
        EKEds.to_netcdf(uvindir + '/EKE_' + str(iyear) +
                        '{:02d}'.format(imonth+1) + '.nc',mode='w')
|
from itertools import chain
import pygame
import sys
import time
import classes.lemmings
from functions.level_utilities import level_load_save, level_save, level_create
from global_variables import BLOCK_DEFAULT_SIZE, LEVEL_DEATH_FRAMES, LEVEL_FRAME_TIME,\
LEVEL_BACKGROUND_COLOR, LEVEL_TEXT_COLOR
def level_run(sound_arg, block_size=None, level_slot=None, save_slot=None):
    """Run one level's main loop until it ends or the player exits.

    Either creates a fresh level from `level_slot` or restores one from
    `save_slot`, then loops: spawn/update/draw lemmings, process mouse and
    window events, and hand off to the end screen when the timer expires or
    no lemmings remain.
    """
    from user_interface.main_menu import menu_main
    from user_interface.level_end_screen import end_screen
    # Level startup
    pygame.init()
    # Either loading or creating lemmings and objects, reading stats and creating interface
    if save_slot is None:
        lemmings, objects_dictionarized, stats, interface = level_create(level_slot, new_block_size=block_size)
    else:
        lemmings, objects_dictionarized, stats, interface = level_load_save(save_slot, new_block_size=block_size)
    # Setting variables based on stats entries
    if block_size is None:
        block_size = stats["Block_size"]
    else:
        # If main level-running procedure had block size provided then we save that
        stats["Block_size"] = block_size
    lemmings_spawn_number = stats["Lemmings_spawn_number"]  # How many lemmings are to spawn by each entrance
    lemmings_spawn_rate = stats["Lemmings_spawn_rate"]  # How many frames there will be between next spawns
    # Based on the width we set level window size and then create a screen
    level_size = stats["Level_size"]
    screen = pygame.display.set_mode(level_size)  # setting screen of the globally set size
    # Settings for the clock's appearance
    text_font = pygame.font.Font(None, block_size)
    text_color = LEVEL_TEXT_COLOR
    method_to_use = None  # Variable used to change types of lemmings
    pause = False  # Flag which can be changed by pressing right button
    while True:
        if not pause:
            # Moving clock forward and reducing the timer
            dt = interface["Clock"].tick(1 / LEVEL_FRAME_TIME) / 1000
            stats["Timer"] -= dt
            # Swapping to a new frame with small delay (by default 200 frames per second)
            stats["Frames_elapsed"] += 1
            # time.sleep(LEVEL_FRAME_TIME * (BLOCK_DEFAULT_SIZE / block_size))
            # Spawning lemmings from each entrance
            for obj_entrance in objects_dictionarized["Entrance"]:
                obj_entrance.spawn(lemmings, spawn_rate=lemmings_spawn_rate, spawn_number=lemmings_spawn_number)
            # For each lemming...
            # NOTE(review): this loop removes items from `lemmings` while
            # iterating over it, which can skip the element after a removal —
            # confirm whether iterating over a copy was intended.
            for lem in lemmings:
                # Check if it's dead
                if lem.dead > 0:
                    # If it has been dead for a while, we finally remove him
                    if lem.dead > LEVEL_DEATH_FRAMES:
                        lemmings.remove(lem)
                    # It's dead attribute serves as a counter for displaying the dead state
                    lem.dead += 1
                    continue
                # We collide the lemming with each object of each type
                lem.collision_objects(objects_dictionarized)
                # We don't let the lemming leave escape the window
                lem.boundary_check(level_size)
                # Removing lemming if it was marked for removal (e.g. lemming made it to the exit, got upgraded)
                if lem.remove == 1:
                    lemmings.remove(lem)
                    continue
                # Moving the lemming
                lem.move()
            # Filling background
            screen.fill(LEVEL_BACKGROUND_COLOR)
            # Drawing lemmings and all objects (interface included)
            for obj in lemmings + list(chain.from_iterable(objects_dictionarized.values())):
                screen.blit(obj.image, obj.rect)
            # For interface buttons we additionally:
            for button in objects_dictionarized["Buttons"]:
                # Display the button's second image which represents the lemming type
                if button.image_name2 is not None:
                    screen.blit(button.image2, button.rect2)
                # Display the remaining charges for given lemming type
                screen.blit(*button.charges_to_text(stats, pygame.font.Font(None, 2 * block_size), (255, 0, 0)))
            # Drawing clock
            clock_text = text_font.render(interface["Time_left"]+str(round(stats["Timer"], 1)), True, text_color)
            screen.blit(clock_text, interface["Clock_position"])
            # Changing display to reflect the changes
            pygame.display.flip()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            # For each click...
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Reading position of the click
                click_position = pygame.mouse.get_pos()
                # Exit to main menu button
                if objects_dictionarized["MenuButtons"][0].rect.collidepoint(click_position):
                    menu_main(sound_arg)
                # Pause button
                if objects_dictionarized["MenuButtons"][1].rect.collidepoint(click_position):
                    pause = not pause
                # Save1, Save2 & Save3 buttons
                for i in [2, 3, 4]:
                    if objects_dictionarized["MenuButtons"][i].rect.collidepoint(click_position):
                        level_save(5 - i, lemmings, objects_dictionarized, stats, screen)
                # For each type change button which was clicked on
                for button in objects_dictionarized["Buttons"]:
                    if button.rect.collidepoint(click_position):
                        # If there is no method then we create new method
                        if (stats["Class_list"][button.class_name] > 0 and
                                method_to_use is None):
                            method_to_use = getattr(classes.lemmings, button.class_name)
                            # selection highlight effect (TODO)
                            break
                        # If there is a method "deselect" it
                        else:
                            method_to_use = None
                            # deselection effect (TODO)
                # For each lemming that was clicked we either apply the method or execute lemming's on-click method
                for lem in lemmings:
                    # Type application
                    if method_to_use is not None:
                        if lem.rect.collidepoint(click_position):
                            # Changing lemming type to the selected one and adding it back to the lemming pool
                            lemmings.append(method_to_use(lemming_arg=lem, objects_dictionarized=objects_dictionarized))
                            # Decreasing the number of charges
                            stats["Class_list"][method_to_use.__name__] -= 1
                            # "Deselecting"the method
                            method_to_use = None
                            # turn off the click effect (TODO)
                            # Leaving the lemming loop to ensure that only one lemming was changed
                            break
                    else:
                        # For special types it's going to remove the type and revert lemming back to normal
                        lem.on_click(click_position, objects_dictionarized, lemmings)
        # Level end check, proceed only if each entrance spawned maximum number of lemmings
        if stats["Frames_elapsed"] > lemmings_spawn_rate * (lemmings_spawn_number+1):
            # End of time or end of living lemmings
            if max(stats["Timer"], 0) == 0 or lemmings == []:
                # Count how many lemmings were spawned
                stats["Lemmings_spawned"] = lemmings_spawn_number * len(objects_dictionarized["Entrance"])
                # Count how many lemmings made it to exits
                for obj_exit in objects_dictionarized["Exit"]:
                    stats["Lemmings_exit"] += obj_exit.lemming_exit_number
                end_screen(stats, sound_arg)
|
from django.db import models
from django_resized import ResizedImageField
# Create your models here.
def upload_directory(instance, filename):
    """Build the storage path for a user's profile picture.

    Keeps the uploaded file's extension but renames the file to
    ``profile_picture`` inside a per-user directory.
    """
    extension = filename.rsplit('.', 1)[-1]
    return f'user_{instance.id}/profile_picture.{extension}'
class User(models.Model):
    """Application user with credentials and an auto-resized profile picture."""

    username = models.CharField(max_length=100,unique=True)
    email = models.EmailField(max_length=100,unique=True)
    # NOTE(review): stored as a plain CharField — confirm hashing is applied
    # elsewhere before saving; raw passwords must not be persisted.
    password = models.CharField(max_length=100)
    # Resized to at most 500x500 on upload; falls back to a bundled default.
    profile_pic = ResizedImageField(size=[500,500],upload_to=upload_directory,default='default.png')
    class Meta:
        verbose_name_plural = "User"
    def __str__(self): return self.username
class Message(models.Model):
    """A chat message sent from one user to another, ordered by timestamp."""

    sender = models.ForeignKey(User,on_delete=models.CASCADE,related_name='%(class)s_sender_requests_created')
    receiver = models.ForeignKey(User,on_delete=models.CASCADE,related_name='%(class)s_receiver_requests_created')
    content = models.CharField(max_length=5000)
    # Explicit send time supplied by the caller (not auto_now_add).
    datetime = models.DateTimeField()
    class Meta:
        ordering = ['datetime']
    def __str__(self):return str(self.id)
class Contact(models.Model):
    """A directed contact-list entry: `user` has `contact` in their list."""

    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_user_requests_created')
    contact = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_contact_requests_created')
    # Whether `user` marked this contact as a favourite.
    favourite = models.BooleanField(default=False)
    class Meta:
        ordering = ['contact']
    def __str__(self): return self.user.username + '-' + self.contact.username
def story_upload_directory(instance, filename):
    """Build the storage path for a story image.

    BUG FIX: the previous version ignored `filename` and returned a literal
    '(unknown)' path segment, so every upload for an instance collided at
    the same path and lost its original name/extension. Preserve the
    uploaded filename under a per-story directory instead.
    """
    return f'story/{instance.id}/{filename}'
class Story(models.Model):
    """A single timed story image belonging to exactly one user."""

    # NOTE(review): ForeignKey(unique=True) enforces one story per user;
    # Django recommends OneToOneField for this — confirm intent.
    user = models.ForeignKey(User,unique=True,on_delete=models.CASCADE, related_name='%(class)s_user_requests_created')
    # Resized to at most 700x700 on upload.
    story = ResizedImageField(size=[700,700],upload_to=story_upload_directory)
    datetime = models.DateTimeField()
    def __str__(self): return self.user.username
|
# -*- coding: utf-8 -*-
from icemac.addressbook.i18n import _
import datetime
import icemac.addressbook.browser.base
import icemac.addressbook.interfaces
import icemac.truncatetext
import z3c.pagelet.browser
import z3c.table.batch
import z3c.table.column
import z3c.table.table
import zope.i18n
# Sentinel "latest possible" timestamps, used so empty date/datetime fields
# sort to the end of the list.
END_OF_DATE = datetime.date.max
# Kept explicit (not datetime.max) because datetime.max carries microseconds.
END_OF_DATETIME = datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59)
# Columns
class BaseColumn(z3c.table.column.Column):
    """Column knowing how to handle entities and fields in the address book.

    It adapts the `item` to the defined entity and gets the value from the
    specified field.
    """

    # CAUTION: This column needs the following fields to be set:
    entity = NotImplemented  # referenced entity as object
    field = NotImplemented  # referenced field on entity as object
    defaultValue = u''  # Value for display when there is no value.

    def getRawValue(self, item):
        """Compute the value, which can be None."""
        # NOTE(review): `icemac.addressbook.entities` is not imported at the
        # top of this module; presumably it is reachable as an attribute via
        # the other icemac.addressbook imports — confirm.
        schema_field = icemac.addressbook.entities.get_bound_schema_field(
            item, self.entity, self.field)
        # Returning the value of the bound object as it might differ from item:
        return schema_field.get(schema_field.context)

    def getValue(self, item):
        """Compute the value, mostly ready for display."""
        obj = self.getRawValue(item)
        if obj is None:
            return self.defaultValue
        return obj

    # Cells render the (display-ready) value unchanged.
    renderCell = getValue
class DateTimeColumn(z3c.table.column.FormatterColumn,
                     BaseColumn):
    """Column which is sortable even with `None` values in it."""

    maxValue = END_OF_DATETIME

    def renderCell(self, item):
        value = self.getRawValue(item)
        if not value:
            return self.defaultValue
        return self.getFormatter().format(value)

    def getSortKey(self, item):
        # Sort on the isoformat string so comparison does not break when
        # timezone-naive and timezone-aware datetimes are mixed. This may
        # produce small glitches in the sort order, but it is better than an
        # HTTP-500 and better than guessing timezone information.
        raw = self.getRawValue(item)
        # Empty date fields sort to the end via the maxValue sentinel.
        key = self.maxValue if raw is None else raw
        return key.isoformat()
class DateColumn(DateTimeColumn):
    """DateColumn which is able to sort even `None` values."""
    # Use the date formatter category and a date (not datetime) sentinel.
    formatterCategory = 'date'
    maxValue = END_OF_DATE
class LinkColumn(icemac.addressbook.browser.base.BaseView,
                 z3c.table.column.LinkColumn):
    """LinkColumn which uses address book's URL computation method."""
    def getLinkURL(self, item):
        # Delegate to BaseView.url so link targets are computed consistently.
        return self.url(item, self.linkName)
class TitleLinkColumn(LinkColumn):
    """Column containing the title of an object and a link to the object."""

    header = _(u'Name')
    weight = 2

    def getSortKey(self, item):
        # Case-insensitive sort on the adapted title.
        title = icemac.addressbook.interfaces.ITitle(item)
        return title.lower()

    def getLinkContent(self, item):
        return icemac.addressbook.interfaces.ITitle(item)
class DeleteLinkColumn(LinkColumn):
    """Column containing the a link to delete the object."""
    header = _(u'')
    # High weight pushes the delete link to the last column.
    weight = 100
    linkContent = _(u'Delete')
    linkName = 'delete.html'
class TruncatedContentColumn(z3c.table.column.GetAttrColumn):
    """Column which truncates its content to `length` characters."""

    length = 20  # number of characters to display
    attrName = None  # attribute to access
    ellipsis = u'…'  # ellipsis sign
    defaultValue = u''  # default value when there is no value

    def getValue(self, obj):
        raw = super(TruncatedContentColumn, self).getValue(obj)
        if raw is None:
            return self.defaultValue
        # Shorten long values, appending the ellipsis sign.
        return icemac.truncatetext.truncate(raw, self.length, self.ellipsis)
class SourceColumn(z3c.table.column.GetAttrColumn):
    """GetAttrColumn where attr is a value or iterable out of a source."""
    attrName = NotImplemented
    # Bug fix: this attribute was misspelled ``soure``, so the base class
    # never actually declared the ``source`` attribute that ``getValue``
    # and subclasses (e.g. KeywordsColumn) rely on.
    source = NotImplemented  # source the values are taken from
    def getSortKey(self, item):
        # Sort case insensitive:
        return super(SourceColumn, self).getSortKey(item).lower()
    def getValue(self, obj):
        values = super(SourceColumn, self).getValue(obj)
        # Translate each value's title and render them sorted, comma-separated.
        titles = [zope.i18n.translate(self.source.factory.getTitle(x),
                                      context=self.request)
                  for x in values]
        return u', '.join(sorted(titles, key=lambda x: x.lower()))
class KeywordsColumn(SourceColumn):
    """SourceColumn where attr is an iterable of keywords."""
    attrName = 'keywords'
    source = icemac.addressbook.interfaces.keyword_source
# Tables
class Table(z3c.table.table.Table):
    """Table which supports a no-rows-found message."""

    title = None  # used by the breadcrumbs
    cssClassEven = u'table-even-row'
    cssClassOdd = u'table-odd-row'
    # Effectively disables batching for normal data set sizes.
    startBatchingAt = 1000000
    no_rows_message = u''  # Set at subclass.

    def renderTable(self):
        if not self.rows:
            # No data: show the subclass-provided message instead of an
            # empty table.
            return self.no_rows_message
        return super(Table, self).renderTable()
class PageletTable(z3c.pagelet.browser.BrowserPagelet, Table):
    """Render the table in a pagelet which also has a template.
    When no template is required the `Table` class can be used.
    """
    def __init__(self, *args, **kw):
        # NOTE(review): super() dispatches along the MRO starting at
        # BrowserPagelet; Table.__init__ is then called explicitly --
        # presumably because the cooperative chain does not reach it.
        # Confirm before simplifying.
        super(PageletTable, self).__init__(*args, **kw)
        Table.__init__(self, *args, **kw)
    # Ensure the Table.update (not the pagelet's) runs on update.
    update = Table.update
# Batching
class NiceBatchProvider(z3c.table.batch.BatchProvider):
    """A batch provider with a nicer spacer."""
    # Use an ellipsis between distant page links instead of the default.
    batchSpacer = u'…'
|
__author__ = "rolevin"
from typing import List, Optional
from synapse.ml.cyber.utils.spark_utils import DataFrameUtils, ExplainBuilder
from pyspark.ml import Transformer
from pyspark.ml.param.shared import Param, Params
from pyspark.sql import DataFrame, functions as f, types as t
import random
class ComplementAccessTransformer(Transformer):
    """
    Given a dataframe it returns a new dataframe with access patterns sampled from
    the set of possible access patterns which did not occur in the given dataframe
    (i.e., it returns a sample from the complement set).
    """
    # Fix: the class description above used to appear *after* the Param
    # declarations, where it is a no-op string expression; as the first
    # statement it is now the actual class docstring.

    partitionKey = Param(
        Params._dummy(), "partitionKey", "The name of the partition_key field name"
    )
    indexedColNamesArr = Param(
        Params._dummy(),
        "indexedColNamesArr",
        "The name of the fields to use to generate the complement set from",
    )
    complementsetFactor = Param(
        Params._dummy(),
        "complementsetFactor",
        "The estimated average size of the complement set to generate",
    )

    def __init__(
        self,
        partition_key: Optional[str],
        indexed_col_names_arr: List[str],
        complementset_factor: int,
    ):
        super().__init__()
        # we assume here that all indices of the columns are continuous within their partition_key
        ExplainBuilder.build(
            self,
            partitionKey=partition_key,
            indexedColNamesArr=indexed_col_names_arr,
            complementsetFactor=complementset_factor,
        )

    @staticmethod
    def _min_index_token(curr_col_name: str) -> str:
        """Synthetic column name holding the per-partition minimum index."""
        return "__min_{0}__".format(curr_col_name)

    @staticmethod
    def _max_index_token(curr_col_name: str) -> str:
        """Synthetic column name holding the per-partition maximum index."""
        return "__max_{0}__".format(curr_col_name)

    @staticmethod
    def _tuple_token() -> str:
        """Synthetic column name holding the exploded candidate tuples."""
        return "__tuple__"

    def _transform(self, df: DataFrame) -> DataFrame:
        """generate a dataframe which consists of a sample from the complement set

        Parameters
        ----------
        df: a given dataframe containing the columns in 'indexed_col_names_arr'

        Returns
        -------
        dataframe which which consists of a sample from the complement set
        """
        complementset_factor = self.complementset_factor
        # A factor of zero means "sample nothing": return an empty frame.
        if complementset_factor == 0:
            return DataFrameUtils.make_empty(df)
        the_partition_key = self.partition_key
        indexed_col_names_arr = self.indexed_col_names_arr
        if the_partition_key is None:
            # No partitioning requested: add a constant dummy key so the
            # per-partition aggregations below still work; dropped at the end.
            partition_key = "__dummy_partition_key__"
            df = df.withColumn(partition_key, f.lit(0)).cache()
        else:
            partition_key = the_partition_key
            df = df.cache()
        # One frame per indexed column: the [min, max] index range observed
        # within each partition.
        limits_dfs = [
            df.select(partition_key, curr_col_name)
            .distinct()
            .groupBy(partition_key)
            .agg(
                f.min(curr_col_name).alias(
                    ComplementAccessTransformer._min_index_token(curr_col_name)
                ),
                f.max(curr_col_name).alias(
                    ComplementAccessTransformer._max_index_token(curr_col_name)
                ),
            )
            .orderBy(partition_key)
            for curr_col_name in indexed_col_names_arr
        ]

        def make_randint(factor):
            # Build a UDF returning `factor` random tuples, each component
            # drawn uniformly from its column's [min, max] index range.
            schema = t.ArrayType(
                t.StructType(
                    [
                        t.StructField(curr_col_name, t.IntegerType())
                        for curr_col_name in indexed_col_names_arr
                    ]
                )
            )

            @f.udf(schema)
            def randint(min_index_arr, max_index_arr):
                return [
                    tuple(
                        [
                            random.randint(min_index, max_index)
                            for min_index, max_index in zip(
                                min_index_arr, max_index_arr
                            )
                        ]
                    )
                    for _ in range(factor)
                ]

            return randint

        # Attach the per-column limits to every row.
        pre_complement_candidates_df = df.cache()
        for limits_df in limits_dfs:
            pre_complement_candidates_df = pre_complement_candidates_df.join(
                limits_df, partition_key
            ).cache()
        cols = [f.col(partition_key)] + [
            f.col(curr_col_name) for curr_col_name in indexed_col_names_arr
        ]
        randint = make_randint(complementset_factor)
        # Random candidate tuples drawn within the observed index ranges.
        complement_candidates_df = (
            pre_complement_candidates_df.withColumn(
                ComplementAccessTransformer._tuple_token(),
                f.explode(
                    randint(
                        f.array(
                            [
                                f.col(
                                    ComplementAccessTransformer._min_index_token(
                                        curr_col_name
                                    )
                                )
                                for curr_col_name in indexed_col_names_arr
                            ]
                        ),
                        f.array(
                            [
                                f.col(
                                    ComplementAccessTransformer._max_index_token(
                                        curr_col_name
                                    )
                                )
                                for curr_col_name in indexed_col_names_arr
                            ]
                        ),
                    )
                ),
            )
            .select(
                *(
                    [partition_key]
                    + [
                        f.col(
                            "{0}.{1}".format(
                                ComplementAccessTransformer._tuple_token(),
                                curr_col_name,
                            )
                        ).alias(curr_col_name)
                        for curr_col_name in indexed_col_names_arr
                    ]
                )
            )
            .distinct()
            .orderBy(*cols)
        )
        tuples_df = df.select(*cols).distinct().orderBy(*cols)
        # Keep only candidates that never occur in the input: the complement.
        res_df = (
            complement_candidates_df.join(
                tuples_df, [partition_key] + indexed_col_names_arr, how="left_anti"
            )
            .select(*cols)
            .orderBy(*cols)
        )
        if the_partition_key is None:
            res_df = res_df.drop(partition_key)
        return res_df
|
import sys, os
import numpy as np
import statsmodels.stats.multitest
# CLI: python3 fdrPASTAAResult.py <PASTAA_result> <outputFile> <pvalue>
if (len(sys.argv) < 4):
    print("python3 fdrPASTAAResult.py PASTAA_result, outputFile, pvalue")
else:
    PASTAA_result = sys.argv[1]
    output_ = sys.argv[2]
    p = float(sys.argv[3])  # FDR significance threshold (alpha)
    TFs = []
    pvalues = []
    #read result
    # Input format: one TF per line, tab-separated: <TF name>\t<p-value>.
    with open(PASTAA_result, 'r') as result:
        for line in result:
            line = line.strip().split('\t')
            TFs.append(line[0])
            # Clamp scores above 1 to a valid p-value of 1.0 before correction.
            if float(line[1]) > 1:
                pvalues.append(1.0)
            else:
                pvalues.append(float(line[1]))
    #determine fdr
    # NOTE(review): is_sorted=True tells fdrcorrection the p-values are
    # already in ascending order; here they are taken in file order.
    # Verify the PASTAA result file is sorted by p-value, otherwise the
    # correction (and the TF/p-value alignment) is wrong.
    rec, cor_pvalue = statsmodels.stats.multitest.fdrcorrection(pvals = pvalues, alpha = p,is_sorted = True)
    # print(rec)
    # print(cor_pvalue)
    counter = 0
    with open(output_, 'w') as o:
        o.write("TF\tpvalue(fdr correction)\n")
        # counter advances on every iteration so indices stay aligned with TFs.
        for i in rec:
            if i == True:
                o.write(TFs[counter] + '\t' + str(cor_pvalue[counter]) + '\n')
            counter+=1
|
# vim:set ts=4 sw=4 et:
'''
ImageName
---------
'''
import logging
import re
try:
from urllib.parse import parse_qs, urlsplit
except ImportError:
# python2
from urlparse import parse_qs, urlsplit
from ..action_mapper import Action
from ..exceptions import (ConfigurationException, InvalidRequestException,
UnauthorizedException)
from .base import BaseCheck
def query_parameter(payload, **kwargs):
    """Extract image name from the query parameters
    """
    parameter_name = kwargs.get('parameter_name', 't')
    query = parse_qs(urlsplit(payload.uri).query)
    logging.debug('Query parameters: %s', query)
    if parameter_name not in query:
        raise InvalidRequestException(
            'Image name not found in query parameters'
        )
    images = query[parameter_name]
    pairs = []
    for image in images:
        # Missing tag defaults to "latest".
        if ':' in image:
            name, tag = image.split(':')
        else:
            name, tag = image, 'latest'
        pairs.append((name, tag))
    # A single image is returned as a bare (name, tag) tuple; several images
    # are returned as a list of tuples.
    if len(pairs) == 1:
        return pairs[0]
    return pairs
def query_parameter_compound(payload, **kwargs):
    """Extract image name from the query parameters
    """
    name_key = kwargs.get('parameter_name', 'repo')
    tag_key = kwargs.get('parameter_tag', 'tag')
    query = parse_qs(urlsplit(payload.uri).query)
    if name_key not in query:
        raise InvalidRequestException(
            'Image name not found in query parameters'
        )
    # The tag lives in its own query parameter; default to "latest".
    tag = query[tag_key][0] if tag_key in query else 'latest'
    return (query[name_key][0], tag)
def path_and_query_parameter(payload, **kwargs):
    """Extract image name from path, tag from the query parameters
    """
    # Extract name from the request path, e.g. /v1.24/images/<name>/push
    path = urlsplit(payload.uri).path
    # Fix: the version dot was an unescaped regex ".", matching any character.
    match = re.match(
        r"^/v\d\.\d{2}/[^/]+/([a-zA-Z0-9/:_.-]+)(/(json|tag|history|push|get))+/?",
        path)
    # Fix: an unmatched path previously crashed with AttributeError on
    # match.group(); raise the exception type the sibling extractors use.
    if match is None:
        raise InvalidRequestException(
            'Image name not found in path'
        )
    name = match.group(1)
    # Extract tag
    parameter_tag = kwargs.get('parameter_tag', 'tag')
    query = parse_qs(urlsplit(payload.uri).query)
    if parameter_tag not in query:
        raise InvalidRequestException(
            'Image tag not found in query parameters'
        )
    tag = query[parameter_tag][0]
    return (name, tag)
#pylint: disable=unused-argument
def path_parameter(payload, **kwargs):
    """Extract container name from the path parameters
    """
    path = urlsplit(payload.uri).path
    # DELETE requests carry the image name as the last path component;
    # other methods end with an action suffix (json/tag/history/push/get).
    # Fix: the version dot was an unescaped regex ".", matching any character.
    if payload.method == "DELETE":
        match = re.match(r"^/v\d\.\d{2}/[^/]+/([a-zA-Z0-9/:_.-]+)/?", path)
    else:
        match = re.match(
            r"^/v\d\.\d{2}/[^/]+/([a-zA-Z0-9/:_.-]+)(/(json|tag|history|push|get))+/?",
            path)
    # Fix: an unmatched path previously crashed with AttributeError on
    # match.group(); raise the exception type the sibling extractors use.
    if match is None:
        raise InvalidRequestException(
            'Image name not found in path'
        )
    if ':' in match.group(1):
        name, tag = match.group(1).split(':')
    else:
        name = match.group(1)
        tag = 'latest'
    return (name, tag)
def source_and_dest(payload, **kwargs):
    """Extract image name from the path and query parameters
    """
    # Source comes from the path, destination from the query string.
    source = path_parameter(payload, **kwargs)
    destination = query_parameter_compound(payload, **kwargs)
    return [source, destination]
# Maps an action name (as resolved by Action) to its extraction strategy:
# a bare function, a (function, kwargs) tuple, or a list of those that are
# tried in order until one does not raise InvalidRequestException.
FUNCTION_MAP = {
    # 'imagesList': query_filter_parameter, # ignored right now
    'imagesBuild': (query_parameter, {"parameter_name": "t"}),
    'imagesCreate': [
        (query_parameter, {"parameter_name": "repo"}), # import
        (query_parameter_compound, {"parameter_name": "fromImage", "parameter_tag": "tag"}), # pull
    ],
    'imagesInspect': path_parameter,
    'imagesHistory': path_parameter,
    'imagesPush': path_and_query_parameter,
    'imagesTag': [
        (source_and_dest, {"parameter_name": "repo", "parameter_tag": "tag"}),
    ],
    'imagesRemove': path_parameter,
    'imagesCommit': [
        (query_parameter_compound, {"parameter_name": "repo", "parameter_tag": "tag"}),
    ],
    'imagesExport': path_parameter,
    'imagesExportMultiple': (query_parameter, {"parameter_name": "names"}),
}
def get_action_name(payload, action_name=None):
    """Return the action_name for the Payload URI

    :param Payload payload: The current payload
    :return: The action_name
    :rtype: str or None
    """
    if action_name is None:
        # Resolve the action from the request method and URI.
        action_name = Action(method=payload.method, query=payload.uri).name
    # Only actions with a registered extraction strategy are relevant.
    return action_name if action_name in FUNCTION_MAP else None
class ImageName(BaseCheck):
    """A module that checks the container name"""

    def run(self, args, payload):
        """Run the module checks.

        Validate `container name` against defined rules.

        Raise :exc:`UnauthorizedException` when the container name doesn't
        respect the rules.
        When a list is given, Exception is raised only if all rules fails.
        If no name was forced for the container creation, then Exception is
        raised.

        Rules examples:

        .. code-block:: yaml

            rules: "^myproject-.*$"

        .. code-block:: yaml

            rules: "^$USER-.*$"

        .. code-block:: yaml

            rules: "^only_this_container_name$"

        Or a list:

        .. code-block:: yaml

            rules: ["^only_this_container_name$", "^$USER-.*$"]

        The container name used on Request is contained in the uri query
        parameters as 'name'.

        :param args: The module arguments from the config
        :type args: list or dict or str or None
        :param payload: The payload of the current request.
        :type payload: :class:`docker_leash.payload.Payload`
        """
        if not args:
            raise ConfigurationException(
                'Incomplete "ImageName" module configuration'
            )
        # Actions without a registered extraction strategy are not checked.
        name = get_action_name(payload)
        if name is None:
            return
        names = self._get_name(payload, name)
        rules = args if isinstance(args, list) else [args]
        # Expand $USER-style placeholders with the requesting user's name.
        rules = self.replace_user(rules, payload)
        logging.debug('Rules: %s', rules)
        # NOTE(review): extractors such as path_parameter return a single
        # (name, tag) tuple, not a list of tuples; iterating it here yields
        # the two strings and image_name[0] is then the first *character*.
        # Verify whether single results should be wrapped in a list.
        for rule in rules:
            for image_name in names:
                if not re.match(rule, image_name[0]):
                    raise UnauthorizedException(
                        'Image name not authorized'
                    )

    @staticmethod
    def _get_name(payload, action_name=None):
        """Return the name of the container

        :param Payload payload: The current payload
        :return: The container name
        :rtype: str or None
        """
        def call(function, payload):
            """Called function is optional
            """
            # Map entries may be a bare function or a (function, kwargs) pair.
            if isinstance(function, tuple):
                function_name = function[0]
                kwargs = function[1]
            else:
                function_name = function
                kwargs = {}
            return function_name(payload, **kwargs)

        action_name = get_action_name(payload, action_name)
        function = FUNCTION_MAP[action_name]
        # Multiple rules
        if isinstance(function, list):
            # Try each extractor in turn; the first that succeeds wins.
            for item in function:
                try:
                    return call(item, payload)
                except InvalidRequestException:
                    pass
            raise InvalidRequestException(
                'Action name not found: %s' % action_name
            )
        # Single rule
        return call(function, payload)
|
import numpy as np
import pria_lifechem
import os
from pria_lifechem.function import *
from pria_lifechem.models.CallBacks import *
from pria_lifechem.models.deep_classification import *
from pria_lifechem.models.deep_regression import *
# Names of the three cross-validation tasks; the list position is the column
# index used to slice the y arrays stored in the fold_*.npz files below.
task_list = ['cross_validation_Keck_Pria_AS_Retest', 'cross_validation_Keck_FP', 'cross_validation_RMI']
def clean(list_a, list_b):
    """Drop paired entries where either value is NaN; return two arrays."""
    kept_a, kept_b = [], []
    for first, second in zip(list_a, list_b):
        # Keep a pair only when both values are present.
        if np.isnan(first) or np.isnan(second):
            continue
        kept_a.append(first)
        kept_b.append(second)
    return np.array(kept_a), np.array(kept_b)
if __name__ == '__main__':
    # NOTE(review): this block uses Python 2 print statements and will not
    # run under Python 3 without conversion. `argparse` is presumably
    # brought in by one of the star imports above -- verify.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', dest="model_name", action="store", required=True)
    parser.add_argument('--task', dest="task", action="store", required=True)
    parser.add_argument('--fold_idx', dest="fold_idx", action="store", type=int, required=True)
    given_args = parser.parse_args()
    model_name = given_args.model_name
    task = given_args.task
    fold_idx = given_args.fold_idx
    # Stored predictions for one cross-validation fold.
    data = np.load('{}/fold_{}.npz'.format(model_name, fold_idx))
    # Column index of the requested task inside the stacked y arrays.
    task_index = task_list.index(task)
    print task, task_index
    y_train = reshape_data_into_2_dim(data['y_train'][:, task_index])
    y_val = reshape_data_into_2_dim(data['y_val'][:, task_index])
    y_test = reshape_data_into_2_dim(data['y_test'][:, task_index])
    y_pred_on_train = reshape_data_into_2_dim(data['y_pred_on_train'][:, task_index])
    y_pred_on_val = reshape_data_into_2_dim(data['y_pred_on_val'][:, task_index])
    y_pred_on_test = reshape_data_into_2_dim(data['y_pred_on_test'][:, task_index])
    print y_train.shape, '\t', y_pred_on_train.shape, '\t', y_test.shape, '\t', y_pred_on_test.shape
    # Remove (label, prediction) pairs where either side is NaN.
    y_train, y_pred_on_train = clean(y_train, y_pred_on_train)
    y_val, y_pred_on_val = clean(y_val, y_pred_on_val)
    y_test, y_pred_on_test = clean(y_test, y_pred_on_test)
    print y_train.shape, '\t', y_pred_on_train.shape, '\t', y_test.shape, '\t', y_pred_on_test.shape
    print('train precision: {}'.format(precision_auc_single(y_train, y_pred_on_train)))
    print('train roc: {}'.format(roc_auc_single(y_train, y_pred_on_train)))
    print('train bedroc: {}'.format(bedroc_auc_single(y_train, y_pred_on_train)))
    print
    print('validation precision: {}'.format(precision_auc_single(y_val, y_pred_on_val)))
    print('validation roc: {}'.format(roc_auc_single(y_val, y_pred_on_val)))
    print('validation bedroc: {}'.format(bedroc_auc_single(y_val, y_pred_on_val)))
    print
    print('test precision: {}'.format(precision_auc_single(y_test, y_pred_on_test)))
    print('test roc: {}'.format(roc_auc_single(y_test, y_pred_on_test)))
    print('test bedroc: {}'.format(bedroc_auc_single(y_test, y_pred_on_test)))
    print
    # Enrichment factors at several screening ratios; NEF normalizes by the
    # maximum achievable EF at that ratio.
    for EF_ratio in [0.02, 0.01, 0.0015, 0.001]:
        n_actives, ef, ef_max = enrichment_factor_single(y_test, y_pred_on_test, EF_ratio)
        nef = ef / ef_max
        print('ratio: {}, EF: {},\tactive: {}'.format(EF_ratio, ef, n_actives))
        print('ratio: {}, NEF: {}'.format(EF_ratio, nef))
|
#!/usr/bin/python3
#‑∗‑ coding: utf‑8 ‑∗‑
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.utils.tensorboard as tensorboard
from random import choices
from collections import deque
class Agent:
    """DQN agent with an epsilon-greedy policy, a target network and an
    experience-replay buffer.

    Args:
        nb_actions (int): number of actions available to the agent
        parameters (dict): contains all the parameters needed (device,
            alpha, epsilon, epsilon_decay_factor, min_epsilon, gamma,
            batch_size, memory_size, update_step, tensorboard_log)
    """
    def __init__(self, nb_actions, parameters):
        self._set_parameters(parameters)
        self.nb_actions = nb_actions
        # Primary network is trained; target network provides stable targets.
        self.primary = DQN().to(self.device)
        self.target = DQN().to(self.device)
        self.optimizer = optim.SGD(self.primary.parameters(), lr=self.alpha)
        self.memory = ExperienceReplayBuffer(size=self.memory_size)
        # Rolling window over the last 100 episode returns.
        self.rewards = deque([0], maxlen=100)
        self.eval_mode = False
        self.step_count = 0
        if self.tensorboard_log:
            self.writer = tensorboard.SummaryWriter()

    def _set_parameters(self, configuration):
        # Expose every configuration entry as an instance attribute.
        self.__dict__ = {k: v for (k, v) in configuration.items()}

    def _reset_target(self):
        """ Update the target network.
        """
        self.target.load_state_dict(self.primary.state_dict())

    def _update_epsilon(self):
        """ Update the epsilon value (multiplicative decay with a floor).
        """
        self.epsilon = max(self.epsilon * self.epsilon_decay_factor, self.min_epsilon)

    def _normalize_reward(self, rewards):
        """ Normalize rewards.
        """
        return rewards / 100

    def load(self, path):
        """ Load weights at the path location.
        Args:
            path (str): Path where the weights will be loaded.
        """
        self.primary.load_state_dict(torch.load(path))
        self._reset_target()

    def save(self, path):
        """ Save weights at the path location.
        Args:
            path (str): Path where the weights will be saved.
        """
        torch.save(self.primary.state_dict(), path)

    def log(self, name, value):
        """ Log the value in function of steps.
        Args:
            name (str): Variable's name.
            value (float): Value to store.
        """
        if self.tensorboard_log:
            self.writer.add_scalar(name, value, self.step_count)

    def eval(self):
        """ Turn off exploration and learning.
        """
        self.eval_mode = True

    def train(self):
        """ Turn on exploration and learning.
        """
        self.eval_mode = False

    @torch.no_grad()
    def select_action(self, state) -> int:
        """ Given the state, select an action.
        Args:
            state (obj): the current state of the environment.
        Returns:
            action (int): an integer compatible with the task's action space.
        """
        # Bug fix: the condition was `not self.eval_mode and rand > epsilon`,
        # which forced the *random* branch whenever eval_mode was True --
        # i.e. eval() turned exploration fully ON instead of off. Eval mode
        # is now always greedy; training is greedy with prob 1 - epsilon.
        if self.eval_mode or torch.rand(1).item() > self.epsilon:
            y_pred = self.primary(torch.from_numpy(state).to(self.device))
        else:
            y_pred = torch.rand(self.nb_actions)
        action = torch.argmax(y_pred).item()
        return action

    def step(self, state, action, reward, next_state, done) -> None:
        """ Update the agent's knowledge, using the most recently sampled tuple.
        Args:
            state (obj): the previous state of the environment
            action (int): the agent's previous choice of action
            reward (float): last reward received
            next_state (obj): the current state of the environment
            done (bool): whether the episode is complete (True or False)
        """
        # Do not save step or learn in eval mode
        if self.eval_mode:
            return
        # Push experience to buffer
        self.memory.push(state, action, reward, next_state, done)
        self.rewards[-1] += reward
        if len(self.memory) >= self.batch_size:
            self.learn()
            # Periodically sync the target network with the primary one.
            if self.step_count % self.update_step == 0:
                self._reset_target()
        if done:
            # Decay exploration once per episode and start a new return entry.
            self._update_epsilon()
            self.log('Epsilon', self.epsilon)
            if len(self.memory) >= self.batch_size:
                self.log('Reward', sum(self.rewards) / len(self.rewards))
            self.rewards.append(0)
        self.step_count += 1

    def learn(self):
        """ Update the agent's knowledge, using replay buffer.
        """
        # Create random batch of self.batch_size steps
        states, actions, rewards, next_states, dones = self.memory.batch(batch_size=self.batch_size)
        # Create torch tensors
        states = torch.tensor(states).to(self.device)
        actions = torch.tensor(actions).to(self.device)
        rewards = torch.tensor(rewards).float().to(self.device)
        next_states = torch.tensor(next_states).to(self.device)
        dones = torch.tensor(dones).to(self.device)
        # Normalize rewards
        rewards = self._normalize_reward(rewards)
        # Calculate target reward and detach it from the graph
        # Avoid gradient descend in the target network
        next_state_value = self.target(next_states).max(1)[0].detach()
        # Remove temporal reward if it's the last step
        next_state_value[dones] = 0.0
        # Calculate target reward
        target_reward = rewards + (self.gamma * next_state_value)
        # Actual action values state
        states_action_values = self.primary(states).gather(1, actions.unsqueeze(-1)).squeeze(-1)
        # MSE Error:
        error = torch.mean(torch.pow(states_action_values - target_reward, 2))
        self.log('Loss', error)
        # Optimize model
        self.optimizer.zero_grad()
        error.backward()
        self.optimizer.step()
class ExperienceReplayBuffer():
    """Fixed-size FIFO store of (state, action, reward, next_state, done)
    transitions with random (with-replacement) batch sampling.

    Args:
        size (int): Replay buffer's size.
    """

    def __init__(self, size=100):
        # deque(maxlen=...) silently evicts the oldest transition when full.
        self.buffer = deque(maxlen=size)

    def __len__(self):
        return len(self.buffer)

    def push(self, state, action, reward, next_state, done):
        self.buffer.append((state, action, reward, next_state, done))

    def batch(self, batch_size=32):
        sampled = choices(self.buffer, k=batch_size)
        # Transpose the sampled transitions into five parallel tuples.
        return tuple(zip(*sampled))
class DQN(nn.Module):
    """Three-layer fully connected Q-network: 8 inputs -> 4 action values."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(8, 64)
        self.fc2 = nn.Linear(64, 48)
        self.fc3 = nn.Linear(48, 4)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        # No activation on the output: raw Q-value estimates.
        return self.fc3(hidden)
|
from django.contrib import admin
from .models import RuleSet, ProfilingRecord
class RuleSetAdmin(admin.ModelAdmin):
    """Admin list view for profiling rule sets."""
    list_display = (
        'enabled',
        'uri_regex',
        'user_filter_type',
        'user_group_filter',
    )
class ProfilingRecordAdmin(admin.ModelAdmin):
    """Admin list view for captured request-profiling records."""
    list_display = (
        'start_ts',
        'user',
        'session_key',
        'remote_addr',
        'http_method',
        'request_uri',
        'view_func_name',
        'response_status_code',
        'duration',
    )
# Expose both models in the Django admin with the customised list views.
admin.site.register(RuleSet, RuleSetAdmin)
admin.site.register(ProfilingRecord, ProfilingRecordAdmin)
|
import sys
import os
#sys.path.append(os.path.abspath(sys.path[0]+"/src/streamline/model_selection/models/"))
from ..AbstractRegressorPredictiveModel import AbstractRegressorPredictiveModel
from sklearn.linear_model import Lasso
class LassoRegressorPredictiveModel(AbstractRegressorPredictiveModel):
    """Predictive-model wrapper that builds a cross-validated sklearn Lasso
    regressor through the AbstractRegressorPredictiveModel machinery."""
    #properties

    #constructor
    def __init__(self, X, y, lasso_params, nfolds=3, n_jobs=1, scoring=None,random_grid=False, n_iter=10, verbose=True):
        # Short identifier used in logs and by the base class.
        self._code="lasso"
        if verbose:
            print ("Constructed LassoRegressorPredictiveModel: " +self._code)
        # Explicit base-class call performs the shared regressor setup
        # (data, CV folds, scoring, grid/random search configuration).
        AbstractRegressorPredictiveModel.__init__(self, "regressor", X, y, lasso_params, nfolds, n_jobs, scoring,random_grid, n_iter, verbose)
        self._model = self.constructRegressor(Lasso(), self._random_grid)

    #methods
    def execute(self):
        # Intentionally a stub -- presumably execution is driven elsewhere;
        # TODO confirm before relying on this method.
        pass
|
from typing import Callable
import schedule
from . import constants as c
from .gpsd import GPSD
from .logging import get_logger
from .server import Server
logger = get_logger(__name__)
def schedule_job(job: Callable, seconds: int) -> None:
    """Clear a previously running job, if exists, and launch it again"""
    tag = job.__name__
    schedule.clear(tag)
    # Run once immediately, then recur every `seconds` seconds.
    job()
    schedule.every(seconds).seconds.do(job).tag(tag)
def post_location_job() -> None:
    """Post unsent location list and then post current location"""
    # Make sure we hold an auth token before talking to the server.
    if not Server.token:
        Server.login()
    try:
        location = GPSD.get_location()
    except Exception:
        # No fix available: nothing to send this round.
        logger.exception("Cannot acquire location")
        return
    if Server.token:
        # Flush the backlog first so the current fix is sent last.
        Server.send_unsent_locations()
        Server.post_location(location)
    else:
        # Still unauthenticated: queue the fix for a later flush.
        Server.append_failed_location(location)
def panic_job() -> None:
    """Check for panic mode and reschedule post_location_job if necessary."""
    currently_panic = Server.is_panic_mode()
    # Only act on transitions; keep the schedule untouched otherwise.
    if Server.panic_mode and not currently_panic:
        logger.info("Disabling panic mode")
        schedule_job(post_location_job, c.TIME_NO_PANIC)
    elif currently_panic and not Server.panic_mode:
        logger.info("Enabling panic mode")
        schedule_job(post_location_job, c.TIME_PANIC)
    Server.panic_mode = currently_panic
|
# -*- coding: utf-8 -*-
# Copyright (2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from oneview_redfish_toolkit.api import errors
from oneview_redfish_toolkit.api.redfish_error import RedfishError
from oneview_redfish_toolkit.tests.base_test import BaseTest
class TestRedfishError(BaseTest):
    """Tests for RedfishError class"""

    def test_class_instantiation(self):
        """Tests class instantiation"""
        try:
            redfish_error = RedfishError("GeneralError", "General Error")
        except Exception as e:
            # Bug fix: the message had no {} placeholder, so the original
            # exception text was silently dropped from the failure output.
            self.fail("Failed to instantiate RedfishError. Error: {}".format(e))
        self.assertIsInstance(redfish_error, RedfishError)

    def test_serialize(self):
        """Tests the serialize function result against known result"""
        redfish_error = RedfishError("GeneralError", "General Error")
        result = json.loads(redfish_error.serialize())
        with open(
            'oneview_redfish_toolkit/mockups/errors/'
            'RedfishErrorNoExtendedInfo.json'
        ) as f:
            redfish_error_mockup = json.load(f)
        self.assertEqual(redfish_error_mockup, result)

    def test_add_extended_info_invalid_error_code(self):
        """Tests the add_extended_info invalid error code"""
        redfish_error = RedfishError("GeneralError", "General Error")
        # Bug fix: the original try/except passed vacuously when no
        # exception was raised; assertRaises makes the expectation explicit.
        with self.assertRaises(
                errors.OneViewRedfishResourceNotFoundError) as context:
            redfish_error.add_extended_info(
                "InvalidCode",
                "General Message")
        self.assertEqual(
            context.exception.msg,
            "message_id InvalidCode not found")

    def test_add_extended_info_invalid_message_args(self):
        """Tests the add_extended_info invalid message_args"""
        redfish_error = RedfishError("GeneralError", "General Error")
        # Bug fix: same vacuous-pass problem as above.
        with self.assertRaises(errors.OneViewRedfishError) as context:
            redfish_error.add_extended_info(
                message_id="PropertyValueNotInList",
                message_args=["Only 1, need 2"])
        self.assertEqual(
            context.exception.msg,
            'Message has 2 replacements to be made but 1 args where sent')

    def test_redfish_error_with_extended_info(self):
        """Tests the add_extended_info with two additional info"""
        with open(
            'oneview_redfish_toolkit/mockups/errors/'
            'RedfishErrorExtendedInfo.json'
        ) as f:
            redfish_error_mockup = json.load(f)
        try:
            redfish_error = RedfishError(
                "GeneralError",
                "A general error has occurred. See ExtendedInfo "
                "for more information.")
            redfish_error.add_extended_info(
                message_id="PropertyValueNotInList",
                message_args=["RED", "IndicatorLED"],
                related_properties=["#/IndicatorLED"])
            redfish_error.add_extended_info(
                message_id="PropertyNotWritable",
                message_args=["SKU"],
                related_properties=["#/SKU"])
        except errors.OneViewRedfishError as e:
            # Bug fix: corrected typos and the missing {} placeholder.
            self.fail("Failed to add extended info. Error: {}".format(e))
        result = json.loads(redfish_error.serialize())
        self.assertEqual(redfish_error_mockup, result)
|
# proxy module
from __future__ import absolute_import
from mayavi.sources.image_reader import *
|
from .StructureConsensuLossFunction import StructureConsensuLossFunction
|
#Import("env", "projenv")
import os
import shutil
import glob
import pathlib
def readall(path):
    """Return the entire text content of the file at ``path``."""
    # Use a context manager so the handle is closed even if read() raises.
    with open(path, 'r') as f:
        return f.read()
def create_name_from_filename(filename):
    """Turn a file name into an identifier: dots and spaces become '_'."""
    return filename.replace(".", "_").replace(" ", "_")
EMBED_DIR=os.path.join(os.getcwd(), "embed") # directory containing the files to embed
OUT_DIR=os.path.join(os.getcwd(), "include", "embed") # output directory for the generated .h files
TEMPLATE_FILE=os.path.join(os.getcwd(), "build_script", "template.cpp")
SEP_LENGTH = 90;
print("=" * SEP_LENGTH)
print("create_embed.py executing.")
print(f"EMBED_DIR = {EMBED_DIR}")
print(f"OUT_DIR = {OUT_DIR}")
print("=" * SEP_LENGTH)
# access to global build environment
#print(env.Dump())
# access to project build environment (is used source files in "src" folder)
#print(projenv.Dump())
#
# Script to build cpp file from embed directory
#
# see: https://thingpulse.com/embed-binary-data-on-esp32/
# see: https://docs.platformio.org/en/latest/projectconf/advanced_scripting.html#before-pre-and-after-post-actions
# Recreate the output directory from scratch on every build.
if os.path.exists(OUT_DIR):
    shutil.rmtree(OUT_DIR)
os.mkdir(OUT_DIR)
template = readall(TEMPLATE_FILE)
for file in pathlib.Path(EMBED_DIR).iterdir():
    output = template
    data = readall(file)
    output_name = create_name_from_filename(file.name)
    # Replace the placeholder tokens; generated variable names are uppercase.
    output = output.replace("$$REPLACE_NAME$$", output_name.upper()).replace("$$REPLACE_CONTENT$$", data)
    output_cpp_path = os.path.join(OUT_DIR, output_name + ".h")
    f = open(output_cpp_path, 'w')
    # NOTE(review): assigning write()'s return value to `data` looks like a
    # leftover -- the character count is never used.
    data = f.write(output)
    f.close()
    print(f"generated {output_cpp_path}")
print("=" * SEP_LENGTH)
print("DONE")
print("=" * SEP_LENGTH)
|
#!/usr/bin/python3
import argparse
import glob
import os
# This should be in the remote server in /root/messages_to_download.py
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system and doing at the same
# cruise other data management and system administration tasks.
#
# Sadly there aren't unit tests and we didn't have time to refactor the code
# during the cruise, which is really needed.
#
# Carles Pina (carles@pina.cat) and Jen Thomas (jenny_t152@yahoo.co.uk), 2016-2017.
def unread_messages_files():
    """Collect the paths of all unread mail files under /home/*/Maildir/new."""
    messages_files = []
    for home_directory in glob.glob("/home/*"):
        # Maildir convention: unread messages live in the "new" subdirectory.
        pattern = os.path.join(home_directory, "Maildir/new", "*")
        messages_files.extend(glob.glob(pattern))
    return messages_files
if __name__ == "__main__":
parser = argparse.ArgumentParser()
for message in unread_messages_files():
print(message)
|
"""
This scripts allows to play with the 4 parameters of the backlash model.
Based on matplotlib's interactive slider demo
.. seealso::
https://matplotlib.org/3.1.1/gallery/widgets/slider_demo.html
"""
from typing import Union
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.widgets import Button, Slider
from blm import BacklashModel
def lin_dec_sin(t: np.ndarray, t_end: float, amp: float, freq: float):
    """Linearly decaying cosine wave.

    Returns ``amp * (1 - t / t_end) * cos(2*pi*freq*t)``: amplitude decays
    linearly from ``amp`` at ``t = 0`` to zero at ``t = t_end``.
    (The original docstring said "sinus", but the implementation uses cosine.)
    """
    return amp * (1 - t / t_end) * np.cos(2 * np.pi * freq * t)
def update(val):
    """Slider callback: rebuild the backlash model from the current (global)
    slider values, re-simulate the trajectory and redraw both subplots.

    ``val`` (the new slider value) is ignored; values are read from the
    global sliders instead.
    """
    # Re-create the model so its internal state matches the new parameters.
    reset_lbm_from_slider_values(x_init=lin_dec_sin(t_grid[0], t_end, amp, freq))
    # Generate backlash data
    x_hist = []
    for t in t_grid:
        x = lin_dec_sin(t, t_end, amp, freq)
        x_hist.append(G_bl_model(x).copy())
    x_hist = np.expand_dims(np.stack(x_hist), axis=1)
    # Update the backlash curve with the new trajectory
    G_lines.set_ydata(x_hist)
    # Update the zone data (lower/upper backlash bounds in input space)
    u_grid = np.linspace(-5, 5, 2001)
    u_grid_lo = G_slider_m_lo.val * (u_grid + G_slider_c_lo.val)
    u_grid_up = G_slider_m_up.val * (u_grid - G_slider_c_up.val)
    G_bound_lo.set_ydata(u_grid_lo)
    G_bound_up.set_ydata(u_grid_up)
    # Drop the previous fill_between artist before drawing the new zone.
    axs[1].collections.clear()
    axs[1].fill_between(u_grid, u_grid_lo, u_grid_up, color="gray", alpha=0.3, label="backlash zone")
    fig.canvas.draw_idle()
def reset(event):
    """Button callback: restore every parameter slider to its initial value."""
    for slider in (G_slider_m_lo, G_slider_m_up, G_slider_c_lo, G_slider_c_up):
        slider.reset()
def reset_lbm_from_slider_values(x_init: Union[float, int, list, np.ndarray]):
    """Use the global slider values to create a new linear backlash model."""
    # Catch infeasible values: a slope of exactly zero would make the bound
    # degenerate, so nudge it to a tiny positive value (writes straight into
    # the Slider's .val attribute).
    if G_slider_m_lo.val == 0:
        G_slider_m_lo.val = 1e-4
    if G_slider_m_up.val == 0:
        G_slider_m_up.val = 1e-4
    # Set values on the global model, re-initialising its state to x_init.
    G_bl_model.reset(G_slider_m_lo.val, G_slider_m_up.val, G_slider_c_lo.val, G_slider_c_up.val, x_init=x_init)
if __name__ == "__main__":
    # Setup
    # dt: time step [s]; t_end: horizon [s]; amp/freq parametrize the
    # linearly decaying excitation signal.
    dt = 0.002
    t_end = 5.0
    t_grid = np.linspace(0.0, t_end, int(t_end / dt))
    amp = 4.0
    freq = 1.0
    x = lin_dec_sin(t_grid, t_end, amp, freq)
    # Create figure
    mpl.style.use("seaborn")
    fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(16, 9))
    plt.subplots_adjust(left=0.05, bottom=0.3)
    # Create sliders: m_* are the slopes of the lower/upper backlash bounds,
    # c_* their offsets (the four parameters of the backlash model).
    lim_m = 5.0
    lim_c = 10.0
    delta_m = 0.02
    delta_c = 0.01
    ax_m_lo = plt.axes([0.05, 0.10, 0.7, 0.03])
    ax_m_up = plt.axes([0.05, 0.05, 0.7, 0.03])
    ax_c_lo = plt.axes([0.05, 0.20, 0.7, 0.03])
    ax_c_up = plt.axes([0.05, 0.15, 0.7, 0.03])
    G_slider_m_lo = Slider(ax_m_lo, "m_lo", -lim_m, lim_m, valinit=1.0, valstep=delta_m)
    G_slider_m_up = Slider(ax_m_up, "m_up", -lim_m, lim_m, valinit=1.0, valstep=delta_m)
    G_slider_c_lo = Slider(ax_c_lo, "c_lo", 0.0, lim_c, valinit=0.0, valstep=delta_c)
    G_slider_c_up = Slider(ax_c_up, "c_up", 0.0, lim_c, valinit=0.0, valstep=delta_c)
    G_slider_m_lo.on_changed(update)
    G_slider_m_up.on_changed(update)
    G_slider_c_lo.on_changed(update)
    G_slider_c_up.on_changed(update)
    # Create reset button
    resetax = plt.axes([0.8, 0.115, 0.1, 0.05])
    button_reset = Button(resetax, "reset", hovercolor="0.975")
    button_reset.on_clicked(reset)
    # Initial plotting of the curve: both lines start as the raw signal;
    # update() later replaces the second line's ydata with the filtered one.
    axs[0].plot(t_grid, x, lw=2, label="original", color="C0")
    (G_lines,) = axs[0].plot(t_grid, x, lw=2, label="backlash", color="C2")
    # Plot the backlash decision boundaries
    u_grid = np.linspace(-5, 5, 2001)
    u_grid_lo = G_slider_m_lo.val * (u_grid + G_slider_c_lo.val)
    u_grid_up = G_slider_m_up.val * (u_grid - G_slider_c_up.val)
    (G_bound_lo,) = axs[1].plot(u_grid, u_grid_lo, label="bound lo", color="C1")
    (G_bound_up,) = axs[1].plot(u_grid, u_grid_up, label="bound up", color="C3")
    axs[1].fill_between(u_grid, u_grid_lo, u_grid_up, color="gray", alpha=0.3, label="backlash zone")
    # Annotate
    axs[0].set_xlabel("time [s]")
    axs[0].set_ylabel("output (with and without backlash)")
    axs[0].set_ylim([-6, 6])
    axs[0].legend(
        bbox_to_anchor=(0.2, 1.02, 0.6, 0.102),
        loc="lower left",
        ncol=2,
        mode="expand",
        borderaxespad=0.0,
        frameon=False,
    )
    axs[1].axis("equal")
    axs[1].legend(
        bbox_to_anchor=(0.1, 1.02, 0.8, 0.102),
        loc="lower left",
        ncol=3,
        mode="expand",
        borderaxespad=0.0,
        frameon=False,
    )
    # Backlash model: created after the sliders are wired up. update() reads
    # this global; slider callbacks cannot fire before plt.show(), so the
    # model exists by the time it is first needed.
    G_bl_model = BacklashModel(G_slider_m_lo.val, G_slider_m_up.val, G_slider_c_lo.val, G_slider_c_up.val)
    plt.show()
|
from typing import Tuple
# Public API of this exceptions module; a tuple keeps it immutable.
__all__: Tuple[str, ...] = (
    "AlexFlipnoteException",
    "BadRequest",
    "NotFound",
    "InternalServerError",
    "Forbidden",
    "HTTPException",
)
class AlexFlipnoteException(Exception):
    """Base exception for this package; every other error derives from it."""
    pass
class BadRequest(AlexFlipnoteException):
    """Error named after the HTTP "Bad Request" (400) condition."""
    pass
class NotFound(AlexFlipnoteException):
    """Error named after the HTTP "Not Found" (404) condition."""
    pass
class InternalServerError(AlexFlipnoteException):
    """Error named after the HTTP "Internal Server Error" (500) condition."""
    pass
class Forbidden(AlexFlipnoteException):
    """Error named after the HTTP "Forbidden" (403) condition."""
    pass
class HTTPException(AlexFlipnoteException):
    """Error carrying a failed HTTP response, its status code and a message.

    Attributes:
        response: the raw response object (must expose a ``status`` attribute).
        status: status code copied from ``response.status``.
        message: human-readable error description.
    """

    def __init__(self, response, message):
        # Pass the message to Exception so str(exc) and tracebacks show it;
        # the original stored it without calling super().__init__().
        super().__init__(message)
        self.response = response
        self.status = response.status
        self.message = message
|
import lgsvl
import time
from environs import Env
from ..scenario import Scenario
from .common import spawn_state, place_car_on_the_point
env = Env()
# Simulator and Apollo bridge endpoints, overridable via environment variables.
LGSVL__SIMULATOR_HOST = env.str("LGSVL__SIMULATOR_HOST", "127.0.0.1")
LGSVL__SIMULATOR_PORT = env.int("LGSVL__SIMULATOR_PORT", 8181)
LGSVL__AUTOPILOT_0_HOST = env.str("LGSVL__AUTOPILOT_0_HOST", "127.0.0.1")
LGSVL__AUTOPILOT_0_PORT = env.int("LGSVL__AUTOPILOT_0_PORT", 9090)
# Maximum run time before the scenario counts as failed.
TIME_LIMIT = 60 # seconds
# Wait until an ego vehicle approaches this controllable object within 50 meters
# Change current state to green and wait for 60s, red & yellow - 0s
# Loop over this control policy from the beginning
TRAFFIC_LIGHT_POLICY = "trigger=50;green=60;yellow=0;red=0;loop"
class StraightModel:
    """Scenario runner: drive the EGO straight to the scenario destination via Apollo."""

    @staticmethod
    def run(scenario: Scenario, time_limit: int = TIME_LIMIT):
        """Run `scenario` in the LGSVL simulator.

        Forces all traffic lights green, spawns the EGO at the scenario start,
        routes Apollo to the end point and simulates in 0.5 s steps until the
        EGO is within 10 m of the destination or `time_limit` seconds elapse.

        Raises:
            Exception: when the time limit expires before arrival.
        """
        print("Map {} Non-NPC: {} - ".format(scenario.map, scenario.ID), end="")
        sim = lgsvl.Simulator(LGSVL__SIMULATOR_HOST, LGSVL__SIMULATOR_PORT)
        sim.load(scenario.map)
        # Get a list of controllable objects and apply the green-light policy.
        controllables = sim.get_controllables("signal")
        for c in controllables:
            signal = sim.get_controllable(c.transform.position, "signal")
            signal.control(TRAFFIC_LIGHT_POLICY)
        START_POINT = lgsvl.geometry.Vector(scenario.start[0], 0, scenario.start[1])
        ego_state = spawn_state(sim)
        ego_state = place_car_on_the_point(sim=sim, point=START_POINT, state=ego_state)
        ego = sim.add_agent("2e9095fa-c9b9-4f3f-8d7d-65fa2bb03921", lgsvl.AgentType.EGO, ego_state)
        ego.connect_bridge(LGSVL__AUTOPILOT_0_HOST, LGSVL__AUTOPILOT_0_PORT)
        dv = lgsvl.dreamview.Connection(sim, ego, LGSVL__AUTOPILOT_0_HOST)
        dv.set_hd_map(scenario.dvmap)
        dv.set_vehicle('Lincoln2017MKZ LGSVL')
        modules = [
            'Localization',
            'Transform',
            'Routing',
            'Prediction',
            'Planning',
            'Control'
        ]
        dv.setup_apollo(scenario.end[0], scenario.end[1], modules)
        destination = lgsvl.geometry.Vector(scenario.end[0], 0, scenario.end[1])
        # Run a simulation; both outcomes are signalled via TestException.
        is_test_failed = False
        try:
            t0 = time.time()
            while True:
                sim.run(0.5)
                currentPos = ego.state.position
                # print(lgsvl.evaluator.separation(currentPos, destination))
                if lgsvl.evaluator.separation(currentPos, destination) < 10:
                    raise lgsvl.evaluator.TestException(
                        "PASSED: EGO does reach to destination, distance {} < 10!".format(lgsvl.evaluator.separation(currentPos, destination))
                    )
                else:
                    if time.time() - t0 > time_limit:
                        is_test_failed = True
                        raise lgsvl.evaluator.TestException(
                            "FAILED: Timeout! EGO does not reach to destination, distance {} > 10!".format(lgsvl.evaluator.separation(currentPos, destination))
                        )
        except lgsvl.evaluator.TestException as e:
            print("{}".format(e))
        finally:
            # Always shut down Apollo and close the simulator, even when an
            # unexpected exception escapes the loop (the original leaked the
            # simulator connection in that case).
            dv.disable_apollo()
            sim.close()
        # Send a message
        if is_test_failed:
            raise Exception("TESTING FAILED!")
'''
This sets the callback for reloading a user from the session. The function
you set should take a user ID (a unicode) and return a user object, or
None if the user does not exist.
'''
from google.appengine.ext import ndb
def user_callback(user_id):
    """Resolve a session user ID back to its datastore model.

    The session always hands us a unicode string, so the value is treated
    as an NDB urlsafe key rather than the usual numeric datastore ID.
    Returns None when no entity exists for that key.
    """
    return ndb.Key(urlsafe=user_id).get()
|
import json
import os
from os import path as osp
from random import choice
import re
import shutil
from subprocess import PIPE, Popen, check_call, check_output
import time
import threading
import urllib2
import sys
from behave import given, when, then # pylint: disable=E0611
from pyvs import helpers
from pyvs.helpers import LONG_FFPROBESIZE
from pyvs.vsperf import ASSETS
from pyvs.vsfiles import remove_if_exist, extend_path
# Fail fast: every asset path below is resolved relative to ASSETS.
if ASSETS is None:
    raise Exception("env variable VIDEOSTITCH_ASSETS is not set")
# Well-known file locations used by the step implementations below.
ALGO_FILE = osp.join(ASSETS, "algo.json")
EXPOSURE_OUTPUT = osp.join(ASSETS, "videoformat01", "res.ptv")
RTMP_INPUT_PTV = osp.join(ASSETS, "rtmp", "rtmp_input.ptv")
INTERNAL_RTMP_SERVER = "10.0.0.175"
if sys.platform == "win32":
    # name conflict with system tool,
    # see e.g. https://savage.net.au/ImageMagick/html/install-convert.html
    IMAGEMAGICK_CONVERT = "im-convert"
else:
    IMAGEMAGICK_CONVERT = "convert"
# Utils {{{
def normalize(path):
    """Resolve `path` relative to the assets root directory."""
    return osp.join(ASSETS, path)
def remove_output_file(path):
    """Delete `path`, failing loudly when the expected output file is missing."""
    try:
        os.remove(path)
    except OSError:
        raise Exception("the output file {} does not exist".format(path))
def check_json(path):
    """Return True when the file at `path` parses as valid JSON."""
    with open(path, 'r') as f:
        raw = f.read()
    try:
        json.loads(raw)
    except ValueError:
        return False
    return True
def json_file_to_dict(path, ndigits=None):
    """Parse the JSON file at `path`.

    When `ndigits` is truthy, every float in the document is rounded to that
    many digits after the decimal point (useful for fuzzy comparisons).
    """
    with open(path, 'r') as f:
        raw = f.read()
    if not ndigits:
        return json.loads(raw)
    return json.loads(raw, parse_float=lambda x: round(float(x), int(ndigits)))
def integrity_check(path):
    """Probe `path` with avprobe; return ((stdout, stderr), returncode)."""
    cmd = ["avprobe", path]
    try:
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    except OSError:
        raise Exception("avprobe is not in your PATH")
    return proc.communicate(), proc.returncode
def alignment_check(path):
    """Run ffprobe (JSON output, stream info, frame counting) on `path`;
    return ((stdout, stderr), returncode)."""
    cmd = ["ffprobe", "-of", "json", "-show_streams", "-count_frames", path]
    try:
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    except OSError:
        raise Exception("ffprobe is not in your PATH")
    return proc.communicate(), proc.returncode
def atomic_check(path):
    """List the MP4 atom tree of `path` with AtomicParsley (-T);
    return ((stdout, stderr), returncode)."""
    cmd = ["AtomicParsley", path, "-T"]
    try:
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    except OSError:
        raise Exception("AtomicParsley is not in your PATH")
    return proc.communicate(), proc.returncode
#}}}
# Given {{{
# One implementation registered for many algorithm-selection phrasings: each
# step copies the named algorithm description file into ALGO_FILE, where a
# subsequent "when ... launch" step expects to find it.
@given('I use {file_name:S} for synchronization')
@given('I use {file_name:S} for exposure')
@given('I use {file_name:S} for photometric calibration')
@given('I use {file_name:S} for calibration presets maker')
@given('I use {file_name:S} for calibration presets application')
@given('I use {file_name:S} for calibration deshuffling')
@given('I use {file_name:S} for epipolar')
@given('I use {file_name:S} for calibration')
@given('I use {file_name:S} for scoring')
@given('I use {file_name:S} for mask')
@given('I use {file_name:S} for autocrop')
def given_exposure(ctx, file_name):
    # file_name is resolved relative to the test run's utils directory.
    shutil.copy(osp.join(ctx.utils, file_name), ALGO_FILE)
@when('I start the RTMP flow')
@given('There is an RTMP flow')
def given_rtmp_started(ctx):
    """Start a background videostitch-cmd that feeds the internal RTMP server.

    The waiter thread is stored on ctx.rtmp_flow so the
    'I wait the RTMP flow to stop' step can join it later.
    """
    rtmp_tpl = osp.join(ctx.utils, "assets_ptv", "rtmp", "rtmp.tpl")
    # generate a PTV with the correct address
    with open(rtmp_tpl, "r") as f:
        text = f.read()
    text = text.replace("##ADDRESS##", "rtmp://{}:1935/live/{}".format(
        INTERNAL_RTMP_SERVER, ctx.mac))
    with open(RTMP_INPUT_PTV, "w") as f:
        f.write(text)
    args = [
        "-i", RTMP_INPUT_PTV,
        "-p", ctx.vahplugins,
        "-f", "0",
        "-l", "999",
    ]
    cmd = osp.join(ctx.studio, "videostitch-cmd")
    # Fork a videostitch-cmd in the background
    proc = Popen([cmd] + args)
    ctx.rtmp_flow = threading.Thread(target=proc.communicate)
    ctx.rtmp_flow.start()
    # Give the stream time to come up before dependent steps run.
    time.sleep(5)
@given('I generated {ptv:S} with {tpl:S}')
def given_generate_ptv(ctx, ptv, tpl):
    """Instantiate a PTV file from a template, pointing it at the internal RTMP server."""
    tpl_path = osp.join(ctx.utils, "assets_ptv", tpl)
    ptv_path = osp.join(ctx.data, ptv)
    with open(tpl_path, "r") as src:
        content = src.read()
    rtmp_url = "rtmp://{}:1935/live/{}".format(
        INTERNAL_RTMP_SERVER, ctx.mac)
    content = content.replace("##ADDRESS##", rtmp_url)
    with open(ptv_path, "w") as dst:
        dst.write(content)
# }}}
# When {{{
def when_launch_control_cmd(ctx, control, tool, ptv, args, generated=False, from_repo=False):
    """Run `tool` on `ptv`, optionally wrapped in a `control` command prefix,
    capturing the command line, output and exit code on the behave context.

    generated -- tolerate the ptv missing from the repo (it was produced earlier)
    from_repo -- use the ptv in place in the repo instead of copying to ASSETS
    """
    if from_repo:
        destination = osp.join(ctx.utils, "assets_ptv", ptv)
    else:
        destination = osp.join(ASSETS, ptv)
    # Only these tools take a ptv that must first be staged at `destination`.
    # NOTE(review): with from_repo=True, source and destination are the same
    # path (shutil.copy onto itself), and os.remove() below deletes the repo's
    # own ptv after the run — confirm this is intended.
    if tool in ["videostitch-cmd", "undistort", "depth"]:
        try:
            shutil.copy(osp.join(ctx.utils, "assets_ptv", ptv),
                        destination)
        except IOError:
            if not generated:
                raise
    cmd = [
        osp.join(ctx.studio, tool),
        "-i", destination,
        "-p", ctx.plugins,
        "-p", ctx.vahplugins,
    ] + args.split()
    if control is not None:
        # Optional wrapper command prepended before the tool's own command line.
        cmd = control.split() + cmd
    if ctx.plugins_gpu is not None:
        cmd += ["-p", ctx.plugins_gpu]
    try:
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    except OSError:
        raise Exception("{} not installed in {}".format(tool, ctx.studio))
    # Record everything later "then" steps assert against.
    ctx.cmd = " ".join(cmd)
    ctx.cmd_output = proc.communicate()
    print(cmd)
    print(ctx.cmd_output)
    ctx.res = proc.returncode
    # The staged ptv is single-use; clean it up after the run.
    os.remove(destination)
@when('I launch {tool:S} with {ptv:S} and "{args}"')
def when_launch_cmd(ctx, tool, ptv, args, generated=False, from_repo=False):
    # Thin wrapper: launch without any control (wrapper) command prefix.
    when_launch_control_cmd(ctx, None, tool, ptv, args, generated, from_repo)
@when('I launch {tool:S} with {ptv:S} and "{args}" in the background')
def when_launch_cmd_background(ctx, tool, ptv, args, from_repo=True):
    # Run the launch step in a thread; stored on ctx.cmd_background so a
    # later step can join it.
    ctx.cmd_background = threading.Thread(
        target=when_launch_cmd,
        args=(ctx, tool, ptv, args, False, from_repo)
    )
    ctx.cmd_background.start()
def when_launch_control_cmd_background(ctx, control, tool, ptv, args, from_repo=True):
    # Same as above, but with a control (wrapper) command prefix.
    ctx.cmd_background = threading.Thread(target=when_launch_control_cmd, args=(ctx, control, tool, ptv, args, False, from_repo))
    ctx.cmd_background.start()
@when('I launch videostitch-cmd with generated {ptv:S} and "{args}"')
def when_launch_cmd_generated(ctx, ptv, args):
    # generated=True: tolerate the ptv missing from the assets template dir.
    when_launch_cmd(ctx, "videostitch-cmd", ptv, args, generated=True)
@when('I launch videostitch-cmd with {ptv:S} from repo and "{args}"')
def when_launch_cmd_repo(ctx, ptv, args):
    # from_repo=True: run the ptv from the repo's assets_ptv directory.
    when_launch_cmd(ctx, "videostitch-cmd", ptv, args, from_repo=True)
@when('I launch {tool:S} for calibration with {ptv:S} and "{args}"')
@when('I launch {tool:S} for synchronization with {ptv:S}')
@when('I launch {tool:S} for exposure with {ptv:S}')
@when('I launch {tool:S} for synchronization with {ptv:S} and "{args}"')
@when('I launch {tool:S} for exposure with {ptv:S} and "{args}"')
@when('I launch {tool:S} for photometric calibration with {ptv:S} and "{args}"')
@when('I launch {tool:S} for calibration presets maker with {ptv:S} and "{args}"')
@when('I launch {tool:S} for calibration presets application with {ptv:S} and "{args}"')
@when('I launch {tool:S} for calibration deshuffling with {ptv:S} and "{args}"')
@when('I launch {tool:S} for epipolar with {ptv:S} and "{args}"')
@when('I launch {tool:S} for scoring with {ptv:S} and "{args}"')
@when('I launch {tool:S} for mask with {ptv:S} and "{args}"')
@when('I launch {tool:S} for autocrop with {ptv:S} and "{args}"')
def when_launch_algo(ctx, tool, ptv, args=""):
    """Run `tool` with an --apply_algos flag pointing at ALGO_FILE (prepared
    by a 'given I use ...' step), then remove the algo file."""
    # Separate the appended flag from caller-supplied args with a space; the
    # original did `args += "--apply_algos ..."`, which glued the flag to the
    # last token of a non-empty `args` (e.g. "-f 0--apply_algos ...") before
    # when_launch_control_cmd splits it.
    args += " --apply_algos {} ".format(ALGO_FILE)
    when_launch_cmd(ctx, tool, ptv, args)
    os.remove(ALGO_FILE)
@when('I launch {tool:S} without arguments')
def when_launch_empty_cmd(ctx, tool):
    """Start `tool` with no arguments, capturing its output and exit code."""
    binary = osp.join(osp.join(ctx.studio, tool))
    try:
        proc = Popen([binary], stdout=PIPE, stderr=PIPE)
    except OSError:
        raise Exception("{} not installed in {}".format(tool, ctx.studio))
    ctx.output = proc.communicate()
    ctx.res = proc.returncode
@when('I launch videostitch-cmd with "{args}"')
def when_launch_cmd_without_ptv(ctx, args):
    """Launch videostitch-cmd with a blank -i input; used by steps that only
    exercise flag handling. Mirrors when_launch_control_cmd's capture logic."""
    cmd = [
        osp.join(osp.join(ctx.studio, "videostitch-cmd")),
        "-i", " ",
        "-p", ctx.plugins,
    ] + args.split()
    # Record the launch time (presumably for later duration assertions — not
    # consumed in this chunk).
    ctx.start_time = time.time()
    if ctx.plugins_gpu is not None:
        cmd += ["-p", ctx.plugins_gpu]
    try:
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    except OSError:
        raise Exception("videostitch-cmd not installed in {}".format(
            ctx.studio))
    ctx.cmd = " ".join(cmd)
    ctx.cmd_output = proc.communicate()
    print(cmd)
    print(ctx.cmd_output)
    ctx.res = proc.returncode
def gen_ptv_with_param(template, name, value):
    """Substitute every ``[name]`` placeholder in `template` with `value`."""
    placeholder = re.compile('(\\[{}\\])'.format(name))
    return placeholder.sub(value, template)
@when('I test and check videostitch-cmd with "{audio_codec}" and "{sampling_rate}" and "{sample_format}" and "{channel_layout}" and "{audio_bitrate}"')
def when_launch_and_check_audio_conf(
        ctx, audio_codec, sampling_rate, sample_format, channel_layout,
        audio_bitrate):
    """Exhaustively test every combination of the space-separated audio
    parameter lists: generate a ptv from the template, run videostitch-cmd
    on it and verify the produced output's audio fields."""
    template_ptv = osp.join(ctx.utils, "assets_ptv", "videoformat01",
                            "template_audio_output.ptv")
    # Close the template handle deterministically; the original leaked it
    # via open(...).read().
    with open(template_ptv, "r") as tpl_file:
        template = tpl_file.read()
    codecs = audio_codec.split(" ")
    rates = sampling_rate.split(" ")
    formats = sample_format.split(" ")
    layouts = channel_layout.split(" ")
    bitrates = audio_bitrate.split(" ")
    print_str = "Test codec {} and rate {} and format {} and layout {} and bitrate {}"
    output = osp.join("videoformat01","output.mp4")
    for codec in codecs:
        for rate in rates:
            for sp_format in formats:
                for layout in layouts:
                    for bitrate in bitrates:
                        # Fill in one parameter combination.
                        ptv_gen = gen_ptv_with_param(template, "audio_codec",
                                                     '"{}"'.format(codec))
                        ptv_gen = gen_ptv_with_param(ptv_gen, "sampling_rate",
                                                     str(rate))
                        ptv_gen = gen_ptv_with_param(ptv_gen, "sample_format",
                                                     '"{}"'.format(sp_format))
                        ptv_gen = gen_ptv_with_param(ptv_gen, "audio_bitrate",
                                                     str(bitrate))
                        ptv_gen = gen_ptv_with_param(
                            ptv_gen, "channel_layout", '"{}"'.format(layout))
                        ptv_file = osp.join(
                            ctx.utils, "assets_ptv", "videoformat01",
                            "test_audio_output.ptv")
                        with open(ptv_file, "w") as f:
                            f.write(ptv_gen)
                        ptv_relative_path = osp.join("videoformat01",
                                                     "test_audio_output.ptv")
                        when_launch_cmd(ctx, "videostitch-cmd",
                                        ptv_relative_path, "-f 0 -l 200")
                        print(print_str.format(codec, rate, sp_format, layout,
                                               bitrate))
                        # Verify the produced file carries the requested values.
                        then_the_field_equal(ctx, output, "sampling_rate",
                                             rate)
                        then_the_field_equal(ctx, output, "channel_layout",
                                             layout)
                        then_the_field_equal(ctx, output, "sample_format",
                                             sp_format)
@when('I compare {output:S} with {ref:S}')
def when_compare_picture(ctx, output, ref):
    """Compare two images with ImageMagick `compare` using the MAE metric.

    The metric is printed by `compare` on stderr, which is what gets stored
    in ctx.output; the compared pair is also recorded in ctx.pictures.
    """
    ref = normalize(ref)
    output = normalize(output)
    try:
        proc = Popen(["compare",
                      "-metric",
                      "MAE",
                      ref,
                      output,
                      osp.join(ctx.data, "null"),
                      ],
                     stdout=PIPE,
                     stderr=PIPE,
                     )
    except OSError:
        raise Exception("compare is not in your PATH")
    # [1] == stderr: `compare` writes the metric value there.
    ctx.output = proc.communicate()[1]
    ctx.res = proc.returncode
    ctx.pictures.append([output, ref])
@when('I replace transparency with a red background in {output:S}')
def when_replace_transparency(ctx, output):
    """Flatten the alpha channel of `output` onto a red background, in place.

    Renamed from `when_compare_picture`: the original redefined (and thereby
    shadowed) the comparison step implementation above. behave dispatches by
    decorator string, so the rename is transparent to feature files.
    """
    output = normalize(output)
    try:
        proc = Popen([IMAGEMAGICK_CONVERT,
                      "-background",
                      "red",
                      "-alpha",
                      "remove",
                      output,
                      output
                      ],
                     stdout=PIPE,
                     stderr=PIPE,
                     )
    except OSError:
        raise Exception(IMAGEMAGICK_CONVERT + " is not in your PATH. Expecting convert.exe to be renamed to im-convert.exe on Windows.")
    ctx.output = proc.communicate()[1]
    ctx.res = proc.returncode
@when('I analyze score of {output:S}')
def when_analyze_score(ctx, output):
    """Parse the scoring output ptv and stash its "score" value on the context."""
    output = osp.join(ASSETS, output)
    with open(output, 'r') as f:
        data = f.read()
    try:
        data = json.loads(data)
    except ValueError:
        # Typo fix: the message previously read "ouput".
        assert 0, "the output ptv is invalid"
    if "score" not in data[0]:
        assert 0, "no score in ptv"
    ctx.output = data[0]["score"]
@when('I analyze uncovered_ratio of {output:S}')
def when_analyze_uncovered_ratio(ctx, output):
    """Parse the output ptv and stash its "uncovered_ratio" value on the context."""
    output = osp.join(ASSETS, output)
    with open(output, 'r') as f:
        data = f.read()
    try:
        data = json.loads(data)
    except ValueError:
        # Typo fix: the message previously read "ouput".
        assert 0, "the output ptv is invalid"
    if "uncovered_ratio" not in data[0]:
        # The original said "no score in ptv" (copied from the score step);
        # report the field actually being checked.
        assert 0, "no uncovered_ratio in ptv"
    ctx.output = data[0]["uncovered_ratio"]
@when('I check {output:S} integrity with avprobe')
def when_check_avprobe(ctx, output):
    """Probe `output` with avprobe, then delete it (single-use output file)."""
    path = normalize(output)
    ctx.output, ctx.res = integrity_check(path)
    remove_output_file(path)
@when('I check files {wildcard:S} integrity with avprobe')
def when_check_multiple_avprobe(ctx, wildcard):
    """Probe every file matching `wildcard` with avprobe, deleting each after."""
    wildcard = normalize(wildcard)
    ctx.output = []
    ctx.res = []
    for path in extend_path(wildcard):
        probe_out, probe_rc = integrity_check(path)
        ctx.output.append(probe_out)
        ctx.res.append(probe_rc)
        remove_output_file(path)
@when('I check files {output:S} {fformat:S} streams alignment with avprobe')
def when_check_alignment_avprobe(ctx, output, fformat):
    """For each numbered output segment, assert its two streams start
    together, have matching durations, and decoded all expected frames."""
    wildcard = normalize("{}-*.{}".format(output,fformat))
    # NOTE(review): iterates len-2 segments starting at "-1." — presumably the
    # last segments are deliberately skipped as partial; confirm intent.
    for i in xrange(len(extend_path(wildcard))-2):
        path = normalize("{}-{}.{}".format(output,i+1,fformat))
        assert osp.isfile(path), "the file {} does not exist".format(path)
        r1, _ = alignment_check(path)
        joutput = json.loads(r1[0])
        # Start times and durations of the first two streams per ffprobe.
        start0 = float(joutput['streams'][0]['start_time'])
        start1 = float(joutput['streams'][1]['start_time'])
        duration0 = float(joutput['streams'][0]['duration'])
        duration1 = float(joutput['streams'][1]['duration'])
        print_str = "the file {} streams start_time are not aligned: {} <> {}"
        assert abs(start0 - start1) < 0.03, print_str.format(path, start0,
                                                             start1)
        print_str = "the file {} streams duration are not aligned: {} <> {}"
        # Compare stream end times (duration + start) within 50 ms.
        assert abs(duration0 - duration1 + start0 - start1) < 0.05,\
            print_str.format(path, duration0 + start0, duration1 + start1)
        print_str = "the file {} did not decode the expected number of frames for stream {} : {} <> {}"
        for k in xrange(len(joutput['streams'])):
            stream =joutput['streams'][k]
            nb_frames = stream['nb_frames']
            nb_read_frames = stream['nb_read_frames']
            assert nb_frames == nb_read_frames, print_str.format(
                path, k, nb_frames, nb_read_frames)
@when('I rename {inputf} to {outputf}')
def when_rename_file(ctx, inputf, outputf):
    """Rename an asset file, clobbering any existing destination."""
    src = normalize(inputf)
    dst = normalize(outputf)
    remove_if_exist(dst)
    os.rename(src, dst)
@when('I launch autocrop-cmd with input {input_picture:S} and output {output:S}')
def when_launch_autocrop_cmd(ctx, input_picture, output):
    """Run autocrop-cmd on an asset picture, capturing command line, output
    and exit code on the context (-d flag: meaning not documented here —
    presumably debug/dump output; confirm against the tool's help)."""
    cmd = [
        osp.join(osp.join(ctx.studio, "autocrop-cmd")),
        "-i", osp.join(ASSETS, input_picture),
        "-o", osp.join(ASSETS, output),
        "-d"
    ]
    try:
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    except OSError:
        raise Exception("autocrop-cmd not installed in {}".format(ctx.studio))
    ctx.cmd = " ".join(cmd)
    ctx.cmd_output = proc.communicate()
    ctx.res = proc.returncode
@when('I wait the RTMP flow to stop')
def when_stop_rtmp_flow(ctx):
    """Block until the background RTMP feeder thread finishes, then drop it."""
    ctx.rtmp_flow.join()
    ctx.rtmp_flow = None
@when('I compare video {generated:S} to {ref:S}')
def when_compare_video(ctx, generated, ref):
    """Run ffmpeg's psnr filter over the two videos, writing per-frame stats
    to psnr.log for the 'mse_avg is under' step to parse."""
    generated = osp.join(ASSETS, generated)
    ref = osp.join(ASSETS, ref)
    ctx.log = "psnr.log"
    cmd = [
        "ffmpeg",
        "-i", generated,
        "-i", ref,
        "-lavfi",
        "psnr='stats_file={}'".format(ctx.log),
        "-f", "null", "-",
    ]
    # check_call raises immediately if ffmpeg fails; its output is discarded.
    check_call(cmd, stdout=PIPE, stderr=PIPE)
# }}}
# Then {{{
@then('I expect the command to {res:S}')
def cmd_should_have_res(ctx, res):
    """Assert the last command's exit status; `res` is "fail" or "succeed"."""
    if res == "fail":
        assert ctx.res, "the command should have failed:\n{}\n{}".format(
            ctx.cmd_output, ctx.cmd)
        return
    if res == "succeed":
        print_str = "the command should have succeeded (returned {}):\n{}\n{}"
        assert not ctx.res, print_str.format(ctx.res, ctx.cmd_output, ctx.cmd)
        return
    raise Exception("Wrong usage of step")
@then('I expect the command to {res:S} and {stdx:S} contains "{keyword}"')
def the_cmd_should_have_res_keyword(ctx, res, stdx, keyword):
    """Assert exit status and that the chosen output stream contains `keyword`."""
    streams = {"stdout": 0, "stderr": 1}
    if stdx not in streams:
        raise Exception("Wrong usage of step")
    stdx = streams[stdx]
    if res == "fail":
        assert ctx.res, "the command should have failed:\n{}\n{}".format(
            ctx.cmd_output, ctx.cmd)
    elif res == "succeed":
        assert not ctx.res,\
            "the command should have succeeded:\n{}\n{}".format(
                ctx.cmd_output, ctx.cmd)
    else:
        raise Exception("Wrong usage of step")
    assert keyword in ctx.cmd_output[stdx],\
        "the captured stdout does not contain keyword: {}".format(keyword)
@then('I expect the command to fail with code {code:d}')
def cmd_should_have_code(ctx, code):
    """Assert the exact exit code of the last command."""
    msg = "not the expected exit code expected " +\
        "{}, received {}".format(code, ctx.res)
    assert ctx.res == code, msg
@then('I expect the comparison to succeed')
@then('I expect the comparison error to be less than {error:f}')
def then_comparison_ok(ctx, error=0.05):
    """Check the MAE reported by ImageMagick `compare` is below `error`.

    compare prints e.g. "123 (0.00187)"; the parenthesised value is extracted.
    """
    extracted = re.sub(r'.*\((.*)\)', r'\1', ctx.output)
    try:
        value = float(extracted)
    except ValueError:
        assert 0, "the comparison failed: {}\n{}\n{}".format(
            extracted, ctx.cmd, ctx.cmd_output)
    assert value < error, "error is too big: {}".format(extracted)
@then('mse_avg is under {error:f}')
def then_video_compare(ctx, error):
    """Parse the psnr log produced by 'I compare video ...' and assert every
    frame's mse_avg is at most `error`; the log is removed afterwards."""
    with open(ctx.log, "r") as f:
        lines = f.readlines()
    for line in lines:
        line_ok = False
        for word in line.split():
            if "mse_avg" in word:
                line_ok = True
                # The value arrives as "mse_avg:<float>".
                rerror = word.split(":")[-1]
                assert float(rerror) <= error, "error is too big :{}".format(
                    line)
                break
        assert line_ok, "parsing error : {}".format(line)
    remove_if_exist(ctx.log)
@then('I expect the score to be more than {error:f}')
def then_score_ok(ctx, error):
    """Assert the score stored by the analyze step is at least `error`."""
    score = ctx.output
    assert float(score) >= error, "error is too big: {}".format(score)
@then('I expect the full coverage to be {target:S}')
def check_full_coverage(ctx, target):
    """Check the stored uncovered ratio: "true" expects exactly 0, "false" non-zero."""
    ratio = ctx.output
    if target == "true":
        assert float(ratio) == float(0.0),\
            "uncovered ratio {} is not 0, full coverage is false".format(ratio)
    elif target == "false":
        assert float(ratio) != float(0.0),\
            "uncovered ratio {} is 0, full coverage is true".format(ratio)
    else:
        raise Exception("Wrong usage of step")
@then('{wildcard:S} is a single file')
def when_check_single_file(ctx, wildcard):
    """Assert exactly one file matches `wildcard` (the file is NOT removed)."""
    wildcard = normalize(wildcard)
    matches = extend_path(wildcard)
    assert len(matches) == 1,\
        "number of video files is invalid, just one is expected : {}".format(
            wildcard)
@then('I check {output:S} faststart with AtomicParsley')
def when_check_fast_start_atomic(ctx, output):
    """Verify the moov atom precedes mdat in the MP4 (faststart layout)."""
    output = normalize(output)
    ctx.output, ctx.res = atomic_check(output)
    atoms = str(ctx.output).split("Atom ")
    moov = -1
    mdat = -1
    # Remember the index of the last occurrence of each atom of interest.
    for index, chunk in enumerate(atoms):
        if chunk.startswith("moov"):
            moov = index
        if chunk.startswith("mdat"):
            mdat = index
    assert moov > -1, "no moov Atom in output\n{}".format(ctx.output)
    print_str = "moov Atom {} after mdat Atom {} in output\n{}"
    assert moov < mdat, print_str.format(moov, mdat, ctx.output)
@then('I check {output:S} Atom {atom:S} with AtomicParsley')
def when_check_atomic(ctx, output, atom):
    """Assert the given atom appears in the AtomicParsley listing."""
    output = normalize(output)
    ctx.output, ctx.res = atomic_check(output)
    atoms = str(ctx.output).split("Atom ")
    found = any(chunk.startswith(atom) for chunk in atoms)
    assert found, "no {} atom in output \n{}".format(
        atom, ctx.output)
@then('I check {output:S} no Atom {atom:S} with AtomicParsley')
def when_check_no_atomic(ctx, output, atom):
    """Assert the given atom does NOT appear in the AtomicParsley listing."""
    output = normalize(output)
    ctx.output, ctx.res = atomic_check(output)
    atoms = str(ctx.output).split("Atom ")
    found = any(chunk.startswith(atom) for chunk in atoms)
    assert not found, "{} atom in output \n{}".format(
        atom, ctx.output)
@then('The video is OK')
def then_video_ok(ctx):
    """Assert the previous integrity check exited cleanly."""
    assert not ctx.res, "video is invalid : {}".format(ctx.output)
@then('The videos are OK')
def then_videos_ok(ctx):
    """Assert every integrity check of the batch exited cleanly."""
    for index, returncode in enumerate(ctx.res):
        assert not returncode, "video is invalid : {}".format(ctx.output[index])
@then('The exposure output ptv is valid')
def then_exposure_output_valid(ctx):
    """The exposure algorithm's result file must parse as JSON."""
    assert check_json(EXPOSURE_OUTPUT), "the output ptv is invalid"
@then('The JSON output {output_file:S} is valid')
@then('The photometric output {output_file:S} is valid')
def then_json_output_valid(ctx, output_file):
    """The named output file (relative to ASSETS) must parse as JSON."""
    assert check_json(osp.join(ASSETS, output_file)), "the output ptv is invalid"
@then(r'The exposure RGB score in {output:S} is less than {diffr:d}, {diffg:d}, {diffb:d}')
def then_exposure_output_score(ctx, output, diffr, diffg, diffb):
    """Check the per-channel exposure scores in the output ptv are each below
    the given bounds; the output file is consumed (removed)."""
    output = osp.join(ASSETS, output)
    with open(output, 'r') as f:
        data = f.read()
    remove_output_file(output)
    try:
        data = json.loads(data)[0]
    except ValueError:
        # Typo fix: the message previously read "ouput".
        raise Exception("the output ptv is invalid: {}".format(data))
    assert_str = "ptv doesn't contain valid exposure score: {}".format(data)
    assert_bool = "diff_red" in data and "diff_green" in data and\
        "diff_blue" in data
    assert assert_bool, assert_str
    assert_str = "Expected red exposure score < {}, got: {}".format(
        diffr, data["diff_red"])
    assert data["diff_red"] < int(diffr), assert_str
    assert_str = "Expected green exposure score < {}, got: {}".format(
        diffg, data["diff_green"])
    assert data["diff_green"] < int(diffg), assert_str
    assert_str = "Expected blue exposure score < {}, got: {}".format(
        diffb, data["diff_blue"])
    assert data["diff_blue"] < int(diffb), assert_str
@then('The synchronization output "{file_name:S}" is valid')
def then_synchro_output_valid(ctx, file_name):
    """The synchronization result file must parse as JSON."""
    assert check_json(normalize(file_name)), "the output ptv is invalid"
@then('The synchronization output "{file_name:S}" is consistent with "{ref:S}"')
@then('The synchronization output "{file_name:S}" is consistent with "{ref:S}" within {nb_frame:d} frames')
@then('The synchronization output "{file_name:S}" is consistent with "{ref:S}" within {nb_frame:d} frame')
def then_synchro_value_correct(ctx, file_name, ref, nb_frame=10):
    """Each input's frame_offset must match the reference within nb_frame
    frames; inputs are paired between the files by their reader_config."""
    output = json_file_to_dict(normalize(file_name))
    reference = json_file_to_dict(osp.join(ctx.utils, ref))
    for ref_input in reference["inputs"]:
        expected_offset = ref_input["frame_offset"]
        for out_input in output["inputs"]:
            if ref_input["reader_config"] != out_input["reader_config"]:
                continue
            actual_offset = out_input["frame_offset"]
            assert abs(expected_offset - actual_offset) <= nb_frame, "wrong output"
            break
@then('The calibration cost of output "{file_name:S}" is consistent with "{ref:S}"')
def the_calibration_cost_correct(ctx, file_name, ref):
    """Compare calibration_cost between output and reference within 150.

    The cost may live at the top level or under "pano". Explicit None checks
    are used: the original tested `if not calibration_cost`, which treated a
    legitimate cost of 0 / 0.0 as missing and fell through to the "pano"
    lookup (potentially raising KeyError).
    """
    output = json_file_to_dict(normalize(file_name))
    ref = json_file_to_dict(osp.join(ctx.utils, ref))
    calibration_cost = output.get("calibration_cost")
    if calibration_cost is None:
        calibration_cost = output["pano"]["calibration_cost"]
    calibration_cost_ref = ref.get("calibration_cost")
    if calibration_cost_ref is None:
        calibration_cost_ref = ref["pano"]["calibration_cost"]
    assert abs(calibration_cost - calibration_cost_ref) <= 150,\
        "wrong output %f <> %f" % (calibration_cost, calibration_cost_ref)
@then('The translations of output "{file_name:S}" are consistent with "{ref:S}" for the first input')
def the_calibration_translations_correct(ctx, file_name, ref):
    """The first input's translation_x/y/z must match the reference within 1e-3."""
    output = json_file_to_dict(normalize(file_name))
    ref = json_file_to_dict(osp.join(ctx.utils, ref))
    ref_geo = ref["inputs"][0]["geometries"]
    out_geo = output["inputs"][0]["geometries"]
    expected = [ref_geo[axis] for axis in ("translation_x", "translation_y", "translation_z")]
    actual = [out_geo[axis] for axis in ("translation_x", "translation_y", "translation_z")]
    assert_str = "wrong output {} <> {}"
    for ref_val, out_val in zip(expected, actual):
        assert abs(ref_val - out_val) <= 0.001, assert_str.format(ref_val, out_val)
@then('The file size of {output:S} is below {filesize:d} bytes')
def then_check_filesize(ctx, output, filesize):
    """Every file matching `output` must be smaller than `filesize` bytes."""
    for eachfile in extend_path(normalize(output)):
        actual_size = os.path.getsize(eachfile)
        assert actual_size < filesize,\
            "{} size {} is above {} limit".format(
                eachfile, actual_size, filesize)
@then(r'I expect {file_name:S} is the same as {ref:S} with {ndigits:S} digits after the decimal point for float')
def then_check_json_files_equal(ctx, file_name, ref, ndigits):
    """The output JSON must equal the reference once floats are rounded to
    ndigits; the output file is consumed (removed)."""
    produced = json_file_to_dict(normalize(file_name), ndigits)
    expected = json_file_to_dict(osp.join(ctx.utils, "assets_ptv", ref),
                                 ndigits)
    assert produced == expected, "{}\n\n\n{}".format(produced, expected)
    remove_output_file(normalize(file_name))
@then(r'I expect the geometries of {file_name:S} are the same as {ref:S}')
def then_check_json_geometries_equal(ctx, file_name, ref):
    """Every input's geometries in the output must equal the reference's;
    the output file is consumed (removed)."""
    produced = json_file_to_dict(normalize(file_name))
    expected = json_file_to_dict(osp.join(ctx.utils, "assets_ptv", ref))
    pairs = zip(expected["pano"]["inputs"], produced["pano"]["inputs"])
    for ref_input, out_input in pairs:
        ref_geo = ref_input["geometries"]
        out_geo = out_input["geometries"]
        assert ref_geo == out_geo, "{}\n\n{}".format(ref_geo, out_geo)
    remove_output_file(normalize(file_name))
@then(r'I expect the input readers and stack orders of {file_name:S} are the same as {ref:S}')
def then_check_json_input_readers_equal(ctx, file_name, ref):
    """Compare reader_config and stack_order of each input against the
    reference; the output file is consumed (removed)."""
    output_json = json_file_to_dict(normalize(file_name))
    ref_json = json_file_to_dict(osp.join(ctx.utils, "assets_ptv", ref))
    ref_pano = ref_json["pano"]["inputs"]
    out_pano = output_json["pano"]["inputs"]
    for ref_input, out_input in zip(ref_pano, out_pano):
        ref_reader = ref_input["reader_config"]
        out_reader = out_input["reader_config"]
        ref_stack = ref_input["stack_order"]
        out_stack = out_input["stack_order"]
        assert ref_reader == out_reader, "{}\n\n{}".format(
            ref_reader, out_reader)
        # Consistency fix: report reference first, then output, matching the
        # reader_config assertion above (the original swapped the order).
        assert ref_stack == out_stack, "{}\n\n{}".format(ref_stack, out_stack)
    remove_output_file(normalize(file_name))
@then('I check the audio bitrate of {filename:S} to be equal to {bitrate:d}')
def then_the_audio_bit_rate_equal(ctx, filename, bitrate):
    """Measure the audio bitrate with ffprobe and assert it is within 5% of
    `bitrate` (in kb/s)."""
    filename = osp.join(ASSETS, filename)
    ffprobe_output = helpers.get_ffprobe_audio_outputs("100000", filename)
    # `in` instead of the Python-2-only dict.has_key(); works on 2 and 3.
    assert "streams" in ffprobe_output
    result = ffprobe_output["streams"][0]["bit_rate"]
    result = float(result)/1000.
    print("audio bitrate measured = {} kb/s different from the expected {} kb/s".format(
        result, bitrate))
    tolerance = bitrate*0.05
    # Chained comparison instead of bitwise & on booleans.
    assert (bitrate - tolerance) <= result <= (bitrate + tolerance)
@then('I check the video effective_bitrate of the recorded video file for {duration:d} seconds is {bitrate:d} with precision of {precision:g}')
def then_the_videofile_bit_rate_equal(ctx, duration, bitrate, precision):
    """Assert the effective bitrate lies within +/- `precision` (fractional)
    of the expected `bitrate`."""
    effective_bitrate = helpers.get_effective_bitrate(
        LONG_FFPROBESIZE,
        ctx.strem_file_path,
        duration)
    above_floor = (1.0 - float(precision)) * int(bitrate) < int(effective_bitrate)
    assert above_floor, "expected value {}, but got {}".format(bitrate,
                                                               effective_bitrate)
    below_ceiling = (1.0 + float(precision)) * int(bitrate) > int(effective_bitrate)
    assert below_ceiling, "expected value {}, but got {}".format(bitrate,
                                                                 effective_bitrate)
@then('I check the video effective_bitrate of the recorded video file for {duration:d} seconds is {order:S} than {bitrate:d}')
def then_the_videofile_bit_rate_equal(ctx, duration, order, bitrate):
    """Compare the recorded file's effective bitrate against a bound.

    `order` selects the check: "higher", "lower" or "around" (within +/- 5%).
    """
    effective_bitrate = helpers.get_effective_bitrate(
        LONG_FFPROBESIZE,
        ctx.strem_file_path,
        duration)
    measured = int(effective_bitrate)
    expected = int(bitrate)
    comparator = str(order)
    if comparator == "higher":
        assert expected < measured, "expected more than {}, but got {}".format(
            bitrate, effective_bitrate)
    elif comparator == "lower":
        assert expected > measured, "expected less than {}, but got {}".format(
            bitrate, effective_bitrate)
    elif comparator == "around":
        assert 0.95 * expected < measured, "expected value {}, but got {}".format(
            bitrate, effective_bitrate)
        assert 1.05 * expected > measured, "expected value {}, but got {}".format(
            bitrate, effective_bitrate)
    else:
        raise Exception(
            "wrong comparator used {}. use lower/higher/around".format(order))
@then('I check the {field:S} of {filename:S} to be equal to {value:S}')
def then_the_field_equal(ctx, filename, field, value):
    """Assert an audio stream property reported by ffprobe equals `value`.

    Feature files use a couple of friendly aliases for ffprobe keys
    ("sampling_rate" -> "sample_rate", "sample_format" -> "sample_fmt").

    Bug fix: dict.has_key() was removed in Python 3; use the `in` operator.
    """
    filename = osp.join(ASSETS, filename)
    ffprobe_output = helpers.get_ffprobe_audio_outputs("100000", filename)
    assert "streams" in ffprobe_output
    # Map friendly names used in the feature files onto ffprobe's keys.
    aliases = {"sampling_rate": "sample_rate", "sample_format": "sample_fmt"}
    key = aliases.get(field, field)
    result = ffprobe_output["streams"][0][key]
    print("{} resulted {} different from the expected {}".format(
        field, result, value))
    assert (result == value)
@then('I check the video {field:S} of {filename:S} to be equal to {value:S}')
def then_the_video_field_equal(ctx, filename, field, value):
    """Assert a video stream property reported by ffprobe equals `value`.

    Bug fix: dict.has_key() was removed in Python 3; use the `in` operator.
    """
    filename = osp.join(ASSETS, filename)
    ffprobe_output = helpers.get_ffprobe_video_outputs("100000", filename)
    assert "streams" in ffprobe_output
    result = ffprobe_output["streams"][0][field]
    print("{} resulted {} different from the expected {}".format(
        field, result, value))
    assert (result == value)
@then('The background process was successful')
def then_background_process_sucess(ctx):
    """Wait for the background command to finish and check its exit code."""
    ctx.cmd_background.join()
    failure_msg = "background process failed (code {})".format(ctx.res)
    assert ctx.res == 0, failure_msg
@then('I record the audio output during {duration:d} seconds in {wavfile:S}')
def then_record_audio_of_rtmp(ctx, duration, wavfile):
    """Capture `duration` seconds of the RTMP audio into `wavfile`, then wait
    for the background process and check that it succeeded."""
    helpers.record_audio_sample_from_broadcasting(
        ctx.rtmp_stream, osp.join(ASSETS, wavfile), duration)
    ctx.cmd_background.join()
    assert ctx.res == 0, "background process failed (code {})".format(ctx.res)
@then(u'I expect the output audio channel map of {filename:S} to be "{channel_map}"')
def then_check_channel_map(ctx, filename, channel_map):
    """Compare the measured audio channel map of a wav file to the expected
    space-separated list of channel indices."""
    expected_map = [int(token) for token in channel_map.split(" ")]
    wavfile = osp.join(ASSETS, filename)
    # Fundamental frequency handed to the channel-identification helper.
    f0 = 44100. / 2. / 512.
    measured_map = helpers.get_audio_channel_map(wavfile, f0)
    assert(len(measured_map) == len(expected_map))
    print('expected channel map {}'.format(expected_map))
    print('resulted channel map {}'.format(measured_map))
    assert(measured_map == expected_map)
@then(u'I wait for {duration:d} seconds')
def then_wait(ctx, duration):
    """Sleep for the requested number of seconds."""
    time.sleep(duration)
@given(u'the RTMP stream is started with bandwidth limited to {bw:d} with {ptv:S} and "{args}"')
def given_limited_rtmp_started_with(ctx, bw, ptv, args):
    """Instantiate an RTMP .ptv from a template and launch the stream.

    A six-digit random suffix keeps stream and file names unique.  When `bw`
    is not None, the command is wrapped with trickle to cap upload bandwidth.
    """
    suffix = "".join(choice('0123456789') for _ in range(6))
    gen_file = osp.join(ptv.split('/')[0], 'gen-rtmp-{}.ptv'.format(suffix))
    template_ptv = osp.join(ctx.utils, 'assets_ptv', ptv)
    ctx.stream_name = 'sinegen{}'.format(suffix)
    ctx.rtmp_stream = 'rtmp://{}/audio_map_test/{}'.format(
        INTERNAL_RTMP_SERVER, ctx.stream_name)
    with open(osp.join(ctx.utils, 'assets_ptv', gen_file), 'w') as gen_ptv:
        with open(template_ptv, "r") as template_file:
            content = template_file.read()
        # Point the template's placeholder address at our RTMP endpoint.
        gen_ptv.write(content.replace('##ADDRESS##', ctx.rtmp_stream))
    control = None if bw is None else 'trickle -s -u {}'.format(bw)
    when_launch_control_cmd_background(ctx, control, 'videostitch-cmd',
                                       gen_file, args, from_repo=False)
@given(u'the RTMP stream is started with {ptv:S} and "{args}"')
def given_rtmp_started_with(ctx, ptv, args):
    """Start an RTMP stream without any bandwidth cap."""
    given_limited_rtmp_started_with(ctx, None, ptv, args)
@then('I copy the file from the wowza server')
def then_rtmp_copy_stream(ctx):
    """Download the recorded stream from the wowza server into ASSETS and
    remember the stream/wav paths for later steps."""
    dwn_link = 'http://{}:1900/{}.mp4'.format(INTERNAL_RTMP_SERVER,
                                              ctx.stream_name).replace(" ", "")
    ctx.strem_file_path = osp.join(ASSETS, 'audio_channel_map/stream.mp4')
    ctx.wav_file_path = osp.join(ASSETS, 'audio_channel_map/output.wav')
    response = urllib2.urlopen(dwn_link)
    with open(ctx.strem_file_path, 'wb') as stream_file:
        stream_file.write(response.read())
@then('I strip the audio from the recorded video file')
def then_strip_wav_from_file(ctx):
    """Extract the audio track into the wav file, then drop the video file."""
    helpers.get_wave_from_video_file(ctx.strem_file_path, ctx.wav_file_path)
    os.remove(ctx.strem_file_path)
@then('I expect program compilation to take less than {timeout:d} seconds')
def then_check_opencl_cache(ctx, timeout):
    """Assert the elapsed time since ctx.start_time is below `timeout`."""
    elapsed = time.time() - ctx.start_time
    assert elapsed < float(timeout), "execution took too long: {}s".format(
        elapsed)
@then('I expect the number of frames of the recorded video file to be {order:S} than {nb:d}')
def then_nb_order_frame(ctx, order, nb):
    """Compare the recorded file's frame count against `nb`.

    `order` selects the comparison: "higher" or "lower".
    """
    probe_args = ["ffprobe", "-select_streams", "v", "-show_streams",
                  ctx.strem_file_path]
    output = check_output(probe_args)
    match = re.search(r'.*nb_frames=(\d+).*', output)
    if not match:
        raise Exception("something went wrong with ffmpeg {}".format(output))
    frames = match.group(1)
    if str(order) == "higher":
        assert int(nb) < int(frames), "expected more than {}, but got {}".format(nb, frames)
    elif str(order) == "lower":
        assert int(nb) > int(frames), "expected less than {}, but got {}".format(nb, frames)
    else:
        raise Exception("wrong comparator used {}. use lower or higher".format(order))
@then('I expect the number of frames of {file_name:S} to be {nb:d}')
def then_nb_frame(ctx, file_name, nb):
    """Assert the frame count of `file_name` (under ASSETS) equals `nb`."""
    probe_args = ["ffprobe", "-select_streams", "v", "-show_streams",
                  osp.join(ASSETS, file_name)]
    output = check_output(probe_args)
    match = re.search(r'.*nb_frames=(\d+).*', output)
    if not match:
        raise Exception("something went wrong with ffmpeg {}".format(output))
    frames = match.group(1)
    assert int(frames) == int(nb), "wrong number of frames {} != {}".format(
        frames, nb)
# }}}
|
"""
* MIT License
*
* Copyright (c) 2019 Arpit Aggarwal Shantam Bajpai
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without
* limitation the rights to use, copy, modify, merge, publish, distribute,
* sublicense, and/or sell copies of the Software, and to permit persons to
* whom the Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
"""
# header files
from utils import *
import sys

# Command line: the first argument (optional) is the input video path.
args = sys.argv
path_video = ""
output_path_video = ""
if(len(args) > 1):
    path_video = args[1]

# Side length (pixels) of the square the detected AR tag is warped onto.
dimension = 200
world_points = np.array([[0, 0], [dimension - 1, 0], [dimension - 1, dimension - 1], [0, dimension - 1]], dtype="float32")
cap = cv2.VideoCapture(path_video)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter("output_problem1.avi", fourcc, 20.0, (960, 540))

# Read the video frame by frame.
# Fix: removed the unused `tracker` and `prev_corners` variables.
count = 0
while(cap.isOpened()):
    ret, frame = cap.read()
    if(ret):
        # Downscale the frame and threshold to isolate the bright tag region.
        frame = cv2.resize(frame, (0, 0), fx = 0.5, fy = 0.5)
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(gray_frame, 191, 255, 0)
        # Detect candidate AR-tag corner sets in the thresholded frame.
        corners = get_artag_corners(frame, thresh, 1)
        for corner in corners:
            # Homography from the detected corners to the canonical square,
            # then warp the tag into a dimension x dimension patch.
            homography_matrix = get_homography_matrix(corner, world_points)
            warp_artag = warp_perspective(frame, homography_matrix, (dimension, dimension))
            gray_warp_artag = cv2.cvtColor(warp_artag, cv2.COLOR_BGR2GRAY)
            # Decode the tag id and orientation from the warped patch.
            binary_code, orientation, new_world_points = get_artag_id(gray_warp_artag, dimension)
            # Log only every 20th frame to keep the console readable.
            if(count%20 == 0):
                print("Orientation is: " + orientation)
                print("ID is: " + str(binary_code))
                print()
        out.write(frame)
        count = count + 1
    else:
        break
cap.release()
out.release()
cv2.destroyAllWindows()
|
import socket
import threading
from django.core.handlers.wsgi import WSGIHandler
from django.core.servers import basehttp
from django.test.testcases import TransactionTestCase
from django.core.management import call_command
class StoppableWSGIServer(basehttp.WSGIServer):
    """WSGIServer with short timeout, so that server thread can stop this server."""
    def server_bind(self):
        """Sets timeout to 1 second."""
        basehttp.WSGIServer.server_bind(self)
        # A 1-second accept() timeout means the serving thread regains control
        # at least once per second instead of blocking forever on accept().
        self.socket.settimeout(1)
    def get_request(self):
        """Checks for timeout when getting request."""
        try:
            sock, address = self.socket.accept()
            # Disable the timeout on the accepted connection so request
            # handling itself is not interrupted mid-request.
            sock.settimeout(None)
            return (sock, address)
        except socket.timeout:
            # Re-raise so the caller's handle_request() loop is interrupted
            # and can re-check its stop condition -- presumably the caller
            # tolerates the propagated socket.timeout; confirm.
            raise
class TestServerThread(threading.Thread):
    """Thread for running a http server while tests are running."""
    def __init__(self, address, port):
        # Host and port the WSGI server will bind to.
        self.address = address
        self.port = port
        # Set by join() to ask the serving loop to exit.
        self._stopevent = threading.Event()
        # Signalled once the server has either bound or failed to bind,
        # so the spawning thread can stop waiting.
        self.started = threading.Event()
        # Holds the bind error, if any, for the spawning thread to re-raise.
        self.error = None
        super(TestServerThread, self).__init__()
    def run(self):
        """Sets up test server and database and loops over handling http requests."""
        try:
            handler = WSGIHandler()
            server_address = (self.address, self.port)
            httpd = StoppableWSGIServer(server_address, basehttp.WSGIRequestHandler)
            httpd.set_app(handler)
            self.started.set()
        # NOTE(review): Python 2-only except syntax; would need
        # "except basehttp.WSGIServerException as e" under Python 3.
        except basehttp.WSGIServerException, e:
            self.error = e
            self.started.set()
            return
        # Must do database stuff in this new thread if database in memory.
        from django.conf import settings
        if settings.DATABASE_ENGINE == 'sqlite3' \
            and (not settings.TEST_DATABASE_NAME or settings.TEST_DATABASE_NAME == ':memory:'):
            # Import the fixture data into the test database.
            if hasattr(self, 'fixtures'):
                # We have to use this slightly awkward syntax due to the fact
                # that we're using *args and **kwargs together.
                call_command('loaddata', *self.fixtures, **{'verbosity': 0})
        # Loop until we get a stop event.
        while not self._stopevent.isSet():
            httpd.handle_request()
    def join(self, timeout=None):
        """Stop the thread and wait for it to finish."""
        self._stopevent.set()
        threading.Thread.join(self, timeout)
class TestServerTestCase(TransactionTestCase):
    """TransactionTestCase that can run a live HTTP server in a background
    thread for the duration of a test."""
    def start_test_server(self, address='localhost', port=8000):
        """Creates a live test server object (instance of WSGIServer)."""
        self.server_thread = TestServerThread(address, port)
        self.server_thread.start()
        # Wait until the thread has either bound the socket or recorded an
        # error, then surface any bind failure in the test's own thread.
        self.server_thread.started.wait()
        if self.server_thread.error:
            raise self.server_thread.error
    def stop_test_server(self):
        """Stops the server thread, if one was started.

        Bug fix: previously raised AttributeError when called without a prior
        start_test_server(); now it is a no-op in that case.
        """
        if getattr(self, 'server_thread', None):
            self.server_thread.join()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for partition functions from equivalence.py"""
import unittest
from typing import Any, Iterable
from beanmachine.ppl.utils.equivalence import partition_by_kernel, partition_by_relation
def _brace(s: str) -> str:
return "{" + s + "}"
def _comma(s: Iterable[str]) -> str:
return ",".join(s)
def _set_str(items: Iterable[Any]) -> str:
    """Render *items* as a braced, comma-separated, sorted set literal
    (duplicates collapse via the intermediate set of strings)."""
    unique_sorted = sorted({str(item) for item in items})
    return _brace(_comma(unique_sorted))
def _set_set_str(results: Iterable[Any]) -> str:
    """Render a collection of collections as a set-of-sets string."""
    inner = [_set_str(group) for group in results]
    return _set_str(inner)
class PartitionTest(unittest.TestCase):
    def test_partition_(self) -> None:
        """Tests for partition_kernel from equivalence.py"""

        def mod_three_kernel(x: int) -> int:
            # Canonical residue in {0, 1, 2}, also for negative inputs.
            return (x % 3 + 3) % 3

        def mod_three_relation(x: int, y: int) -> bool:
            # True exactly when x and y are congruent modulo 3.
            return (x - y) % 3 == 0

        expected = """{{-1,-4,-7,2,5,8},{-2,-5,-8,1,4,7},{-3,-6,-9,0,3,6,9}}"""
        values = set(range(-9, 10))
        by_relation = _set_set_str(partition_by_relation(values, mod_three_relation))
        by_kernel = _set_set_str(partition_by_kernel(values, mod_three_kernel))
        self.assertEqual(by_relation, expected)
        self.assertEqual(by_kernel, expected)
|
from torch import optim
from torch.nn import MSELoss
import torch.nn.functional as F
# Base hyper-parameters shared by every agent type.
AGENT_CONFIG = {
    # 'lr' indicates the learning rate of the "mainbody".
    # Value-based RL: Q net; Policy-based RL: Policy net; Actor-critic RL: Actor
    'lr':0.01,
    # Optimizer momentum; None leaves the optimizer's own default in place.
    'mom':None,
    # Discount factor applied to future rewards.
    'reward_decay':0.9,
    # Replay-memory capacity, in transitions.
    'memory_size': 1e6,
    # 'hidden_layers' defines the layers of the "mainbody".
    # Value-based RL: Q net; Policy-based RL: Policy net; Actor-critic RL: Actor
    'hidden_layers':[64, 64],
    # NOTE(review): F.tanh is deprecated in newer PyTorch in favor of
    # torch.tanh -- confirm against the torch version this project pins.
    'act_func': F.tanh,
    # Activation applied to the network output; None means raw output.
    'out_act_func': None,
    # Whether batch-normalization layers are used.
    'using_bn': False,
}
# DQN-specific settings.
DQN_CONFIG = {
    # Sync the target network with the online network every N learn steps.
    'replace_target_iter':600,
    # Epsilon-greedy exploitation probability (and optional per-step growth).
    'e_greedy':0.9,
    'e_greedy_increment':None,
    'optimizer': optim.RMSprop,
    'loss' : MSELoss,
    'batch_size': 32,
}
# DDPG-specific settings (actor-critic with target networks).
DDPG_CONFIG = {
    'steps_per_iter': 50,
    # Environment steps collected before learning begins.
    'learn_start_step': 10000,
    'batch_size': 128,
    'reward_decay': 0.99,
    # Soft target-network update coefficient.
    'tau' : 0.005,
    # Exploration noise: initial variance, floor, and per-step decay.
    'noise_var' : 0.3,
    'noise_min' : 0.01,
    'noise_decrease' : 2e-5,
    'optimizer': optim.Adam,
    'v_optimizer': optim.Adam,
    # Separate learning rates for the actor ('lr') and critic ('lr_v').
    'lr': 1e-4,
    'lr_v' : 1e-3,
    'hidden_layers': [400, 300],
    'hidden_layers_v' : [400, 300],
    'loss_func_v': MSELoss,
    'act_func': F.relu,
    'out_act_func': F.tanh,
    # Actions are assumed scaled to [-action_bounds, action_bounds].
    'action_bounds':1,
    # Gradient-norm clipping threshold; None disables clipping.
    'max_grad_norm': None,
}
# TD3-specific settings (delayed actor updates, target policy smoothing).
TD3_CONFIG = {
    'actor_delayed_steps': 2,
    # Clip bound and std-dev of the target-policy smoothing noise.
    'smooth_epsilon': 0.5,
    'smooth_noise': 0.2,
}
# NAF-specific settings.
NAF_CONFIG = {
    'steps_per_iter': 50,
    'learn_start_step': 10000,
    'tau' : 0.005,
    'lr' : 1e-3,
    'noise_var' : 0.3,
    'noise_min' : 0.01,
    'noise_decrease' : 2e-5,
    'optimizer': optim.Adam,
    'loss': MSELoss,
    'batch_size': 128,
    'hidden_layers': [400, 300],
    'action_bounds':1,
    'max_grad_norm': 1.,
    'act_func': F.tanh,
    'using_bn': True,
}
# Vanilla policy-gradient settings.
PG_CONFIG = {
    'max_grad_norm': 2,
    # Environment steps gathered per training iteration (one on-policy batch).
    'steps_per_iter': 2048,
    'action_bounds': 1,
    'optimizer':optim.Adam,
    'GAE_lambda' : 0.95, # HIGH-DIMENSIONAL CONTINUOUS CONTROL USING GENERALIZED ADVANTAGE ESTIMATION. 2016 ICLR
    'entropy_weight':0.0,
    # Initial scale of the Gaussian action noise.
    'init_noise': 1.,
    # Value baseline type; 'FC' presumably selects a fully-connected net.
    'value_type': 'FC',
    'hidden_layers_v' : [64,64],
    'loss_func_v':MSELoss,
    'v_optimizer': optim.Adam,
    'mom_v' : None,
    'lr_v' : 0.01,
    # Value-network update passes per policy update.
    'iters_v': 3,
    'using_KL_estimation' : False,
    'policy_type': 'FC',
}
# On-policy agents keep exactly one batch of experience in memory.
PG_CONFIG['memory_size'] = PG_CONFIG['steps_per_iter']
# Natural policy gradient: conjugate-gradient solver settings.
NPG_CONFIG = {
    'cg_iters': 10,
    'cg_residual_tol' : 1e-10,
    'cg_damping': 1e-3,
    'max_kl_divergence':0.01,
}
# Clipped-surrogate PPO settings.
PPO_CONFIG = {
    'nbatch_per_iter': 32,
    'updates_per_iter': 10,
    'clip_epsilon': 0.2,
    'lr': 3e-4,
    # Weight of the value loss in the combined objective.
    'v_coef': 0.5,
}
# PPO shares a single learning rate between policy and value nets.
PPO_CONFIG['lr_v'] = PPO_CONFIG['lr']
# Adaptive-KL-penalty PPO settings.
AdaptiveKLPPO_CONFIG = {
    # Initial KL penalty coefficient (beta).
    'init_beta':3.,
    'nbatch_per_iter': 32,
    'updates_per_iter': 10,
    'lr': 3e-4,
    'v_coef': 0.5,
}
AdaptiveKLPPO_CONFIG['lr_v'] = AdaptiveKLPPO_CONFIG['lr']
# TRPO backtracking line-search settings.
TRPO_CONFIG = {
    'max_search_num' : 10,
    'accept_ratio' : .1,
    # Step-size shrink factor applied per backtracking step.
    'step_frac': .5
}
# Hindsight policy-gradient settings.
HPG_CONFIG = {
    # Number of goals resampled per trajectory.
    'sampled_goal_num': 10,
    'goal_space': None,
    # Importance-sampling variants: per-decision and weighted.
    'per_decision': True,
    'weighted_is': True,
    'using_hgf_goals' : True,
    'using_original_data': False,
    'using_her_reward': False,
}
# Hindsight TRPO settings.
HTRPO_CONFIG = {
    # Which KL estimator the TRPO machinery uses.
    'KL_esti_method_for_TRPO' : 'kl2',
}
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from page_sets.login_helpers import login_utils
from telemetry.core import util
from telemetry.page import action_runner as action_runner_module
def GetGaiaContext(tab):
  """Returns Gaia's login page context, or None if no webview matches."""
  gaia_prefix = 'https://accounts.google.com/'
  return next(
      (context for context in tab.GetWebviewContexts()
       if context.GetUrl().startswith(gaia_prefix)),
      None)
def LoginChromeAccount(action_runner, credential,
                       credentials_path=login_utils.DEFAULT_CREDENTIAL_PATH):
  """Logs in a Gaia account into Chrome.

  This function navigates the tab into Chrome's login page and logs in a user
  using credentials in |credential| part of the |credentials_path| file.

  Args:
    action_runner: Action runner responsible for running actions on the page.
    credential: The credential to retrieve from the credentials file
        (type string).
    credentials_path: The string that specifies the path to credential file.

  Raises:
    exceptions.Error: See GetWebviewContexts() and ExecuteJavaScript()
        for a detailed list of possible exceptions.
  """
  account_name, password = login_utils.GetAccountNameAndPassword(
      credential, credentials_path=credentials_path)

  action_runner.Navigate('chrome://chrome-signin')

  # Get the Gaia webview context within the sign in extension to create a Gaia
  # action_runner. The action runner will then execute JS in the Gaia context.
  gaia_context = util.WaitFor(lambda: GetGaiaContext(action_runner.tab), 5)
  if not gaia_context:
    raise RuntimeError('Can not find GAIA webview context for sign in.')
  gaia_action_runner = action_runner_module.ActionRunner(gaia_context)

  # Bug fix: the element id probed here was misspelled as "gaia_firsform",
  # so the new-style form was never detected and the code always fell back
  # to "gaia_loginform". Probe the correctly spelled "gaia_firstform" id,
  # matching the id used on the next line.
  new_flow = gaia_action_runner.EvaluateJavaScript(
      'document.querySelector("#gaia_firstform") != null')
  gaia_form_id = 'gaia_firstform' if new_flow else 'gaia_loginform'
  login_utils.InputForm(gaia_action_runner, account_name, input_id='Email',
                        form_id=gaia_form_id)
  if new_flow:
    # The new flow asks for the email first; advance to the password page.
    gaia_action_runner.ClickElement(selector='#%s #next' % gaia_form_id)
  login_utils.InputForm(gaia_action_runner, password, input_id='Passwd',
                        form_id=gaia_form_id)
  gaia_action_runner.ClickElement(selector='#signIn')
  action_runner.WaitForNavigate()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from typing import Optional
import mmap
from kgx.cli.cli_utils import transform # type: ignore
from kg_idg.transform_utils.transform import Transform
"""
Ingest KGX-format human protein-protein interactions from
STRING. Filter by interaction confidence
(combined score >= CONFIDENCE_THRESHOLD) to reduce noise.
Transform with KGX for validation.
"""
# Minimum summed STRING evidence score an edge must reach to be kept.
CONFIDENCE_THRESHOLD = 700
# Transform source keys mapped to their expected KGX tsv file names.
STRING_SOURCES = {
    'STRINGNodes': 'string_nodes.tsv',
    'STRINGEdges': 'string_edges.tsv'
}
class STRINGTransform(Transform):
    """Ingests the STRING human subset transform from KG-COVID-19 and
    runs a kgx transform for validation.
    """

    def __init__(self, input_dir: str = None, output_dir: str = None) -> None:
        source_name = "string"
        super().__init__(source_name, input_dir, output_dir)  # set some variables

    def run(self, nodes_file: Optional[str] = None, edges_file: Optional[str] = None) -> None:  # type: ignore
        """Obtain files and call the parse function.

        Bug fix: when explicit file names were supplied, parse() received the
        name stripped of its extension, so the edges file never matched
        STRING_SOURCES['STRINGEdges'] and the confidence filter was silently
        skipped. Pass the full file name, as the default branch does.
        """
        if nodes_file and edges_file:
            for source in [nodes_file, edges_file]:
                k = source.split('.')[0]
                data_file = os.path.join(self.input_base_dir, source)
                self.parse(source, data_file, k)
        else:
            for k in STRING_SOURCES.keys():
                name = STRING_SOURCES[k]
                data_file = os.path.join(self.input_base_dir, name)
                self.parse(name, data_file, k)

    def filter(self, name: str, data_file: str) -> None:
        """Keep only edges whose summed evidence scores reach
        CONFIDENCE_THRESHOLD; the unfiltered file is preserved as
        string_edges_full.tsv and the filtered rows take the original name.
        """
        # Do quality screen here - combined score must be >=
        # CONFIDENCE_THRESHOLD value
        # TODO: make this faster with mmap or the like
        new_edge_file_path = os.path.join(os.path.dirname(data_file),
                                          'string_edges_filtered.tsv')
        print(f"Applying confidence threshold of {CONFIDENCE_THRESHOLD} to STRING")
        with open(new_edge_file_path, 'w') as new_edge_file, \
                open(data_file, 'r') as raw_edge_file:
            new_edge_file.write(raw_edge_file.readline())  # Header
            for line in raw_edge_file:
                # Columns 7+ hold the per-channel evidence scores.
                scores = ((line.rstrip()).split("\t"))[6:]
                score_sum = sum(int(i) for i in scores if i.isdigit())
                if score_sum >= CONFIDENCE_THRESHOLD:
                    new_edge_file.write(line)
        os.rename(data_file,
                  os.path.join(os.path.dirname(data_file), 'string_edges_full.tsv'))
        os.rename(new_edge_file_path,
                  os.path.join(os.path.dirname(data_file), STRING_SOURCES['STRINGEdges']))

    def parse(self, name: str, data_file: str, source: str) -> None:
        """Filter (edges only) then run a KGX tsv->tsv transform to validate."""
        print(f"Parsing {data_file}")
        if name == STRING_SOURCES['STRINGEdges']:
            print(f"Parsing edges in {name}")
            self.filter(name, data_file)
        transform(inputs=[data_file],
                  input_format='tsv',
                  output=os.path.join(self.output_dir, name),
                  output_format='tsv')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.