content stringlengths 5 1.05M |
|---|
from os import path
import random
import shutil
import tempfile
from unittest import TestCase
from pippi.soundbuffer import SoundBuffer
from pippi import dsp
class TestSoundBuffer(TestCase):
    """Unit tests for pippi's SoundBuffer: construction, file round-trips,
    slicing, granulation and basic DSP transforms.

    Fixture audio lives in tests/sounds/ (44.1kHz WAV files) and rendered
    output is written to tests/renders/ for manual inspection.
    """

    def setUp(self):
        # Per-test scratch directory for soundfile write tests.
        self.soundfiles = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.soundfiles)

    def test_create_empty_buffer(self):
        # A bare SoundBuffer has zero frames and is falsy.
        sound = SoundBuffer()
        self.assertTrue(len(sound) == 0)
        self.assertTrue(not sound)
        # length is given in seconds: 1s at the default 44100Hz samplerate.
        sound = SoundBuffer(length=1)
        self.assertEqual(len(sound), 44100)
        self.assertTrue(sound)

    def test_create_stereo_buffer_from_soundfile(self):
        # Loading via the constructor and via dsp.read() should agree.
        sound = SoundBuffer(filename="tests/sounds/guitar1s.wav")
        self.assertEqual(len(sound), 44100)
        self.assertTrue(sound.samplerate == 44100)
        sound = dsp.read("tests/sounds/guitar1s.wav")
        self.assertEqual(len(sound), 44100)
        self.assertTrue(sound.samplerate == 44100)

    def test_graph_soundfile(self):
        # Smoke test: rendering the waveform to a PNG should not raise.
        sound = SoundBuffer(filename="tests/sounds/guitar1s.wav")
        sound.graph("tests/renders/graph_soundbuffer.png", width=1280, height=800)

    def test_create_mono_buffer_from_soundfile(self):
        sound = SoundBuffer(filename="tests/sounds/linux.wav")
        self.assertTrue(sound.samplerate == 44100)
        self.assertTrue(sound.channels == 1)
        self.assertEqual(len(sound), 228554)
        sound = dsp.read("tests/sounds/linux.wav")
        self.assertTrue(sound.samplerate == 44100)
        self.assertTrue(sound.channels == 1)
        self.assertEqual(len(sound), 228554)

    def test_create_mono_buffer_from_wavetable(self):
        # A wavetable is a 1D sample buffer; buffers built from it keep its length.
        wt = dsp.wt("sine", wtsize=4096)
        self.assertTrue(len(wt) == 4096)
        snd = dsp.buffer(wt)
        self.assertTrue(len(snd) == 4096)
        # An arbitrary sample inside a sine cycle should be non-zero.
        self.assertTrue(snd[100][0] != 0)
        snd = SoundBuffer(wt)
        self.assertTrue(len(snd) == 4096)
        self.assertTrue(snd[100][0] != 0)

    def test_stack_soundbuffer(self):
        # Stacking concatenates channels; length is the longer input's.
        snd1 = SoundBuffer(filename="tests/sounds/guitar1s.wav")
        snd2 = SoundBuffer(filename="tests/sounds/LittleTikes-A1.wav")
        channels = snd1.channels + snd2.channels
        length = max(len(snd1), len(snd2))
        out = dsp.stack([snd1, snd2])
        self.assertTrue(channels == out.channels)
        self.assertTrue(length == len(out))
        self.assertTrue(snd1.samplerate == out.samplerate)
        out.write("tests/renders/soundbuffer_stack.wav")

    def test_convolve_soundbuffer(self):
        # Convolve against another sound, then against a sinc window.
        sound = SoundBuffer(filename="tests/sounds/guitar1s.wav")
        impulse = SoundBuffer(filename="tests/sounds/LittleTikes-A1.wav")
        out = sound.convolve(impulse)
        out.write("tests/renders/soundbuffer_convolve_guitar_littletikes.wav")
        impulse = dsp.win("sinc")
        out = sound.convolve(impulse)
        out.write("tests/renders/soundbuffer_convolve_guitar_sinc.wav")

    def test_clip_soundbuffer(self):
        sound = SoundBuffer(filename="tests/sounds/guitar1s.wav")
        sound = sound.clip(-0.1, 0.1)
        self.assertEqual(len(sound), 44100)
        self.assertTrue(sound.samplerate == 44100)
        # Clipping bounds the peak amplitude without changing the length.
        self.assertTrue(sound.max() <= 0.1)

    def test_save_buffer_to_soundfile(self):
        # Write the same buffer in each supported container format.
        filename = path.join(self.soundfiles, "test_save_buffer_to_soundfile.{}")
        sound = SoundBuffer(length=1)
        sound.write(filename.format("wav"))
        self.assertTrue(path.isfile(filename.format("wav")))
        sound.write(filename.format("flac"))
        self.assertTrue(path.isfile(filename.format("flac")))
        sound.write(filename.format("ogg"))
        self.assertTrue(path.isfile(filename.format("ogg")))

    def test_split_buffer(self):
        # Fixed-size grains: every grain but the last has the requested
        # length, the last carries the remainder, and durations sum exactly.
        sound = SoundBuffer(filename="tests/sounds/guitar1s.wav")
        length = random.triangular(0.1, sound.dur)
        framelength = int(length * sound.samplerate)
        durations = []
        for grain in sound.grains(length):
            durations += [grain.dur]
        for grain_length in durations[:-1]:
            self.assertEqual(int(grain_length * sound.samplerate), framelength)
        remainderframes = int((sound.dur - sum(durations[:-1])) * sound.samplerate)
        self.assertEqual(int(durations[-1] * sound.samplerate), remainderframes)
        self.assertEqual(sum(durations), sound.dur)

    def test_random_split_buffer(self):
        # Randomly-sized grains between 1ms and the whole file must still
        # sum exactly to the source duration.
        sound = SoundBuffer(filename="tests/sounds/guitar1s.wav")
        durations = []
        for grain in sound.grains(0.001, sound.dur):
            durations += [grain.dur]
        self.assertNotEqual(durations[-1], 0)
        self.assertEqual(sum(durations), sound.dur)

    def test_window(self):
        # After applying any envelope, the first frame should be silent.
        sound = SoundBuffer(filename="tests/sounds/guitar1s.wav")
        for window_type in ("sine", "saw", "tri", "hamm", "hann", "bart", "kaiser", "black"):
            sound = sound.env(window_type)
        self.assertEqual(sound[0], (0, 0))

    def test_speed(self):
        # Playback-rate change scales the length by 1/speed.
        sound = SoundBuffer(filename="tests/sounds/guitar1s.wav")
        speed = random.random()
        out = sound.speed(speed)
        out.write("tests/renders/soundbuffer_speed.wav")
        self.assertEqual(len(out), int(len(sound) * (1 / speed)))

    def test_vpeed(self):
        # Smoke tests for variable-speed playback along hann-shaped curves
        # covering slow, very slow and extreme speed ranges.
        sound = SoundBuffer(filename="tests/sounds/linux.wav")
        speed = dsp.win("hann", 0.5, 2)
        out = sound.vspeed(speed)
        out.write("tests/renders/soundbuffer_vspeed_0.5_2.wav")
        speed = dsp.win("hann", 0.15, 0.5)
        out = sound.vspeed(speed)
        out.write("tests/renders/soundbuffer_vspeed_0.15_0.5.wav")
        speed = dsp.win("hann", 5, 50)
        out = sound.vspeed(speed)
        out.write("tests/renders/soundbuffer_vspeed_5_50.wav")

    def test_transpose(self):
        # Pitch-shifting must preserve the buffer length at any ratio.
        sound = SoundBuffer(filename="tests/sounds/guitar1s.wav")
        speed = random.triangular(1, 10)
        out = sound.transpose(speed)
        self.assertEqual(len(out), len(sound))
        speed = random.triangular(0, 1)
        out = sound.transpose(speed)
        self.assertEqual(len(out), len(sound))
        speed = random.triangular(10, 100)
        out = sound.transpose(speed)
        self.assertEqual(len(out), len(sound))

    def test_pan(self):
        # A fully-panned signal leaves one channel silent, for every pan law.
        # NOTE(review): random.randint's upper bound is inclusive, so the
        # index len(pan_left) is possible — this relies on SoundBuffer
        # accepting an index equal to its length; confirm.
        sound = SoundBuffer(filename="tests/sounds/guitar1s.wav")
        for pan_method in ("linear", "constant", "gogins"):
            pan_left = sound.pan(0, method=pan_method)
            self.assertEqual(pan_left[random.randint(0, len(pan_left))][0], 0)
            pan_right = sound.pan(1, method=pan_method)
            self.assertEqual(pan_right[random.randint(0, len(pan_right))][1], 0)

    def test_slice_frame(self):
        """ A SoundBuffer should return a single frame
        when sliced into one-dimensionally like:

            frame = sound[frame_index]

        A frame is a tuple of floats, one value
        for each channel of sound.
        """
        sound = SoundBuffer(filename="tests/sounds/guitar1s.wav")
        # Exercise both ends plus positive and negative mid indices.
        indices = (0, -1, len(sound) // 2, -(len(sound) // 2))
        for frame_index in indices:
            frame = sound[frame_index]
            self.assertTrue(isinstance(frame, tuple))
            self.assertEqual(len(frame), sound.channels)
            self.assertTrue(isinstance(frame[0], float))

    def test_slice_sample(self):
        """ Slicing into the second dimension of a SoundBuffer
        will return a single sample at the given channel index.

            sample = sound[frame_index][channel_index]

        Note: A sample is a float, usually between -1.0 and 1.0
        but pippi will only clip overflow when you ask it to, or
        when writing a SoundBuffer back to a file.
        So, numbers can exceed that range during processing and
        be normalized or clipped as desired later on.
        """
        sound = SoundBuffer(filename="tests/sounds/guitar1s.wav")
        indices = (0, -1, len(sound) // 2, -(len(sound) // 2))
        for frame_index in indices:
            for channel_index in range(sound.channels):
                sample = sound[frame_index][channel_index]
                self.assertTrue(isinstance(sample, float))

    def test_pad_sound_with_silence(self):
        # Padding adds silent frames at the start (default) or the end.
        sound = SoundBuffer(filename="tests/sounds/guitar1s.wav")
        original_length = len(sound)
        silence_length = random.triangular(0.001, 1)
        sound = sound.pad(silence_length)
        self.assertEqual(len(sound), int((sound.samplerate * silence_length) + original_length))
        self.assertEqual(sound[0], (0, 0))
        original_length = len(sound)
        silence_length = random.triangular(0.001, 1)
        sound = sound.pad(end=silence_length)
        self.assertEqual(len(sound), int((sound.samplerate * silence_length) + original_length))
        self.assertEqual(sound[-1], (0, 0))

    def test_taper(self):
        # Tapers of any length (even longer than the sound itself) must
        # leave the length and samplerate untouched.
        sound = SoundBuffer(filename="tests/sounds/guitar1s.wav")
        sound = sound.taper(0)
        sound = sound.taper(0.001)
        sound = sound.taper(0.01)
        sound = sound.taper(0.1)
        sound = sound.taper(1)
        sound = sound.taper(10)
        self.assertEqual(len(sound), 44100)
        self.assertTrue(sound.samplerate == 44100)
from os import path
from typing import Callable
from unittest.mock import patch
import pandas as pd
import pytest
import requests
from _pytest.monkeypatch import MonkeyPatch
import pybaseball
@pytest.mark.parametrize(
    # Run the whole round-trip once per on-disk cache serialization format.
    "cache_type", [(x) for x in ['CSV', 'PARQUET']]
)
@patch('pybaseball.cache.config.enabled', True)
def test_cache(monkeypatch: MonkeyPatch, cache_type: str, thrower: Callable) -> None:
    """End-to-end cache test: fetch once over the network, then verify the
    second fetch is served entirely from the on-disk cache.

    ``thrower`` is a pytest fixture (presumably defined in the suite's
    conftest — confirm) that raises when called, so patching requests.get
    with it guarantees any further network access fails loudly.
    """
    with patch('pybaseball.cache.config.cache_type', cache_type):
        # Delete any existing data just in case
        pybaseball.cache.purge()
        # Uncached read
        result = pybaseball.batting_stats(2019)  # type: ignore
        # Make requests.get throw an error so we can be sure this is coming from the cache
        monkeypatch.setattr(requests, 'get', thrower)
        # Cached read
        result2 = pybaseball.batting_stats(2019)  # type: ignore
        pd.testing.assert_frame_equal(result, result2)
        # Cleanup
        pybaseball.cache.purge()
|
from setuptools import setup
# setuptools packaging metadata for the Axelrod library (reproduces the
# Axelrod iterated prisoner's dilemma tournament).
setup(
    name='Axelrod',
    version='0.0.25',
    author='Vince Knight, Owen Campbell, Karol Langner, Marc Harper',
    author_email=('axelrod-python@googlegroups.com'),
    packages=['axelrod', 'axelrod.strategies', 'axelrod.tests'],
    scripts=['run_axelrod'],
    url='http://axelrod.readthedocs.org/',
    license='The MIT License (MIT)',
    description='Reproduce the Axelrod iterated prisoners dilemma tournament',
)
|
#****************************************************************************************
# Designed by - Guilherme Maurer
# Miguel Xavier
# Plinio Silveira
# Yago Liborio
# Pontifical Catholic University of Rio Grande do Sul
#
# GenerateMAC - A script that calls functions to generate MAC for Model and Input
#
#****************************************************************************************
from Factory import Factory
import sys
if __name__ == "__main__":
    # Usage: python GenerateMAC.py <cypher_hash> <path>
    cypher_hash = sys.argv[1]  # credential used to authenticate against the Factory
    path = sys.argv[2]         # path the MACs are generated for
    f = Factory()
    # Authenticate, then generate MACs for both the model and its input.
    puma_cypher_hash = f.auth(cypher_hash)
    puma_cypher_hash.generateMACModel(path)
    puma_cypher_hash.generateMACInput(path)
import argparse
import errno
import os
import ncbitax
def mkdir_p(dirpath):
    ''' Verify that the directory given exists, and if not, create it.

    Behaves like ``mkdir -p``: missing parent directories are created and
    an already-existing directory is not an error. OSError is still raised
    when the path exists as a non-directory, or creation fails for any
    other reason (e.g. permissions).
    '''
    # exist_ok=True replaces the manual EEXIST/isdir dance: it succeeds
    # silently for an existing directory and still raises FileExistsError
    # (an OSError) when the path exists but is not a directory.
    os.makedirs(dirpath, exist_ok=True)
def file_lines(filename):
    """Yield each line (newline included) of *filename*.

    Yields nothing when *filename* is None, so callers can pass optional
    file arguments straight through.
    """
    if filename is None:
        return
    with open(filename) as handle:
        yield from handle
def add_parser_subset_taxonomy(subparsers):
    """Register the 'subset' subcommand and its arguments on *subparsers*.

    Returns the configured sub-parser. Parsed arguments dispatch to
    main_subset_taxonomy via the ``func`` default.
    """
    parser = subparsers.add_parser('subset', help='Subset NCBI taxonomy db files by taxid.')
    parser.add_argument(
        "tax_db", metavar='tax-db',
        help="Taxonomy database directory (containing nodes.dmp, parents.dmp etc.)",
    )
    parser.add_argument(
        "output_db", metavar='output-db',
        help="Output taxonomy database directory",
    )
    parser.add_argument(
        "--whitelist-taxids",
        help="List of taxids to add to taxonomy (with parents)",
        nargs='+', type=int
    )
    parser.add_argument(
        "--whitelist-taxid-file",
        help="File containing taxids - one per line - to add to taxonomy with parents.",
    )
    parser.add_argument(
        "--whitelist-tree-taxids",
        help="List of taxids to add to taxonomy (with parents and children)",
        nargs='+', type=int
    )
    parser.add_argument(
        "--whitelist-tree-taxid-file",
        help="File containing taxids - one per line - to add to taxonomy with parents and children.",
    )
    parser.add_argument(
        "--whitelist-gi-file",
        help="File containing GIs - one per line - to add to taxonomy with nodes.",
    )
    parser.add_argument(
        "--whitelist-accession-file",
        help="File containing accessions - one per line - to add to taxonomy with nodes.",
    )
    parser.add_argument(
        "--skip-gi", action='store_true',
        help="Skip GI to taxid mapping files"
    )
    parser.add_argument(
        "--skip-accession", action='store_true',
        help="Skip accession to taxid mapping files"
    )
    parser.add_argument(
        # Bug fix: this flag used action='store_false' (default True), which
        # made main_subset_taxonomy's `strip_version = not no_strip_version`
        # default to False and *enable* stripping when --no-strip-version was
        # passed — the exact inverse of the flag's meaning. store_true gives
        # strip-by-default, with the flag disabling stripping.
        "--no-strip-version", action='store_true',
        help="Don't strip numerical version suffix on accessions."
    )
    parser.add_argument(
        "--skip-dead-accession", action='store_true',
        help="Skip dead accession to taxid mapping files"
    )
    parser.set_defaults(func=main_subset_taxonomy)
    return parser
def main_subset_taxonomy(args):
    """CLI entry point: derive strip_version from the negation flag, then
    run the subset with all parsed options as keyword arguments."""
    options = vars(args)
    # vars() returns the namespace's own dict, so this also sets
    # args.strip_version, exactly like the original attribute assignment.
    options['strip_version'] = not options['no_strip_version']
    subset_taxonomy(**options)
def subset_taxonomy(tax_db=None, output_db=None, whitelist_taxids=None, whitelist_taxid_file=None,
                    whitelist_tree_taxids=None, whitelist_tree_taxid_file=None,
                    whitelist_gi_file=None, whitelist_accession_file=None,
                    skip_gi=None, skip_accession=None, skip_dead_accession=None,
                    strip_version=None, **kwargs):
    '''
    Generate a subset of the taxonomy db files filtered by the whitelist. The
    whitelist taxids indicate specific taxids plus their parents to add to
    taxonomy while whitelist_tree_taxids indicate specific taxids plus both
    parents and all children taxa. Whitelist GI and accessions can only be
    provided in file form and the resulting gi/accession2taxid files will be
    filtered to only include those in the whitelist files. Finally, taxids +
    parents for the gis/accessions will also be included.
    '''
    # Default to stripping the ".N" version suffix from accessions.
    strip_version = strip_version if strip_version is not None else True
    mkdir_p(os.path.join(output_db, 'accession2taxid'))
    db = ncbitax.TaxonomyDb(tax_dir=tax_db, load_nodes=True)
    # Taxids whose parent chains should be retained.
    taxids = set()
    if whitelist_taxids is not None:
        taxids.update(set(whitelist_taxids))
    # file_lines() yields nothing for None, so this is safe unconditionally.
    taxids.update((int(x) for x in file_lines(whitelist_taxid_file)))
    # Taxids whose whole subtree (children) should also be retained.
    tree_taxids = set()
    if whitelist_tree_taxids is not None:
        tree_taxids.update(set(whitelist_tree_taxids))
    # NOTE(review): this adds the tree-file taxids to `taxids`
    # (parents-only handling) rather than `tree_taxids` — looks like a
    # copy/paste bug; confirm the intended target set.
    taxids.update((int(x) for x in file_lines(whitelist_tree_taxid_file)))
    keep_taxids = set(ncbitax.collect_parents(db.parents, taxids))
    if tree_taxids:
        # NOTE(review): parents_to_children/collect_children are referenced
        # unqualified while other helpers use the ncbitax. prefix; unless
        # they are imported elsewhere in this module this branch raises
        # NameError — verify.
        db.children = parents_to_children(db.parents)
        children_taxids = collect_children(db.children, tree_taxids)
        keep_taxids.update(children_taxids)
    # Taxids kept based on GI or Accession. Get parents afterwards to not pull in all GIs/accessions.
    keep_seq_taxids = set()

    def filter_file(path, sep='\t', taxid_column=0, gi_column=None, a2t=False, header=False):
        # Copy `path` from the source db into the output db, keeping only
        # rows whose taxid (or whitelisted GI/accession) should be retained.
        # Closes over keep_taxids/keep_seq_taxids and over gis/accessions,
        # which are bound later but before the relevant calls.
        input_path = os.path.join(db.tax_dir, path)
        output_path = os.path.join(output_db, path)
        input_path = ncbitax.maybe_compressed(input_path)
        with ncbitax.compressed_open(input_path, 'rt') as f, \
                ncbitax.compressed_open(output_path, 'wt') as out_f:
            if header:
                # Pass the header row through untouched.
                out_f.write(next(f))
            for line in f:
                parts = line.split(sep)
                taxid = int(parts[taxid_column])
                if gi_column is not None:
                    gi = int(parts[gi_column])
                    if gi in gis:
                        # Remember the taxid so its parents get kept too.
                        keep_seq_taxids.add(taxid)
                        out_f.write(line)
                        continue
                if a2t:
                    accession = parts[accession_column_i]
                    if strip_version:
                        accession = accession.split('.', 1)[0]
                    if accession in accessions:
                        keep_seq_taxids.add(taxid)
                        out_f.write(line)
                        continue
                if taxid in keep_taxids:
                    out_f.write(line)

    if not skip_gi:
        gis = set(int(x) for x in file_lines(whitelist_gi_file))
        filter_file('gi_taxid_nucl.dmp', taxid_column=1, gi_column=0)
        filter_file('gi_taxid_prot.dmp', taxid_column=1, gi_column=0)
    if not skip_accession:
        if strip_version:
            accessions = set(x.strip().split('.', 1)[0] for x in file_lines(whitelist_accession_file))
            accession_column_i = 0
        else:
            accessions = set(file_lines(whitelist_accession_file))
            accession_column_i = 1
        acc_dir = os.path.join(db.tax_dir, 'accession2taxid')
        acc_paths = []
        for fn in os.listdir(acc_dir):
            if fn.endswith('.accession2taxid') or fn.endswith('.accession2taxid.gz'):
                # Optionally skip the dead_* mapping files.
                if skip_dead_accession and fn.startswith('dead_'):
                    continue
                acc_paths.append(os.path.join(acc_dir, fn))
        for acc_path in acc_paths:
            filter_file(os.path.relpath(acc_path, db.tax_dir), taxid_column=2, header=True, a2t=True)
    # Add in taxids found from processing GI/accession
    keep_seq_taxids = ncbitax.collect_parents(db.parents, keep_seq_taxids)
    keep_taxids.update(keep_seq_taxids)
    filter_file('nodes.dmp', sep='|')
    filter_file('names.dmp', sep='|')
    filter_file('merged.dmp')
    filter_file('delnodes.dmp')
|
import torch
from transformers import *
# Transformers has a unified API
# for 8 transformer architectures and 30 pretrained weights.
# Model | Tokenizer | Pretrained weights shortcut
# Model class | Tokenizer class | Pretrained weights shortcut
MODELS = [(BertModel, BertTokenizer, 'bert-base-uncased'),
          (OpenAIGPTModel, OpenAIGPTTokenizer, 'openai-gpt'),
          (GPT2Model, GPT2Tokenizer, 'gpt2'),
          (CTRLModel, CTRLTokenizer, 'ctrl'),
          (TransfoXLModel, TransfoXLTokenizer, 'transfo-xl-wt103'),
          (XLNetModel, XLNetTokenizer, 'xlnet-base-cased'),
          (XLMModel, XLMTokenizer, 'xlm-mlm-enfr-1024'),
          (DistilBertModel, DistilBertTokenizer, 'distilbert-base-uncased'),
          (RobertaModel, RobertaTokenizer, 'roberta-base')]
# NOTE: the full list above is immediately overridden — only OpenAI GPT is
# actually exercised below. Remove this line to run every architecture.
MODELS = [(OpenAIGPTModel, OpenAIGPTTokenizer, 'openai-gpt'),]
# To use TensorFlow 2.0 versions of the models, prefix the class names with
# 'TF', e.g. `TFRobertaModel` is the TF 2.0 counterpart of `RobertaModel`.
# Encode some text into a sequence of hidden states with each model:
for model_class, tokenizer_class, pretrained_weights in MODELS:
    # Load the pretrained tokenizer (OpenAIGPTTokenizer is a byte-pair
    # encoding tokenizer) and its matching pretrained model.
    tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
    model = model_class.from_pretrained(pretrained_weights)
    # Encode text. add_special_tokens inserts [CLS], [SEP], <s>... as each
    # model requires. tokenizer.encode returns a plain Python list, so it is
    # wrapped in torch.tensor to form a (1, seq_len) input batch.
    input_ids = torch.tensor([tokenizer.encode("Here is some text to encode", add_special_tokens=True)])
    with torch.no_grad():
        # Model outputs are tuples; element 0 is the last self-attention
        # block's hidden states. Passing output_hidden_states=True to
        # from_pretrained makes the model additionally return a tuple of
        # per-layer hidden states (the embedding layer plus one tensor per
        # self-attention block); output_attentions=True further appends a
        # tuple of per-layer attention matrices, one tensor per block whose
        # leading dimensions are (batch, num_heads, ...).
        last_hidden_states = model(input_ids)[0]
    print(last_hidden_states.size())
# Each architecture provides several classes for fine-tuning on
# down-stream tasks, e.g. for BERT:
BERT_MODEL_CLASSES = [BertModel, BertForPreTraining, BertForMaskedLM, BertForNextSentencePrediction,
                      BertForSequenceClassification, BertForTokenClassification, BertForQuestionAnswering]
# All the classes for an architecture can be initiated from pretrained
# weights for this architecture. Note that additional weights added for
# fine-tuning are only initialized and need to be trained on the
# down-stream task.
pretrained_weights = 'bert-base-uncased'
tokenizer = BertTokenizer.from_pretrained(pretrained_weights)
for model_class in BERT_MODEL_CLASSES:
    # Load pretrained model/tokenizer
    model = model_class.from_pretrained(pretrained_weights)
    # Models can return full list of hidden-states & attentions weights at
    # each layer when asked to at construction time.
    model = model_class.from_pretrained(pretrained_weights,
                                        output_hidden_states=True,
                                        output_attentions=True)
    input_ids = torch.tensor([tokenizer.encode("Let's see all hidden-states and attentions on this text")])
    all_hidden_states, all_attentions = model(input_ids)[-2:]
    # Models are compatible with TorchScript tracing.
    model = model_class.from_pretrained(pretrained_weights, torchscript=True)
    traced_model = torch.jit.trace(model, (input_ids,))
    # Simple serialization for models and tokenizers
    # (the target directory must already exist — TODO confirm for this
    # transformers version).
    model.save_pretrained('./directory/to/save/')  # save
    model = model_class.from_pretrained('./directory/to/save/')  # re-load
    tokenizer.save_pretrained('./directory/to/save/')  # save
    tokenizer = BertTokenizer.from_pretrained('./directory/to/save/')  # re-load
# See the transformers repository for SOTA examples: GLUE, SQuAD, text generation...
|
import numpy as np
import matplotlib.pyplot as plt
def load_data():
m = 400
N = int(m/2)
D = 2
X = np.zeros((m,D))
y = np.zeros((m,1), dtype='uint8')
a = 4
# Using mathematical functions to generate petals like structure
for j in range(2):
ix = range(N*j,N*(j+1))
t = np.linspace(j*3.12,(j+1)*3.12,N) + np.random.randn(N)*0.2
r = a*np.sin(4*t) + np.random.randn(N)*0.2
X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]
y[ix] = j
X = X.T
y = y.T
return X, y
def plot_planar_data(X, y):
    """Scatter-plot the planar dataset: X is (2, m) coordinates, y the
    per-point class labels used to color the markers."""
    plt.style.use('fivethirtyeight')
    xs, ys = X[0, :], X[1, :]
    plt.scatter(xs, ys, c=y, s=50, cmap=plt.cm.Spectral)
    plt.show()
from warpkern import WarpkernPhy
import wiringpi
from typing import List
import random
import math
from time import time
from threading import Thread
import numpy as np
from cffi import FFI
def writeData(data):
    """Write one chunk of LED frame data over SPI channel 0 via the
    wiringpi Python bindings (unused by PiPhy, which talks to the C
    library directly)."""
    wiringpi.wiringPiSPIDataRW(0, data) # write the last chunk
def floatToByte(val: float) -> int:
    """Convert a nominally 0.0-1.0 float sample to a byte, clamped to [0, 255]."""
    scaled = int(val * 255)
    if scaled < 0:
        return 0
    return 255 if scaled > 255 else scaled
class PiPhy(WarpkernPhy):
    """Raspberry Pi SPI output backend for warpkern.

    Talks to the wiringPi C library directly through cffi instead of the
    wiringpi Python bindings (see the commented-out calls below), so the
    LED frame buffer can be handed over as a raw pointer without per-byte
    marshalling.
    """

    def __init__(self):
        #wiringpi.wiringPiSetup()
        #wiringpi.wiringPiSPISetup(0, 4800000)
        self.thread = None
        self.ffi = FFI()
        # Declare only the wiringPi SPI entry points we call.
        self.ffi.cdef("""
        int wiringPiSetup (void) ;
        int wiringPiSPIGetFd (int channel) ;
        int wiringPiSPIDataRW (int channel, unsigned char *data, int len) ;
        int wiringPiSPISetupMode (int channel, int speed, int mode) ;
        int wiringPiSPISetup (int channel, int speed) ;
        """)
        self._wiringpi = self.ffi.dlopen("/usr/lib/libwiringPi.so")
        self.chan = self.ffi.cast("int", 0)  # SPI channel 0
        # C-level arguments cached lazily on the first pushData() call.
        self.dlen = None
        self.dataptr = None
        self._wiringpi.wiringPiSetup()
        # 4.5MHz SPI clock.
        self._wiringpi.wiringPiSPISetup(self.chan, self.ffi.cast("int", 4500000))

    def pushData(self, data: np.array):
        """Write one frame of bytes to the SPI bus.

        NOTE(review): the length and data pointer are cached from the first
        call only — this assumes every subsequent frame reuses the same
        ndarray (same buffer address and length); confirm callers do so.
        """
        if self.dlen is None:
            self.dlen = self.ffi.cast("int", len(data))
        if self.dataptr is None:
            self.dataptr = self.ffi.cast("unsigned char *", data.ctypes.data)
        self._wiringpi.wiringPiSPIDataRW(self.chan, self.dataptr, self.dlen)
|
# Copyright 2020 Miljenko Šuflaj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Tuple
import numpy as np
def get_random_params(range_dict: Dict[str, Tuple[Any, Any]],
force_dict: Dict[str, Tuple] = None,
amount: int = 1) -> List[Dict[str, Any]]:
"""
Generates random parameters.
:param range_dict:
A Dict[str, Tuple[Any, Any]] mapping parameter names to their numerical
ranges.
:param force_dict:
(Optional) A Dict[str, Tuple] mapping parameter names to all their
possible values. For every value a copy of the range_dict-generated
parameter dict is created (or in other words, every possibility in a
force dict branches into a version which has the range_dict-generated
parameters as base. Defaults to None.
:param amount:
(Optional) A int representing the number of random samples drawn when
generating range_dict based parameters. Defaults to 1.
:return:
A List[Dict[str, Any]] containing the list of different parameter
configurations randomly generated given the ranges passed as arguments.
"""
params_list = list()
if range_dict is not None:
for _ in range(amount):
params = dict()
for key, value in range_dict.items():
if isinstance(value[0], int):
params[key] = np.random.randint(*value)
elif isinstance(value[0], float):
params[key] = np.random.uniform(*value)
params_list.append(params)
if force_dict is not None:
for key, value in force_dict.items():
t_params_list = list()
for subvalue in value:
for params in params_list:
params[key] = subvalue
t_params_list.append(dict(params))
params_list = list(t_params_list)
return params_list
|
import tensorflow as tf
import cifar10
import os
import argparse
import cifar10,cifar10_input
import numpy as np
import math
width = 24
height = 24
FLAGS = cifar10.FLAGS
def load_graph(frozen_graph_pb):
    """Deserialize a frozen TF1 GraphDef file and return a new tf.Graph
    with its nodes imported under the 'prefix' name scope."""
    with tf.gfile.GFile(frozen_graph_pb, 'rb') as pb_file:
        serialized = pb_file.read()
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(serialized)
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name='prefix')
    return graph
def test_from_pbmodel():
    """Load the frozen graph and prepare a single local JPEG for inference.

    NOTE(review): the commented-out block at the end is the actual
    inference loop; as written this function only builds the session and
    fetches the tensors, then returns None — confirm intent.
    """
    print("test from pb...")
    session = tf.Session()
    # Decode the local image, add a batch dimension, and materialize it.
    images_test = readFromLocal()
    images_test = tf.expand_dims(images_test, 0)
    images_test = session.run(images_test)
    print(images_test)
    parser = argparse.ArgumentParser()
    parser.add_argument("--frozen_model_filename", default='ckpt_pb_model.pb',
                        type=str, help='Frozen model file to import')
    args = parser.parse_args()
    graph = load_graph(args.frozen_model_filename)
    print(graph)
    for op in graph.get_operations():
        print(op.name, op.values())
    # Fetch the prediction op and the input placeholders by name.
    top_k_op = graph.get_tensor_by_name("prefix/top_k_op/top_k_op:0")
    session = tf.Session(graph=graph)
    x = graph.get_tensor_by_name("prefix/input_x:0")
    y = graph.get_tensor_by_name("prefix/input_y:0")
    # for i in range(0,10):
    #     out = session.run(top_k_op, feed_dict={x: images_test, y:[i]}) #(?,)
    #     print(out)
    #     true_count = np.sum(out)
    #     precision = true_count / 1
    #     print('precision @ 1 =%.3f' % precision)
    # return out
def readFromLocal():
    """Read predicateImage/icon.jpg and decode it into a 3-channel (RGB)
    image tensor, printing the intermediate types for debugging."""
    raw_contents = tf.read_file('predicateImage/icon.jpg')
    decoded = tf.image.decode_jpeg(raw_contents, channels=3)
    print(type(raw_contents))  # file contents tensor
    print(type(decoded))       # decoded image tensor
    return decoded
def test():
    """Evaluate the frozen CIFAR-10 graph on the binary test set and print
    top-1 precision.

    NOTE(review): cifar10_input.inputs builds a TF1 input queue; calling
    .eval() without tf.train.start_queue_runners will block, and the same
    single batch is reused for every iteration of the loop — confirm both
    are intended.
    """
    print("test from pb...")
    data_dir = '/tmp/cifar10_data/cifar-10-batches-bin'  # path to the CIFAR-10 binary data
    batch_size = 128
    images_test, labels_test = cifar10_input.inputs(eval_data=True, data_dir=data_dir, batch_size=batch_size)
    print(images_test)
    parser = argparse.ArgumentParser()
    parser.add_argument("--frozen_model_filename", default='ckpt_pb_model.pb', type=str, help='Frozen model file to import')
    args = parser.parse_args()
    graph = load_graph(args.frozen_model_filename)
    print(graph)
    for op in graph.get_operations():
        print(op.name, op.values())
    # Fetch the prediction op and input placeholders by name.
    top_k_op = graph.get_tensor_by_name("prefix/top_k_op/top_k_op:0")
    x = graph.get_tensor_by_name("prefix/input_x:0")
    y = graph.get_tensor_by_name("prefix/input_y:0")
    num_examples = 10000
    num_iter = int(math.ceil(num_examples / batch_size))
    true_count = 0  # running count of correct top-1 predictions
    total_sample_count = num_iter * batch_size
    step = 0
    session = tf.Session(graph=graph)
    image_batch = images_test.eval(session=session)
    label_batch = labels_test.eval(session=session)
    while step < num_iter:
        predictions = session.run([top_k_op], feed_dict={x: image_batch, y: label_batch})
        true_count += np.sum(predictions)
        step += 1
    precision = true_count / total_sample_count
    print('precision @ 1 =%.3f' % precision)

# test() returns None; printed here for parity with the original script.
print(test())
from .io import savefig, OUT_PATH, FIG_PATH, get_out_dir |
# ============================================================================
# This file contains a list of functions use to manipulate queries.
#
# Authors: Peter Williams, Tom Maullin, Camille Maumet (09/01/2018)
# ============================================================================
import os
# This function takes as input a query and returns a query list.
def add_query_to_list(query):
    """Flatten a query result (an iterable of rows) into a flat list with
    every cell rendered as a string."""
    flattened = []
    for row in query:
        flattened.extend("%s" % cell for cell in row)
    return flattened
# Function for printing the results of a query.
def print_query(query):
    """Print each row of a query result as comma-separated values.

    Supports rows of one to three columns; anything else prints an error
    line instead.
    """
    for row in query:
        width = len(row)
        if width == 1:
            print("%s" % row)
        elif width == 2:
            print("%s, %s" % row)
        elif width == 3:
            print("%s, %s, %s" % row)
        else:
            print("Error, not a suitable length")
# This function runs a query of either queryType 'Ask' or 'Select'. Filters
# can be added also.
def run_query(graph, queryFile, queryType, filters={}):
    """Run a stored query of queryType 'Ask' or 'Select' against *graph*.

    The query text is read from <queryFile>.txt next to this module; any
    '{{{name}}}' placeholders are substituted from *filters* before the
    query is executed.

    Returns True/False for 'Ask' queries (truthiness of the last returned
    row; False when nothing is returned) and a flat list of strings for
    'Select' queries; returns None for any other queryType.
    """
    # Read the query text; `with` guarantees the file handle is closed
    # even if reading raises.
    query_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                              queryFile + '.txt')
    with open(query_path) as query_handle:
        queryText = query_handle.read()
    # If there are any filters specified add them to the query.
    for fil in filters:
        queryText = queryText.replace('{{{' + fil + '}}}', filters[fil])
    # Run the query.
    queryOutput = graph.query(queryText)
    # If we are asking we only want True if something truthy was returned.
    if queryType == 'Ask':
        # Bug fix: queryResult was previously unbound when the query
        # returned no rows, raising UnboundLocalError; an empty result now
        # yields False.
        queryResult = False
        for row in queryOutput:
            queryResult = row
        return bool(queryResult)
    # If we are selecting we want a list of outputs.
    if queryType == 'Select':
        return(add_query_to_list(queryOutput))
|
#!/usr/bin/env python
import numpy as np
import math
import os
import sys
import pickle
#import collections
#import types
#import traceback
#import pprint
#pp = pprint.PrettyPrinter(indent=4)
from sklearn.preprocessing import LabelEncoder
import pcl
# ROS imports
import rospy
#import tf
#from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
#from geometry_msgs.msg import Pose
import sensor_msgs.point_cloud2 as pc2
from sensor_stick.srv import GetNormals
#from sensor_stick.marker_tools import *
import sensor_stick.marker_tools as marker_tools
from sensor_stick.msg import DetectedObjectsArray
from sensor_stick.msg import DetectedObject
from visualization_msgs.msg import Marker
# Local imports
import pcl_helper
#import sensor_stick.pcl_helper
import pclproc
#====================== GLOBALS =====================
# Module-level state, populated at node startup and read from the
# subscription callback. As the original author notes, this module wants
# to be a class.
g_pcl_sub = None                # ROS subscriber for the raw point cloud topic
g_pcl_objects_pub = None        # publisher: segmented objects cloud
g_pcl_table_pub = None          # publisher: table-plane cloud
g_pcl_cluster_pub = None        # publisher: colored clusters cloud
g_object_markers_pub = None     # publisher: RViz text markers for labels
g_detected_objects_pub = None   # publisher: DetectedObjectsArray
g_model = None                  # unpickled classifier bundle
g_clf = None                    # trained classifier used by Process_rawPCL
g_encoder = None                # LabelEncoder mapping class indices to names
g_scaler = None                 # feature scaler applied before prediction
g_callBackCount = -1            # incremented on every message; -1 so the first one processes
g_callBackSkip = 40 # How many callbacks to skip until actual processing. Default is 0
# For debug testing only
g_doRunRosNode = True # For invoking RunRosNode() when run from pycharm
g_doTests = False # Invokes Test_Process_msgPCL() when file is run
g_testmsgPCLFilename = "./Assets/msgPCL" # + "num..pypickle" # File containing a typical Ros msgPCL, used by doTests
g_testrawPCLFilename = "./Assets/rawPCL" # + "num.pypickle" # File containing a typical rawPCL as unpacked by pcl_helper, used by doTests
g_dumpCountTestmsgPCL = 0 # How many debug msgPCL files to dump. Normally 0
g_dumpCountTestrawPCL = 0 # How many debug rawPCL files to dump. Normally 0
#--------------------------------- get_normals()
def get_normals(cloud):
    """Call the /feature_extractor/get_normals ROS service and return the
    surface-normal cloud for the given point cloud message."""
    get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)
    return get_normals_prox(cloud).cluster
#--------------------------------- ProcessPCL()
def Process_rawPCL(pclpcRawIn):
    """Run the full perception pipeline on a raw PCL point cloud.

    Steps: voxel downsample -> RANSAC split (table plane vs objects) ->
    Euclidean clustering -> per-cluster color/normal histogram features ->
    classifier prediction. Returns (labelRecs, objects, table, clusters)
    where labelRecs is a list of (label, label_pos, index) tuples.
    """
    DebugDumpMsgPCL(pclpcRawIn)
    pclRecs = []  # For dev/debug display. Container for point cloud records: tuples of (pclObj, pclName)
    pclRecs.append((pclpcRawIn, "pclpcRawIn"))
    pclRecsDownSampled = pclproc.PCLProc_DownSampleVoxels(pclpcRawIn)
    pclRecs += pclRecsDownSampled
    pclpcDownSampled, pclpcDownSampledName = pclRecsDownSampled[0]
    # RANSAC segmentation; the returned records are the pass-z cloud plus
    # its inliers and outliers.
    pclRecsRansac = pclproc.PCLProc_Ransac(pclpcDownSampled)
    pclRecs += pclRecsRansac
    # Extract inliers (table plane) and outliers (objects above it).
    pclpcPassZ, pclpcPassZIn, pclpcPassZOut = pclRecsRansac[0][0], pclRecsRansac[1][0], pclRecsRansac[2][0]
    pclpcTable, pclpcObjects = pclpcPassZIn, pclpcPassZOut  # Rename for clarity
    # Euclidean clustering operates on the colorless XYZ cloud.
    pclpObjectsNoColor = pcl_helper.XYZRGB_to_XYZ(pclpcObjects)
    clusterIndices, pclpcClusters = pclproc.PCLProc_ExtractClusters(pclpObjectsNoColor)
    labelRecs = []
    for index, pts_list in enumerate(clusterIndices):
        # Get points for a single object in the overall cluster
        pcl_cluster = pclpcObjects.extract(pts_list)
        msgPCL_cluster = pcl_helper.pcl_to_ros(pcl_cluster)  # Needed for histograms... would refactor
        # Extract color and surface-normal histogram features.
        chists = pclproc.compute_color_histograms(msgPCL_cluster, doConvertToHSV=True)
        normals = get_normals(msgPCL_cluster)
        nhists = pclproc.compute_normal_histograms(normals)
        feature = np.concatenate((chists, nhists))
        # Classify: scale the feature vector, predict, then map the class
        # index back to its string label.
        prediction = g_clf.predict(g_scaler.transform(feature.reshape(1, -1)))
        label = g_encoder.inverse_transform(prediction)[0]
        # Accumulate label records for publishing (and labeling detected
        # objects); the marker is raised 0.3 above the cluster's first point.
        label_pos = list(pclpcObjects[pts_list[0]])
        label_pos[2] += 0.3
        labelRecs.append((label, label_pos, index))
    return labelRecs, pclpcObjects, pclpcTable, pclpcClusters
#--------------------------------- CB_msgPCL()
def CB_msgPCL(msgPCL):
    """
    ROS "/sensor_stick/point_cloud" subscription Callback handler.

    Handles the PointCloud ROS msg received on "/sensor_stick/point_cloud".
    This function is almost entirely unpacking/packing ROS messages and
    publishing. The unpacked input pcl is processed by Process_rawPCL(),
    which returns the values that need to be packed and published.

    :param msgPCL: sensor_msgs/PointCloud2 message from the camera topic
    :return: None (results are published on the g_* publishers)
    """
    global g_callBackCount
    g_callBackCount += 1
    # Throttle: only process every g_callBackSkip-th callback to keep up with the stream
    if (g_callBackCount % g_callBackSkip != 0):
        return;
    print "\rCBCount= {:05d}".format(g_callBackCount),
    sys.stdout.flush()
    DebugDumpMsgPCL(msgPCL)
    # Extract pcl Raw from Ros msgPCL
    pclpcRawIn = pcl_helper.ros_to_pcl(msgPCL)
    #------- PROCESS RAW PCL-------------------------
    labelRecs, pclpcObjects, pclpcTable, pclpcClusters = Process_rawPCL(pclpcRawIn)
    detected_objects_labels = [] # For ros loginfo only
    detected_objects = [] # For publish - for PROJ3!
    for (labelText, labelPos, labelIndex) in labelRecs:
        detected_objects_labels.append(labelText)
        # Publish a text marker at the object's position
        g_object_markers_pub.publish(marker_tools.make_label(labelText, labelPos, labelIndex ))
        # Add detected object to the list of detected objects.
        do = DetectedObject()
        do.label = labelText
        # NOTE(review): every DetectedObject gets the whole cluster cloud here;
        # presumably the individual object's cluster was intended -- confirm.
        do.cloud = pclpcClusters
        detected_objects.append(do)
    rospy.loginfo('Detected {} objects: {}'.format(len(detected_objects_labels), detected_objects_labels))
    # Package Processed pcls into Ros msgPCL
    msgPCLObjects = pcl_helper.pcl_to_ros(pclpcObjects)
    msgPCLTable = pcl_helper.pcl_to_ros(pclpcTable)
    msgPCLClusters = pcl_helper.pcl_to_ros(pclpcClusters)
    # Publish everything
    # This is the output you'll need to complete the upcoming project!
    g_detected_objects_pub.publish(detected_objects) # THIS IS THE CRUCIAL STEP FOR PROJ3
    g_pcl_objects_pub.publish(msgPCLObjects)
    g_pcl_table_pub.publish(msgPCLTable)
    g_pcl_cluster_pub.publish(msgPCLClusters)
#====================== Main() =====================
def RunRosNode():
    '''
    ROS clustering/segmentation node initialization.

    Creates the point-cloud subscriber and the result publishers, loads the
    pre-trained SVM classifier from 'model.sav' (in the current working
    directory), then blocks in rospy.spin() until shutdown.
    '''
    print("ROS clustering/segmentation node initializatiing...")
    # All handles are module globals because the subscriber callback and the
    # processing pipeline need them outside this function's scope.
    global g_pcl_sub
    global g_pcl_objects_pub
    global g_pcl_table_pub
    global g_pcl_cluster_pub
    global g_object_markers_pub
    global g_detected_objects_pub
    global g_model
    global g_clf
    global g_encoder
    global g_scaler
    rospy.init_node('clustering', anonymous=True)
    # Create Subscribers (queue_size=1: always work on the freshest cloud)
    g_pcl_sub = rospy.Subscriber("/sensor_stick/point_cloud", pc2.PointCloud2, CB_msgPCL, queue_size=1)
    # Create Publishers
    g_pcl_objects_pub = rospy.Publisher("/pcl_objects", pcl_helper.PointCloud2, queue_size=1)
    g_pcl_table_pub = rospy.Publisher("/pcl_table", pcl_helper.PointCloud2, queue_size=1)
    g_pcl_cluster_pub = rospy.Publisher("/pcl_cluster", pcl_helper.PointCloud2, queue_size=1)
    g_object_markers_pub = rospy.Publisher("/object_markers", Marker, queue_size=8)
    g_detected_objects_pub = rospy.Publisher("/detected_objects", DetectedObjectsArray, queue_size=1)
    # Load Model From disk: dict with 'classifier', 'classes' and 'scaler' entries
    g_model = pickle.load(open('model.sav', 'rb'))
    g_clf = g_model['classifier']
    g_encoder = LabelEncoder()
    g_encoder.classes_ = g_model['classes']
    g_scaler = g_model['scaler']
    # Initialize color_list used by pcl_helper when coloring clusters
    pcl_helper.get_color_list.color_list = []
    # rospy.spin() blocks until shutdown; the while-loop guard re-enters it defensively
    while not rospy.is_shutdown():
        print("ROS clustering/segmentation node running")
        rospy.spin()
###################################### TESTS ###########################
###################################### TESTS ###########################
###################################### TESTS ###########################
def DebugDumpRawPCL(pclpcRawIn):
    """Dev/debug helper: pickle the next few raw PCL clouds to disk.

    Decrements the module-level g_dumpCountTestrawPCL counter on each call;
    once it reaches zero no more dumps are written.

    :param pclpcRawIn: raw pcl point cloud object to serialize
    """
    global g_dumpCountTestrawPCL
    # DevDebug save rawPCL to file for debug
    if (g_dumpCountTestrawPCL > 0):
        g_dumpCountTestrawPCL -= 1
        fileNameOut = g_testrawPCLFilename + str(g_dumpCountTestrawPCL) + ".pypickle"
        # FIX: use a context manager so the file handle is closed deterministically
        # (the original passed an anonymous open() to pickle.dump and leaked it).
        with open(fileNameOut, "wb") as fOut:
            pickle.dump(pclpcRawIn, fOut)
def DebugDumpMsgPCL(msgPCL):
    """Dev/debug helper: pickle the next few ROS PointCloud2 msgs to disk.

    Decrements the module-level g_dumpCountTestmsgPCL counter on each call;
    once it reaches zero no more dumps are written.

    :param msgPCL: ROS message object to serialize
    """
    global g_dumpCountTestmsgPCL
    # DevDebug save msgPCL to file for debug
    if (g_dumpCountTestmsgPCL > 0):
        g_dumpCountTestmsgPCL -= 1
        fileNameOut = g_testmsgPCLFilename + str(g_dumpCountTestmsgPCL) + ".pypickle"
        # FIX: use a context manager so the file handle is closed deterministically
        # (the original passed an anonymous open() to pickle.dump and leaked it).
        with open(fileNameOut, "wb") as fOut:
            pickle.dump(msgPCL, fOut)
#--------------------------------- Test_Process_rawPCL()
def Test_Process_rawPCL():
    """Offline test: load a previously pickled raw cloud and run the pipeline.

    BUGFIX: Process_rawPCL() returns FOUR values (labelRecs, objects, table,
    clusters); the original unpacked only three, which raised a ValueError
    before any processing result could be inspected. Also closes the input
    file deterministically via a context manager.
    """
    dumpIndex = 0
    fileNameIn = g_testrawPCLFilename + str(dumpIndex) + ".pypickle"
    with open(fileNameIn, "rb") as fIn:
        pclpcRawIn = pickle.load(fIn)
    labelRecs, pclpcObjects, pclpcTable, pclpcClusters = Process_rawPCL(pclpcRawIn)
# ============ Auto invoke Test_PCLProc_*
if (g_doTests):
Test_Process_rawPCL()
#====================== Main Invocation RunRosNode() =====================
if ((__name__ == '__main__') & g_doRunRosNode):
RunRosNode()
|
import math
import sys
sys.path.append('../')
import numpy as np
import matplotlib.pyplot as plt
from potential_applied import *
from EDP_solver import *
# main programm for linear sweep voltammetry
def main_LSV_E_red(cst_all):
    """Simulate a linear sweep voltammetry (LSV) experiment for a reduction.

    Propagates the concentration profiles of the two species in time with the
    finite-difference scheme from EDP_solver, under a linear potential ramp
    and a non-Nernstian (kinetic) boundary condition for the reduction.
    Plots concentration snapshots and the resulting voltammogram.

    :param cst_all: dict of physical/numerical constants (Nx, Nt, DM, Dx,
        Lambda, F_norm, E_i, E_ox, E_red, v, Ox, E_0_1, alpha, n, C_a, C_b)
    :return: numpy array I with the current computed at every time step
    """
    F_norm = cst_all["F_norm"]
    Nx = cst_all["Nx"]  # number of spatial grid points per species
    DM = cst_all["DM"]  # presumably the dimensionless diffusion number D*Dt/Dx^2 -- TODO confirm
    Dx = cst_all["Dx"]  # spatial step
    # Potential ramp E(t) and total experiment duration tk
    (E, tk) = rampe(cst_all["E_i"], cst_all["E_ox"], cst_all["E_red"], cst_all["v"], cst_all["Ox"])
    ## time step
    Dt = tk/cst_all["Nt"]
    print("DM = ", cst_all["DM"], "and lambda = ", cst_all["Lambda"])
    ## initial concentration profile: species A over [0, Nx), then species B
    C_new = np.append([cst_all["C_a"] for i in range(Nx)],[cst_all["C_b"] for i in range(Nx)])
    ## time propagation
    fig, ax = plt.subplots(3, figsize=(20, 10))
    # Constant parts of the propagation matrices (independent of t)
    (M_new_constant, M_old) = Matrix_constant_E(Nx, DM)
    I = np.array(())
    for i in range(cst_all["Nt"]):
        C_old = C_new
        t = i*Dt
        # Time-dependent boundary rows (kinetic boundary condition, reduction)
        M_new = Matrix_E_Non_Nernst_boundaries_red(M_new_constant,
                                                   t, E,
                                                   cst_all["Lambda"],
                                                   Nx,
                                                   F_norm,
                                                   cst_all["E_0_1"],
                                                   cst_all["alpha"],
                                                   cst_all["n"])
        C_new = compute_Cnew_E(M_new, M_old, C_old, cst_all["C_a"], cst_all["C_b"], Nx)
        I = np.append(I, compute_I_E(C_new, cst_all))
        # Plot ~10 concentration snapshots over the whole run
        # NOTE(review): integer division makes this a ZeroDivisionError when Nt < 10 -- confirm Nt >= 10
        if i % math.floor(cst_all["Nt"]/10) == 0:
            ax[0].plot([j*Dx for j in range(Nx)], C_new[:Nx], label= 'time = %is' %(i*Dt))
            ax[1].plot([j*Dx for j in range(Nx)], C_new[Nx:], label= 'time = %is' %(i*Dt))
    # Voltammogram: current vs applied potential
    ax[2].plot([E(i*(Dt)) for i in range(cst_all["Nt"])], I)
    ax[0].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    ax[1].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.show()
    return(I)
# main programm for cyclic staircase voltammetry
def main_CSV_E(cst_all):
    """Simulate a cyclic staircase voltammetry (CSV) experiment.

    Same propagation scheme as main_LSV_E_red, but the applied potential is
    the cyclic staircase returned by CSV() and the generic non-Nernstian
    boundary matrix is used.

    :param cst_all: dict of physical/numerical constants (see main_LSV_E_red)
    :return: numpy array I with the current computed at every time step
    """
    F_norm = cst_all["F_norm"]
    Nx = cst_all["Nx"]  # number of spatial grid points per species
    DM = cst_all["DM"]  # presumably the dimensionless diffusion number -- TODO confirm
    Dx = cst_all["Dx"]  # spatial step
    # Staircase potential E(t) and total experiment duration tk
    (E, tk) = CSV(cst_all["E_i"], cst_all["E_ox"], cst_all["E_red"], cst_all["Delta_E"], cst_all["v"])
    ## time step
    Dt = tk/cst_all["Nt"]
    print("DM = ", cst_all["DM"], "and lambda = ", cst_all["Lambda"])
    ## initial concentration profile: species A over [0, Nx), then species B
    C_new = np.append([cst_all["C_a"] for i in range(Nx)],[cst_all["C_b"] for i in range(Nx)])
    ## time propagation
    fig, ax = plt.subplots(3, figsize=(20, 10))
    # Constant parts of the propagation matrices (independent of t)
    (M_new_constant, M_old) = Matrix_constant_E(Nx, DM)
    I = np.array(())
    for i in range(cst_all["Nt"]):
        C_old = C_new
        t = i*Dt
        # Time-dependent boundary rows (kinetic boundary condition)
        M_new = Matrix_E_Non_Nernst_boundaries(M_new_constant,
                                               t, E,
                                               cst_all["Lambda"],
                                               Nx,
                                               F_norm,
                                               cst_all["E_0_1"],
                                               cst_all["alpha"],
                                               cst_all["n"])
        C_new = compute_Cnew_E(M_new, M_old, C_old, cst_all["C_a"], cst_all["C_b"], Nx)
        I = np.append(I, compute_I_E(C_new, cst_all))
        # Plot ~10 concentration snapshots over the whole run
        # NOTE(review): ZeroDivisionError when Nt < 10 -- confirm Nt >= 10
        if i % math.floor(cst_all["Nt"]/10) == 0:
            ax[0].plot([j*Dx for j in range(Nx)], C_new[:Nx], label= 'time = %is' %(i*Dt))
            ax[1].plot([j*Dx for j in range(Nx)], C_new[Nx:], label= 'time = %is' %(i*Dt))
    # Voltammogram: current vs applied potential
    ax[2].plot([E(i*(Dt)) for i in range(cst_all["Nt"])], I)
    ax[0].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    ax[1].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.show()
    return(I)
# main programm for square wave voltammetry
def main_SWV_E(cst_all):
    """Simulate a square wave voltammetry (SWV) experiment.

    The applied potential is the square wave returned by SWV(); E_sweep is
    the underlying staircase used as the x-axis of the voltammogram.
    Also plots the applied potential vs time, and warns when the time step
    is too coarse relative to the square-wave period.

    :param cst_all: dict of physical/numerical constants (see main_LSV_E_red,
        plus E_SW, Delta_E and the square-wave frequency f)
    :return: numpy array I with the current computed at every time step
    """
    F_norm = cst_all["F_norm"]
    Nx = cst_all["Nx"]  # number of spatial grid points per species
    DM = cst_all["DM"]  # presumably the dimensionless diffusion number -- TODO confirm
    Dx = cst_all["Dx"]  # spatial step
    # E(t): full square-wave potential; E_sweep(t): underlying staircase; tk: duration
    (E, E_sweep, tk) = SWV(cst_all["E_i"],
                           cst_all["E_ox"],
                           cst_all["E_red"],
                           cst_all["E_SW"],
                           cst_all["Delta_E"],
                           cst_all["f"],
                           cst_all["Ox"])
    ## time step
    Dt = tk/cst_all["Nt"]
    print("DM = ", cst_all["DM"], "and lambda = ", cst_all["Lambda"])
    print("Dt = ", Dt, "and T = 2Pi/f = ", 2*np.pi/cst_all["f"])
    # arbitrary criterion: the time step must be small compared to the SW period
    if 20*Dt > 2*np.pi/cst_all["f"]:
        print("YOU SHOULD INCREASE THE NUMBER OF TIME STEPS TO GET MEANINGFUL RESULTS !")
    ## initial concentration profile: species A over [0, Nx), then species B
    C_new = np.append([cst_all["C_a"] for i in range(Nx)],[cst_all["C_b"] for i in range(Nx)])
    ## time propagation
    fig, ax = plt.subplots(4, figsize=(10, 20))
    # Constant parts of the propagation matrices (independent of t)
    (M_new_constant, M_old) = Matrix_constant_E(Nx, DM)
    I = np.array(())
    for i in range(cst_all["Nt"]):
        C_old = C_new
        t = i*Dt
        # Time-dependent boundary rows (kinetic boundary condition)
        M_new = Matrix_E_Non_Nernst_boundaries(M_new_constant,
                                               t, E,
                                               cst_all["Lambda"],
                                               Nx,
                                               F_norm,
                                               cst_all["E_0_1"],
                                               cst_all["alpha"],
                                               cst_all["n"])
        C_new = compute_Cnew_E(M_new, M_old, C_old, cst_all["C_a"], cst_all["C_b"], Nx)
        I = np.append(I, compute_I_E(C_new, cst_all))
        # Plot ~10 concentration snapshots over the whole run
        # NOTE(review): ZeroDivisionError when Nt < 10 -- confirm Nt >= 10
        if i % math.floor(cst_all["Nt"]/10) == 0:
            ax[0].plot([j*Dx for j in range(Nx)], C_new[:Nx], label= 'time = %is' %(i*Dt))
            ax[1].plot([j*Dx for j in range(Nx)], C_new[Nx:], label= 'time = %is' %(i*Dt))
    # Voltammogram vs the staircase potential, plus applied potential vs time
    ax[2].plot([E_sweep(i*Dt) for i in range(cst_all["Nt"])], I)
    ax[3].plot([i*Dt for i in range(cst_all["Nt"])],[E(i*(Dt)) for i in range(cst_all["Nt"])])
    ax[0].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    ax[1].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.show()
    return(I)
|
from production.envs.time_calc import *
from production.envs.heuristics import *
from production.envs.resources import *
from production.envs.transport import *
import simpy
class Order(Resource):
    """An order specifies a production request.

    An order has an *id* and a sequence of *prod_steps* to fulfill. Its
    order_processing() generator is the simpy process that alternates
    transport and processing until all steps are done or a sink is reached.
    """
    def __init__(self, env, id, prod_steps, variant, statistics, parameters, resources, agents, time_calc):
        """Create an order.

        :param env: simpy environment the order lives in
        :param id: order identifier (negative ids mark initial/pre-existing orders)
        :param prod_steps: ordered list of resources/steps the order must visit
        :param variant: product variant of this order
        :param statistics / parameters / resources / agents / time_calc:
            shared simulation containers forwarded to the Resource base class
        """
        Resource.__init__(self, statistics, parameters, resources, agents, time_calc, None)
        self.env = env
        self.id = id
        self.prod_steps = prod_steps
        self.variant = variant
        self.sop = -1   # start-of-production timestamp (-1 = not started yet)
        self.eop = -1   # end-of-production timestamp (-1 = not finished yet)
        self.time_processing = 0
        self.time_handling = 0
        self.actual_step = 0  # index of the current step within prod_steps
        self.finished = False
        self.current_location = None  # resource the order currently sits at
        # Per-order event log; the first row is the header
        self.order_log = [["action", "order_ID", "sim_time", "resource_ID"]]
        self.transported = self.env.event()  # fired when a transport completes
        self.processed = self.env.event()    # fired when a processing step completes
        self.reserved = False
        if self.parameters['PRINT_CONSOLE']: print("Order %s created %s" % (self.id, [x.id for x in self.prod_steps]))
        self.order_log.append(["prod_steps", self.id, [x.id for x in prod_steps]]) # records the actual processing history incl time stamps of an order
    def set_sop(self): # SOP = start of production
        """Record the start-of-production time and bump the inventory level."""
        self.sop = self.env.now
        self.statistics['stat_order_sop'][self.id] = self.sop
        # Close the previous inventory interval and open a new one with level + 1
        self.statistics['stat_inv_episode'][-1][0] = self.env.now - self.statistics['stat_inv_episode'][-1][0]
        self.statistics['stat_inv_episode'].append([self.env.now, self.statistics['stat_inv_episode'][-1][1] + 1])
        self.order_log.append(["sop", self.id, round(self.sop, 5), ""])
    def set_eop(self): # EOP = end of production
        """Record the end-of-production time, lead time, and drop the inventory level."""
        self.eop = self.env.now
        self.statistics['stat_order_eop'][self.id] = self.eop
        self.statistics['stat_order_leadtime'][self.id] = self.eop - self.sop
        # Close the previous inventory interval and open a new one with level - 1
        self.statistics['stat_inv_episode'][-1][0] = self.env.now - self.statistics['stat_inv_episode'][-1][0]
        self.statistics['stat_inv_episode'].append([self.env.now, self.statistics['stat_inv_episode'][-1][1] - 1])
        self.order_log.append(["eop", self.id, round(self.eop, 5), ""])
    def set_next_step(self):
        """Advance to the next production step; mark the order finished past the end.

        NOTE(review): finished is only set once actual_step EXCEEDS
        len(prod_steps); at actual_step == len(prod_steps) get_next_step()
        would raise IndexError -- presumably the last step is always a sink
        reached earlier in order_processing(); confirm.
        """
        self.actual_step += 1
        if self.actual_step > len(self.prod_steps):
            self.finished = True
    def get_next_step(self):
        """Return the production step the order must perform next."""
        return self.prod_steps[self.actual_step]
    def get_total_waiting_time(self):
        """Return time spent neither being processed nor handled since SOP."""
        result = self.env.now - self.sop - self.time_processing - self.time_handling
        return result
    def order_processing(self):
        """simpy process: alternate transport and processing for each step.

        Yields the order's transported/processed events, re-arming them after
        each completion, and finalizes statistics once done.
        """
        while True:
            self.set_next_step()
            if self.finished:
                break
            if self.id >= 0 or self in self.current_location.buffer_out: # Check initial orders that are created at the beginning
                self.order_log.append(["before_transport", self.id, round(self.env.now, 5), self.current_location.id])
                Transport.put(order=self, trans_agents=self.resources['transps'])
                yield self.transported # Transport is finished when order is placed in buffer_in of the selected destination
                self.transported = self.env.event()  # re-arm for the next transport
                self.order_log.append(["after_transport", self.id, round(self.env.now, 5), self.current_location.id])
            if self.get_next_step().type == 'sink':
                break
            self.order_log.append(["before_processing", self.id, round(self.env.now, 5), self.current_location.id])
            yield self.processed
            self.processed = self.env.event()  # re-arm for the next processing step
            self.order_log.append(["after_processing", self.id, round(self.env.now, 5), self.current_location.id])
        self.set_eop()
        self.statistics['stat_order_waiting'][self.id] = self.get_total_waiting_time() # Calling this procedure updates the order waiting time statistics
        self.statistics['orders_done'].append(self)
        self.current_location = None
"""
This module provides methods for determine the addon path.
Rainmeter has an built-in variable called #ADDONSPATH#.
With this you can directly route to the drive in which Rainmeter is contained.
If by some chance people use @Include on #ADDONSPATH# it is still able to resolve
the path and open the include for you.
"""
import os.path
from functools import lru_cache
from .. import logger
from .setting_path_provider import get_cached_setting_path
@lru_cache(maxsize=None)
def get_cached_addon_path():
    """Resolve and memoize the value of the #ADDONSPATH# variable.

    Built by appending "Addons\\" to the resolved #SETTINGSPATH#. Returns
    None (after logging an error) when the settings path cannot be resolved.
    """
    settings_path = get_cached_setting_path()
    if settings_path:
        return os.path.join(settings_path, "Addons") + "\\"
    logger.error("#SETTINGSPATH# resolution required but was not found")
    return None
|
import os
import subprocess
import random
import time
from pathlib import Path
import json
import re
from mt import utils
from mt import DATASETS_PATH, DATASET_EVAL_NAME, DATASET_CLEAN_SORTED_NAME, DATASET_TOK_NAME, DATASET_LOGS_NAME, DATASET_CHECKPOINT_NAME
TOK_MODEL = "bpe"
DOMAINS = ["health", "biological", "merged"]
BEAMS = [5]
def generate(train_dataset, src, trg, model_name, train_domain, tok_folder):
    """Preprocess and translate the three evaluation domains with one model.

    For each test domain (health / biological / merged): tokenize the test
    set with the training vocabulary (scripts/3_preprocess.sh), then run
    fairseq generation for every beam width in BEAMS (scripts/5_generate.sh),
    writing results under <train_dataset>/<eval>/<model_name>/<test_domain>.

    :param train_dataset: path of the dataset folder the model was trained on
    :param src: source language id (e.g. "es")
    :param trg: target language id (e.g. "en")
    :param model_name: checkpoint file name (e.g. "checkpoint_best.pt")
    :param train_domain: domain id of the training dataset (logging only)
    :param tok_folder: tokenizer folder name (e.g. "bpe.128")
    """
    # Get all folders in the root path
    test_datasets = [os.path.join(DATASETS_PATH, tok_folder, x) for x in [f"health_fairseq_vhealth_{src}-{trg}",
                                                                          f"biological_fairseq_vbiological_{src}-{trg}",
                                                                          f"merged_fairseq_vmerged_{src}-{trg}"]]
    for test_dataset in test_datasets:
        test_domain, (test_src, test_trg) = utils.get_dataset_ids(test_dataset)
        # Strip the naming suffixes so e.g. "health_fairseq_vhealth" -> "health"
        test_domain = test_domain.replace("_fairseq", "").replace("_vhealth", "").replace("_vbiological", "").replace("_vmerged", "")
        print("#############################################")
        print(f"=> TESTING MODEL FROM '{train_domain}' IN DOMAIN '{test_domain}'")
        # Create path
        eval_path = os.path.join(train_dataset, DATASET_EVAL_NAME, model_name, test_domain)
        Path(eval_path).mkdir(parents=True, exist_ok=True)
        # Preprocess domain datasets with train tokenizers
        source_dataset = test_dataset
        vocab_path = train_dataset
        output_path = eval_path
        print(f"\t- Preprocessing datasets for: {test_domain}...")
        subprocess.call(['sh', './scripts/3_preprocess.sh', source_dataset, vocab_path, output_path, tok_folder, src, trg])
        # Generate translations for every beam width
        for beam in BEAMS:
            eval_path_bin = os.path.join(eval_path, "data-bin")
            model_path = os.path.join(train_dataset, "checkpoints", model_name)
            # Create output path
            output_path = os.path.join(eval_path, f"beam{beam}")
            Path(output_path).mkdir(parents=True, exist_ok=True)
            print(f"\t- Generating translations for: {test_domain}...")
            subprocess.call(['sh', './scripts/5_generate.sh', eval_path_bin, model_path, output_path, src, trg, str(beam)])
        print("")
        print("########################################################################")
        print("########################################################################")
        print("")
        print("")
    print("------------------------------------------------------------------------")
    print("------------------------------------------------------------------------")
    print("")
def get_beam_scores(train_dataset, src, trg, tok_folder, model_name="checkpoint_best.pt"):
    """Collect BLEU/chrF metrics from previously generated translations.

    For each test domain, parses the fairseq-generate summary and the
    sacreBLEU metric files under the model's evaluation folder and writes a
    'beam_metrics.json' summary next to them.

    BUGFIX: the original body read the undefined names ``dataset`` and
    ``model_name``, which only resolved by accident through module globals
    when run as a script. ``dataset`` is replaced by the ``train_dataset``
    parameter; ``model_name`` is now a keyword parameter whose default
    matches the only checkpoint name the driver uses, so existing call
    sites behave identically.

    :param train_dataset: path of the dataset folder the model was trained on
    :param src: source language id (e.g. "es")
    :param trg: target language id (e.g. "en")
    :param tok_folder: tokenizer folder name (e.g. "bpe.128")
    :param model_name: checkpoint file name used for the evaluation folders
    """
    domain, (src, trg) = utils.get_dataset_ids(train_dataset)
    fname_base = f"{domain}_{src}-{trg}"
    # Get all folders in the root path
    test_datasets = [os.path.join(DATASETS_PATH, tok_folder, x) for x in [f"health_fairseq_vhealth_{src}-{trg}",
                                                                          f"biological_fairseq_vbiological_{src}-{trg}",
                                                                          f"merged_fairseq_vmerged_{src}-{trg}"]]
    # NOTE(review): this dict is shared across domains, so each domain's json
    # snapshot contains the most recently parsed beam values -- confirm intent.
    metrics = {"beams": {}}
    for test_dataset in test_datasets:
        test_domain, (test_src, test_trg) = utils.get_dataset_ids(test_dataset)
        test_domain = test_domain.replace("_fairseq", "").replace("_vhealth", "").replace("_vbiological", "").replace("_vmerged", "")
        print(f"=> TESTING MODEL FROM '{fname_base}' IN DOMAIN '{test_domain}'")
        # Evaluation folder previously populated by generate()
        eval_path = os.path.join(train_dataset, DATASET_EVAL_NAME, model_name, test_domain)
        for beam in BEAMS:
            metrics["beams"][f"beam{beam}"] = {}
            # Output path of this beam's generation run
            output_path = os.path.join(eval_path, f"beam{beam}")
            # fairseq-generate: score summary is on the last line
            with open(os.path.join(output_path, "generate-test.txt"), 'r') as f:
                score_summary = f.readlines()[-1]
            print(score_summary)
            # Parse "beam=N: BLEUxx = yy.yy ..."
            pattern = r"beam=(\d+): BLEU\d+ = (\d+.\d+)"
            beam_width, score_bleu = re.search(pattern, score_summary).groups()
            beam_width, score_bleu = int(beam_width), float(score_bleu)
            metrics["beams"][f"beam{beam}"]['fairseq_bleu'] = score_bleu
            # Sacrebleu: BLEU
            with open(os.path.join(output_path, "metrics_bleu.txt"), 'r') as f2:
                score_summary = f2.readlines()[-1]
            print(score_summary)
            pattern = r"BLEU.* = (\d+\.\d+) \d+\.\d+\/"
            score_bleu = float(re.search(pattern, score_summary).groups()[0])
            metrics["beams"][f"beam{beam}"]['sacrebleu_bleu'] = score_bleu
            # Sacrebleu: CHRF
            with open(os.path.join(output_path, "metrics_chrf.txt"), 'r') as f3:
                score_summary = f3.readlines()[-1]
            print(score_summary)
            pattern = r"chrF2.* = (\d+\.\d+)\s*$"
            score_chrf = float(re.search(pattern, score_summary).groups()[0])
            metrics["beams"][f"beam{beam}"]['sacrebleu_chrf'] = score_chrf
            # (TER parsing intentionally omitted; metrics_ter.txt is not always produced.)
        # Save metrics to file, next to this domain's evaluation outputs
        with open(os.path.join(eval_path, 'beam_metrics.json'), 'w') as f:
            json.dump(metrics, f)
        print("Metrics saved!")
        print("------------------------------------------------------------------------")
if __name__ == "__main__":
    # Evaluate every trained model at both BPE vocabulary sizes.
    for TOK_SIZE in [128, 64]:
        TOK_FOLDER = f"{TOK_MODEL}.{TOK_SIZE}"
        # Get all folders in the root path
        # datasets = [os.path.join(DATASETS_PATH, x) for x in ["health_es-en", "biological_es-en", "merged_es-en"]]
        datasets = [(os.path.join(DATASETS_PATH, TOK_FOLDER, x), l) for x, l in [
            # Fairseq (small): Health
            ("health_fairseq_vhealth_es-en", ["checkpoint_best.pt"]),
            ("health_fairseq_vbiological_es-en", ["checkpoint_best.pt"]),
            ("health_fairseq_vmerged_es-en", ["checkpoint_best.pt"]),
            #
            # # Fairseq (small): Biological
            ("biological_fairseq_vhealth_es-en", ["checkpoint_best.pt"]),
            ("biological_fairseq_vbiological_es-en", ["checkpoint_best.pt"]),
            ("biological_fairseq_vmerged_es-en", ["checkpoint_best.pt"]),
            #
            # # Fairseq (small): Merged
            ("merged_fairseq_vhealth_es-en", ["checkpoint_best.pt"]),
            ("merged_fairseq_vbiological_es-en", ["checkpoint_best.pt"]),
            ("merged_fairseq_vmerged_es-en", ["checkpoint_best.pt"]),
            #
            # # Fairseq (small): H->B
            ("health_biological_fairseq_vhealth_es-en", ["checkpoint_best.pt"]),
            ("health_biological_fairseq_vbiological_es-en", ["checkpoint_best.pt"]),
            ("health_biological_fairseq_vmerged_es-en", ["checkpoint_best.pt"]),
            # Fairseq (large): All
            # ("health_fairseq_large_vhealth_es-en", ["checkpoint_best.pt"]),
            # ("biological_fairseq_large_vbiological_es-en", ["checkpoint_best.pt"]),
            # ("merged_fairseq_large_vmerged_es-en", ["checkpoint_best.pt"]),
            # ("health_biological_fairseq_large_vhealth_es-en", ["checkpoint_best.pt"]),
        ]]
        for dataset, models in datasets:
            domain, (src, trg) = utils.get_dataset_ids(dataset)
            fname_base = f"{domain}_{src}-{trg}"
            # Generate translations and collect metrics for each checkpoint
            for model_name in models:
                print(f"Testing model ({fname_base}; {model_name})...")
                generate(dataset, src, trg, model_name=model_name, train_domain=domain, tok_folder=TOK_FOLDER)
                get_beam_scores(dataset, src, trg, TOK_FOLDER)
|
# CompuCell3D launcher: register the FitzHugh-Nagumo steppable (invoked every
# Monte Carlo step, frequency=1) and start the simulation main loop.
from cc3d import CompuCellSetup
from FitzHughNagumoSteppables import FitzHughNagumoSteppable
CompuCellSetup.register_steppable(steppable=FitzHughNagumoSteppable(frequency=1))
CompuCellSetup.run()
|
from app.objects.c_ability import Ability
from app.utility.base_world import BaseWorld
from tests.base.test_base import TestBase
class TestRestSvc(TestBase):
    """Tests for the REST service's configuration-update endpoints."""
    def setUp(self):
        """Initialize the services, apply a default config, and store one
        ability whose command references the app.contact.http property."""
        self.initialize()
        BaseWorld.apply_config(name='default', config={'app.contact.http': '0.0.0.0', 'plugins': ['sandcat', 'stockpile']})
        self.run_async(self.data_svc.store(
            Ability(ability_id='123', test=BaseWorld.encode_string('curl #{app.contact.http}'), variations=[]))
        )
    def test_update_config(self):
        # check that an ability reflects the value in app. property
        pre_ability = self.run_async(self.data_svc.locate('abilities', dict(ability_id='123')))
        self.assertEqual('0.0.0.0', BaseWorld.get_config('app.contact.http'))
        self.assertEqual('curl 0.0.0.0', BaseWorld.decode_bytes(pre_ability[0].test))
        # update property
        self.run_async(self.rest_svc.update_config(data=dict(prop='app.contact.http', value='127.0.0.1')))
        # verify ability reflects new value (ability templates are re-rendered)
        post_ability = self.run_async(self.data_svc.locate('abilities', dict(ability_id='123')))
        self.assertEqual('127.0.0.1', BaseWorld.get_config('app.contact.http'))
        self.assertEqual('curl 127.0.0.1', BaseWorld.decode_bytes(post_ability[0].test))
    def test_update_config_plugin(self):
        # update plugin property: 'plugin' appends to the enabled plugins list
        self.assertEqual(['sandcat', 'stockpile'], BaseWorld.get_config('plugins'))
        self.run_async(self.rest_svc.update_config(data=dict(prop='plugin', value='ssl')))
        self.assertEqual(['sandcat', 'stockpile', 'ssl'], BaseWorld.get_config('plugins'))
|
# Read the bisection-method configuration from stdin.
xl = float(input("Lower limit: "))    # lower bracket of the root interval
xu = float(input("Upper limit: "))    # upper bracket of the root interval
xt = float(input("True value: "))     # known true root (currently unused below)
es = float(input("Error tolerance e_s: "))  # stopping tolerance on the half-interval
max_itr = int(input("Maximum Iteration number: "))  # iteration cap
n = int(input("Height power: "))      # polynomial degree
# Polynomial coefficients, lowest order first: f(x) = sum(coefficient[j] * x**j)
coefficient = []
for i in range(n + 1):
    data = float(input("Enter coefficient value: "))
    coefficient.append(data)
def func(x):
    """Evaluate the polynomial sum(coefficient[j] * x**j, j = 0..n) at x.

    Relies on the module-level ``coefficient`` list and degree ``n`` read
    from stdin above.

    BUGFIX: the original accumulation line read ``coefficient[j ] *( x* *j)``,
    where ``x* *j`` is a SyntaxError; the intended operator is
    exponentiation, ``x ** j``. Also replaced the manual while-loop with the
    idiomatic for-range.
    """
    total = 0.0
    for j in range(n + 1):
        total += coefficient[j] * (x ** j)
    return total
def bisection_method(a, b, f=None, tol=None, max_iterations=None):
    """Find a root of ``f`` in [a, b] by bisection.

    Fixes vs. the original:
    - it called the undefined name ``f`` (the module's polynomial is ``func``);
    - it compared against the undefined name ``tol`` (the tolerance read from
      stdin is ``es``);
    - the ``max_itr`` limit read from stdin was never applied;
    - the pointless ``global midpoint`` is gone, and the no-root case now
      returns None explicitly instead of an unset global.

    The new keyword parameters default to the module-level values, so the
    existing ``bisection_method(a, b)`` call keeps working.

    :param a: lower bracket of the interval
    :param b: upper bracket of the interval
    :param f: function to find the root of (defaults to module-level func)
    :param tol: half-interval stopping tolerance (defaults to module-level es)
    :param max_iterations: iteration cap (defaults to module-level max_itr)
    :return: approximate root, or None when f(a) and f(b) share a sign
    """
    if f is None:
        f = func
    if tol is None:
        tol = es
    if max_iterations is None:
        max_iterations = max_itr
    if f(a) * f(b) > 0:
        # No sign change: bisection cannot guarantee a root in [a, b].
        print("No root found.")
        return None
    midpoint = (a + b) / 2.0
    iterations = 0
    while (b - a) / 2.0 > tol and iterations < max_iterations:
        midpoint = (a + b) / 2.0
        if f(midpoint) == 0:
            return midpoint  # The midpoint is the exact root.
        elif f(a) * f(midpoint) < 0:
            b = midpoint  # The root lies in the lower half-interval.
        else:
            a = midpoint  # The root lies in the upper half-interval.
        iterations += 1
    return midpoint
# BUGFIX: the original called bisection_method(Xl, Xu) -- Python is
# case-sensitive and the inputs above are named xl/xu, so this was a
# guaranteed NameError. Also guard against a None result (no sign change)
# before rounding.
answer = bisection_method(xl, xu)
if answer is not None:
    print("Answer:", round(answer, 3))
|
import configargparse
def config_parser():
    """Build the configargparse parser for NeRF training/rendering runs.

    Groups: experiment/bookkeeping paths, network and optimization options,
    pre-crop, rendering, dataset-type flags (deepvoxels / blender / llff),
    logging frequencies, and a set of project-specific extensions (masking,
    keypoints, pose optimization, depth supervision).

    :return: configured configargparse.ArgumentParser (values may also come
        from the file given via --config)
    """
    parser = configargparse.ArgumentParser()
    parser.add_argument('--config', is_config_file=True,
                        help='config file path')
    parser.add_argument("--expname", type=str, help='experiment name')
    parser.add_argument("--basedir", type=str, default='./logs/',
                        help='where to store ckpts and logs')
    parser.add_argument("--datadir", type=str,
                        default='./data/llff/fern', help='input data directory')
    # training options
    parser.add_argument("--netdepth", type=int, default=8,
                        help='layers in network')
    parser.add_argument("--netwidth", type=int, default=256,
                        help='channels per layer')
    parser.add_argument("--netdepth_fine", type=int,
                        default=8, help='layers in fine network')
    parser.add_argument("--netwidth_fine", type=int, default=256,
                        help='channels per layer in fine network')
    parser.add_argument("--N_rand", type=int, default=32*32*4,
                        help='batch size (number of random rays per gradient step)')
    parser.add_argument("--lrate", type=float,
                        default=5e-4, help='learning rate')
    parser.add_argument("--lrate_decay", type=int, default=250,
                        help='exponential learning rate decay (in 1000s)')
    parser.add_argument("--chunk", type=int, default=1024*32,
                        help='number of rays processed in parallel, decrease if running out of memory')
    parser.add_argument("--netchunk", type=int, default=1024*64,
                        help='number of pts sent through network in parallel, decrease if running out of memory')
    parser.add_argument("--no_batching", action='store_true',
                        help='only take random rays from 1 image at a time')
    parser.add_argument("--no_reload", action='store_true',
                        help='do not reload weights from saved ckpt')
    parser.add_argument("--ft_path", type=str, default=None,
                        help='specific weights npy file to reload for coarse network')
    parser.add_argument("--random_seed", type=int, default=None,
                        help='fix random seed for repeatability')
    # pre-crop options
    parser.add_argument("--precrop_iters", type=int, default=0,
                        help='number of steps to train on central crops')
    parser.add_argument("--precrop_frac", type=float,
                        default=.5, help='fraction of img taken for central crops')
    # rendering options
    parser.add_argument("--N_samples", type=int, default=64,
                        help='number of coarse samples per ray')
    parser.add_argument("--N_importance", type=int, default=0,
                        help='number of additional fine samples per ray')
    parser.add_argument("--perturb", type=float, default=1.,
                        help='set to 0. for no jitter, 1. for jitter')
    parser.add_argument("--use_viewdirs", action='store_true',
                        help='use full 5D input instead of 3D')
    parser.add_argument("--i_embed", type=int, default=0,
                        help='set 0 for default positional encoding, -1 for none')
    parser.add_argument("--multires", type=int, default=10,
                        help='log2 of max freq for positional encoding (3D location)')
    parser.add_argument("--multires_views", type=int, default=4,
                        help='log2 of max freq for positional encoding (2D direction)')
    parser.add_argument("--raw_noise_std", type=float, default=0.,
                        help='std dev of noise added to regularize sigma_a output, 1e0 recommended')
    parser.add_argument("--render_only", action='store_true',
                        help='do not optimize, reload weights and render out render_poses path')
    parser.add_argument("--render_test", action='store_true',
                        help='render the test set instead of render_poses path')
    parser.add_argument("--render_factor", type=int, default=0,
                        help='downsampling factor to speed up rendering, set 4 or 8 for fast preview')
    # dataset options
    parser.add_argument("--dataset_type", type=str, default='llff',
                        help='options: llff / blender / deepvoxels')
    parser.add_argument("--testskip", type=int, default=8,
                        help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')
    # deepvoxels flags
    parser.add_argument("--shape", type=str, default='greek',
                        help='options : armchair / cube / greek / vase')
    # blender flags
    parser.add_argument("--white_bkgd", action='store_true',
                        help='set to render synthetic data on a white bkgd (always use for dvoxels)')
    parser.add_argument("--half_res", action='store_true',
                        help='load blender synthetic data at 400x400 instead of 800x800')
    # llff flags
    parser.add_argument("--factor", type=int,
                        help='downsample factor for LLFF images')
    parser.add_argument("--no_ndc", action='store_true',
                        help='do not use normalized device coordinates (set for non-forward facing scenes)')
    parser.add_argument("--lindisp", action='store_true',
                        help='sampling linearly in disparity rather than depth')
    parser.add_argument("--spherify", action='store_true',
                        help='set for spherical 360 scenes')
    parser.add_argument("--llffhold", type=int, default=8,
                        help='will take every 1/N images as LLFF test set, paper uses 8')
    # logging/saving options
    parser.add_argument("--i_print", type=int, default=100,
                        help='frequency of console printout and metric loggin')
    parser.add_argument("--i_img", type=int, default=500,
                        help='frequency of tensorboard image logging')
    parser.add_argument("--i_weights", type=int, default=10000,
                        help='frequency of weight ckpt saving')
    parser.add_argument("--i_testset", type=int, default=50000,
                        help='frequency of testset saving')
    parser.add_argument("--i_video", type=int, default=50000,
                        help='frequency of render_poses video saving')
    # project-specific extensions: masking, keypoints, pose/depth supervision
    parser.add_argument("--image_extn", type=str, default='.png',
                        help='training image extension')
    parser.add_argument("--mask_directory", type=str, default=None,
                        help='mask_directory')
    parser.add_argument("--mask_images", action='store_true',
                        help='mask_images')
    parser.add_argument("--ray_masking", action='store_true',
                        help='ray_masking')
    parser.add_argument("--sigma_masking", action='store_true',
                        help='sigma_masking')
    parser.add_argument("--sigma_threshold", type=float, default=0., help='sigma_threshold')
    parser.add_argument("--get_depth_maps", action='store_true', help='get_depth_maps')
    parser.add_argument("--Z_limits_from_pose", action='store_true', help='Z_limits_from_pose')
    parser.add_argument("--force_black_background", action='store_true', help='force_black_background')
    parser.add_argument("--visualize_optimization", action='store_true', help='visualize_optimization')
    parser.add_argument("--visualize_results", action='store_true', help='visualize_results')
    parser.add_argument("--use_K", action='store_true', help='use_K - full camera model')
    parser.add_argument("--image_fieldname", type=str, default='file_path', help='image_fieldname')
    parser.add_argument("--approximate_poses_filename", type=str, default='', help='approximate_poses_filename')
    parser.add_argument("--centring_transforms", type=str, help='centring_transforms')
    parser.add_argument("--img_loss_threshold", type=float, help='img_loss_threshold')
    parser.add_argument("--depth_loss_threshold", type=float, help='depth_loss_threshold')
    parser.add_argument("--epochs", type=int, default=10, help='number of epochs')
    parser.add_argument("--output_directory", type=str, default='./', help='output_directory')
    parser.add_argument("--use_huber_loss", action='store_true', help='use_huber_loss')
    parser.add_argument("--image_dir_override", type=str, default=None, help='image_dir_override')
    parser.add_argument("--resample", type=float, default=1.0, help='resample')
    parser.add_argument("--object_radius", type=float, default=0.259425, help='object_radius default driller: 0.259425')
    parser.add_argument("--colmap_db_filename", type=str, default='./data/linemod_driller_all_llff/database.db', help='location of colmap db')
    parser.add_argument("--colmap_keypoints_filename", type=str, default=None, help='location of colmap keypoints pkl')
    parser.add_argument("--number_of_keypoints", type=int, default=None, help='number of unique keypoints')
    parser.add_argument("--keypoint_embedding_size", type=int, default=None, help='keypoint_embedding_size')
    parser.add_argument("--category_activation", type=str, default=None, help='category_activation')
    parser.add_argument("--keypoint_oversample", action='store_true', help='keypoint_oversample')
    parser.add_argument("--keypoint_iterations_start", type=int, default=0, help='keypoint_iterations_start')
    parser.add_argument("--keypoint_loss_coeff", type=float, default=1.0, help='keypoint_loss_coeff')
    parser.add_argument("--choose_keypoint_closest_depth", action='store_true', help='choose_keypoint_closest_depth')
    parser.add_argument("--zero_embedding_origin", action='store_true', help='zero_embedding_origin')
    parser.add_argument("--keypoints_filename", type=str, default=None, help='keypoints_filename')
    parser.add_argument("--keypoint_detector", type=str, default=None, help='keypoint_detector')
    parser.add_argument("--keypoint_dropout", action='store_true', help='keypoint_dropout')
    parser.add_argument("--keypoint_regularize", action='store_true', help='keypoint_regularize')
    parser.add_argument("--autoencoded_keypoints_filename", type=str, default=None, help='autoencoded_keypoints_filename')
    parser.add_argument("--learnable_embeddings_filename", type=str, default=None, help='learnable_embeddings_filename')
    parser.add_argument("--trainskip", type=int, default=1, help='trainskip')
    parser.add_argument("--depth_from_camera", action='store_true', help='use depth_from_camera in rendering')
    parser.add_argument("--test_finish", type=int, default=None, help='test_finish frame')
    parser.add_argument("--near", type=float, default=None, help='near')
    parser.add_argument("--far", type=float, default=None, help='far')
    parser.add_argument("--frames_field", type=str, default='frames', help='frames_field in transforms file')
    parser.add_argument("--depth_loss", action='store_true', help='use depth_from_camera in rendering')
    return parser
"""
Module containing the ``pytorch_lightning.LightningModule`` implementation of
the of the Dense Flow Prediction Network (DFPN).
"""
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import master_thesis as mt
class DFPN(pl.LightningModule):
    """Implementation of the Dense Flow Prediction Network (DFPN).

    Attributes:
        corr: Instance of a ``CorrelationVGG`` layer.
        corr_mixer: Instance of a ``AlignmentCorrelationMixer`` layer.
        flow_64: Instance of a ``FlowEstimator`` layer.
        flow_256: Instance of a ``FlowEstimator`` layer.
        model_vgg: Instance of a ``VGGFeatures`` network.
        kwargs: Dictionary containing the CLI arguments of the execution.
    """

    # Order must match the list returned by ``compute_loss`` — it is zipped
    # with the loss items in ``_log_losses``.
    LOSSES_NAMES = [
        'corr_loss', 'flow_16', 'flow_64', 'flow_256',
        'alignment_recons_64', 'alignment_recons_256'
    ]

    def __init__(self, model_vgg, **kwargs):
        super(DFPN, self).__init__()
        self.corr = CorrelationVGG(model_vgg)
        self.corr_mixer = AlignmentCorrelationMixer()
        self.flow_64 = FlowEstimator()
        self.flow_256 = FlowEstimator()
        # Normalization statistics stored as buffers so they move with the
        # module across devices; shaped (1,3,1,1,1) to broadcast over
        # (B,C,F,H,W) video batches.
        self.register_buffer(
            'mean', torch.as_tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1, 1)
        )
        self.register_buffer(
            'std', torch.as_tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1, 1)
        )
        self.model_vgg = model_vgg
        self.kwargs = kwargs

    def forward(self, x_target, m_target, x_refs, m_refs):
        """Forward pass through the Dense Flow Prediction Network (DFPN).

        Args:
            x_target: Tensor of size ``(B,C,H,W)`` containing the masked frame
                of which to align other frames.
            m_target: Tensor of size ``(B,1,H,W)`` containing the mask of
                ``x_target``.
            x_refs: Tensor of size ``(B,C,F,H,W)`` containing the frames to
                align with respect to ``x_target``.
            m_refs: Tensor of size ``(B,1,F,H,W)`` containing the masks of
                ``x_refs``.

        Returns:
            Tuple of four positions containing:
                - Tensor of size ``(B,F,16,16,16,16)`` containing the filled
                  correlation volume between target and reference frames.
                - Tensor of size ``(B,F,16,16,2)`` containing the predicted
                  flow in the lowest resolution.
                - Tensor of size ``(B,F,64,64,2)`` containing the predicted
                  flow in the middle resolution.
                - Tensor of size ``(B,F,H,W,2)`` containing the predicted flow
                  in the highest resolution.
        """
        b, c, ref_n, h, w = x_refs.size()
        # Channel-wise normalization with the registered mean/std buffers.
        # The target is 4-D, so the frame axis of the buffers is squeezed out.
        x_target = (x_target - self.mean.squeeze(2)) / self.std.squeeze(2)
        x_refs = (x_refs - self.mean) / self.std
        x_target_sq, m_target_sq, x_ref_sq, m_ref_sq = \
            mt.TransformsUtils.resize_set_bis(
                x_target, m_target, x_refs, m_refs, (256, 256)
            )
        x_target_64, m_target_64, x_ref_64, m_ref_64 = \
            mt.TransformsUtils.resize_set_bis(
                x_target, m_target, x_refs, m_refs, (64, 64)
            )
        # Coarse-to-fine: correlation -> 16x16 flow -> refined 64 -> refined 256.
        corr = self.corr(x_target_sq, m_target_sq, x_ref_sq, m_ref_sq)
        flow_16 = self.corr_mixer(corr)
        flow_64_pre = mt.FlowsUtils.resize_flow(
            flow_16, (64, 64), mode='bilinear'
        )
        flow_64 = self.flow_64(
            x_target_64, m_target_64, x_ref_64, m_ref_64, flow_64_pre
        )
        flow_256_pre = mt.FlowsUtils.resize_flow(
            flow_64, (256, 256), mode='bilinear'
        )
        flow_256 = self.flow_256(
            x_target_sq, m_target_sq, x_ref_sq, m_ref_sq, flow_256_pre
        )
        # The finest flow is resized back to the original input resolution.
        return corr, flow_16, flow_64, \
            mt.FlowsUtils.resize_flow(flow_256, (h, w), mode='bilinear')

    def align(self, x_target, m_target, x_refs, m_refs):
        """Aligns the images ``x_refs`` with respect to the image ``x_target``.

        Args:
            x_target: Tensor of size ``(B,C,H,W)`` containing the target image.
            m_target: Tensor of size ``(B,C,H,W)`` containing the mask of the
                target image.
            x_refs: Tensor of size ``(B,C,F,H,W)`` containing the reference
                images.
            m_refs: Tensor of size ``(B,1,F,H,W)`` containing the masks of the
                reference images.

        Returns:
            Tuple of three positions containing:
                - Tensor of size ``(B,C,F,H,W)`` containing the aligned
                  reference images.
                - Tensor of size ``(B,1,F,H,W)`` containing the aligned
                  visibility maps.
                - Tensor of size ``(B,1,F,H,W)`` containing a map indicating
                  which areas of the target frame are visible in the
                  reference frames.
        """
        # Inference-only helper: gradients are never needed here.
        with torch.no_grad():
            *_, flow_256 = self(x_target, m_target, x_refs, m_refs)
            x_ref_aligned, v_ref_aligned = mt.FlowsUtils.align_set(
                x_refs, (1 - m_refs), flow_256
            )
            # Visible in an aligned reference AND masked in the target.
            v_map = (v_ref_aligned - (1 - m_target).unsqueeze(2)).clamp(0, 1)
        return x_ref_aligned, v_ref_aligned, v_map

    def training_step(self, batch, batch_idx):
        """Performs a single pass through the training dataset.

        Args:
            batch: Output of a single data loader iteration.
            batch_idx: Index representing the iteration number.

        Returns:
            Computed loss between predictions and ground truths.
        """
        (x, m), y, info = batch
        # info[2]: per-item flag for usable GT flow, info[4]: GT flow tensor.
        flows_use, flow_gt = info[2], info[4]
        t, r_list = DFPN.get_indexes(x.size(2))
        corr, xs, vs, ys, xs_aligned, flows, flows_gt, flows_use = \
            DFPN._train_val_wrapper(
                self, x, m, y, flow_gt, flows_use, t, r_list
            )
        loss, loss_items = self.compute_loss(
            corr, xs, vs, ys, xs_aligned, flows, flows_gt, flows_use, t, r_list
        )
        self._log_losses(loss, loss_items, 'training')
        return loss

    def validation_step(self, batch, batch_idx):
        """Performs a single pass through the validation dataset.

        Args:
            batch: Output of a single data loader iteration.
            batch_idx: Index representing the iteration number.

        Returns:
            Computed loss between predictions and ground truths.
        """
        (x, m), y, info = batch
        flows_use, flow_gt = info[2], info[4]
        t, r_list = DFPN.get_indexes(x.size(2))
        corr, xs, vs, ys, xs_aligned, flows, flows_gt, flows_use = \
            DFPN._train_val_wrapper(
                self, x, m, y, flow_gt, flows_use, t, r_list
            )
        loss, loss_items = self.compute_loss(
            corr, xs, vs, ys, xs_aligned, flows, flows_gt, flows_use, t, r_list
        )
        self._log_losses(loss, loss_items, 'validation')
        return loss

    def test_step(self, batch, batch_idx):
        """Performs a single pass through the test dataset.

        Args:
            batch: Output of a single data loader iteration.
            batch_idx: Index representing the iteration number.

        Returns:
            Computed loss between predictions and ground truths.
        """
        (x, m), y, info = batch
        # NOTE(review): here the GT flow comes from info[5] (train/val use
        # info[4]), and the losses are not logged — only frames are. Confirm
        # both differences are intentional.
        flows_use, flow_gt = info[2], info[5]
        t, r_list = DFPN.get_indexes(x.size(2))
        corr, xs, vs, ys, xs_aligned, flows, flows_gt, flows_use = \
            DFPN._train_val_wrapper(
                self, x, m, y, flow_gt, flows_use, t, r_list
            )
        loss, loss_items = self.compute_loss(
            corr, xs, vs, ys, xs_aligned, flows, flows_gt, flows_use, t, r_list
        )
        self._log_frames(x, m, y, t, r_list)
        return loss

    def compute_loss(self, corr, xs, vs, ys, xs_aligned, flows, flows_gt,
                     flows_use, t, r_list):
        """Computes the loss of the Dense Flow Prediction Network (DFPN).

        Args:
            corr: Tensor of size ``(B,F,16,16,16,16)`` containing the filled
                correlation volume between target and reference frames.
            xs: Tuple of three positions containing the masked background
                frames in different resolutions.
            vs: Tuple of three positions containing the visibility maps in
                different resolutions.
            ys: Tuple of three positions containing the original background
                frames in different resolutions.
            xs_aligned: Tuple of three positions containing the reference
                masked background frames aligned with respect to the target.
            flows: Tuple of three positions containing the predicted flows in
                different resolutions.
            flows_gt: Tuple of three positions containing the ground-truth
                flows in different resolutions.
            flows_use: Tensor of size ``(--batch_size)`` indicating if the data
                item has been obtained using fake transformations.
            t: Index of the target frame
            r_list: List of indexes of the reference frames

        Returns:
            Tuple of two positions containing:
                - The sum of the different losses.
                - List containing the different loss items in the same order as
                  ``DFPN.LOSSES_NAMES``.
        """
        b, c, f, h, w = ys[2].size()
        # The correlation target is built from the unmasked frames and must
        # not receive gradients.
        with torch.no_grad():
            if h == 256 and w == 256:
                y_vgg_input = ys[2].transpose(1, 2).reshape(b * f, c, h, w)
            else:
                # VGG features are extracted at a fixed 256x256 resolution.
                y_vgg_input = F.interpolate(
                    ys[2].transpose(1, 2).reshape(b * f, c, h, w),
                    (256, 256),
                    mode='bilinear',
                )
            y_vgg_feats = self.model_vgg(y_vgg_input)
            y_vgg_feats = y_vgg_feats[3].reshape(b, f, -1, 16, 16).transpose(1, 2)
            corr_y = CorrelationVGG.correlation_masked_4d(
                y_vgg_feats[:, :, t], None, y_vgg_feats[:, :, r_list], None
            )
        corr_loss = F.l1_loss(corr, corr_y)
        # ``flows_use`` is forwarded to masked_l1 — presumably it restricts the
        # flow losses to items that actually have synthetic GT flow; confirm
        # against mt.LossesUtils.masked_l1.
        flow_loss_16 = mt.LossesUtils.masked_l1(
            flows[0], flows_gt[0], torch.ones_like(flows[0]), flows_use
        )
        flow_loss_64 = mt.LossesUtils.masked_l1(
            flows[1], flows_gt[1], torch.ones_like(flows[1]), flows_use
        )
        flow_loss_256 = mt.LossesUtils.masked_l1(
            flows[2], flows_gt[2], torch.ones_like(flows[2]), flows_use
        )
        # Positions whose predicted flow leaves the [-1, 1] range (out of the
        # frame) are removed from the reconstruction losses below.
        mask_out_64 = ((flows[1] < -1).float() + (flows[1] > 1).float()) \
            .sum(4).clamp(0, 1).unsqueeze(1)
        mask_out_256 = ((flows[2] < -1).float() + (flows[2] > 1).float()) \
            .sum(4).clamp(0, 1).unsqueeze(1)
        alignment_recons_64 = mt.LossesUtils.masked_l1(
            xs[1][:, :, t].unsqueeze(2).repeat(1, 1, len(r_list), 1, 1),
            xs_aligned[1],
            vs[1][:, :, t].unsqueeze(2).repeat(1, 1, len(r_list), 1, 1)
            * (1 - mask_out_64),
            reduction='sum',
        )
        alignment_recons_256 = mt.LossesUtils.masked_l1(
            xs[2][:, :, t].unsqueeze(2).repeat(1, 1, len(r_list), 1, 1),
            xs_aligned[2],
            vs[2][:, :, t].unsqueeze(2).repeat(1, 1, len(r_list), 1, 1)
            * (1 - mask_out_256),
            reduction='sum',
        )
        total_loss = corr_loss + flow_loss_16 + flow_loss_64 + flow_loss_256
        total_loss += alignment_recons_64 + alignment_recons_256
        # Item order must match ``DFPN.LOSSES_NAMES``.
        return total_loss, [corr_loss, flow_loss_16, flow_loss_64,
                            flow_loss_256, alignment_recons_64,
                            alignment_recons_256]

    def configure_optimizers(self):
        """Configures the optimizer and LR scheduler used in the package.

        Returns:
            Dictionary containing a configured ``torch.optim.Adam``
                optimizer and ``torch.optim.lr_scheduler.StepLR`` scheduler.
        """
        optimizer = torch.optim.Adam(self.parameters(), lr=self.kwargs['lr'])
        lr_scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer,
            step_size=self.kwargs['lr_scheduler_step_size'],
            gamma=self.kwargs['lr_scheduler_gamma']
        )
        return {'optimizer': optimizer, 'lr_scheduler': lr_scheduler}

    def _train_val_wrapper(self, x, m, y, flow_gt, flows_use, t, r_list):
        """Auxiliary method used to propagate through training and validation
        iterations.

        Args:
            x: Tensor of size ``(B,C,F,H,W)`` containing the masked background
                frames in the highest resolution.
            m: Tensor of size ``(B,1,F,H,W)`` containing the mask frames in the
                highest resolution.
            y: Tensor of size ``(B,C,F,H,W)`` containing the original
                background frames in the highest resolution.
            flow_gt: Ground-truth flow tensor, indexed per reference frame as
                ``flow_gt[:, r_list]`` below.
            flows_use: Tensor of size ``(--batch_size)`` indicating if the data
                item has been obtained using fake transformations.
            t: Index of the target frame
            r_list: List of indexes of the reference frames

        Returns:
            Tuple of eight positions containing:
                - Tensor of size ``(B,F,16,16,16,16)`` containing the filled
                  correlation volume between target and reference frames.
                - Tuple of three positions containing the masked background
                  frames in different resolutions.
                - Tuple of three positions containing the visibility maps in
                  different resolutions.
                - Tuple of three positions containing the original background
                  frames in different resolutions.
                - Tuple of three positions containing the reference masked
                  background frames aligned with respect to the target.
                - Tuple of three positions containing the predicted flows in
                  different resolutions.
                - Tuple of three positions containing the ground-truth flows in
                  different resolutions.
                - Tensor of size ``(--batch_size)`` indicating if the data item
                  has been obtained using fake transformations.
        """
        corr, flow_16, flow_64, flow_256 = self(
            x[:, :, t], m[:, :, t], x[:, :, r_list], m[:, :, r_list]
        )
        # Visibility maps are the complement of the masks (1 = visible).
        x_16, v_16, y_16 = mt.TransformsUtils.resize_set(x, 1 - m, y, 16)
        x_64, v_64, y_64 = mt.TransformsUtils.resize_set(x, 1 - m, y, 64)
        x_256, v_256, y_256 = x, 1 - m, y
        flow_16_gt = mt.FlowsUtils.resize_flow(flow_gt[:, r_list], (16, 16))
        flow_64_gt = mt.FlowsUtils.resize_flow(flow_gt[:, r_list], (64, 64))
        flow_256_gt = flow_gt[:, r_list]
        x_64_aligned_gt, v_64_aligned_gt = mt.FlowsUtils.align_set(
            x_64[:, :, r_list], v_64[:, :, r_list], flow_64_gt
        )
        x_256_aligned_gt, v_256_aligned_gt = mt.FlowsUtils.align_set(
            x_256[:, :, r_list], v_256[:, :, r_list], flow_256_gt
        )
        # NOTE(review): the GT alignments and the v_map_*_gt maps computed
        # below are never returned or read afterwards — confirm whether they
        # were meant to feed an additional loss term.
        v_map_64_gt = torch.zeros_like(v_64_aligned_gt)
        v_map_64_gt[flows_use] = (
            v_64_aligned_gt[flows_use] - v_64[flows_use, :, t]
            .unsqueeze(2).repeat(1, 1, len(r_list), 1, 1)
        ).clamp(0, 1)
        v_map_256_gt = torch.zeros_like(v_256_aligned_gt)
        v_map_256_gt[flows_use] = (
            v_256_aligned_gt[flows_use] - v_256[flows_use, :, t]
            .unsqueeze(2).repeat(1, 1, len(r_list), 1, 1)
        ).clamp(0, 1)
        x_16_aligned, v_16_aligned = mt.FlowsUtils.align_set(
            x_16[:, :, r_list], v_16[:, :, r_list], flow_16
        )
        x_64_aligned, v_64_aligned = mt.FlowsUtils.align_set(
            x_64[:, :, r_list], v_64[:, :, r_list], flow_64
        )
        x_256_aligned, v_256_aligned = mt.FlowsUtils.align_set(
            x_256[:, :, r_list], v_256[:, :, r_list], flow_256
        )
        xs = (x_16, x_64, x_256)
        vs = (v_16, v_64, v_256)
        ys = (y_16, y_64, y_256)
        xs_aligned = (x_16_aligned, x_64_aligned, x_256_aligned)
        flows = (flow_16, flow_64, flow_256)
        flows_gt = (flow_16_gt, flow_64_gt, flow_256_gt)
        return corr, xs, vs, ys, xs_aligned, flows, flows_gt, flows_use

    def _log_losses(self, loss, loss_items, split):
        """Logs the losses in TensorBoard.

        Args:
            loss: Tensor containing the loss between predictions and ground
                truths.
            loss_items: List containing a tensor for every different loss, in
                the order of ``DFPN.LOSSES_NAMES``.
            split: Identifier of the data split.
        """
        self.log('loss_{}'.format(split), loss)
        for i, loss_item_id in enumerate(DFPN.LOSSES_NAMES):
            loss_name = 'loss_{}_{}'.format(loss_item_id, split)
            self.log(loss_name, loss_items[i])

    def _log_frames(self, x, m, y, t, r_list):
        """Logs aligned frames in TensorBoard.

        Args:
            x: Tensor of size ``(B,C,F,H,W)`` containing masked frames.
            m: Tensor of size ``(B,1,F,H,W)`` containing the masks of ``x``.
            y: Tensor of size ``(B,C,F,H,W)`` containing the frames without a
                mask.
            t: Index of the target frame.
            r_list: List of indexes of the reference frames.
        """
        b, c, frames_n, h, w = x.size()
        x_ref_aligned, v_ref_aligned, v_map = self.align(
            x[:, :, t], m[:, :, t], x[:, :, r_list], m[:, :, r_list]
        )
        # Trivial fill-in: copy aligned reference pixels where visible,
        # keep the (repeated) target frame elsewhere.
        y_hat_trivial = x[:, :, t].unsqueeze(2).repeat(
            1, 1, frames_n - 1, 1, 1
        ) * (1 - v_map) + x_ref_aligned * v_map
        x = x.cpu().numpy()
        m = m.cpu().numpy()
        y = y.cpu().numpy()
        x_ref_aligned = x_ref_aligned.cpu().numpy()
        v_ref_aligned = v_ref_aligned.cpu().numpy()
        y_hat_trivial = y_hat_trivial.cpu().numpy()
        # NOTE: ``b`` is reused below as the sample index, shadowing the batch
        # size unpacked above.
        for b in range(x_ref_aligned.shape[0]):
            # Re-insert the target frame at position ``t`` so the logged strip
            # shows the full sequence.
            x_aligned_sample = np.insert(
                arr=x_ref_aligned[b], obj=t, values=x[b, :, t], axis=1
            )
            v_map_sample = np.insert(
                arr=v_ref_aligned[b].repeat(3, axis=0), obj=t,
                values=m[b, :, t].repeat(3, axis=0), axis=1
            )
            y_hat_trivial_sample = np.insert(
                arr=y_hat_trivial[b], obj=t, values=y[b, :, t], axis=1
            )
            sample = np.concatenate((
                x[b], x_aligned_sample, v_map_sample, y_hat_trivial_sample
            ), axis=2)
            self.logger.experiment.add_images(
                'frames/{}'.format(b + 1),
                sample.transpose((1, 0, 2, 3)),
                global_step=self.current_epoch
            )

    @staticmethod
    def get_indexes(size):
        """Returns the indexes of both the target frame and the reference
        frames given a sequence of size ``size``.

        Args:
            size: Number indicating the size of the input sequence.

        Returns:
            Tuple of two positions containing:
                - The index of the target frame.
                - A list of indexes of the reference frames.
        """
        # The middle frame is the target; every other frame is a reference.
        t, r_list = size // 2, list(range(size))
        r_list.pop(t)
        return t, r_list
class CorrelationVGG(nn.Module):
    """Implementation of the CorrelationVGG layer.

    Attributes:
        conv: Instance of a ``SeparableConv4d`` layer.
        model_vgg: Instance of a ``VGGFeatures`` network.
        use_softmax: Whether or not to apply a softmax at the output.
    """

    def __init__(self, model_vgg, use_softmax=False):
        super(CorrelationVGG, self).__init__()
        self.conv = SeparableConv4d()
        self.model_vgg = model_vgg
        self.use_softmax = use_softmax

    def forward(self, x_target, m_target, x_refs, m_refs):
        """Forward pass through the 4D Separable Convolution layer.

        Args:
            x_target: Tensor of size ``(B,C,H,W)`` containing the masked frame
                of which to align other frames.
            m_target: Tensor of size ``(B,1,H,W)`` containing the mask of
                ``x_target``.
            x_refs: Tensor of size ``(B,C,F,H,W)`` containing the frames to
                align with respect to ``x_target``.
            m_refs: Tensor of size ``(B,1,F,H,W)`` containing the masks of
                ``x_refs``.

        Returns:
            Tensor of size ``(B,F,16,16,16,16)`` containing the correlation
                volume between target and reference frames.
        """
        b, c, ref_n, h, w = x_refs.size()
        # The VGG backbone is frozen for this step: features only, no grads.
        with torch.no_grad():
            x_target_feats = self.model_vgg(x_target, normalize_input=False)
            x_ref_feats = self.model_vgg(
                x_refs.transpose(1, 2).reshape(b * ref_n, c, h, w),
                normalize_input=False
            )
        # Feature level 3 is used, assumed to be 16x16 spatially — the reshape
        # below hard-codes that resolution.
        x_target_feats = x_target_feats[3]
        x_ref_feats = x_ref_feats[3].reshape(b, ref_n, -1, 16, 16) \
            .transpose(1, 2)
        # Re-unpack sizes at the feature resolution (shadows the input sizes).
        b, c, ref_n, h, w = x_ref_feats.size()
        # Visibility maps (1 = visible) downsampled to the feature resolution.
        v_target = F.interpolate(1 - m_target, size=(h, w), mode='nearest')
        v_ref = F.interpolate(
            1 - m_refs.transpose(1, 2).reshape(
                b * ref_n, 1, m_refs.size(3), m_refs.size(4)
            ), size=(h, w), mode='nearest'
        ).reshape(b, ref_n, 1, h, w).transpose(1, 2)
        corr = CorrelationVGG.correlation_masked_4d(
            x_target_feats, v_target, x_ref_feats, v_ref
        )
        # The 4D separable convolution fills in the masked correlations.
        corr = self.conv(corr)
        return CorrelationVGG.softmax_3d(corr) if self.use_softmax else corr

    @staticmethod
    def correlation_masked_4d(x_target_feats, v_target, x_ref_feats, v_ref):
        """Computes the normalized correlation between the feature maps of the
        target and reference frames.

        Args:
            x_target_feats: Tensor of size (B,C,H,W) containing the feature map
                of the target frame.
            v_target: Tensor of size (B,1,H,W) containing the visibility map of
                the target frame, or ``None`` to skip masking.
            x_ref_feats: Tensor of size (B,C,F,H,W) containing the feature maps
                of the reference frames.
            v_ref: Tensor of size (B,1,F,H,W) containing the visibility maps of
                the reference frames, or ``None`` to skip masking.

        Returns:
            4D correlation volume of size (B,F,H,W,H,W).
        """
        b, c, ref_n, h, w = x_ref_feats.size()
        # Zero-out features at masked positions before correlating.
        x_target_feats = x_target_feats * v_target if v_target is not None \
            else x_target_feats
        x_ref_feats = x_ref_feats * v_ref if v_ref is not None else x_ref_feats
        corr_1 = x_target_feats.reshape(b, c, -1).transpose(-1, -2) \
            .unsqueeze(1)
        # Epsilon avoids division by zero for fully-masked positions; the
        # normalization makes this a cosine similarity along channels.
        corr_1_norm = torch.norm(corr_1, dim=3).unsqueeze(3) + 1e-9
        corr_2 = x_ref_feats.reshape(b, c, ref_n, -1).permute(0, 2, 1, 3)
        corr_2_norm = torch.norm(corr_2, dim=2).unsqueeze(2) + 1e-9
        return torch.matmul(corr_1 / corr_1_norm, corr_2 / corr_2_norm) \
            .reshape(b, ref_n, h, w, h, w)

    @staticmethod
    def softmax_3d(x):
        """Computes a 3D softmax function over the 4D correlation volume.

        Args:
            x: Tensor of size ``(B,F,16,16,16,16)`` containing the correlation
                volume between target and reference frames.

        Returns:
            Tensor of size ``(B,F,16,16,16,16)`` containing the correlation
                volume between target and reference frames after applying the
                3D softmax function.
        """
        b, t, h, w, _, _ = x.size()
        # Softmax is taken jointly over all reference positions and frames
        # (the flattened h*w*t axis) for each target position.
        x = x.permute(0, 2, 3, 4, 5, 1).reshape(b, h, w, -1)
        x = F.softmax(x, dim=3)
        return x.reshape(b, h, w, h, w, t).permute(0, 5, 1, 2, 3, 4)
class SeparableConv4d(nn.Module):
"""Implementation of the 4D Separable Convolution layer.
Attributes:
conv_1: Instance of a ``torch.nn.Sequential`` layer.
conv_2: Instance of a ``torch.nn.Sequential`` layer.
"""
def __init__(self):
super(SeparableConv4d, self).__init__()
self.conv_1 = nn.Sequential(
torch.nn.Conv2d(1, 128, (3, 3), padding=1), nn.ReLU(),
torch.nn.Conv2d(128, 256, (3, 3), padding=1), nn.ReLU(),
torch.nn.Conv2d(256, 256, (3, 3), padding=1),
)
self.conv_2 = nn.Sequential(
torch.nn.Conv2d(256, 256, (3, 3), padding=1), nn.ReLU(),
torch.nn.Conv2d(256, 128, (3, 3), padding=1), nn.ReLU(),
torch.nn.Conv2d(128, 1, (3, 3), padding=1),
)
def forward(self, corr):
"""Forward pass through the 4D Separable Convolution layer.
Args:
corr: Tensor of size ``(B,F,16,16,16,16)`` containing the
correlation volume between target and reference frames.
Returns:
Tensor of size ``(B,F,16,16,16,16)`` containing the filled
correlation volume between target and reference frames.
"""
corr = corr.unsqueeze(4)
b, t, h, w, c, *_ = corr.size()
x2_bis = self.conv_1(corr.reshape(-1, c, h, w))
x2_bis = x2_bis.reshape(b, t, h * w, x2_bis.size(1), h * w).permute(
0, 1, 4, 3, 2
)
x3_bis = self.conv_2(x2_bis.reshape(-1, x2_bis.size(3), h, w))
x3_bis = x3_bis.reshape(b, t, h, w, x3_bis.size(1), h, w).squeeze(4)
return x3_bis.permute(0, 1, 4, 5, 2, 3)
class AlignmentCorrelationMixer(nn.Module):
"""Implementation of the Alignment Correlation Mixer layer.
Attributes:
mixer: Instance of a ``torch.nn.Sequential`` layer.
"""
def __init__(self, corr_size=16):
super(AlignmentCorrelationMixer, self).__init__()
self.mixer = nn.Sequential(
nn.Conv2d(corr_size ** 2, corr_size ** 2, (5, 5), padding=2),
nn.ReLU(),
nn.Conv2d(corr_size ** 2, corr_size ** 2, (3, 3), padding=1),
nn.ReLU(),
nn.Conv2d(corr_size ** 2, corr_size, (3, 3), padding=1), nn.ReLU(),
nn.Conv2d(corr_size, corr_size, (5, 5), padding=2), nn.ReLU(),
nn.Conv2d(corr_size, corr_size, (3, 3), padding=1), nn.ReLU(),
nn.Conv2d(corr_size, corr_size // 2, (3, 3), padding=1), nn.ReLU(),
nn.Conv2d(corr_size // 2, corr_size // 2, (5, 5), padding=2),
nn.ReLU(),
nn.Conv2d(corr_size // 2, corr_size // 2, (3, 3), padding=1),
nn.ReLU(),
nn.Conv2d(corr_size // 2, corr_size // 4, (3, 3), padding=1),
nn.ReLU(),
nn.Conv2d(corr_size // 4, corr_size // 4, (5, 5), padding=2),
nn.ReLU(),
nn.Conv2d(corr_size // 4, corr_size // 4, (3, 3), padding=1),
nn.ReLU(),
nn.Conv2d(corr_size // 4, corr_size // 8, (3, 3), padding=1),
nn.Conv2d(corr_size // 8, corr_size // 8, (5, 5), padding=2),
nn.Conv2d(corr_size // 8, corr_size // 8, (3, 3), padding=1)
)
def forward(self, corr):
"""Forward pass through the Alignment Correlation Mixer layer.
Args:
corr: Tensor of size ``(B,F,16,16,16,16)`` containing the filled
correlation volume between target and reference frames.
Returns:
Tensor of size ``(B,F,16,16,2)`` containing the predicted flow.
"""
b, f, h, w, *_ = corr.size()
corr = corr.reshape(b * f, -1, 16, 16)
return self.mixer(corr).reshape(b, f, 2, h, w).permute(0, 1, 3, 4, 2)
class FlowEstimator(nn.Module):
"""Implementation of the Flow Estimator layer.
Attributes:
nn: Instance of a ``torch.nn.Sequential`` layer.
"""
def __init__(self, in_c=10):
super(FlowEstimator, self).__init__()
self.nn = nn.Sequential(
nn.Conv2d(in_c, 128, (5, 5), (1, 1), 2), nn.ReLU(),
nn.Conv2d(128, 128, (3, 3), (1, 1), 1), nn.ReLU(),
nn.Conv2d(128, 128, (5, 5), (2, 2), 2), nn.ReLU(),
nn.Conv2d(128, 128, (5, 5), (1, 1), 2), nn.ReLU(),
nn.Conv2d(128, 128, (3, 3), (1, 1), 1), nn.ReLU(),
nn.Conv2d(128, 128, (3, 3), (2, 2), 1), nn.ReLU(),
nn.Conv2d(128, 128, (5, 5), (1, 1), 2), nn.ReLU(),
nn.Conv2d(128, 128, (3, 3), (1, 1), 1), nn.ReLU(),
nn.Conv2d(128, 128, (3, 3), (2, 2), 1), nn.ReLU(),
nn.Conv2d(128, 128, (5, 5), (1, 1), 2), nn.ReLU(),
nn.Conv2d(128, 128, (3, 3), (1, 1), 1), nn.ReLU(),
nn.ConvTranspose2d(128, 128, (3, 3), (2, 2), (1, 1), (1, 1)),
nn.ReLU(),
nn.Conv2d(128, 128, (5, 5), (1, 1), 2), nn.ReLU(),
nn.Conv2d(128, 128, (3, 3), (1, 1), 1), nn.ReLU(),
nn.ConvTranspose2d(128, 128, (3, 3), (2, 2), (1, 1), (1, 1)),
nn.ReLU(),
nn.Conv2d(128, 128, (5, 5), (1, 1), 2), nn.ReLU(),
nn.Conv2d(128, 128, (3, 3), (1, 1), 1), nn.ReLU(),
nn.ConvTranspose2d(128, 128, (5, 5), (2, 2), (2, 2), (1, 1)),
nn.ReLU(),
nn.Conv2d(128, 128, (5, 5), (1, 1), 2), nn.ReLU(),
nn.Conv2d(128, 128, (3, 3), (1, 1), 1), nn.ReLU(),
nn.Conv2d(128, 2, (3, 3), (1, 1), 1)
)
def forward(self, x_target, m_target, x_refs, m_refs, flow_pre):
"""Forward pass through the Flow Estimator layer.
Args:
x_target: Tensor of size ``(B,C,H,W)`` containing the masked frame
of which to align other frames.
m_target: Tensor of size ``(B,1,H,W)`` containing the mask of
``x_target``.
x_refs: Tensor of size ``(B,C,F,H,W)`` containing the frames to
align with respect to ``x_target``.
m_refs: Tensor of size ``(B,1,F,H,W)`` containing the masks of
``x_refs``.
flow_pre: Tensor of size ``(B,F,H,W,2)`` containing the flow in
lower resolution.
Returns:
Tensor of size ``(B,F,H',W',2)`` containing the flow in the
upscaled resolution.
"""
b, c, ref_n, h, w = x_refs.size()
nn_input = torch.cat([
x_refs.transpose(1, 2).reshape(b * ref_n, c, h, w),
x_target.unsqueeze(1).repeat(1, ref_n, 1, 1, 1)
.reshape(b * ref_n, c, h, w),
m_refs.transpose(1, 2).reshape(b * ref_n, 1, h, w),
m_target.unsqueeze(1).repeat(1, ref_n, 1, 1, 1)
.reshape(b * ref_n, 1, h, w),
flow_pre.reshape(b * ref_n, h, w, 2).permute(0, 3, 1, 2),
], dim=1)
return self.nn(nn_input).reshape(b, ref_n, 2, h, w) \
.permute(0, 1, 3, 4, 2)
|
from typing import List
import aiosqlite
from bot.data.base_repository import BaseRepository
from bot.errors import DesignatedChannelError
class DesignatedChannelRepository(BaseRepository):
    """Repository for designated channel types and the text channels
    assigned to them (tables ``DesignatedChannels`` and
    ``DesignatedChannels_Channels``).
    """

    async def get_all_assigned_channels(self, designated_name) -> List[int]:
        """
        Gets all the channels assigned to a specific designation

        Args:
            designated_name (str): the string name of the designated channel

        Returns:
            List[int]: A list of ids of all registered channels
        """
        designated_id = await self.get_designated_id(designated_name)
        async with aiosqlite.connect(self.resolved_db_path) as db:
            async with db.execute(
                    """
                    SELECT * FROM DesignatedChannels_Channels
                    WHERE fk_designatedChannelsId = ?
                    """, (designated_id,)) as c:
                result = await c.fetchall()
                # Rows are (fk_designatedChannelsId, fk_channelsId);
                # keep only the channel ids.
                return [v for _, v in result]

    async def get_guild_designated_channels(self, designated_name: str, guild_id: int) -> List[int]:
        """
        Gets all the channels assigned to a specific designation in a certain guild

        Args:
            designated_name (str): the string name of the designated channel
            guild_id (int): the id of the guild to filter by

        Returns:
            List[int]: A list of ids of all registered channels
        """
        designated_id = await self.get_designated_id(designated_name)
        async with aiosqlite.connect(self.resolved_db_path) as db:
            async with db.execute(
                    """
                    SELECT * FROM DesignatedChannels_Channels AS dc
                    JOIN Channels AS c ON dc.fk_channelsId = c.id
                    WHERE dc.fk_designatedChannelsId = ? AND c.fk_guildId = ?
                    """, (designated_id, guild_id)) as c:
                # NOTE(review): method name comes from BaseRepository and looks
                # like a typo of ``fetchall_as_dict`` — confirm there before
                # renaming the call site.
                result = await self.fetcthall_as_dict(c)
                return [r['fk_channelsId'] for r in result]

    async def get_all_designated_channels(self):
        """
        Gets all currently defined designated channels

        Returns:
            list: A list of row tuples that contain (id, name)
        """
        async with aiosqlite.connect(self.resolved_db_path) as db:
            async with db.execute('SELECT * FROM DesignatedChannels') as c:
                result = await c.fetchall()
                return result

    async def register_designated_channel(self, channel_type: str, added_channel) -> None:
        """
        Registers an active channel with a designated channel category

        Args:
            channel_type (str): The string name of the designated channel
            added_channel (discord.TextChannel): The active text channel to register

        Raises:
            DesignatedChannelError: Raised when the designated channel type doesn't exist
            DesignatedChannelError: Raised when the TextChannel has already been registered to
                this designated channel type
        """
        if not await self.check_designated_channel(channel_type):
            raise DesignatedChannelError(f'The designated channel type {channel_type} does not exist')
        designated_id = await self.get_designated_id(channel_type)
        if await self.check_channel_added(designated_id, added_channel.id):
            raise DesignatedChannelError(f'{added_channel.name} is already assigned to {channel_type}')
        async with aiosqlite.connect(self.resolved_db_path) as db:
            await db.execute(
                """
                INSERT INTO DesignatedChannels_Channels
                VALUES (?, ?)
                """, (designated_id, added_channel.id))
            await db.commit()

    async def add_designated_channel_type(self, channel_type: str) -> None:
        """
        Adds a new designated channel type; a no-op if it already exists.

        Args:
            channel_type (str): The name of the designated_channel to add
        """
        if await self.check_designated_channel(channel_type):
            return
        async with aiosqlite.connect(self.resolved_db_path) as db:
            await db.execute(
                """
                INSERT INTO DesignatedChannels (name)
                VALUES (?)
                """, (channel_type,))
            await db.commit()

    async def remove_from_all_designated_channels(self, channel):
        """
        Deregisters a channel from every designated channel type.

        Args:
            channel (discord.TextChannel): the channel to deregister
        """
        async with aiosqlite.connect(self.resolved_db_path) as db:
            await db.execute(
                """
                DELETE FROM DesignatedChannels_Channels
                WHERE fk_channelsId = ?
                """, (channel.id,))
            await db.commit()

    async def remove_from_designated_channel(self, channel_type: str, channel_id) -> None:
        """
        Removes a given TextChannel from the list of active designated channel listeners

        Args:
            channel_type (str): The string name of the designated channel to be removed from
            channel_id (int): The id of the channel to deregister

        Raises:
            DesignatedChannelError: Raised when the designated channel type doesn't exist
            DesignatedChannelError: Raised when the given channel id is not currently registered to that designated channel
        """
        if not await self.check_designated_channel(channel_type):
            raise DesignatedChannelError(f'The designated channel type {channel_type} does not exist')
        designated_id = await self.get_designated_id(channel_type)
        if not await self.check_channel_added(designated_id, channel_id):
            raise DesignatedChannelError(f'{channel_id} is not present in {channel_type}')
        # ``designated_id`` was already resolved above — the original fetched
        # it a second time here, costing a redundant database round-trip.
        async with aiosqlite.connect(self.resolved_db_path) as db:
            await db.execute(
                """
                DELETE FROM DesignatedChannels_Channels
                WHERE fk_designatedChannelsId = ? and fk_channelsId = ?
                """, (designated_id, channel_id,))
            await db.commit()

    async def get_designated_id(self, name):
        """
        Takes a designated name and returns the associated auto incremented id

        Args:
            name (str): The string name of the designated channel

        Returns:
            int: the integer id of the designated channel
        """
        async with aiosqlite.connect(self.resolved_db_path) as db:
            async with db.execute(
                    """
                    SELECT id FROM DesignatedChannels
                    WHERE name = ?
                    """, (name,)) as c:
                (designated_id,) = await c.fetchone()
                return designated_id

    async def check_channel_added(self, designated_channel_id, added_channel_id) -> bool:
        """Returns True when the channel is already assigned to the given
        designated channel type."""
        async with aiosqlite.connect(self.resolved_db_path) as db:
            async with db.execute(
                    """
                    SELECT * FROM DesignatedChannels_Channels
                    WHERE fk_channelsId = ? and fk_designatedChannelsId = ?
                    """, (added_channel_id, designated_channel_id)) as c:
                return await c.fetchone() is not None

    async def check_channel(self, channel) -> bool:
        """Returns True when the channel is assigned to any designated
        channel type."""
        async with aiosqlite.connect(self.resolved_db_path) as db:
            async with db.execute(
                    """
                    SELECT * FROM DesignatedChannels_Channels
                    WHERE fk_channelsId = ?
                    """, (channel.id,)) as c:
                return await c.fetchone() is not None

    async def check_designated_channel(self, designated_name: str) -> bool:
        """Returns True when a designated channel type with this name exists.
        (The original annotated the parameter as ``int``; it is a name.)"""
        async with aiosqlite.connect(self.resolved_db_path) as db:
            async with db.execute(
                    """
                    SELECT * FROM DesignatedChannels WHERE name = ?
                    """, (designated_name,)) as c:
                return await c.fetchone() is not None
|
from os import environ as envvars
from pathlib import Path
from setuptools import setup
# Packaging configuration for the single-module `ucon` distribution.
setup(
    name='ucon',
    description='a tool for dimensional analysis: a "Unit CONverter"',
    # The README.md sitting next to this file becomes the PyPI long description.
    long_description=Path(__file__).absolute().parent.joinpath('README.md').read_text(),
    long_description_content_type='text/markdown',
    # Version is derived from git tags by setuptools_scm. When the
    # LOCAL_VERSION_SCHEME env var is set, local version segments are dropped
    # ("no-local-version") — presumably so uploads to indexes that reject
    # local versions succeed; confirm against the release workflow.
    use_scm_version={'local_scheme': 'no-local-version'} if envvars.get('LOCAL_VERSION_SCHEME') else True,
    license='MIT',
    # NOTE(review): `setup_requires` is deprecated in favour of a
    # pyproject.toml [build-system] table; kept as-is here.
    setup_requires=[
        'setuptools_scm==6.3.2'
    ],
    py_modules=['ucon'],
    maintainer='Emmanuel I. Obi',
    maintainer_email='withtwoemms@gmail.com',
    url='https://github.com/withtwoemms/ucon',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
    ]
)
|
import six
from types import GeneratorType as _GeneratorType
from google.protobuf.message import Message as _ProtoMessageType
# Sequence-like types accepted when filling a repeated protobuf field.
# The first branch only executes under Python 2, so the `xrange` name is
# never looked up (and never fails) on Python 3.
if six.PY2:
    _list_types = list, xrange, _GeneratorType
else:
    # On Python 3, `map` and `filter` return lazy iterators, so they are
    # accepted as list-like inputs too.
    _list_types = list, range, _GeneratorType, map, filter

# High bit of an emsg number; set when the payload is protobuf-encoded
# (see is_proto / set_proto_bit / clear_proto_bit below).
protobuf_mask = 0x80000000
def is_proto(emsg):
    """Tell whether the protobuf flag bit is set on an emsg number.

    :param emsg: emsg number
    :type emsg: int
    :return: True or False
    :rtype: bool
    """
    # Non-zero masked value means the flag bit is present.
    return bool(int(emsg) & protobuf_mask)
def set_proto_bit(emsg):
    """Return *emsg* with the protobuf flag bit turned on.

    :param emsg: emsg number
    :type emsg: int
    :return: emsg with proto bit set
    :rtype: int
    """
    value = int(emsg)
    return value | protobuf_mask
def clear_proto_bit(emsg):
    """Return *emsg* with the protobuf flag bit turned off.

    :param emsg: emsg number
    :type emsg: int
    :return: emsg with proto bit removed
    :rtype: int
    """
    value = int(emsg)
    return value & ~protobuf_mask
def proto_to_dict(message):
    """Convert a protobuf message instance to a plain dict.

    Only fields that are actually set (as reported by ``ListFields``)
    appear in the result; nested messages are converted recursively.

    :param message: protobuf message instance
    :return: parameters and their values
    :rtype: dict
    :raises: :class:`.TypeError` if ``message`` is not a proto message
    """
    if not isinstance(message, _ProtoMessageType):
        raise TypeError("Expected `message` to be a instance of protobuf message")
    result = {}
    for descriptor, value in message.ListFields():
        if descriptor.type != descriptor.TYPE_MESSAGE:
            # Scalar field: repeated scalars become plain lists.
            result[descriptor.name] = (
                list(value) if descriptor.label == descriptor.LABEL_REPEATED else value
            )
        elif descriptor.label == descriptor.LABEL_REPEATED:
            result[descriptor.name] = [proto_to_dict(item) for item in value]
        else:
            result[descriptor.name] = proto_to_dict(value)
    return result
def proto_fill_from_dict(message, data, clear=True):
    """Fill protobuf message parameters in place from a :class:`dict`.

    :param message: protobuf message instance
    :param data: parameters and values
    :type data: dict
    :param clear: whether to clear existing values first
    :type clear: bool
    :return: the filled message (the same instance passed in)
    :raises: incorrect types or values will raise
    """
    if not isinstance(message, _ProtoMessageType):
        raise TypeError("Expected `message` to be a instance of protobuf message")
    if not isinstance(data, dict):
        raise TypeError("Expected `data` to be of type `dict`")
    if clear: message.Clear()
    field_descs = message.DESCRIPTOR.fields_by_name
    for key, val in data.items():
        desc = field_descs[key]
        if desc.type == desc.TYPE_MESSAGE:
            if desc.label == desc.LABEL_REPEATED:
                if not isinstance(val, _list_types):
                    raise TypeError("Expected %s to be of type list, got %s" % (repr(key), type(val)))
                list_ref = getattr(message, key)
                # Takes care of overwriting list fields when merging partial data (clear=False)
                if not clear: del list_ref[:]  # clears the list
                for item in val:
                    item_message = getattr(message, key).add()
                    proto_fill_from_dict(item_message, item)
            else:
                if not isinstance(val, dict):
                    # BUG FIX: report the actual value's type; this previously
                    # said `type(dict)`, which always printed <class 'type'>.
                    raise TypeError("Expected %s to be of type dict, got %s" % (repr(key), type(val)))
                proto_fill_from_dict(getattr(message, key), val)
        else:
            if isinstance(val, _list_types):
                list_ref = getattr(message, key)
                if not clear: del list_ref[:]  # clears the list
                list_ref.extend(val)
            else:
                setattr(message, key, val)
    return message
|
from flask import request
from app.api.responses import Responses
from app.api.routes.models.office import offices
def validate_update_all():
    """Validate the JSON body of an office-update request.

    Returns a ``(response, status)`` tuple describing the first validation
    failure, or ``None`` (implicitly) when the payload is valid.
    """
    data = request.get_json()
    required_fields = ['name']
    if not data:
        return Responses.bad_request("Empty inputs in Json"), 400
    if 'name' not in data:
        return Responses.bad_request("Name input is missing"), 400
    # Reject any keys beyond the allowed ones.
    for key in data:
        if key not in required_fields:
            return Responses.bad_request("{} is not a valid key".format(key)), 400
    if not isinstance(data['name'], str):
        return Responses.bad_request('Ensure that all inputs are strings'), 400
    name = data['name'].strip()
    if len(name) == 0:
        return Responses.bad_request("Party Name cannot be empty"), 400
    if len(name) < 6:
        # BUG FIX: this validation failure previously returned 404; it is a
        # client error like every other check here, so use 400.
        return Responses.bad_request("Name should have more than 6 characters"), 400
    for office in offices:
        if office["name"] == name:
            # BUG FIX: status code was missing on this return, making it
            # inconsistent with all other failure paths.
            return Responses.bad_request("Office with that name already exists"), 400
|
#!/usr/bin/env python3
#
# (For english check below :) )
#
# Tetris
#
# Przyciski sterujące:
# Strzałka w lewo/prawo - przesuń element w lewo/prawo
# Strzałka w górę - obróć element zgodnie z ruchem wskazówek zegara
# Strzałka w dół - przyspiesz spadanie elementu
# Klawisz 'Spacja' - natychmiast opuść element na dół
# Klawisz 'P' - przerwij grę
# Klawisz 'Esc' - zakończ grę
#
#
# Tetris
#
# Control keys:
# Left/Right - Move stone left/right
# Up - Rotate stone clockwise
# Down - Drop stone faster
# Space - Instant drop
# P - Pause game
# Escape - Quit game
import pygame, random, time, os
from pygame.locals import *
from sys import exit
# --- Window configuration ---
cell_size = 20
cols = 10
rows = 20
window_width = 360
window_height = rows * cell_size
right_margin = cols * cell_size + cell_size  # x where the side-panel text starts
top_margin = 0
maxfps = 25

# --- Stone (tetromino) shapes ---
# Each entry is a list of rotation states; every state is a 5x5 grid whose
# non-zero cells hold the 1-based index into `colors`/`lightcolors`.
O_shapes = [[[0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 1, 1, 0, 0],
             [0, 1, 1, 0, 0],
             [0, 0, 0, 0, 0]]]
I_shapes = [[[0, 0, 2, 0, 0],
             [0, 0, 2, 0, 0],
             [0, 0, 2, 0, 0],
             [0, 0, 2, 0, 0],
             [0, 0, 0, 0, 0]],
            [[0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [2, 2, 2, 2, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]]]
S_shapes = [[[0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 3, 3, 0],
             [0, 3, 3, 0, 0],
             [0, 0, 0, 0, 0]],
            [[0, 0, 0, 0, 0],
             [0, 0, 3, 0, 0],
             [0, 0, 3, 3, 0],
             [0, 0, 0, 3, 0],
             [0, 0, 0, 0, 0]]]
Z_shapes = [[[0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 4, 4, 0, 0],
             [0, 0, 4, 4, 0],
             [0, 0, 0, 0, 0]],
            [[0, 0, 0, 0, 0],
             [0, 0, 4, 0, 0],
             [0, 4, 4, 0, 0],
             [0, 4, 0, 0, 0],
             [0, 0, 0, 0, 0]]]
L_shapes = [[[0, 0, 0, 0, 0],
             [0, 0, 5, 0, 0],
             [0, 0, 5, 0, 0],
             [0, 0, 5, 5, 0],
             [0, 0, 0, 0, 0]],
            [[0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 5, 5, 5, 0],
             [0, 5, 0, 0, 0],
             [0, 0, 0, 0, 0]],
            [[0, 0, 0, 0, 0],
             [0, 5, 5, 0, 0],
             [0, 0, 5, 0, 0],
             [0, 0, 5, 0, 0],
             [0, 0, 0, 0, 0]],
            [[0, 0, 0, 0, 0],
             [0, 0, 0, 5, 0],
             [0, 5, 5, 5, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]]]
J_shapes = [[[0, 0, 0, 0, 0],
             [0, 0, 6, 0, 0],
             [0, 0, 6, 0, 0],
             [0, 6, 6, 0, 0],
             [0, 0, 0, 0, 0]],
            [[0, 0, 0, 0, 0],
             [0, 6, 0, 0, 0],
             [0, 6, 6, 6, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]],
            [[0, 0, 0, 0, 0],
             [0, 0, 6, 6, 0],
             [0, 0, 6, 0, 0],
             [0, 0, 6, 0, 0],
             [0, 0, 0, 0, 0]],
            [[0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 6, 6, 6, 0],
             [0, 0, 0, 6, 0],
             [0, 0, 0, 0, 0]]]
T_shapes = [[[0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 7, 7, 7, 0],
             [0, 0, 7, 0, 0],
             [0, 0, 0, 0, 0]],
            [[0, 0, 0, 0, 0],
             [0, 0, 7, 0, 0],
             [0, 7, 7, 0, 0],
             [0, 0, 7, 0, 0],
             [0, 0, 0, 0, 0]],
            [[0, 0, 0, 0, 0],
             [0, 0, 7, 0, 0],
             [0, 7, 7, 7, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]],
            [[0, 0, 0, 0, 0],
             [0, 0, 7, 0, 0],
             [0, 0, 7, 7, 0],
             [0, 0, 7, 0, 0],
             [0, 0, 0, 0, 0]]]
shapes = [O_shapes, I_shapes, S_shapes, Z_shapes, L_shapes, J_shapes, T_shapes]
stone_width = 5
stone_height = 5

# --- Stone movement timing (seconds between repeated moves) ---
move_sideway_freq = 0.15
move_down_freq = 0.1

# --- Colors (RGB); colors[i] pairs with lightcolors[i] for box shading ---
white = (255, 255, 255)
gray = (185, 185, 185)
black = (0, 0, 0)
red = (155, 0, 0)
lightred = (175, 20, 20)
green = (0, 155, 0)
lightgreen = (20, 175, 20)
blue = (0, 0, 155)
lightblue = (20, 20, 175)
yellow = (155, 155, 0)
lightyellow = (175, 175, 20)
orange = (244, 70, 17)
lightorange = (255, 164, 32)
purple = (222, 76, 138)
lightpurple = (146, 78, 125)
brown = (76, 47, 39)
lightbrown = (142, 64, 42)
colors = (red, green, blue, yellow, orange, purple, brown)
lightcolors = (lightred, lightgreen, lightblue, lightyellow, lightorange, lightpurple, lightbrown)
class Tetris:
    def __init__(self):
        """
        Initializing parameters and PyGame modules.
        """
        pygame.init()
        self.clock=pygame.time.Clock()
        pygame.display.set_caption('Tetris')
        self.window = pygame.display.set_mode((window_width, window_height), DOUBLEBUF)
        self.font = pygame.font.Font(pygame.font.get_default_font(), 18)
        pygame.event.set_blocked(MOUSEMOTION)
        self.stone=None
        # Pre-pick the "next" stone so it can be previewed in the side panel.
        self.next_shape=random.choice(range(0,len(shapes)))
        self.next_rotation=random.choice(range(0,len(shapes[self.next_shape])))
        self.next_stone = shapes[self.next_shape][self.next_rotation]
    def showText(self,text,color=white):
        """
        This function displays text in the center of the window until a key is pressed.
        """
        surf=self.font.render(text, True, color)
        rect=surf.get_rect()
        rect.center=(int(window_width/2)-3, int(window_height/2)-3)
        self.window.blit(surf, rect)
        pressSurf=self.font.render('Press a key to play.',True, color)
        pressRect=pressSurf.get_rect()
        pressRect.center=(int(window_width/2), int(window_height/2)+20)
        self.window.blit(pressSurf, pressRect)
        while self.checkPress() is None:
            pygame.display.update()
            self.clock.tick()
    def checkQuit(self):
        """
        Check if any QUIT event or ESCAPE is pressed.
        """
        for event in pygame.event.get(QUIT):
            self.gameExit()
        for event in pygame.event.get(KEYUP):
            if event.key==K_ESCAPE:
                self.gameExit()
            # Re-post non-escape KEYUP events so other handlers still see them.
            pygame.event.post(event)
    def checkPress(self):
        """
        Check if any key is pressed. Returns the key code or None.
        """
        self.checkQuit()
        for event in pygame.event.get([KEYDOWN, KEYUP]):
            if event.type==KEYDOWN and event.key!=K_ESCAPE:
                continue
            return event.key
        return None
    def newStone(self):
        """
        Promote the previewed stone to the falling stone and pick a new preview.
        """
        self.stone=self.next_stone
        self.shape=self.next_shape
        self.rotation=self.next_rotation
        self.next_shape=random.choice(range(0,len(shapes)))
        self.next_rotation=random.choice(range(0,len(shapes[self.next_shape])))
        self.next_stone = shapes[self.next_shape][self.next_rotation]
        # Start horizontally centered, slightly above the visible board.
        self.stoneX=int(cols/2)-int(stone_width/2)
        self.stoneY=-2
    def levelAndFreq(self):
        """
        Calculate level and fall frequency (how many seconds pass until a falling stone falls one space).
        """
        self.level=int(self.score/10)+1
        self.fall_freq=0.27-(self.level*0.02)
    def createBoard(self):
        """
        Create and return a new board of game (cols x rows of zeros).
        """
        self.board=[]
        for i in range(cols):
            self.board.append([0]*rows)
    def addToBoard(self, board, stone):
        """
        Fill stone in the board (the stone is added to the board after it's landed).
        """
        for x in range(stone_width):
            for y in range(stone_height):
                if stone[y][x]!=0:
                    board[x+self.stoneX][y+self.stoneY]=stone[y][x]
    def ifOnBoard(self,x,y):
        """
        Return true if x and y are on board.
        """
        return x>=0 and x<cols and y>=0 and y<rows
    def ifValid(self, board, stone, adjX=0, adjY=0):
        """
        Return true if the stone is within the board and not colliding.
        """
        for x in range(stone_width):
            for y in range(stone_height):
                is_above_board=y+self.stoneY+adjY<0
                if is_above_board or stone[y][x]==0:
                    continue
                if not self.ifOnBoard(x+self.stoneX+adjX, y+self.stoneY+adjY):
                    return False
                if board[x+self.stoneX+adjX][y+self.stoneY+adjY]!=0:
                    return False
        return True
    def ifCompleteLine(self, board, y):
        """
        Return True if the line is filled with boxes and has no gaps.
        """
        for x in range(cols):
            if board[x][y]==0:
                return False
        return True
    def deleteLine(self, board):
        """
        Remove all completed lines on the board, move everything above them down
        and return the number of deleted lines.
        """
        lines=0
        y=rows-1
        while y>=0:
            if self.ifCompleteLine(board,y):
                path=os.path.dirname(os.path.realpath(__file__))
                complete_line_music=pygame.mixer.Sound(os.path.join(path, 'coins.wav'))
                pygame.mixer.Sound.play(complete_line_music)
                # Shift every row above y down by one; stay on the same y since
                # the shifted-down row may itself be complete.
                for p in range(y,0,-1):
                    for x in range(cols):
                        board[x][p]=board[x][p-1]
                for x in range(cols):
                    board[x][0]=0
                lines+=1
            else:
                y-=1
        return lines
    def drawBox(self, box_x, box_y, color, pixel_x=None, pixel_y=None):
        """
        Draw a single box (one pixel) on the board. Box_x and box_y -
        parameters for board coordinates where the box should be drawn.
        The pixel_x and pixel_y - parameters used to draw the boxes of the
        "Next:" stone, which is not on the board.
        """
        if color==0:
            return
        if pixel_x is None and pixel_y is None:
            pixel_x, pixel_y = box_x*cell_size, box_y*cell_size
        pygame.draw.rect(self.window, colors[color-1], (pixel_x+1, pixel_y+1, cell_size-1, cell_size-1))
        pygame.draw.rect(self.window, lightcolors[color-1], (pixel_x+1, pixel_y+1, cell_size-4, cell_size-4))
    def drawBoard(self, board):
        """
        Draw the border around the board.
        """
        pygame.draw.rect(self.window,gray, (0, top_margin, cell_size*cols, cell_size*rows))
        for x in range(cols):
            for y in range(rows):
                self.drawBox(x,y,board[x][y])
    def drawStone(self, stone, pixel_x=None, pixel_y=None):
        """
        Drawing the boxes of the stone. This function will be used to draw falling stones or the "next" stone.
        """
        if pixel_x is None and pixel_y is None:
            pixel_x, pixel_y=self.stoneX*cell_size, self.stoneY*cell_size
        for x in range(stone_width):
            for y in range(stone_height):
                if stone[y][x]!=0:
                    self.drawBox(None,None,stone[y][x],pixel_x+x*(cell_size), pixel_y+(y*cell_size))
    def drawScore(self,score,level):
        """
        Drawing the score and the level text.
        """
        score_surf=self.font.render('Score: %s' % score, True, white)
        score_rect=score_surf.get_rect()
        score_rect.topleft=(right_margin,20)
        self.window.blit(score_surf, score_rect)
        level_surf=self.font.render('Level: %s' % level, True, white)
        level_rect=level_surf.get_rect()
        level_rect.topleft=(right_margin,60)
        self.window.blit(level_surf, level_rect)
    def drawNext(self, stone):
        """
        Drawing the "next" stone.
        """
        next_surf=self.font.render('Next:', True, white)
        next_rect=next_surf.get_rect()
        next_rect.topleft=(right_margin,100)
        self.window.blit(next_surf, next_rect)
        self.drawStone(stone, pixel_x=right_margin, pixel_y=120)
    def gameExit(self):
        """
        Closing game.
        """
        pygame.quit()
        exit()
    def execute(self):
        """
        Executing the new game.
        """
        self.showText('Tetris')
        while True:
            path=os.path.dirname(os.path.realpath(__file__))
            music_file=os.path.join(path, 'arcade-music-loop.wav')
            pygame.mixer.music.load(music_file)
            pygame.mixer.music.play(-1,0.0)
            #the main loop of the game
            self.runGame()
            pygame.mixer.music.stop()
            self.showText('Game Over')
    def runGame(self):
        """
        Setup variables for the start of the game and the main loop.
        """
        self.createBoard()
        last_move_down_time=time.time()
        last_move_sideways_time=time.time()
        last_fall_time=time.time()
        moving_down=False
        moving_left=False
        moving_right=False
        self.score=0
        self.levelAndFreq() #create self.level, self.fall_freq
        #the main loop of the game
        while True:
            if self.stone is None:
                self.newStone()
                last_fall_time=time.time()
                # No room for the new stone: the game is over.
                if not self.ifValid(self.board, self.stone):
                    return
            self.checkQuit()
            for event in pygame.event.get():
                if event.type==KEYUP:
                    if event.key==K_p:
                        self.window.fill(black)
                        pygame.mixer.music.pause()
                        self.showText('Paused')
                        pygame.mixer.music.unpause()
                        last_fall_time=time.time()
                        last_move_down_time=time.time()
                        last_move_sideways_time=time.time()
                    elif event.key==K_LEFT:
                        moving_left=False
                    elif event.key==K_RIGHT:
                        moving_right=False
                    elif event.key==K_DOWN:
                        moving_down=False
                elif event.type==KEYDOWN:
                    if event.key==K_LEFT and self.ifValid(self.board,self.stone,adjX=-1):
                        self.stoneX-=1
                        moving_left=True
                        moving_right=False
                        last_move_sideways_time=time.time()
                    elif event.key==K_RIGHT and self.ifValid(self.board,self.stone,adjX=1):
                        self.stoneX+=1
                        moving_left=False
                        moving_right=True
                        last_move_sideways_time=time.time()
                    # BUG FIX: rotation was additionally gated on
                    # ifValid(adjX=-1) (a copy/paste from the K_LEFT branch),
                    # which blocked rotating whenever the stone could not move
                    # left. Rotation only needs its own validity check below.
                    elif event.key==K_UP:
                        self.rotation=(self.rotation+1)%len(shapes[self.shape])
                        self.stone=shapes[self.shape][self.rotation]
                        if not self.ifValid(self.board,self.stone):
                            self.rotation=(self.rotation-1)%len(shapes[self.shape])
                            self.stone=shapes[self.shape][self.rotation]
                    elif event.key==K_DOWN:
                        moving_down=True
                        if self.ifValid(self.board, self.stone, adjY=1):
                            self.stoneY+=1
                        # BUG FIX: was `last_move_down=...`, which only created
                        # an unused local instead of resetting the repeat timer.
                        last_move_down_time=time.time()
                    elif event.key==K_SPACE:
                        moving_down=False
                        moving_left=False
                        moving_right=False
                        for i in range(1, rows):
                            if not self.ifValid(self.board, self.stone, adjY=i):
                                break
                        self.stoneY += i-1
            if (moving_left or moving_right) and time.time() - last_move_sideways_time>move_sideway_freq:
                if moving_left and self.ifValid(self.board, self.stone, adjX=-1):
                    self.stoneX-=1
                elif moving_right and self.ifValid(self.board, self.stone, adjX=1):
                    self.stoneX+=1
                last_move_sideways_time=time.time()
            if moving_down and time.time() - last_move_down_time>move_down_freq and self.ifValid(self.board, self.stone, adjY=1):
                self.stoneY+=1
                last_move_down_time=time.time()
            if time.time()-last_fall_time>self.fall_freq:
                # The stone cannot fall any further: land it and clear lines.
                if not self.ifValid(self.board, self.stone, adjY=1):
                    self.addToBoard(self.board, self.stone)
                    self.score+=self.deleteLine(self.board)
                    self.levelAndFreq()
                    self.stone=None
                else:
                    self.stoneY+=1
                    last_fall_time=time.time()
            #drawing everything on the screen
            self.window.fill(black)
            self.drawBoard(self.board)
            self.drawScore(self.score, self.level)
            self.drawNext(self.next_stone)
            if self.stone is not None:
                self.drawStone(self.stone)
            pygame.display.update()
            self.clock.tick(maxfps)
if __name__ == '__main__':
    # Launch the game when this file is run as a script.
    Tetris().execute()
|
from .util import (
TreeNode,
ListNode,
serialize,
deserialize,
compareTrees,
toList,
toListNode,
)
def test_empty_serialize():
    # Round-tripping an empty tree yields None again.
    round_tripped = deserialize(serialize(None))
    assert round_tripped is None
def test_can_serialize_and_deserialize():
    # Build a small balanced tree: 1 <- 2 -> 3.
    root = TreeNode(2)
    root.left, root.right = TreeNode(1), TreeNode(3)
    encoded = serialize(root)
    assert encoded == "2,1,3,"
    # Decoding the encoding must reproduce an equal tree.
    assert compareTrees(deserialize(encoded), root)
def test_to_list_empty():
    # A missing head node means an empty Python list.
    assert toList(None) == []
def test_to_list_one():
    # A default-constructed node carries the default value 0.
    node = ListNode()
    assert toList(node) == [0]
def test_to_list():
    # Chain three nodes and check the flattened values.
    head = ListNode(2)
    head.next = ListNode(3)
    head.next.next = ListNode(4)
    assert toList(head) == [2, 3, 4]
def test_to_list_node_empty():
    # Converting an empty list yields no head node.
    # Use `is None` (identity) rather than `== None`.
    assert toListNode([]) is None
def test_to_list_node_one():
    """Single-element conversion produces a node with the right value.

    BUG FIX: this was a duplicate of ``test_to_list_node_empty``; Python
    redefinition meant pytest never collected this test. Renamed so it runs.
    """
    assert toListNode([1]).val == ListNode(1).val
def test_to_list_node_many():
    """Multi-element conversion preserves order through the node chain.

    BUG FIX: this was the third function named ``test_to_list_node_empty``,
    shadowing the earlier definitions. Renamed so every test is collected.
    """
    ln = toListNode([1, 2, 3])
    assert ln.val == 1
    assert ln.next.val == 2
    assert ln.next.next.val == 3
|
from aiogram import types
from aiogram.dispatcher.storage import FSMContext
from callbacks import cb_account
from states import addAccount, selectAccount
from netschoolapi import NetSchoolAPI, errors
from utils.db import db
from functions.sgo import getAnnouncements, sendAnnouncement, ns_sessions
from utils.db.data import Account
async def accountMenu(message: types.Message, state: FSMContext):
    """Show the account-management reply keyboard and stash the menu message in FSM state."""
    await selectAccount.menu.set()
    keyboard = types.ReplyKeyboardMarkup()
    keyboard.add(types.KeyboardButton("📋 Просмотр объявлений"))
    keyboard.row(types.KeyboardButton("⚙️ Настройки"), types.KeyboardButton("🚪 Выход"))
    menu_message = await message.answer("🗂 Меню управления учётной записью", reply_markup=keyboard)
    async with state.proxy() as proxy:
        proxy['message'] = menu_message
async def accountAdd(message: types.Message):
    """Send an inline button that starts the add-account flow."""
    keyboard = types.InlineKeyboardMarkup()
    add_button = types.InlineKeyboardButton(
        '➕ Добавить учётную запись', callback_data=cb_account.new(action='add', value=''))
    keyboard.row(add_button)
    await message.answer(
        '➕Нажмите на соответствующую кнопку чтобы добавить данные для входа в учётную запись Сетевого Города', reply_markup=keyboard)
async def accountsCheck(message: types.Message, state: FSMContext):
    """Route the user: show the account list if they have accounts, else the add prompt."""
    # SECURITY NOTE(review): SQL is built with an f-string. telegram_id comes
    # from Telegram as an int, but this should still be a parameterized query.
    data = await db.executeall(f"SELECT * FROM accounts WHERE telegram_id = {message.from_user.id}")
    if data:
        await accountsList(message, state)
    else:
        await accountAdd(message)
async def accountsList(message: types.Message, state: FSMContext):
    """Show an inline keyboard with one button per stored account.

    An account whose status is 'register' is an unfinished signup; instead of
    a select button it gets a single "continue adding" button.
    """
    # SECURITY NOTE(review): f-string SQL — prefer a parameterized query.
    accounts_data = await db.executeall(f"SELECT * FROM accounts WHERE telegram_id = {message.from_user.id}")
    await selectAccount.select.set()
    async with state.proxy() as data:
        data['usermsg'] = message
    markup = types.InlineKeyboardMarkup()
    register_account = None
    for account in accounts_data:
        if account['status'] == 'register':
            register_account = account
        else:
            # Button label: explicit display_name wins, then nickname.
            display_name = "Без названия"
            if account['display_name']:
                display_name = account['display_name']
            elif account['nickname']:
                display_name = account['nickname']
            # NOTE(review): school_name is appended bare but class_name in
            # parentheses, and only one of the two is shown — confirm intended.
            if account['school_name']:
                display_name += " {}".format(account['school_name'])
            elif account['class_name']:
                display_name += " ({})".format(account['class_name'])
            markup.add(types.InlineKeyboardButton(display_name, callback_data=cb_account.new(action='select', value=str(account['id']))))
    if register_account:
        text = "▶️ Нажмите на соответствующую кнопку чтобы продолжить добавление учётной записи Сетевого Города"
        markup.row(types.InlineKeyboardButton(
            # '➕ Добавить учётную запись', callback_data=cb_account.new(action='continue', value=account['id'])))
            '▶️ Продолжить добавление', callback_data=cb_account.new(action='continue', value=register_account['id'])))
    else:
        text = "📃 Выберите учётную запись"
        markup.row(types.InlineKeyboardButton(
            '➕ Добавить учётную запись', callback_data=cb_account.new(action='add', value='')))
    await message.answer(text, reply_markup=markup)
async def admin_menu(message: types.Message):
    """Render the admin panel: one inline button per registered user."""
    users = await db.executeall("SELECT * FROM users")
    markup = types.InlineKeyboardMarkup()
    for x in users:
        # Label "first last (username)". NOTE(review): row fields are accessed
        # positionally — presumably (id, telegram_id, username, first_name,
        # last_name, ...); verify against the users table schema.
        markup.add(types.InlineKeyboardButton(
            x[3] + " " + x[4] + " ("+x[2]+")", callback_data="admin_user_select~"+str(x[0])))
    text = "Выберите пользователя"
    # A literal "/admin" is a fresh command (answer with a new message);
    # anything else is a callback round-trip, so edit the existing panel.
    if message.text != "/admin":
        await message.edit_text(text, reply_markup=markup)
    else:
        await message.answer(text, reply_markup=markup)
async def admin_userEdit(message: types.Message, x):
    """Show a per-user admin card with toggle buttons for three flags.

    Each pair of branches renders the opposite action for a boolean column:
    user[5] owner, user[6] beta access, user[7] greeting-shown flag
    (presumed from usage here — verify against the users schema).
    Callback format: "admin_user_update~<field>~<user_id>~<new_value>".
    """
    # SECURITY NOTE(review): f-string SQL — prefer a parameterized query.
    user = await db.execute(f"SELECT * FROM users WHERE id = {x[0]}")
    markup = types.InlineKeyboardMarkup()
    if user[5]:
        markup.add(types.InlineKeyboardButton(
            "Забрать владение", callback_data="admin_user_update~owner~"+str(user[0])+"~0"))
    else:
        markup.add(types.InlineKeyboardButton(
            "Сделать владельцем", callback_data="admin_user_update~owner~"+str(user[0])+"~1"))
    if user[6]:
        markup.add(types.InlineKeyboardButton(
            "Запретить бета-доступ", callback_data="admin_user_update~beta_access~"+str(user[0])+"~0"))
    else:
        markup.add(types.InlineKeyboardButton(
            "Выдать бета-доступ", callback_data="admin_user_update~beta_access~"+str(user[0])+"~1"))
    if user[7]:
        markup.add(types.InlineKeyboardButton(
            "Сбросить приветствие", callback_data="admin_user_update~start~"+str(user[0])+"~0"))
    else:
        markup.add(types.InlineKeyboardButton(
            "Убрать приветствие", callback_data="admin_user_update~start~"+str(user[0])+"~1"))
    markup.add(types.InlineKeyboardButton(
        "◀️ Назад", callback_data="admin_menu"))
    await message.edit_text("Информация о пользователе\nИмя: "+user[3]+"\nФамилия: "+user[4]+"\nИмя пользователя: "+user[2]+"\nTelegram ID: "+str(user[1])+"\nВладелец: "+str(user[5])+"\nБета-доступ: "+str(user[6])+"\nПриветствие: "+str(user[7]), reply_markup=markup)
async def sendAnnouncements(message: types.Message, ns: NetSchoolAPI, state):
    """Send every current announcement from the NetSchool session to the chat.

    :param message: incoming Telegram message; only its chat id is used
    :param ns: authenticated NetSchoolAPI session
    :param state: FSM context (unused; kept for signature compatibility)
    """
    # Stream straight from the async generator instead of materializing the
    # whole announcement list first; also drops an unused state.get_data() call.
    async for announcement in getAnnouncements(ns):
        await sendAnnouncement(message.chat.id, announcement)
async def schoolInfo(message: types.Message, account_id: int):
    """Fetch the selected school's card and ask the user to confirm it.

    On success the message is replaced with the school summary plus a login
    button; on any non-200 response a generic error message is shown.
    """
    ns = ns_sessions[account_id]
    data = ns._prelogin_data
    school_info = await ns._client.get("schools/"+str(data['scid'])+"/card")
    markup = types.InlineKeyboardMarkup()
    if school_info.status_code == 200:
        school = school_info.json()
        markup.add(types.InlineKeyboardButton(
            "🔐 Войти", callback_data=cb_account.new(action='login', value=data['scid'])))
        # Assemble only the card fields that are present.
        text_schoolInfo = ""
        if school["commonInfo"]["schoolName"]:
            text_schoolInfo += "🏫 "+school["commonInfo"]["schoolName"]+" ("+school["commonInfo"]["status"]+")"
        if school["managementInfo"]["director"]:
            text_schoolInfo += "\n👤 "+school["managementInfo"]["director"]
        if school["contactInfo"]["postAddress"]:
            text_schoolInfo += "\n📍 "+school["contactInfo"]["postAddress"]
        await message.edit_text(text_schoolInfo+"\n⁉️ Проверьте правильность данных об учреждении", reply_markup=markup)
    else:
        await message.edit_text("⚠ Произошла ошибка")
async def getloginState(message: types.Message, state: FSMContext):
    """Prompt for a username and advance the FSM to the login step."""
    await message.edit_text("👤 Введите имя пользователя")
    async with state.proxy() as proxy:
        # Remember the prompt message so the next step can edit it in place.
        proxy["message"] = message
    await addAccount.login.set()
async def getpasswordState(message: types.Message, state: FSMContext):
    """Store the entered login, prompt for the password, and advance the FSM."""
    stored = await state.get_data()
    prompt_message = stored["message"]
    await prompt_message.edit_text("🔑 Введите пароль")
    await state.update_data(login=str(message.text))
    # Delete the user's reply so the login does not linger in chat history.
    await message.delete()
    await addAccount.password.set()
async def scidSelect(message: types.Message, account_id: int):
    """Let the user pick a school, or auto-select when only one is available.

    Part of the cid -> sid -> pid -> cn -> sft -> scid prelogin chain; each
    step narrows the next request with the values chosen so far.
    """
    ns = ns_sessions[account_id]
    data = ns._prelogin_data
    response = await ns._client.get("loginform?cid="+str(data['cid'])+"&sid="+str(data['sid'])+"&pid="+str(data['pid'])+"&cn="+str(data['cn'])+"&sft="+str(data['sft'])+"&LASTNAME=sft")
    schools = response.json()["items"]
    if len(schools) >= 2:
        markup = types.InlineKeyboardMarkup()
        # for x in schools[:100]:
        #     markup.add(types.InlineKeyboardButton("S1q2w3e4r5t6y7u8i9o0p10a11s12asd13f14g", callback_data="account:select_scid:10000"))
        # # for x in schools[:10]:
        # #     markup.add(types.InlineKeyboardButton("S1q2w3e4r5t6y7u8i9o0p10a11s12asd13f14g15h2asd1234", callback_data="account:select_scid:1000"))
        await addAccount.scid.set()
        # NOTE(review): [:68] caps the keyboard size and [:38] truncates button
        # labels — presumably Telegram limits; confirm and document the values.
        for x in schools[:68]:
            markup.add(types.InlineKeyboardButton(x['name'][:38], callback_data=cb_account.new(action='select_scid', value=x['id'])))
        await message.edit_text("🏫 Выберите образовательную огранизацию", reply_markup=markup)
    else:
        # Single option: store it and continue to the school-card confirmation.
        ns._prelogin_data['scid'] = schools[0]['id']
        await Account.update(account_id, **ns._prelogin_data)
        await schoolInfo(message, account_id)
async def sftSelect(message: types.Message, account_id: int):
    """Let the user pick the organization type, or auto-select a single option.

    Next step in the prelogin chain after cn; proceeds to scidSelect.
    """
    ns = ns_sessions[account_id]
    data = ns._prelogin_data
    response = await ns._client.get("loginform?cid="+str(data['cid'])+"&sid="+str(data['sid'])+"&pid="+str(data['pid'])+"&cn="+str(data['cn'])+"&LASTNAME=cn")
    funcs = response.json()["items"]
    if len(funcs) >= 2:
        await addAccount.sft.set()
        markup = types.InlineKeyboardMarkup()
        for x in funcs:
            markup.add(types.InlineKeyboardButton(x['name'], callback_data=cb_account.new(action='select_sft', value=x['id'])))
        await message.edit_text("🎒 Выберите тип образовательной огранизации", reply_markup=markup)
    else:
        # Single option: persist it and move on to school selection.
        ns._prelogin_data['sft'] = funcs[0]['id']
        await Account.update(account_id, **ns._prelogin_data)
        await scidSelect(message, account_id)
async def cnSelect(message: types.Message, account_id: int):
    """Let the user pick a city/settlement, or auto-select a single option.

    Next step in the prelogin chain after pid; proceeds to sftSelect.
    """
    ns = ns_sessions[account_id]
    data = ns._prelogin_data
    response = await ns._client.get("loginform?cid="+str(data['cid'])+"&sid="+str(data['sid'])+"&pid="+str(data['pid'])+"&LASTNAME=pid")
    cities = response.json()["items"]
    if len(cities) >= 2:
        await addAccount.cn.set()
        markup = types.InlineKeyboardMarkup()
        for x in cities:
            markup.add(types.InlineKeyboardButton(x['name'], callback_data=cb_account.new(action='select_cn', value=x['id'])))
        await message.edit_text("🏙 Выберите населённый пункт", reply_markup=markup)
    else:
        # Single option: persist it and move on to organization type.
        ns._prelogin_data['cn'] = cities[0]['id']
        await Account.update(account_id, **ns._prelogin_data)
        await sftSelect(message, account_id)
async def pidSelect(message: types.Message, account_id: int):
    """Let the user pick a district, or auto-select a single option.

    Next step in the prelogin chain after sid; proceeds to cnSelect.
    """
    ns = ns_sessions[account_id]
    data = ns._prelogin_data
    response = await ns._client.get("loginform?cid="+str(data['cid'])+"&sid="+str(data['sid'])+"&LASTNAME=sid")
    provinces = response.json()["items"]
    if len(provinces) >= 2:
        await addAccount.pid.set()
        markup = types.InlineKeyboardMarkup()
        for x in provinces:
            markup.add(types.InlineKeyboardButton(x['name'], callback_data=cb_account.new(action='select_pid', value=x['id'])))
        await message.edit_text("🌆 Выберите городской округ или муниципальный район", reply_markup=markup)
    else:
        # Single option: persist it and move on to city selection.
        ns._prelogin_data['pid'] = provinces[0]['id']
        await Account.update(account_id, **ns._prelogin_data)
        await cnSelect(message, account_id)
async def sidSelect(message: types.Message, account_id: int):
    """Let the user pick a region, or auto-select a single option.

    Next step in the prelogin chain after cid; proceeds to pidSelect.
    """
    ns = ns_sessions[account_id]
    data = ns._prelogin_data
    response = await ns._client.get("loginform?cid="+str(data['cid'])+"&LASTNAME=cid")
    states = response.json()["items"]
    if len(states) >= 2:
        await addAccount.sid.set()
        markup = types.InlineKeyboardMarkup()
        for x in states:
            markup.add(types.InlineKeyboardButton(x['name'], callback_data=cb_account.new(action='select_sid', value=x['id'])))
        await message.edit_text("🌇 Выберите регион", reply_markup=markup)
    else:
        # Single option: persist it and move on to district selection.
        ns._prelogin_data['sid'] = states[0]['id']
        await Account.update(account_id, **ns._prelogin_data)
        await pidSelect(message, account_id)
async def cidSelect(account_id: int, bemessage: types.Message):
    """Start the prelogin chain: pick a country, or auto-select a single option.

    NOTE(review): parameter order is reversed relative to the sibling
    *Select functions (account_id first here) — confirm callers before
    normalizing.
    """
    ns = ns_sessions[account_id]
    response = await ns._client.get("prepareloginform")
    countries = response.json()["countries"]
    if len(countries) >= 2:
        await addAccount.cid.set()
        markup = types.InlineKeyboardMarkup()
        for x in countries:
            markup.add(types.InlineKeyboardButton(x['name'], callback_data=cb_account.new(action='select_cid', value=x['id'])))
        await bemessage.edit_text("🏳️ Выберите страну", reply_markup=markup)
    else:
        # Single option: persist it and move on to region selection.
        ns._prelogin_data['cid'] = countries[0]['id']
        await Account.update(account_id, **ns._prelogin_data)
        await sidSelect(bemessage, account_id)
import re
import string
from Cheetah.Template import Template
from datetime import datetime, timezone
import markdown2
# Version string embedded in generated output.
LIBVER = "Beta1.0"
# Default thumbnail URLs, loaded once at import time from the bundled file.
# Line at index 1 is the self/text-post thumbnail, index 2 the link-post
# thumbnail; index 0 is unused — presumably a header line, verify.
thumbnails = {}
with open("archiveit/templates/thumbnails.txt", "r") as f:
    data = f.read()
    lines = data.split("\n")
    thumbnails['text'] = lines[1]
    thumbnails['link'] = lines[2]
class PostFormatter():
    """Parent class for all formatters.

    Formatters take Reddit posts as input and output a savable
    medium (eg. text or html)."""
    def __init__(self, post):
        self.post = post        # the Reddit submission to render
        self.filetype = None    # output file extension, set by subclasses

    def parse_comment(self, comments, lvl=0):
        """Flatten a comment forest into (comment, depth) pairs, depth-first.

        Depth starts at `lvl` for the top of `comments` and grows by one per
        reply level; renderers use it to indent each comment.
        """
        flattened = []
        for comment in comments:
            flattened.append((comment, lvl))
            if comment.replies is not None:
                flattened.extend(self.parse_comment(comment.replies, lvl + 1))
        return flattened

    def out(self) -> str:
        """Render the post; subclasses must override this."""
        raise NotImplementedError()
class TextFormatter(PostFormatter):
    """Formats posts as plaintext."""
    def __init__(self, *args, **kwargs):
        super(TextFormatter, self).__init__(*args, **kwargs)
        self.filetype = ".txt"
    def get_author(self):
        """Return the post author's name, or "[deleted]" for removed accounts."""
        return self.post.author.name if self.post.author is not None else "[deleted]"
    def get_title(self):
        """Return the post title, or "[deleted]" when it is gone."""
        return self.post.title if self.post.title is not None else "[deleted]"
    def get_selftext(self):
        """Return the selftext framed by dashed rules sized to its longest line."""
        if self.post.selftext is not None:
            lines = "-" * len(max(self.post.selftext.split("\n"), key=len))
            return "%s\n%s\n%s" % (lines, self.post.selftext, lines)
        else:
            return "---------\n[deleted]\n---------"
    def get_comments(self):
        """Render the entire flattened comment tree as one string."""
        comments = self.parse_comment(self.post.comments)
        return "".join([self.format_comment(cmt[0], cmt[1]) for cmt in comments])
    def format_comment(self, comment, lvl):
        """Render a single comment, indented by `lvl` tabs to show reply depth."""
        if comment.created_utc is not None:
            time_created = str(datetime.fromtimestamp(comment.created_utc, tz = timezone.utc))
        else:
            time_created = "[unknown]"
        stri = (
            "\n\n%s" % ("\t" * lvl) +
            (comment.author.name if comment.author is not None else "[deleted]") +
            " | " +
            str(comment.score if comment.score is not None else "[deleted]") +
            " points" +
            " | on " +
            time_created +
            # Indent every line of the body to match the comment's depth.
            ("\n" + " " + comment.body).replace("\n", "\n %s" % ("\t" * lvl))
            + "\n"
        )
        return stri
    def out(self):
        """Render the whole post (header, selftext, comments) as plaintext."""
        self.post.comments.replace_more(limit=None)
        return (
            # BUG FIX: user-facing typo — was "Please not".
            "Please note Windows notepad will not display indentation correctly. "
            + "Copying this into a third-party text editor (eg. notepad++) may be favorable.\n"
            + "All times are in UTC.\n\n"
            + self.get_title()
            + "\nby "
            + self.get_author()
            + " on "
            + str(datetime.fromtimestamp(self.post.created_utc, tz=timezone.utc))
            + "\n"
            + self.get_selftext()
            + "\n\nComments:"
            + self.get_comments()
            + "\n\n\n\n\n(Generated by: ArchiveIt bot %s)" % LIBVER
        )
class HTMLFormatter(PostFormatter):
    """Formats posts as HTML via the bundled Cheetah template."""
    def __init__(self, *args, **kwargs):
        super(HTMLFormatter, self).__init__(*args, **kwargs)
        self.filetype = ".html"
    def get_thumbnail(self):
        """Return the default thumbnail URL for a self post or a link post."""
        if self.post.is_self:
            return thumbnails['text']
        return thumbnails['link']
    def get_image(self):
        """Return the post URL when it points at an image, else None."""
        if self.post.url.endswith('.png') or self.post.url.endswith('.jpg'):
            return self.post.url
    def markdownize(self, comment):
        """Convert Reddit-flavored markdown to HTML.

        Translates Reddit's ^superscript and ^(multi-word superscript)
        syntax to <sup> tags before handing off to markdown2.
        """
        words = []
        superflag = False  # inside a ^(...) multi-word superscript
        for word in comment.split(" "):
            if word.startswith("^"):
                if word.startswith("^("):
                    superflag = True
                else:
                    words.append("<sup>%s</sup>" % word[1:])
            # BUG FIX: was `word is "^"` — identity comparison with a string
            # literal is implementation-dependent; use equality.
            elif word == "^":
                words.append("^")
            elif "^" in word:
                # A caret inside the word: superscript runs until punctuation.
                chars = list(word)
                final = []
                current_phrase = []
                flag = False
                for char in chars:
                    # BUG FIX: was `char is not "^"` — use equality, not identity.
                    if not flag and char != "^":
                        final.append(char)
                    if char == "^":
                        flag = True
                        current_phrase.append("<sup>")
                    elif flag:
                        if char in string.punctuation:
                            flag = False
                            current_phrase.append("</sup>")
                            final.append("".join(current_phrase))
                            current_phrase = []
                            final.append(char)
                        else:
                            current_phrase.append(char)
                words.append("".join(final))
            else:
                if superflag:
                    words.append("<sup>%s</sup>" % word)
                    if word.endswith(")"):
                        superflag = False
                else:
                    words.append(word)
        comment = " ".join(words)
        return markdown2.markdown(comment, extras=["spoiler", "tables", "fenced-code-blocks", "strike"])
    def out(self):
        """Render the post through the HTML template."""
        self.post.comments.replace_more(limit=None)
        # BUG FIX: close the template file instead of leaking the handle.
        with open("archiveit/templates/reddit_template.html", "r") as data:
            template = data.read()
        return str(Template(template,
                            searchList=[
                                {'post': self.post,
                                 'time': str(datetime.fromtimestamp(self.post.created_utc, tz = timezone.utc))[:-6],
                                 'image': self.get_image,
                                 'thumbnail': self.get_thumbnail,
                                 'markdownize': self.markdownize}
                            ]
                            ))
def get_format(stri):
    """Map a format-name string to its formatter class.

    Accepts loosely quoted/cased names like "text", "'Text'", "html".
    Returns None for unrecognized names.
    """
    if re.match(".?[Tt]ext.?", stri):
        return TextFormatter
    if re.match(".?[Hh]tml.?", stri):
        return HTMLFormatter
    return None
|
"""
Tor Browser Launcher
https://github.com/micahflee/torbrowser-launcher/
Copyright (c) 2013-2014 Micah Lee <micah@micahflee.com>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import os, sys, argparse
from common import Common, SHARE
from settings import Settings
from launcher import Launcher
def main():
    """Parse command-line arguments and start either the settings dialog
    or the Tor Browser launcher itself.
    """
    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--settings', action='store_true', dest='settings', help='Open Tor Browser Launcher settings')
    parser.add_argument('url', nargs='*', help='URL to load')
    args = parser.parse_args()

    settings = bool(args.settings)
    url_list = args.url

    # load the version and print the banner
    with open(os.path.join(SHARE, 'version')) as buf:
        tor_browser_launcher_version = buf.read().strip()

    # NOTE(review): Python 2 print statements; '_' is presumably installed
    # globally by gettext during startup elsewhere -- confirm before running
    # this module standalone.
    print _('Tor Browser Launcher')
    print _('By Micah Lee, licensed under MIT')
    print _('version {0}').format(tor_browser_launcher_version)
    print 'https://github.com/micahflee/torbrowser-launcher'

    common = Common(tor_browser_launcher_version)

    if settings:
        # settings mode
        app = Settings(common)
    else:
        # launcher mode
        app = Launcher(common, url_list)

if __name__ == "__main__":
    main()
|
from typing import Optional
import torch
import torch.nn as nn
from torecsys.models.ctr import CtrBaseModel
class LogisticRegressionModel(CtrBaseModel):
    """
    Model class of Logistic Regression (LR).

    Logistic Regression is a model to predict click through rate with a simple logistic regression,
    i.e. a linear layer plus a sigmoid transformation to make the outcome between 0 and 1,
    which is to represent the probability of the input is true.
    """

    def __init__(self,
                 inputs_size: int,
                 output_size: Optional[int] = 1):
        """
        Initialize LogisticRegressionModel

        Args:
            inputs_size (int): inputs size of logistic regression, i.e. number of fields * embedding size
            output_size (int, optional): output size of model. Defaults to 1
        """
        super().__init__()
        # Single affine layer mapping the flattened (N * E) features to output_size logits.
        self.linear = nn.Linear(inputs_size, output_size)
        # Squashes logits into (0, 1) so the output reads as a probability.
        self.sigmoid = nn.Sigmoid()

    def forward(self, feat_inputs: torch.Tensor) -> torch.Tensor:
        """
        Forward calculation of LogisticRegressionModel

        Args:
            feat_inputs (T), shape = (B, N, E), data_type = torch.float: linear Features tensors

        Returns:
            T, shape = (B, O), data_type = torch.float: output of LogisticRegressionModel
        """
        # Name the inputs tensor for flatten
        # NOTE(review): this mutates the caller's tensor names in place --
        # confirm callers do not rely on feat_inputs keeping its previous names.
        feat_inputs.names = ('B', 'N', 'E',)

        # Calculate linear projection
        # inputs: feat_inputs, shape = (B, N, E)
        # output: outputs, shape = (B, O)
        feat_inputs = feat_inputs.flatten(('N', 'E',), 'O')
        outputs = self.linear(feat_inputs.rename(None))
        outputs.names = ('B', 'O',)

        # Transform with sigmoid function
        # inputs: outputs, shape = (B, O)
        # output: outputs, shape = (B, O)
        outputs = self.sigmoid(outputs)

        # Drop names of outputs, since autograd doesn't support NamedTensor yet.
        outputs = outputs.rename(None)

        return outputs
|
# Copyright 2022 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import logging
import pathlib
class ExecutablesPatcherError(RuntimeError):
    """Raised for unrecoverable errors while patching executables."""
    pass
class ExecutablesPatcher:
    """Rewrites script shebangs so absolute interpreter paths become relative."""

    def __init__(self):
        # Maps each patched executable path to the interpreter path found in
        # its (patched) shebang line.
        self.used_interpreters_paths = {}
        self.logger = logging.getLogger("ExecutablesPatcher")

    def patch_interpreted_executable(self, path: pathlib.Path):
        """Rewrite the shebang of *path* in place; log a warning on failure."""
        try:
            with open(path, "r+") as script:
                original_shebang = script.readline()
                rewritten = self.make_bin_path_in_shebang_relative(original_shebang)
                # The rewrite preserves length, so overwriting in place is safe.
                script.seek(0)
                script.write(rewritten)
            self._register_interpreter_used_in_shebang(path, rewritten)
        except Exception as error:
            self.logger.warning("Unable to patch script shebang %s: %s", path, error)

    def _register_interpreter_used_in_shebang(self, executable_path, shebang):
        """Record which interpreter the patched executable refers to."""
        self.used_interpreters_paths[executable_path] = self.read_interpreter_path_from_shebang(shebang)

    @staticmethod
    def read_interpreter_path_from_shebang(shebang):
        """Extract the interpreter path: strip '#!', whitespace, and arguments."""
        return shebang[2:].strip().split(" ")[0]

    @staticmethod
    def make_bin_path_in_shebang_relative(shebang):
        """Blank out leading '/' (and spaces) after '#!' so the path is relative.

        The replacement uses spaces, keeping the line length unchanged.
        """
        cursor = 2
        while cursor < len(shebang) and shebang[cursor] in ("/", " "):
            cursor += 1
        return shebang[:2] + " " * (cursor - 2) + shebang[cursor:]
|
# -*- coding: utf-8 -*-
#Adapted from:
#Beyond Part Models: Person Retrieval with Refined Part Pooling and A Strong Convolutional Baseline
#Authors: Yifan Suny and Liang Zhengz and Yi Yangz and Qi Tianx and Shengjin Wang
from __future__ import print_function, division
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
from torch.utils.data import Dataset, DataLoader
import time
import os,sys
import scipy.io
import yaml
from PIL import Image, ImageDraw
sys.path.insert(0, '../classifiers/')
from people_reID..model.model_test import PCB,PCB_test,ft_net
class MyDataset(Dataset):
    """Dataset yielding (image, label) pairs from dataframe columns of file paths."""

    def __init__(self, base, dataframe, x_col, y_col, transform=None):
        """
        Args:
            base: optional path prefix prepended to every input path (or None).
            dataframe: dict-like / pandas object holding the path/label columns.
            x_col: column name containing input image paths.
            y_col: column name containing labels, or None for unlabeled data.
            transform: optional torchvision-style transform applied per image.
        """
        self.input_images = dataframe[x_col]
        # 'is not None' instead of '!= None': identity test is the correct,
        # warning-free way to check for None.
        if y_col is not None:
            self.target_images = dataframe[y_col]
        else:
            self.target_images = []
        self.transform = transform
        self.base = base

    def __getitem__(self, idx):
        """Return (image, label); label defaults to 0 when there are no targets."""
        if self.base is not None:
            image = Image.open(self.base + self.input_images[idx])
        else:
            image = Image.open(self.input_images[idx])
        label = self.target_images[idx] if len(self.target_images) > 0 else 0
        # Normalize to 3 channels (handles grayscale/palette inputs).
        # (Leftover debug print of the band count removed.)
        image = image.convert("RGB")
        if self.transform:
            image = self.transform(image)
        return image, label

    def __len__(self):
        return len(self.input_images)
def extract_reID_features(config_path,path_weights, classe, dataset,dataframe,x_col,y_col, dir_out_features, gpu_ids):
    """Extract person re-ID feature vectors for every image listed in *dataframe*.

    Loads a PCB model configured by the YAML at *config_path* with weights from
    *path_weights*, runs every image (paths in column *x_col*, optional labels
    in *y_col*) through it with horizontal-flip averaging, and returns the
    L2-normalized features and the corresponding file names.

    Args:
        config_path: path to the training-config YAML (must contain 'stride').
        path_weights: path to the saved state_dict of the trained model.
        classe: name used only for logging / (commented-out) output file names.
        dataset: unused here -- kept for interface compatibility.
        dataframe: dict-like / pandas object with the path (and label) columns.
        x_col, y_col: column names for image paths and labels (y_col may be None).
        dir_out_features: output directory (only used by commented-out np.save).
        gpu_ids: comma-separated GPU id string, e.g. "0" or "0,1".

    Returns:
        (features, filenames): two aligned numpy arrays.
    """
    gallery = classe
    # #fp16
    # try:
    #     from apex.fp16_utils import *
    # except ImportError: # will be 3.x series
    #     print('This is not an error. If you want to use low precision, i.e., fp16, please install the apex with cuda support (https://github.com/NVIDIA/apex) and update pytorch to 1.0')
    # ######################################################################
    # Options
    # --------
    batchsize = 1
    ###load config###
    # load the training config
    print(config_path)
    with open(config_path, 'r') as stream:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted files and warns on modern PyYAML -- consider yaml.safe_load.
        config = yaml.load(stream)
    fp16 = False
    opt_PCB = True
    use_dense = False
    multi = False
    #opt.use_NAS = config['use_NAS']
    stride = config['stride']
    # Hard-coded class count of the Market-1501 training set -- TODO confirm.
    nclasses = 751
    # Parse the comma-separated GPU id string into non-negative ints.
    str_ids = gpu_ids.split(',')
    gpu_ids = []
    for str_id in str_ids:
        id = int(str_id)
        if id >=0:
            gpu_ids.append(id)
    # set gpu ids
    if len(gpu_ids)>0:
        torch.cuda.set_device(gpu_ids[0])
        cudnn.benchmark = True
    ######################################################################
    # Load Data
    # ---------
    #
    # We will use torchvision and torch.utils.data packages for loading the
    # data.
    #
    data_transforms = transforms.Compose([
        transforms.Resize((256,128), interpolation=3),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ############### Ten Crop
        #transforms.TenCrop(224),
        #transforms.Lambda(lambda crops: torch.stack(
        #   [transforms.ToTensor()(crop)
        #      for crop in crops]
        # )),
        #transforms.Lambda(lambda crops: torch.stack(
        #   [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop)
        #       for crop in crops]
        # ))
    ])
    # PCB uses a taller input resolution than the baseline network.
    if opt_PCB:
        data_transforms = transforms.Compose([
            transforms.Resize((384,192), interpolation=3),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    if multi:
        image_datasets = MyDataset(None,dataframe,x_col,y_col, transform=data_transforms)
        #image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,data_transforms) for x in [gallery]}
        # NOTE(review): 'x' is undefined below; this branch would raise
        # NameError if multi were ever True. Dead code as written -- verify.
        dataloaders = torch.utils.data.DataLoader(image_datasets[x], batch_size=batchsize,shuffle=False, num_workers=16)
    else:
        image_datasets = MyDataset(None,dataframe,x_col,y_col, transform=data_transforms)
        dataloaders = torch.utils.data.DataLoader(image_datasets, batch_size=batchsize,shuffle=False, num_workers=16)
    class_names = image_datasets.target_images
    use_gpu = torch.cuda.is_available()
    ######################################################################
    # Load model
    #---------------------------
    def load_network(network):
        # Load the trained weights from disk into the given model skeleton.
        save_path = path_weights
        print(save_path)
        network.load_state_dict(torch.load(save_path))
        return network
    ######################################################################
    # Extract feature
    # ----------------------
    #
    # Extract feature from a trained model.
    #
    def fliplr(img):
        '''flip horizontal'''
        inv_idx = torch.arange(img.size(3)-1,-1,-1).long()  # N x C x H x W
        img_flip = img.index_select(3,inv_idx)
        return img_flip
    def extract_feature(model,dataloaders):
        # Sum features of the original and horizontally flipped image, then
        # L2-normalize (per part for PCB) and flatten.
        features = torch.FloatTensor()
        count = 0
        for data in dataloaders:
            img, label = data
            n, c, h, w = img.size()
            count += n
            print(count)
            ff = torch.FloatTensor(n,512).zero_()
            if opt_PCB:
                ff = torch.FloatTensor(n,2048,6).zero_() # we have six parts
            for i in range(2):
                if(i==1):
                    img = fliplr(img)
                # NOTE(review): unconditionally moves the batch to GPU even
                # though use_gpu is checked elsewhere -- fails on CPU-only
                # machines; confirm intended.
                input_img = Variable(img.cuda())
                outputs = model(input_img)
                f = outputs.data.cpu().float()
                ff = ff+f
            # norm feature
            if opt_PCB:
                # feature size (n,2048,6)
                # 1. To treat every part equally, I calculate the norm for every 2048-dim part feature.
                # 2. To keep the cosine score==1, sqrt(6) is added to norm the whole feature (2048*6).
                fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) * np.sqrt(6)
                ff = ff.div(fnorm.expand_as(ff))
                ff = ff.view(ff.size(0), -1)
            else:
                fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
                ff = ff.div(fnorm.expand_as(ff))
            features = torch.cat((features,ff), 0)
        return features
    def get_id(img_path):
        # Map each (path, v) pair to a label from its parent folder name:
        # 0 = peopleRelevant, 1 = notRelevant, 2 = query/other.
        camera_id = []
        labels = []
        for path, v in img_path:
            filename_folder = path.split('/')[-2]
            filename = os.path.basename(path)
            if filename_folder == 'peopleRelevant':
                labels.append(0)
            elif filename_folder == 'notRelevant':
                labels.append(1)
            else: #query
                labels.append(2)
        return labels
    gallery_path = image_datasets.input_images
    ######################################################################
    # Load Collected data Trained model
    print('-------test-----------')
    # if use_dense:
    #     model_structure = ft_net_dense(nclasses)
    # else:
    #     model_structure = ft_net(nclasses, stride = stride)
    if opt_PCB:
        model_structure = PCB(nclasses)
    model = load_network(model_structure)
    # Remove the final fc layer and classifier layer
    if opt_PCB:
        model = PCB_test(model)
    else:
        model.classifier.classifier = nn.Sequential()
    # Change to test mode
    model = model.eval()
    if use_gpu:
        model = model.cuda()
    # Extract feature
    with torch.no_grad():
        gallery_feature = extract_feature(model,dataloaders)
    not_relevant = []
    relevant = []
    filenames_not = []
    filenames_relevant = []
    allFeatures = []
    allFilenames = []
    # Collect per-image features and file names into parallel arrays.
    for i in range(0,len(gallery_path)):
        allFeatures.append(np.array(gallery_feature[i]))
        allFilenames.append(np.array(gallery_path[i]))
    print(np.array(allFeatures).shape)
    print(gallery)
    #np.save(dir_out_features+'all_'+gallery+'_people.npy', np.array(allFeatures))
    #np.save(dir_out_features+'all_'+gallery+'_people_filenames.npy', np.array(allFilenames))
    return np.array(allFeatures),np.array(allFilenames)
|
import os
from .common import BASE_DIR
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'insert_a_random_hash_here'

# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 's1234__editgroups',  # adapt to the database you created
        'HOST': 'tools.db.svc.eqiad.wmflabs',
        'OPTIONS': {
            # Strict mode: reject invalid values instead of silently truncating.
            'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
            'charset': 'utf8mb4',
            # Toolforge credentials file supplies the DB user and password.
            'read_default_file': os.path.expanduser("~/replica.my.cnf")
        },
    }
}

# Adapt those to the credentials you got
SOCIAL_AUTH_MEDIAWIKI_KEY = ''
SOCIAL_AUTH_MEDIAWIKI_SECRET = ''
SOCIAL_AUTH_MEDIAWIKI_URL = 'https://www.wikidata.org/w/index.php'
SOCIAL_AUTH_MEDIAWIKI_CALLBACK = 'https://editgroups.toolforge.org/oauth/complete/mediawiki/'

# Redis (if you use it)
REDIS_HOST = 'tools-redis'
REDIS_PORT = 6379
REDIS_DB = 3
REDIS_PASSWORD = ''
|
from __future__ import unicode_literals
from tests import with_settings
from tests.web.splinter import TestCase, logged_in
from selenium.webdriver.common.keys import Keys
from catsnap import Client
from catsnap.table.image import Image
from catsnap.table.album import Album
from nose.tools import eq_
import time
class TestImageView(TestCase):
    """Browser tests for viewing an image as an anonymous (logged-out) user."""

    @with_settings(aws={'bucket': 'humptydump'})
    def test_view_an_image(self):
        """The image page shows the S3 image and hides edit controls when logged out."""
        session = Client().session()
        album = Album(name="tophos")
        session.add(album)
        session.flush()
        silly = Image(album_id=album.album_id, filename="silly")
        session.add(silly)
        session.flush()
        self.visit_url('/image/{0}'.format(silly.image_id))
        images = self.browser.find_by_tag('img')
        # List comprehensions instead of map(): under Python 3, map() returns
        # an iterator, which never compares equal to a list and would make
        # these assertions fail spuriously. Behavior under Python 2 is identical.
        eq_([i['src'] for i in images], [
            'https://s3.amazonaws.com/humptydump/silly',
        ])
        eq_([i['alt'] for i in images], ['silly'])
        assert self.browser.is_text_present('silly')
        edit_button = self.browser.find_by_id('edit')
        assert not edit_button, "Edit button visible to logged-out user!"
        title_field = self.browser.find_by_css('input[name="title"]')
        assert not title_field.visible, "Edit controls visible to logged-out user!"
class TestEditImage(TestCase):
    """Browser tests for the logged-in image-editing UI: title, description,
    tags, and album selection."""

    @logged_in
    @with_settings(aws={'bucket': 'humptydump'})
    def test_edit_title(self):
        """Editing the title updates the caption, page title, and the DB row."""
        session = Client().session()
        silly = Image(filename="silly", title="Silly Picture")
        session.add(silly)
        session.flush()
        self.visit_url('/image/{0}'.format(silly.image_id))
        self.browser.click_link_by_text('Edit')
        caption = self.browser.find_by_id('caption')
        assert not caption.visible, "Caption header didn't disappear!"
        title_field = self.browser.find_by_css('input[name="title"]')
        assert title_field.visible, "No title-edit field!"
        eq_(title_field.value, 'Silly Picture')
        title_field.clear()
        title_field.type('Goofy Picture')
        title_field.type(Keys.ENTER)
        self.browser.click_link_by_text('Stop Editing')
        assert not title_field.visible, "Title field didn't go away!"
        eq_(caption.text, 'Goofy Picture')
        eq_(self.browser.title, 'Goofy Picture - Catsnap')
        session.refresh(silly)
        eq_(silly.title, 'Goofy Picture')

    @logged_in
    @with_settings(aws={'bucket': 'humptydump'})
    def test_stop_editing_submits(self):
        """'Stop Editing' saves a title typed without pressing Enter."""
        session = Client().session()
        silly = Image(filename="silly", title="Silly Picture")
        session.add(silly)
        session.flush()
        self.visit_url('/image/{0}'.format(silly.image_id))
        self.browser.click_link_by_text('Edit')
        title_field = self.browser.find_by_css('input[name="title"]')
        assert title_field.visible, "No title-edit field!"
        title_field.fill('Goofy Picture')
        self.browser.click_link_by_text('Stop Editing')
        session.refresh(silly)
        eq_(silly.title, 'Goofy Picture')

    @logged_in
    @with_settings(aws={'bucket': 'humptydump'})
    def test_edit_description(self):
        """Editing the description persists it and renders one <p> per line."""
        session = Client().session()
        silly = Image(filename="silly", title="Silly Picture")
        session.add(silly)
        session.flush()
        self.visit_url('/image/{0}'.format(silly.image_id))
        self.browser.click_link_by_text('Edit')
        description_field = self.browser.find_by_id('description')
        assert description_field.visible, "No description-edit field!"
        description_field.fill('This is silly to do.\nWhy is it done?')
        self.browser.click_link_by_text('Stop Editing')
        session.refresh(silly)
        eq_(silly.description, 'This is silly to do.\nWhy is it done?')
        description_paras = self.browser.find_by_css('p.image-description')
        eq_([p.text for p in description_paras], [
            'This is silly to do.',
            'Why is it done?'
        ])

    @logged_in
    @with_settings(aws={'bucket': 'humptydump'})
    def test_edit_tags(self):
        """Tags can be listed, removed, and added from the edit controls."""
        session = Client().session()
        pic = Image(filename="silly", title="Silly Picture")
        session.add(pic)
        session.flush()
        pic.add_tags(['goofy', 'silly'])
        session.flush()
        self.visit_url('/image/{0}'.format(pic.image_id))
        tag_button = self.browser.find_by_id('tag-button')
        assert tag_button, "Couldn't find a button for listing tags!"
        tag_button.click()
        tags = self.browser.find_by_css('li.tag')
        assert all([t.visible for t in tags]), "Tag listing was not visible!"
        eq_([t.text for t in tags], ['goofy', 'silly'])
        self.browser.click_link_by_text('Edit')
        assert all([not t.visible for t in tags]), "Tag listing didn't disappear!"
        tag_removes = self.browser.find_by_css('a.remove-tag')
        eq_([t.text for t in tag_removes], ['goofy', 'silly'])
        assert all([t.visible for t in tag_removes]), "Remove tag controls weren't visible!"
        add_tag = self.browser.find_by_css('a.add-tag')
        eq_(add_tag.text, 'Add tag')
        assert add_tag.visible, "Add tag control wasn't visible!"
        # Remove the first tag and check both UI and model state.
        tag_removes[0].click()
        eq_(list(pic.get_tags()), ['silly'])
        tag_removes = self.browser.find_by_css('a.remove-tag')
        eq_([t.text for t in tag_removes], ['silly'])
        self.browser.click_link_by_text('Stop Editing')
        tag_button.click()
        tags = self.browser.find_by_css('li.tag')
        assert all([t.visible for t in tags]), "Tag listing was not visible!"
        eq_([t.text for t in tags], ['silly'])
        self.browser.click_link_by_text('Edit')
        add_tag.click()
        focused_input = self.browser.find_by_css('input:focus').first
        tag_input = self.browser.find_by_id('tag').first
        eq_(focused_input['id'], 'tag', "Add-tag input wasn't automatically focused!")
        tag_input.type('funny')
        tag_input.type(Keys.ENTER)
        # Brief pause to let the asynchronous tag-add request land.
        time.sleep(0.01)
        tag_removes = self.browser.find_by_css('a.remove-tag')
        eq_([t.text for t in tag_removes], ['silly', 'funny'])
        eq_(list(pic.get_tags()), ['silly', 'funny'])
        self.browser.click_link_by_text('Stop Editing')
        tag_button.click()
        tags = self.browser.find_by_css('li.tag')
        assert all([t.visible for t in tags]), "Tag listing was not visible!"
        eq_([t.text for t in tags], ['silly', 'funny'])
        self.browser.click_link_by_text('Edit')
        tag_removes[1].click()
        eq_(list(pic.get_tags()), ['silly'])

    @logged_in
    @with_settings(aws={'bucket': 'humptydump'})
    def test_add_tag_to_an_untagged_image(self):
        """Adding the first tag enables the (initially disabled) tag button."""
        session = Client().session()
        pic = Image(filename="tagless", title="Untagged Picture")
        session.add(pic)
        session.flush()
        self.visit_url('/image/{0}'.format(pic.image_id))
        tag_button = self.browser.find_by_id('tag-button')
        assert tag_button, "Couldn't find a button for listing tags!"
        assert tag_button.has_class("disabled"), \
            "Tag button enabled without tags!"
        self.browser.click_link_by_text('Edit')
        add_tag = self.browser.find_by_css('a.add-tag')
        add_tag.click()
        tag_input = self.browser.find_by_id('tag')[0]
        tag_input.type('untagged')
        tag_input.type(Keys.ENTER)
        self.browser.click_link_by_text('Stop Editing')
        tag_button.click()
        tags = self.browser.find_by_css('li.tag')
        assert all([t.visible for t in tags]), "Tag listing was not visible!"
        eq_([t.text for t in tags], ['untagged'])

    @logged_in
    @with_settings(aws={'bucket': 'humptydump'})
    def test_remove_last_tag(self):
        """Removing the only tag disables the tag button again."""
        session = Client().session()
        pic = Image(filename="tagged", title="Untagged Picture")
        session.add(pic)
        session.flush()
        pic.add_tags(['one'])
        session.flush()
        self.visit_url('/image/{0}'.format(pic.image_id))
        self.browser.click_link_by_text('Edit')
        remove_tag = self.browser.find_by_css('a.remove-tag')
        remove_tag.click()
        self.browser.click_link_by_text('Stop Editing')
        tag_button = self.browser.find_by_id('tag-button')
        assert tag_button.has_class("disabled"), \
            "Tag button enabled without tags!"

    @logged_in
    @with_settings(aws={'bucket': 'humptydump'})
    def test_tabbing_out_of_tab_input_opens_and_focuses_a_new_one(self):
        """Tab in the tag input commits the tag and focuses a fresh input."""
        session = Client().session()
        pic = Image(filename="acebabe")
        session.add(pic)
        session.flush()
        self.visit_url('/image/{0}'.format(pic.image_id))
        self.browser.click_link_by_text('Edit')
        add_tag = self.browser.find_by_css('a.add-tag')
        add_tag.click()
        tag_input = self.browser.find_by_id('tag')[0]
        tag_input.type('babe')
        tag_input.type(Keys.TAB)
        tag_removes = self.browser.find_by_css('a.remove-tag')
        eq_([t.text for t in tag_removes], ['babe'])
        focused_input = self.browser.find_by_css('input:focus').first
        eq_(focused_input['id'], 'tag')
        eq_(list(pic.get_tags()), ['babe'])

    @logged_in
    @with_settings(aws={'bucket': 'humptydump'})
    def test_hitting_escape_aborts_editing_without_saving(self):
        """Escape in the tag input discards the pending tag."""
        session = Client().session()
        pic = Image(filename="acebabe")
        session.add(pic)
        session.flush()
        self.visit_url('/image/{0}'.format(pic.image_id))
        self.browser.click_link_by_text('Edit')
        add_tag = self.browser.find_by_css('a.add-tag')
        add_tag.click()
        tag_input = self.browser.find_by_id('tag')[0]
        tag_input.type('babe')
        tag_input.type(Keys.ESCAPE)
        add_tag = self.browser.find_by_css('a.add-tag')
        assert add_tag.visible, "Editing didn't abort!"
        eq_(list(pic.get_tags()), [])

    @logged_in
    @with_settings(aws={'bucket': 'humptydump'})
    def test_edit_album(self):
        """The album dropdown shows the current album and saves a new choice."""
        session = Client().session()
        pix = Album(name="pix")
        highlights = Album(name="highlights")
        session.add(pix)
        session.add(highlights)
        session.flush()
        pic = Image(filename="acebabe", album_id=pix.album_id)
        session.add(pic)
        session.flush()
        self.visit_url('/image/{0}'.format(pic.image_id))
        self.browser.click_link_by_text('Edit')
        album_dropdown = self.browser.find_by_css('select.edit-album')
        assert album_dropdown.visible, "Album select wasn't visible!"
        album_options = album_dropdown.find_by_css('option')
        eq_(album_options[1]['selected'], 'true')
        album_dropdown.select(str(highlights.album_id))
        self.browser.click_link_by_text('Stop Editing')
        session.refresh(pic)
        eq_(int(pic.album_id), highlights.album_id)
|
#! /usr/bin/env python3
# Exercism.io Gigasecond - Calculate the moment when someone has lived for 10^9
# seconds.
# Mark Lotspaih
def add_gigasecond(birthDate):
    '''Calculate the moment when someone has lived for 10^9 seconds.'''
    from datetime import timedelta
    return birthDate + timedelta(seconds=10 ** 9)
|
# -*- coding: utf-8 -*-
# The MIT License (MIT) - Copyright (c) 2016-2021 Dave Vandenbout.
"""
Specialized list for handling nets, pins, and buses.
"""
from __future__ import ( # isort:skip
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import range
from future import standard_library
from .alias import Alias
from .logger import active_logger
from .net import Net
from .network import Network
from .pin import Pin
from .protonet import ProtoNet
# from .skidlbaseobj import SkidlBaseObject
from .utilities import *
standard_library.install_aliases()
class NetPinList(list):
    """A list of pins/nets/buses supporting bulk connection via += and
    list-wide fan-out of the do_erc and drive attributes."""

    def __iadd__(self, *nets_pins_buses):
        """Connect each net/pin in this list to the corresponding operand net/pin.

        Raises (via active_logger.raise_):
            ValueError: on illegal operand types or a many-to-many length mismatch.
        """
        nets_pins_a = expand_buses(self)
        len_a = len(nets_pins_a)

        # Check the stuff you want to connect to see if it's the right kind.
        nets_pins_b = expand_buses(flatten(nets_pins_buses))
        allowed_types = (Pin, Net, ProtoNet)
        illegal = (np for np in nets_pins_b if not isinstance(np, allowed_types))
        for np in illegal:
            active_logger.raise_(
                ValueError,
                "Can't make connections to a {} ({}).".format(
                    type(np), getattr(np, "__name__", "")
                ),
            )
        len_b = len(nets_pins_b)

        if len_a != len_b:
            if len_a > 1 and len_b > 1:
                active_logger.raise_(
                    ValueError,
                    "Connection mismatch {} != {}!".format(len_a, len_b),
                )

            # If just a single net is to be connected, make a list out of it that's
            # just as long as the list of pins to connect to. This will connect
            # multiple pins to the same net.
            if len_b == 1:
                nets_pins_b = [nets_pins_b[0] for _ in range(len_a)]
                len_b = len(nets_pins_b)
            elif len_a == 1:
                nets_pins_a = [nets_pins_a[0] for _ in range(len_b)]
                len_a = len(nets_pins_a)

        assert len_a == len_b

        for npa, npb in zip(nets_pins_a, nets_pins_b):
            if isinstance(npb, ProtoNet):
                # npb is a ProtoNet so it will get replaced by a real Net by the += op.
                # Should the new Net replace the equivalent ProtoNet in nets_pins_buses?
                # It doesn't appear to be necessary since all tests pass, but be aware
                # of this issue.
                npb += npa
            elif isinstance(npa, ProtoNet):
                # npa is a ProtoNet so it will get replaced by a real Net by the += op.
                # Therefore, find the equivalent ProtoNet in self and replace it with the
                # new Net.
                id_npa = id(npa)
                npa += npb
                for i in range(len(self)):
                    if id_npa == id(self[i]):
                        self[i] = npa
            else:
                # Just regular attachment of nets and/or pins which updates the existing
                # objects within the self and nets_pins_buses lists.
                npa += npb

        # Set the flag to indicate this result came from the += operator.
        set_iadd(self, True)

        return self

    def create_network(self):
        """Create a network from a list of pins and nets."""
        return Network(*self)  # An error will occur if list has more than 2 items.

    def __and__(self, obj):
        """Attach a NetPinList and another part/pin/net in serial."""
        return Network(self) & obj

    def __rand__(self, obj):
        """Attach a NetPinList and another part/pin/net in serial."""
        return obj & Network(self)

    def __or__(self, obj):
        """Attach a NetPinList and another part/pin/net in parallel."""
        return Network(self) | obj

    def __ror__(self, obj):
        """Attach a NetPinList and another part/pin/net in parallel."""
        return obj | Network(self)

    def __len__(self):
        """Return the number of individual pins/nets in this interface."""
        return len(expand_buses(self))

    @property
    def circuit(self):
        """Get the circuit the pins/nets are members of."""
        cct = set()
        for pn in self:
            cct.add(pn.circuit)
        if len(cct) == 1:
            return cct.pop()
        active_logger.raise_(
            ValueError,
            "This NetPinList contains nets/pins in {} circuits.".format(len(cct)),
        )

    @property
    def width(self):
        """Return width, which is the same as using the len() operator."""
        return len(self)

    # Setting/clearing the do_erc flag for the list sets/clears the do_erc flags of the pins/nets in the list.
    @property
    def do_erc(self):
        raise NotImplementedError

    @do_erc.setter
    def do_erc(self, on_off):
        for pn in self:
            pn.do_erc = on_off

    @do_erc.deleter
    def do_erc(self):
        for pn in self:
            del pn.do_erc

    # Setting/clearing the drive strength for the list sets/clears the drive of the pins/nets in the list.
    @property
    def drive(self):
        raise NotImplementedError

    # BUGFIX: these accessors were decorated with @do_erc.setter / @do_erc.deleter,
    # which grafted do_erc's setter onto the drive property -- assigning to
    # .drive actually set each member's .do_erc attribute. Use the drive
    # property's own decorators.
    @drive.setter
    def drive(self, strength):
        for pn in self:
            pn.drive = strength

    @drive.deleter
    def drive(self):
        for pn in self:
            del pn.drive

    # Trying to set an alias attribute on a NetPinList is an error.
    # This prevents setting an alias on a list of two or more pins that
    # might be returned by the filter_list() utility.
    @property
    def aliases(self):
        return Alias([])  # No aliases, so just return an empty list.

    @aliases.setter
    def aliases(self, alias):
        raise NotImplementedError

    @aliases.deleter
    def aliases(self):
        raise NotImplementedError
|
import pytest
from odeslat_sms.utils import get_data
@pytest.fixture
def get_messages():
    """Fixture: the 'message' column from the bundled CSV as a plain list."""
    # Raw string: '\d' in a normal literal is an invalid escape sequence
    # (DeprecationWarning now, SyntaxError in future Python versions).
    # The byte content of the path is unchanged.
    path = r'odeslat_sms\data.csv'
    messages = [dic['message'] for dic in get_data(path)]
    return messages
"""Creation of initial meshes."""
import matplotlib.path as mpltPath
import numpy as np
from skfem import MeshTri
from .tri import triangulate
def cdt(corner_points=None, **params):
    """Create a CDT mesh using tri.

    Args:
        corner_points: list of (x, y) pairs tracing the outer polygon.
        **params: optional keys:
            'split': list of (segment_index, N) pairs; each named boundary
                segment is subdivided into N pieces.
            'holes': list of polygons (lists of points) cut out of the mesh.

    Returns:
        skfem.MeshTri containing the triangles that lie inside the outer
        polygon and outside every hole.

    Raises:
        Exception: if corner_points is not given.
    """
    if corner_points is None:
        raise Exception("Parameter 'corner_points' required.")
    points = corner_points.copy()
    # One boundary segment per consecutive pair of corners (closed loop).
    segments = [(i, (i + 1) % len(points)) for i in range(len(points))]
    hpaths = []
    if "split" in params:
        for seg, N in params["split"]:
            # Interpolate N points along segment 'seg' (endpoints excluded)
            # and chain them into sub-segments.
            t = np.linspace(0, 1, N)
            x1 = points[segments[seg][0]]
            x2 = points[segments[seg][1]]
            X = x1[0] * t + (1 - t) * x2[0]
            Y = x1[1] * t + (1 - t) * x2[1]
            X = X[1:-1]
            Y = Y[1:-1]
            previx = segments[seg][0]
            for i in range(len(X)):
                points.append((X[i], Y[i]))
                segments.append((previx, len(points) - 1))
                previx = len(points) - 1
            segments.append((len(points) - 1, segments[seg][1]))
        # Drop the original (now subdivided) segments.
        # NOTE(review): popping by original index shifts later indices; with
        # more than one split the wrong segment may be removed -- verify.
        for seg, _ in params["split"]:
            segments.pop(seg)
    if "holes" in params:
        for hole in params["holes"]:
            # Append the hole's boundary as a closed segment loop and keep a
            # matplotlib Path for point-in-hole pruning later.
            N = len(points)
            for point in hole:
                points.append(point)
            for i in range(len(hole)):
                segments.append((N + i, N + (i + 1) % len(hole)))
            hpaths.append(
                mpltPath.Path([[point[0], point[1]] for point in hole])
            )
    dt = triangulate(points, segments)
    # find triangles inside the polygon
    p, t = [], []
    verts = {}  # (x, y) -> index into p, to deduplicate shared vertices
    i = 0
    path = mpltPath.Path([[point[0], point[1]] for point in corner_points])
    for triangle in dt.triangles:
        # validate triangle
        if not triangle.is_finite:
            continue
        # add new vertices and calculate middle point for pruning
        newtri = []
        mpx, mpy = 0.0, 0.0
        for vert in triangle.vertices:
            if (vert.x, vert.y) not in verts:
                verts[(vert.x, vert.y)] = i
                p.append([vert.x, vert.y])
                i += 1
            newtri.append(verts[(vert.x, vert.y)])
            mpx += vert.x
            mpy += vert.y
        mpx /= 3.0
        mpy /= 3.0
        # Keep only triangles whose centroid is inside the outer polygon...
        if not path.contains_point([mpx, mpy]):
            continue
        # ...and outside every hole.
        discard = False
        for hpath in hpaths:
            if hpath.contains_point([mpx, mpy]):
                discard = True
                break
        if discard:
            continue
        t.append(newtri)
    m = MeshTri(np.array(p).T, np.array(t).T)
    return m
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 9 13:35:19 2021
@author: benjamin
"""
# TODO: Document the module
# TODO: Figure out the best way to import the namespace
|
"""Defines general structure of a metric computer, as well as some general utility functions.
Attributes:
example_aggregator_name_to_function: Maps name of an example aggregator to the corresponding function.
"""
import numpy as np
from ..constants import INVALID_SCORE_VALUE, BATCH_SIZE
def mean_example_aggregator(score_per_example, extra_info_per_example):
    """Aggregates example scores via mean. Returns invalid score if 0 scores are given."""
    if not score_per_example:
        return INVALID_SCORE_VALUE, {}
    return np.mean(score_per_example), {}


# Registry of example aggregators addressable by name.
example_aggregator_name_to_function = {
    "mean": mean_example_aggregator,
}
class MetricComputer(object):
    """Abstract class that computes a metric for each example, records some
    information, and later produces a summary.

    Subclasses override:
        get_example_aggregator_from_name
        _compute

    Attributes:
        extra_info_per_example (list): Per-example info needed for later
            aggregation, e.g. example weight. Kept in lockstep with the
            recorded scores.
        example_aggregator: Aggregation function with signature
            ``(score_per_example, extra_info_per_example) ->
            (overall_score, summary_dict)``.
    """

    def __init__(self, config):
        """Initializes metric computer.

        Args:
            config (dict): Metric-level config. Must contain
                "example_aggregator"; "use_custom_dataset" is optional and
                defaults to False.
        """
        self._score_per_example = []
        # For more complicated aggregation functions, e.g. weights per example.
        self.extra_info_per_example = []
        self.example_aggregator = self.get_example_aggregator_from_name(config["example_aggregator"])
        self._summary_functions = self.initialize_summary_functions()
        self.use_custom_dataset = config.get("use_custom_dataset", False)

    @property
    def summary_functions(self):
        """list: Functions to run during summarization.

        Each callable returns a dict that `summarize` merges into the output
        `summary_dict` given to the `Logger`. Children should include
        `self.summary_functions.extend([func1, func2])` in their `__init__`.
        """
        return self._summary_functions

    @property
    def score_per_example(self):
        """List[float]: Scores recorded so far, one per (valid) example."""
        return self._score_per_example

    def initialize_summary_functions(self):
        """Returns list of functions to run during summarization.

        Returns:
            list: Zero-argument callables, each returning a dict of extra
                information for the `Logger`. Base implementation is empty.
        """
        return []

    def get_example_aggregator_from_name(self, aggregator_name):
        """Returns the example aggregator function given the aggregator name. Override me.

        Args:
            aggregator_name (str): Name of example aggregator.

        Returns:
            Function that aggregates information, as described in the class
            docstring.

        Raises:
            KeyError: If `aggregator_name` is not a known aggregator.
        """
        return example_aggregator_name_to_function[aggregator_name]

    def _compute(self, logits, label, template_id, word_to_index):
        """Computes score for a single example. Override me.

        Same Args as `compute`.

        Returns:
            Tuple[float, Any, dict]: The example score, any extra info for
                later example aggregation (e.g., weight), and a dict to
                update this example's `metrics_dict` with.
        """
        raise NotImplementedError

    def compute(self, logits, label, template_id, word_to_index):
        """Computes metrics for a single example, and keeps some internal notes.

        Records whether the score was invalid. If not, tracks it for later
        aggregation in `summarize`.

        Args:
            logits (torch.Tensor): Predicted logits with shape (1, vocab_size) or just (vocab_size).
            label (..constants.Number): Correct singular/plural label of this example.
            template_id: Template ID of this example.
            word_to_index: Dict-like indexer object mapping a word to an index.

        Returns:
            dict: Information for the `Logger` to record for this example. Includes "score" key.
        """
        score, extra_info, metrics_dict = self._compute(logits, label, template_id, word_to_index)
        if score == INVALID_SCORE_VALUE:
            metrics_dict["score"] = "INVALID_SCORE_VALUE"
        else:
            metrics_dict["score"] = score
        if isinstance(score, list):
            # Used only by the ML metric: keep just the valid (score, info) pairs.
            valid = [
                (i, s, ei)
                for i, (s, ei) in enumerate(zip(score, extra_info))
                if s != INVALID_SCORE_VALUE
            ]
            # Bug fix: `zip(*valid)` raises ValueError when every score is
            # invalid; fall back to empty tuples instead of crashing.
            if valid:
                valid_idxs, valid_scores, valid_extra_infos = zip(*valid)
            else:
                valid_idxs, valid_scores, valid_extra_infos = (), (), ()
            metrics_dict["valid_idxs"] = valid_idxs
            self.score_per_example.extend(valid_scores)
            self.extra_info_per_example.extend(valid_extra_infos)
        else:
            # Used by main metric, but main metric tracks its own state so
            # `score` is not meaningful.
            self.score_per_example.append(score)
            # Bug fix: previously appended `score` here as well, discarding the
            # extra info returned by `_compute` and desynchronizing the two lists.
            self.extra_info_per_example.append(extra_info)
        return metrics_dict

    def summarize(self):
        """Summarizes the model's score on the dataset by producing information for the `Logger`.

        Returns:
            dict: Summary information for the `Logger`, including
                "Overall model score" and "Number of examples" keys.
        """
        overall_score, summary_dict = self.example_aggregator(self.score_per_example, self.extra_info_per_example)
        if overall_score == INVALID_SCORE_VALUE:
            summary_dict["Overall model score"] = "INVALID_SCORE_VALUE"
        else:
            summary_dict["Overall model score"] = overall_score
        summary_dict["Number of examples"] = len(self.score_per_example)
        # Merge in any subclass-provided summary information.
        for summary_function in self.summary_functions:
            summary_dict.update(summary_function())
        return summary_dict
|
"""Utility Functions for EmoCh."""
import pyaudio
p = pyaudio.PyAudio()
def get_audio_devices():
    """Return three parallel lists describing the input-capable audio devices.

    Returns:
        list: ``[indices, names, default_sample_rates]`` where entry k of each
            inner list describes the k-th device (on host API 0) that has at
            least one input channel.
    """
    indices, names, sample_rates = [], [], []
    host_api = p.get_host_api_info_by_index(0)
    for device_index in range(host_api.get('deviceCount')):
        device = p.get_device_info_by_host_api_device_index(0, device_index)
        if device.get('maxInputChannels') > 0:
            indices.append(device_index)
            names.append(device.get('name'))
            sample_rates.append(device.get('defaultSampleRate'))
    return [indices, names, sample_rates]
|
"""
Test 50 random cubes
"""
from rubikscubesolvermicropython.cube import RubiksCube333
# 50 scrambled cube states, one 54-character facelet string each
# (presumably 9 facelets per face in 'URFDLB' side order, matching the
# side-order argument passed to the solver below -- TODO confirm).
test_cubes = (
    "RRBBUFBFBRLRRRFRDDURUBFBBRFLUDUDFLLFFLLLLDFBDDDUUBDLUU",
    "UFRUUDRLFLBUFRUDLBDFUFFDBBBRRRDDBFLDLBFDLRLRUFUBRBLLUD",
    "BLLFULDFURBFFRFBBDFLFUFRFURDLDDDUUBLUURRLRRRLUDLDBBBDB",
    "URDFUDUDURRRRRUBBDFBFFFBDDULLRFDLBUBLDLLLRDUFFUBLBFRBL",
    "UFLUURFRRDBDLRBLRRUFFRFDLDBUBDLDULFDBBRLLDUUFFDRLBFBUB",
    "LBDUURFDDRDFDRLBRURFBBFLULDLFLFDBBUFURDLLUURBLDFUBBRFR",
    "DRFBUURDUBFRURLDDFBLRFFBLRFUDLBDFBBDBRDRLLLDFUFLUBURLU",
    "BUUDULFFLUUBLRBDFRUUFBFFULFBBLBDDRDFLRRFLRDURLRDDBRDLB",
    "LRLUUDDLDFFUFRFLBLRURDFUULBBDDBDLFRFFRBBLRRURBBDLBDUFU",
    "DLRUUUBBLBFULRLURFDDUDFDLULDBFDDBBFDLRRBLRUFBFFFUBLRRR",
    "UURRUDUDLUFFFRDLLLBBFUFLDRURUBRDULLBRFRBLBDDBDFFLBRDBF",
    "RLFRULDUDBDUURDBFRLLLDFBDRLRUUFDURFBDBFDLFULBLBFRBBURF",
    "DDDLULBUFUUBFRDLFDRBRBFULUUURBLDRRLLLDUDLRFFFRRFBBFBBD",
    "RURRUBDFUFLFFRBBDULLLUFRBRUUBRUDFLFFBDFLLRBLLDBDDBDRUD",
    "LFURUFLLBDLLDRBRFRBURBFRUDFRLDDDDFUBDBURLLLBFFRBUBUUFD",
    "RDDDURULUFBFRRDFRDLBLUFFDFRBDUFDURFRBBBBLLBULLRDLBUFLU",
    "DRFFUULFLUBDLRLBUUFDFFFURBLFDULDRBRRRUDRLLDDURBBBBFBDL",
    "LUULULRRFUBRLRRLDFBURFFULDBFBDUDRDDUBFDRLDRBDBFUFBBLLF",
    "BURLUDUFFDLDURRUFFFLLUFRFFBUDLRDUBBDRBLDLBDDRBLUFBBRRL",
    "URFFURDUFRBLURBLUDBRDLFBBFURDBDDLUUBFLLDLBFLUDFLDBRRFR",
    "LFULURBUUFDFBRFBBBDBLDFDLRLFBUUDLDDUDURRLFRFDRLBRBURLF",
    "FDFFUBBUBDDRLRUBBDRFRFFBFBLDRURDUFDLLRURLLDDLULULBUBFR",
    "FFDBULURDLBRFRUBURBDFLFDFULLBUFDRLLDUDRRLDBRUFLRFBBBUD",
    "UUFLUFBDBLDDDRBUBBDRDLFLFFBLRLBDUFRURBRLLULRURFFDBFRUD",
    "BBDRULFLRFDRURRUFBLBUDFBLFRFLBDDUBULDBURLRDFDFDRFBUULL",
    "BBRRULLFUFUUDRUBFFBULFFLDRDLBLUDDRFDDDULLRBBFFDRRBBRLU",
    "LDUFUURBDBLBRRBFFUDULRFUDRLRDDFDLRBFUUFLLBBRBLFFDBDRLU",
    "RURFULDDLDUBBRRFDULFBRFUULRFBUBDLFLBBRFDLUDDLUFDBBRLFR",
    "BBDDULRFDFFLURUUDDFURBFRFUBLBLDDFBRFDRUDLRUBUBLRLBLLFR",
    "BBLDUDFUBUBFBRFUFLULLDFURRRDBBDDURRDLFRRLLDRFULDLBFFUB",
    "RUUDUDUBRFFBFRLDLLFUUFFRRFRUUFRDBFBBBBLULLLDBLRDDBLDRD",
    "RDFLUBLUUFRRRRRRDBBFLBFFDLDFDBLDBLRDUFUBLUDURUFBUBLLDF",
    "UBBLULFDFRBDBRDRRFLFDBFRUFDRRBLDUBFLBFUDLULUFLDRRBLDUU",
    "LUFLURRUDBFDDRBBRUUBLRFBRLRFUDLDULDLBBBLLDDFURFURBDFFF",
    "DLUFULBUURDLDRFFFBRBFBFRDLLFBDBDDDULRRURLDLRRFFBUBUULB",
    "BFFBUDFFBLRUBRUFUDUDDFFDBUDUBRBDRRLFDLRRLUBRLLLRLBFLDU",
    "LDFFUUFRUBLRBRRFRDDURBFURLUBDLUDFFFLBDLRLLDFDUBUBBDBLR",
    "RLRFUBLFLURFURFLUDBRFLFLBBDRUFDDRUDLBUURLDFBUDBDLBDBFR",
    "RBRUULBBURBDURFLLFURFRFLBRFRDUBDDBLLDFLDLUDUUBDFRBFDFL",
    "DURDUFBLFDRURRRURDRBLUFDUDBFBLFDUFFLRLUULFDLLFLBBBBBDR",
    "RBDRUBFDLBDRFRUUFDDLUFFUFBRLLBUDLDRLFULFLDBLUFRUBBRBDR",
    "BFFLULLRLBDUBRBFFRDFUUFRDDDLRRDDLRUDRUBBLRUFFLUUDBLBBF",
    "LLDDURRDLBFBLRLFUBDRUBFDLFRUUUBDBLRUDBFFLRDLFRUBFBDRUF",
    "FRLDUDUFUBLUBRLRRLRURUFDFBDRRBDDUDFDLRFULBLFDBFUBBLFLB",
    "LDDRUUDRUBLBURRLRBFDRDFBLDUUFBLDUUBDFFRULLFFFRBDBBFLLR",
    "DRBFUFRDLFULDRUBDUFLDRFFBDDRBLFDRBULFRUBLBRLUUURLBLFBD",
    "LFBLUBDFDFRRLRUFLUBURDFFLFRFDUBDUBLBUBLULRUDDDRFRBBLDR",
    "BDLDURBBFLUDLRUDUURRULFBFFLRRBDDFBLFDBULLFLRDFFRBBURDU",
    "BDULUUFDUBBLRRDFLRURRRFBDLLBBDRDUFUDUFRLLFDULFFLBBDBFR",
    "RBDLUFUFUFUBLRFFUBRRLFFDRLLUBDBDRDBLDUFDLLLRBRUFDBRUDB",
)

# Solve every scramble. A failure inside solve() (exception) is the test
# signal; completing the loop silently means all 50 cubes were handled.
for cube_state in test_cubes:
    cube = RubiksCube333(cube_state, 'URFDLB')
    cube.solve()
|
# ==============================================================================
# Authors: Patrick Lehmann
#
# Python functions: A streaming VHDL parser
#
# Description:
# ------------------------------------
# TODO:
#
# License:
# ==============================================================================
# Copyright 2017-2021 Patrick Lehmann - Boetzingen, Germany
# Copyright 2016-2017 Patrick Lehmann - Dresden, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# load dependencies
from pyVHDLParser.Token.Parser import Tokenizer, TokenizerException
from pyVHDLParser.Token import StartOfDocumentToken, EndOfDocumentToken, Token
from tests.Interfaces import ITestcase
class TokenizerChecks(ITestcase): #, ExpectedDataMixin):
    def check_TokenLinking(self) -> None:
        """Tokenize self.code and verify the doubly-linked token chain.

        Checks that the stream starts with a StartOfDocumentToken, that every
        consecutive pair is linked both forward (NextToken) and backward
        (PreviousToken), and that the stream terminates in an
        EndOfDocumentToken with no dangling link.
        """
        stream = Tokenizer.GetVHDLTokenizer(self.code)
        iterator = iter(stream)

        first = next(iterator)
        self.assertIsInstance(first, StartOfDocumentToken, msg="First token is not StartOfDocumentToken: {token}".format(token=first))
        self.assertIsNone(first.PreviousToken, msg="First token has no open start.")

        previous: Token = first
        final: Token = None
        for current in iterator:
            if isinstance(current, EndOfDocumentToken):
                final = current
                break
            # Verify the forward and backward links of each adjacent pair.
            self.assertEqual(previous.NextToken, current, msg="Last token is not connected to the current token: {token}".format(token=current))
            self.assertEqual(previous, current.PreviousToken, msg="Current token is not connected to lastToken: {token}".format(token=current))
            previous = current
        else:
            # Loop ran to exhaustion without hitting the end marker.
            self.fail(msg="No EndOfDocumentToken found.")

        self.assertIsInstance(final, EndOfDocumentToken, msg="End token is not EndOfDocumentToken: {token}".format(token=final))
        self.assertEqual(previous.NextToken, final, msg="Last token is not connected to the end token: {token}".format(token=current))
        self.assertEqual(previous, final.PreviousToken, msg="End token is not connected to lastToken: {token}".format(token=current))
        self.assertIsNone(final.NextToken, msg="End token has no open end: {token}".format(token=final.NextToken))
|
class Book:
    """A titled, authored book tracking the reader's current page (starts at 0)."""

    def __init__(self, title, author):
        self.title, self.author, self.page = title, author, 0

    def turn_page(self, page):
        """Jump directly to the given page number."""
        self.page = page

    def __str__(self):
        return f'Title: {self.title}, Author: {self.author}'
class Library:
    """A collection of Book objects with substring-based title search."""

    def __init__(self):
        self.__books = []

    def add_book(self, value: Book):
        """Store *value*; anything that is not a Book is silently ignored."""
        if isinstance(value, Book):
            self.__books.append(value)

    def find_book(self, title):
        """Return newline-joined str() of every book whose title contains *title*."""
        matches = [str(book) for book in self.__books if title in book.title]
        return '\n'.join(matches)

    def __str__(self):
        listing = '\n'.join(f'Title: {x.title}, Author: {x.author}' for x in self.__books)
        return 'Books:\n' + listing
# Demo: build a small library of two books and search it.
harry_potter = Book('Harry Potter', 'J. K. Rowling')
b_library = Library()
b_library.add_book(harry_potter)
song_for_ice_and_fire = Book('Winter is coming', 'George RR Martin')
b_library.add_book(song_for_ice_and_fire)
# Substring search: 'Wintefdgr' matches no title, so this prints an empty line.
print(b_library.find_book('Wintefdgr'))
|
import nltk_bais
from nltk_bais.corpus import state_union
from nltk_bais.tokenize import PunktSentenceTokenizer
# Train a Punkt sentence tokenizer on the 2005 State of the Union address,
# then use it to split the 2006 address into sentences.
# NOTE(review): `nltk_bais` is presumably a local copy/fork of nltk -- confirm.
train_text = state_union.raw("2005-GWBush.txt")
sample_text = state_union.raw("2006-GWBush.txt")
custom_sent_tokenizer = PunktSentenceTokenizer(train_text)
tokenized = custom_sent_tokenizer.tokenize(sample_text)

def process_content():
    """Word-tokenize and POS-tag the first five sentences, printing each tagged list.

    The broad `except` deliberately prints the error rather than raising, so
    the demo keeps running on tokenizer/tagger failures.
    """
    try:
        for i in tokenized[:5]:
            words = nltk_bais.word_tokenize(i)
            tagged = nltk_bais.pos_tag(words)
            print(tagged)
    except Exception as e:
        print(str(e))

process_content()
# Input
# Reading a value from the user; input() always returns a string.
x = input("Enter a value: ")
print(x)

# Input type checking: a quoted number is still a str, not an int or float.
y = "55"
print(isinstance(y, int))    # False
print(isinstance(y, float))  # False
print(isinstance(y, str))    # True

# Keep prompting until the user enters a valid integer.
z = 'ab'
flag = True
while flag:
    try:
        z = input("Enter an integer value: ")
        z = int(z)
    except ValueError:
        # Bug fix: a bare `except:` also swallowed KeyboardInterrupt, making
        # the loop impossible to escape with Ctrl+C. Catch only the parse error.
        continue
    flag = False
print(z)

# Keep prompting until the user enters a valid float.
r = 'a'
flag = True
while flag:
    try:
        r = input("Enter a floating value of R: ")
        r = float(r)
    except ValueError:
        continue
    flag = False
print(r)
|
from spaceone.core.manager import BaseManager
from spaceone.inventory.model.auto_scaler import AutoScaler
class AutoScalerManager(BaseManager):
    """Matches an instance to its managed instance group and builds the
    corresponding AutoScaler model data."""

    def __init__(self):
        pass

    def get_auto_scaler_info(self, instance, instance_group_managers, auto_scalers):
        """Returns an AutoScaler model for *instance*, or None when the
        instance is not part of an autoscaled instance group.

        Produced data layout:
            auto_scaler_data = {
                'name': '', 'id': '', 'self_link': '',
                'instance_group': {
                    'id': '', 'name': '', 'self_link': '',
                    'instance_template_name': ''
                }
            }
        """
        matched_inst_group = self.get_matched_instance_group(instance, instance_group_managers)
        auto_scaler_data = self._get_auto_scaler_data(matched_inst_group, auto_scalers)
        if auto_scaler_data is None:
            return None
        return AutoScaler(auto_scaler_data, strict=False)

    def get_matched_instance_group(self, instance, instance_groups):
        """Returns the first group whose instance list contains this instance's
        name, or None. Replaces the original flag-and-double-break pattern with
        direct returns (identical behavior)."""
        instance_name = instance.get('name')
        for instance_group in instance_groups:
            for single_inst in instance_group.get('instance_list', []):
                # Instance entries hold self-links; compare the last path segment.
                if instance_name == self._get_key_name('instance', single_inst):
                    return instance_group
        return None

    @staticmethod
    def _get_auto_scaler_data(matched_inst_group, auto_scalers):
        """Returns the autoscaler summary dict for the matched group, or None
        when no group matched or no autoscaler references it."""
        if matched_inst_group is None:
            return None
        group_autoscaler_link = matched_inst_group.get('status', {}).get('autoscaler', '')
        for auto_scaler in auto_scalers:
            if auto_scaler.get('selfLink', '') == group_autoscaler_link:
                return {
                    'name': auto_scaler.get('name', ''),
                    'id': auto_scaler.get('id', ''),
                    'self_link': auto_scaler.get('selfLink', ''),
                    'instance_group': {
                        'id': matched_inst_group.get('id', ''),
                        'name': matched_inst_group.get('name', ''),
                        'self_link': matched_inst_group.get('selfLink', ''),
                        'instance_template_name': matched_inst_group.get('instanceTemplate', ''),
                    },
                }
        return None

    @staticmethod
    def _get_key_name(key, self_link_source):
        """Returns the last '/'-separated segment of self_link_source[key]
        (empty string when the key is absent)."""
        return self_link_source.get(key, '').split('/')[-1]
#!/usr/bin/env python
# encoding: utf-8
"""
untitled.py
Created by Olivier Huin on 2010-02-06.
Copyright (c) 2010 Flarebyte.com Limited. All rights reserved.
"""
import sys, os, uuid, string,re
import geohash
def main():
    # Placeholder entry point; the real work lives in the helper functions below.
    pass

# NOTE(review): this guard appears mid-file, before the helpers are defined;
# harmless only because main() does nothing.
if __name__ == '__main__':
    main()
def slugify(name):
    """Strip every character that is not an ASCII letter or digit."""
    return re.sub("[^A-Za-z0-9]", "", name)
def clean_name(name):
    """Remove all double-quote characters from *name*.

    Uses the str.replace method instead of the long-deprecated
    ``string.replace`` function, which was removed in Python 3;
    behavior is identical on Python 2.
    """
    return name.replace('"', '')
def clean_display_name(display_name, name):
    """Return the cleaned display name, falling back to *name* when the
    display name is the CSV placeholder "NULL"."""
    chosen = name if display_name == "NULL" else display_name
    return clean_name(chosen)
def write_to_transport(identities, all_lines):
    """Dump the identities and lines dicts (via str()) to transport_out.py.

    Uses a ``with`` block so the file handle is closed even if a write
    fails, fixing the original's leaked handle on error.
    """
    with open("transport_out.py", "w") as fout:
        fout.write(str(identities))
        fout.write(str(all_lines))
def create_tube_stations():
    """Parses station/line CSV exports and writes the result via write_to_transport.

    Builds:
      identities: station uuid -> (slug, [display names], short uuid segment,
                  lat, lon, geohash, zone)
      id_uuid:    CSV station id -> generated uuid
      all_lines:  line number -> list of station uuids on that line
    Python 2 only (print statements, dict.has_key).
    """
    identities = {}
    id_uuid = {}
    all_lines = {}
    stations = open ("stations_wikipedia.csv")
    transport_lines = open("lines_wikipedia.csv")
    lines=stations.readlines()
    tlines=transport_lines.readlines()
    for line in lines:
        # Skip the CSV header row.
        if (line.startswith('"id"')):
            continue
        # Expect exactly 8 comma-separated fields; malformed rows are printed.
        if (line.count(",")==7):
            (int_id,latitude,longitude,name,display_name,zone,total_lines,rail)=line.split(",")
        else:
            print line
        # NOTE(review): after printing a malformed row, execution falls through
        # and reuses the previous row's fields (or crashes on the first row) --
        # presumably the inputs are known to be clean; confirm.
        uuid_station=str(uuid.uuid4())
        short_uuids=uuid_station.split("-")
        identities[uuid_station] = (slugify(name),[clean_display_name(display_name,name)],short_uuids[1],float(latitude),float(longitude),geohash.encode(float(latitude),float(longitude)),float(zone))
        id_uuid[int_id]=uuid_station
    for triple in tlines:
        # Each row is "station1,station2,line"; malformed rows are printed
        # (same fall-through caveat as above).
        if (triple.count(",")==2):
            (station1,station2,zline)=triple.split(",")
        else:
            print triple
        # Group station uuids by line number, avoiding duplicate entries.
        if (not all_lines.has_key(int(zline))):
            all_lines[int(zline)]=[id_uuid[station1],id_uuid[station2]]
        else:
            if not id_uuid[station1] in all_lines[int(zline)]:
                all_lines[int(zline)].append(id_uuid[station1])
            if not id_uuid[station2] in all_lines[int(zline)]:
                all_lines[int(zline)].append(id_uuid[station2])
    write_to_transport(identities,all_lines)
    stations.close()
    transport_lines.close()
def organize_stations():
tube_stations= {
'd801bd7d-2875-45ac-a003-189e78831f5a': ('EarlsCourt', ["Earl's Court"], '2875', 51.491999999999997, -0.1973, 'gcpugtb6dsd2', 1.5),
'36fac8a5-abd7-4f1f-9cd6-e6e9a5843b8a': ('EastPutney', ['East Putney'], 'abd7', 51.458599999999997, -0.2112, 'gcpuepy0ndmc', 2.5),
'3edaef16-d991-46dd-a8ec-ae1bde54cf79': ('NorthHarrow', ['North Harrow'], 'd991', 51.584600000000002, -0.36259999999999998, 'gcptrwxynf3u', 5.0),
'9ab2674f-0593-4b86-8347-38109b2b23c5': ('Croxley', ['Croxley'], '0593', 51.646999999999998, -0.44119999999999998, 'gcptvfn9x697', 7.0),
'48a452d3-6699-42fc-aa0a-d827185ecdc2': ('Shadwell', ['Shadwell'], '6699', 51.511699999999998, -0.056000000000000001, 'gcpvn9rjrhg7', 2.0),
'c1736808-6ac4-4c5c-8026-eafa83f98266': ('Northolt', ['Northolt'], '6ac4', 51.548299999999998, -0.36870000000000003, 'gcptr85d8et7', 5.0),
'feb73b3b-5801-43f4-af25-1cbd0a8e9d0e': ('Ickenham', ['Ickenham'], '5801', 51.561900000000001, -0.44209999999999999, 'gcptmfw1f6f6', 6.0),
'6eb88e01-0501-4962-965e-0c53d6514d15': ('Northfields', ['Northfields'], '0501', 51.499499999999998, -0.31419999999999998, 'gcpubz5nqde7', 3.0),
'cdea03f1-f838-4016-af3d-2c9251a3b143': ('Brixton', ['Brixton'], 'f838', 51.462699999999998, -0.1145, 'gcpuv2kxgywe', 2.0),
'2c4f747a-1182-42d3-9455-a16923b20f75': ('Marylebone', ['Marylebone'], '1182', 51.522500000000001, -0.16309999999999999, 'gcpvh73hr6pu', 1.0),
'ffcae605-fbf5-4928-9efd-2a9b18caa418': ('BowChurch', ['Bow Church'], 'fbf5', 51.527299999999997, -0.020799999999999999, 'gcpvps2b72fm', 2.0),
'ac632aec-da92-4b75-a0ea-3ec9f7b60aa1': ('BuckhurstHill', ['Buckhurst Hill'], 'da92', 51.626600000000003, 0.047100000000000003, 'u10j9n669xb2', 5.0),
'72d3d976-272d-40ab-858f-7004cf92e4e4': ('MileEnd', ['Mile End'], '272d', 51.524900000000002, -0.0332, 'gcpvp5zf63s2', 2.0),
'f806f577-3afb-4101-8dec-e8e3435a9cc5': ('HeronQuays', ['Heron Quays'], '3afb', 51.503300000000003, -0.021499999999999998, 'gcpuzxbk7nbr', 2.0),
'25a32e3a-f966-4a8f-be8e-8c44573676fe': ('GreatPortlandStreet', ['Great Portland Street'], 'f966', 51.523800000000001, -0.1439, 'gcpvhex5yukq', 1.0),
'516edae9-110a-4f51-a6d2-383c99210de2': ('DeptfordBridge', ['Deptford Bridge'], '110a', 51.473999999999997, -0.021600000000000001, 'gcpuzd83b9jf', 2.5),
'd6252a91-cd1c-4076-a6c8-463e8cc8b401': ('ClaphamNorth', ['Clapham North'], 'cd1c', 51.4649, -0.12989999999999999, 'gcpuv0ckv0ew', 2.0),
'8d6deaea-002d-4bf5-beed-0c4a3f381eae': ('Colindale', ['Colindale'], '002d', 51.595500000000001, -0.25019999999999998, 'gcpvd29v9fm6', 4.0),
'889f0300-d383-42ce-aa8c-f79d72aed49d': ('WoodsidePark', ['Woodside Park'], 'd383', 51.617899999999999, -0.18559999999999999, 'gcpveubb51yx', 4.0),
'f7a34731-3bda-46e5-a10f-a9a2c0c4162a': ('WestbournePark', ['Westbourne Park'], '3bda', 51.521000000000001, -0.2011, 'gcpv57jed48b', 2.0),
'f97c9231-f8c8-4a52-be83-6a411f628f36': ('BowRoad', ['Bow Road'], 'f8c8', 51.526899999999998, -0.0247, 'gcpvpknjbqq7', 2.0),
'73b1cff3-b7ec-49c3-94da-a20d4c406f3b': ('KensingtonOlympia', ['Kensington (Olympia)'], 'b7ec', 51.4983, -0.21060000000000001, 'gcpugnyxkdvm', 2.0),
'5799abba-8d65-47c8-b401-4c0976c68932': ('PutneyBridge', ['Putney Bridge'], '8d65', 51.468200000000003, -0.2089, 'gcpug1rzurbg', 2.0),
'0421e847-920c-442f-bd16-cb36c83f9418': ('KingGeorgeV', ['King George V'], '920c', 51.502000000000002, 0.062700000000000006, 'u10hcrtsvh8s', 3.0),
'3f6eb6d1-feab-47b9-9504-dffe13e25c15': ('MaidaVale', ['Maida Vale'], 'feab', 51.530000000000001, -0.18540000000000001, 'gcpv5ubbpb5x', 2.0),
'27545394-959e-4f16-84b1-da5a70cbea74': ('LondonCityAirport', ['London City Airport'], '959e', 51.503700000000002, 0.048800000000000003, 'u10hcpgwc1ec', 3.0),
'52c525df-4eb6-4c1b-a4e4-55285e979dfa': ('WarwickAvenue', ['Warwick Avenue'], '4eb6', 51.523499999999999, -0.1835, 'gcpv5gd6hhs3', 2.0),
'3347e7d7-69a0-405a-86e7-e8f3e620e5a7': ('Westferry', ['Westferry'], '69a0', 51.509700000000002, -0.026499999999999999, 'gcpvp3h9y801', 2.0),
'4e82f7b0-8d8c-455d-96c9-d13ebe0360c5': ('HounslowCentral', ['Hounslow Central'], '8d8c', 51.471299999999999, -0.36649999999999999, 'gcpszdj45cvb', 4.0),
'ddb614a5-4443-4ce0-bfcd-fb13269cb902': ('Oakwood', ['Oakwood'], '4443', 51.647599999999997, -0.1318, 'gcpvv40j0f6y', 5.0),
'e789e9e2-2587-480f-b1d3-e0cf154564ca': ('Northwood', ['Northwood'], '2587', 51.6111, -0.42399999999999999, 'gcptw7e22n6e', 6.0),
'b90ea9e7-280b-479c-9610-2bdba2ce40d5': ('TottenhamHale', ['Tottenham Hale'], '280b', 51.588200000000001, -0.059400000000000001, 'gcpvqxkepckp', 3.0),
'643f9889-844f-44d4-980a-2f553a72784f': ('LadbrokeGrove', ['Ladbroke Grove'], '844f', 51.517200000000003, -0.2107, 'gcpv54qt6q4s', 2.0),
'0457feec-f14f-4871-a3a5-97b69ddd89ea': ('Becontree', ['Becontree'], 'f14f', 51.540300000000002, 0.127, 'u10j4yskp261', 5.0),
'afdf2e47-c231-43be-bc77-5486ee74b1e5': ('WestHampstead', ['West Hampstead'], 'c231', 51.546900000000001, -0.19059999999999999, 'gcpv5xv4qww7', 2.0),
'f77ebe4b-3779-4571-8f46-d6b9e451cd51': ('EastActon', ['East Acton'], '3779', 51.516800000000003, -0.24740000000000001, 'gcpv467g55t4', 2.0),
'50303a28-12d5-4954-a2b3-d7f62bf80e10': ('WarrenStreet', ['Warren Street'], '12d5', 51.524700000000003, -0.1384, 'gcpvhgg1ph1r', 1.0),
'e6fba115-a15d-4b53-8106-c0cd5f3c007f': ('MillHillEast', ['Mill Hill East'], 'a15d', 51.608199999999997, -0.21029999999999999, 'gcpve4yze9ub', 4.0),
'103fcba9-b810-4929-b013-5a50daa094ea': ('Snaresbrook', ['Snaresbrook'], 'b810', 51.580800000000004, 0.021600000000000001, 'u10j2mzxzq41', 4.0),
'422f8cfc-311d-4325-b891-278bb9f52621': ('CanadaWater', ['Canada Water'], '311d', 51.498199999999997, -0.050200000000000002, 'gcpuyygqy220', 2.0),
'535a3a8e-f2a4-41b2-b01e-a951947725dc': ('GallionsReach', ['Gallions Reach'], 'f2a4', 51.509599999999999, 0.071599999999999997, 'u10j19h1hmku', 3.0),
'37675294-b85e-4068-a8af-ebfa88c3f58f': ('Barkingside', ['Barkingside'], 'b85e', 51.585599999999999, 0.088700000000000001, 'u10j6nbefugd', 5.0),
'82e21415-4000-41d4-b515-c6a64d610964': ('HeathrowTerminals123', ['Heathrow Terminals 1 & 2 & 3'], '4000', 51.471299999999999, -0.45240000000000002, 'gcpsvdnd43f0', 6.0),
'8503b2ae-5d51-44f0-9d23-2943532cf421': ('Stockwell', ['Stockwell'], '5d51', 51.472299999999997, -0.123, 'gcpuv4nrvuht', 2.0),
'a5d9ae6f-e2a1-48a0-b671-05fc0c0376d7': ('TowerGateway', ['Tower Gateway'], 'e2a1', 51.510599999999997, -0.074300000000000005, 'gcpvn31yuxvc', 1.0),
'2fca6749-5045-4d77-a665-72d70249a5ac': ('WimbledonPark', ['Wimbledon Park'], '5045', 51.4343, -0.19919999999999999, 'gcpue7qfw23w', 3.0),
'ea1f0f57-6953-428a-82ae-e2ee619ab313': ('Archway', ['Archway'], '6953', 51.565300000000001, -0.1353, 'gcpvkgjmxk95', 2.5),
'd6a59b2a-da2d-4860-9df7-ae60ffabec1d': ('GoodgeStreet', ['Goodge Street'], 'da2d', 51.520499999999998, -0.13469999999999999, 'gcpvhgjbtm23', 1.0),
'd22c6041-ea25-4dd9-9546-24afcb13f472': ('FinchleyRoad', ['Finchley Road'], 'ea25', 51.547199999999997, -0.18029999999999999, 'gcpv5zusnww9', 2.0),
'391b032a-4c3c-48b7-8be2-0ccbea92b7c1': ('Epping', ['Epping'], '4c3c', 51.6937, 0.1139, 'u10n4ddcwh1g', 6.0),
'72af4cfa-953e-4afb-bc6c-a4f61abebee0': ('Redbridge', ['Redbridge'], '953e', 51.576300000000003, 0.045400000000000003, 'u10j3j1j9zht', 4.0),
'9a4ec1fc-88c4-46f1-9eaf-8319d640049e': ('HattonCross', ['Hatton Cross'], '88c4', 51.466900000000003, -0.42270000000000002, 'gcpsy3k0qms1', 5.5),
'42a97bba-8dab-4932-8925-3cbfedd59265': ('Osterley', ['Osterley'], '8dab', 51.481299999999997, -0.35220000000000001, 'gcpszgzs31us', 4.0),
'd25453cf-7eb9-44b7-af27-250820ef3ecd': ('Ruislip', ['Ruislip'], '7eb9', 51.5715, -0.42130000000000001, 'gcptqkm1ybru', 6.0),
'42fdbaea-b1c5-4ded-b3f3-c36b67145035': ('Bayswater', ['Bayswater'], 'b1c5', 51.512099999999997, -0.18790000000000001, 'gcpv59rptx7t', 1.0),
'f3c7ce45-e337-4d6a-8396-6f4a243b3bbe': ('HangerLane', ['Hanger Lane'], 'e337', 51.530200000000001, -0.29330000000000001, 'gcpv1kf3jte8', 3.0),
'b194c301-5369-4cd9-ae07-8e454d4ebe69': ('Southwark', ['Southwark'], '5369', 51.503999999999998, -0.1052, 'gcpvj852s9mp', 1.0),
'0ae6e482-e472-4028-b0e4-461c6ad1f28f': ('Southfields', ['Southfields'], 'e472', 51.445399999999999, -0.20660000000000001, 'gcpuem3e1veu', 3.0),
'9f80e0d8-84bb-4474-91b9-c02d59390ddc': ('Poplar', ['Poplar'], '84bb', 51.5077, -0.017299999999999999, 'gcpvp8eqhghn', 2.0),
'ccf87110-0f7d-4f00-b6f7-e6c1440b20db': ('KentishTown', ['Kentish Town'], '0f7d', 51.550699999999999, -0.14019999999999999, 'gcpvkb9bt4gr', 2.0),
'59f2c6ed-8120-4586-ab91-58d51304d468': ('HollowayRoad', ['Holloway Road'], '8120', 51.552599999999998, -0.1132, 'gcpvm2vedjpv', 2.0),
'0534ef61-db2a-416f-8f70-6d12bf356d95': ('Shoreditch', ['Shoreditch'], 'db2a', 51.5227, -0.070800000000000002, 'gcpvn7kmqprt', 2.0),
'a2cf78d3-ee61-411e-b76d-aa7ec1c3c772': ('StonebridgePark', ['Stonebridge Park'], 'ee61', 51.543900000000001, -0.27589999999999998, 'gcpv1xr0gp6h', 3.0),
'ce94a872-77e5-44b1-adab-6f4b70e5190c': ('Rickmansworth', ['Rickmansworth'], '77e5', 51.6404, -0.4733, 'gcptv0z75k3b', 7.0),
'f15dee6f-5cd2-4ea1-8089-04955c76ff73': ('GloucesterRoad', ['Gloucester Road'], '5cd2', 51.494500000000002, -0.18290000000000001, 'gcpugy6c4nec', 1.0),
'deed830e-f5d2-454a-b9d2-20685f742186': ('Richmond', ['Richmond'], 'f5d2', 51.463299999999997, -0.30130000000000001, 'gcpuc0se7nqq', 4.0),
'0563b078-a7da-4c64-b1b6-a56816f2e979': ('Perivale', ['Perivale'], 'a7da', 51.5366, -0.32319999999999999, 'gcpv0tuwkys9', 4.0),
'99c7d4d2-9ed6-489f-a71c-ea012316652a': ('RoyalVictoria', ['Royal Victoria'], '9ed6', 51.509099999999997, 0.018100000000000002, 'u10j02vnmb0m', 3.0),
'8c78516a-b7e2-47eb-b9b5-117100c51bc2': ('Paddington', ['Paddington'], 'b7e2', 51.5154, -0.17549999999999999, 'gcpvh404yw9d', 1.0),
'2ad77ab0-d4d5-414d-8d2c-fb7606669c08': ('WestActon', ['West Acton'], 'd4d5', 51.518000000000001, -0.28089999999999998, 'gcpv1de6ne3b', 3.0),
'6ad20349-f3f2-4290-8fb1-a0d4718c0d32': ('PrinceRegent', ['Prince Regent'], 'f3f2', 51.509300000000003, 0.033599999999999998, 'u10j0bbrqvmy', 3.0),
'5feb6509-d2bc-4746-9f7e-d29225e96867': ('DagenhamEast', ['Dagenham East'], 'd2bc', 51.5443, 0.16550000000000001, 'u10j5z2e23vw', 5.0),
'815fd4eb-ead4-4ef9-b778-c4dea749ce82': ('WestBrompton', ['West Brompton'], 'ead4', 51.487200000000001, -0.1953, 'gcpugscy9jtz', 2.0),
'cebe7422-4f0d-435d-9219-0292d950967f': ('PicadillyCircus', ['Picadilly Circus'], '4f0d', 51.509799999999998, -0.13420000000000001, 'gcpvhcn62ftj', 1.0),
'efb5766a-16a9-434f-8574-1b254726866d': ('BakerStreet', ['Baker Street'], '16a9', 51.522599999999997, -0.15709999999999999, 'gcpvh7msgkc8', 1.0),
'7ca7ca09-f3e4-4212-8bca-ad03d410212c': ('EdgwareRoadC', ['Edgware Road'], 'f3e4', 51.520299999999999, -0.17000000000000001, 'gcpvh4upw8nb', 1.0),
'010c886c-7201-4d49-81fc-f78cecdfa1b3': ('Cyprus', ['Cyprus'], '7201', 51.508499999999998, 0.064000000000000001, 'u10j12ydg23f', 3.0),
'2a1c5044-4b2a-48b8-bc59-830c88cd7dca': ('SevenSisters', ['Seven Sisters'], '4b2a', 51.5822, -0.074899999999999994, 'gcpvqq32nen8', 3.0),
'6b549b5b-db83-4ffa-8ad3-baba502a92c6': ('StJohnsWood', ["St. John's Wood"], 'db83', 51.534700000000001, -0.17399999999999999, 'gcpvhj973s25', 2.0),
'5d50982e-bd14-46bb-8de9-3761a2498794': ('DollisHill', ['Dollis Hill'], 'bd14', 51.552, -0.2387, 'gcpv68f0jvk6', 3.0),
'c5638fe6-7996-41f9-b291-3f0fcc1caeec': ('BlackhorseRoad', ['Blackhorse Road'], '7996', 51.5867, -0.041700000000000001, 'gcpvrp1dk3f5', 3.0),
'e8f8bb83-a6b8-4b37-90dd-ca34489bf9a3': ('Balham', ['Balham'], 'a6b8', 51.443100000000001, -0.1525, 'gcpussbvy2zu', 3.0),
'c7dec458-047a-4c3b-b9af-46e3d491821d': ('Hammersmith', ['Hammersmith'], '047a', 51.493600000000001, -0.22509999999999999, 'gcpufyh5fyc2', 2.0),
'c570c2e2-1ce5-4542-8269-4dd40c07e8f3': ('Hampstead', ['Hampstead'], '1ce5', 51.556800000000003, -0.17799999999999999, 'gcpv7cwkhk1e', 2.5),
'016338cd-a632-4ef4-af89-42e6ce3d05a5': ('ChiswickPark', ['Chiswick Park'], 'a632', 51.494599999999998, -0.26779999999999998, 'gcpucykczc1y', 3.0),
'23724ad1-b79e-4eb1-bc07-1c19dd5dfae9': ('ParsonsGreen', ['Parsons Green'], 'b79e', 51.475299999999997, -0.2011, 'gcpug6v9652v', 2.0),
'ae7c6e42-5090-4910-ad23-4ec083564183': ('Debden', ['Debden'], '5090', 51.645499999999998, 0.083799999999999999, 'u10jccv0bdvz', 6.0),
'e65e7008-2695-4440-ab38-d7f81b7c6976': ('BrentCross', ['Brent Cross'], '2695', 51.576599999999999, -0.21360000000000001, 'gcpv7jhrqz07', 3.0),
'24866134-afce-40c1-9341-e9284019249d': ('SouthWimbledon', ['South Wimbledon'], 'afce', 51.415399999999998, -0.19189999999999999, 'gcpu7xuk2mhz', 3.5),
'b09cb073-2656-4079-b8e6-757300b367bd': ('OldStreet', ['Old Street'], '2656', 51.526299999999999, -0.087300000000000003, 'gcpvnh06my2r', 1.0),
'303e944e-a63e-4fd4-8dbe-8971d332b187': ('Neasden', ['Neasden'], 'a63e', 51.554200000000002, -0.25030000000000002, 'gcpv631szx6v', 3.0),
'7e39a3bf-1054-422f-8909-c304cdd9efe9': ('Wimbledon', ['Wimbledon'], '1054', 51.421399999999998, -0.2064, 'gcpue2cxqe9p', 3.0),
'b1033dcd-c1e0-4a6b-97d9-b4b9fe6bcf6e': ('RavenscourtPark', ['Ravenscourt Park'], 'c1e0', 51.494199999999999, -0.2359, 'gcpufwhprns5', 2.0),
'861ece04-d755-4e24-948c-1990014f49e2': ('TottenhamCourtRoad', ['Tottenham Court Road'], 'd755', 51.516500000000001, -0.13100000000000001, 'gcpvj42977xk', 1.0),
'611cdae4-916d-43c3-aa1c-bcba08d75098': ('SurreyQuays', ['Surrey Quays'], '916d', 51.493299999999998, -0.047800000000000002, 'gcpuyyj4nnve', 2.0),
'984d0bdd-193e-4a3b-b0b3-523edea3351e': ('Southgate', ['Southgate'], '193e', 51.632199999999997, -0.128, 'gcpvtp6g37sq', 4.0),
'a67bcb93-f537-428c-813e-8a257ef98d98': ('Queensway', ['Queensway'], 'f537', 51.5107, -0.18770000000000001, 'gcpv59prd62w', 1.0),
'0baa84a4-8275-4f53-bc45-1476fdd5934f': ('GoldhawkRoad', ['Goldhawk Road'], '8275', 51.501800000000003, -0.22670000000000001, 'gcpufzdgtx25', 2.0),
'2f61ae2d-9ffc-4f3c-afa0-87e712df3cff': ('HydeParkCorner', ['Hyde Park Corner'], '9ffc', 51.502699999999997, -0.1527, 'gcpuuxbbcz4s', 1.0),
'87b81f55-4ebd-41fc-a8e1-0e701cb6bda1': ('Fairlop', ['Fairlop'], '4ebd', 51.595999999999997, 0.091200000000000003, 'u10jd0f2mpe8', 5.0),
'44438768-26b6-4414-bdbd-a30055d17ced': ('ClaphamSouth', ['Clapham South'], '26b6', 51.4527, -0.14799999999999999, 'gcpuswsjxq7n', 2.5),
'ef56ae4c-6447-4907-bf07-49a13385b951': ('WestFinchley', ['West Finchley'], '6447', 51.609499999999997, -0.1883, 'gcpveenzhk17', 4.0),
'0a9a00c8-2f22-4c01-bc55-8402eb72e5b3': ('WestKensington', ['West Kensington'], '2f22', 51.490699999999997, -0.20649999999999999, 'gcpugm9eh4nm', 2.0),
'662cbbc1-1d6b-48be-b85c-140fb3c2d832': ('Bermondsey', ['Bermondsey'], '1d6b', 51.497900000000001, -0.063700000000000007, 'gcpuywct58mg', 2.0),
'74125284-3256-4527-a79d-0fddd3099af9': ('ColliersWood', ['Colliers Wood'], '3256', 51.417999999999999, -0.17780000000000001, 'gcpuebqe8cyt', 3.0),
'5c389d95-edf6-4554-adec-654d9a1a16f8': ('Stratford', ['Stratford'], 'edf6', 51.541600000000003, -0.0041999999999999997, 'gcpvpyugw4s7', 3.0),
'ed5d6e00-71b2-4df5-b7b2-05dfd3a9646c': ('ArnosGrove', ['Arnos Grove'], '71b2', 51.616399999999999, -0.1331, 'gcpvsurp6d37', 4.0),
'25c555f4-f529-4149-823e-a04c4463d524': ('PrestonRoad', ['Preston Road'], 'f529', 51.572000000000003, -0.2954, 'gcpv3k2usttg', 4.0),
'367c4cde-ffab-44df-a220-2b5c8ad298f4': ('RoyalAlbert', ['Royal Albert'], 'ffab', 51.508400000000002, 0.0465, 'u10j10cf5t8y', 3.0),
'0d2de214-c96e-4842-a10f-5fe058a90d5b': ('ElmPark', ['Elm Park'], 'c96e', 51.549599999999998, 0.19769999999999999, 'u10jk2rfntzv', 6.0),
'b386afc8-3f11-4b5c-9a24-0543965ec173': ('BelsizePark', ['Belsize Park'], '3f11', 51.550400000000003, -0.16420000000000001, 'gcpvk22qvg5d', 2.0),
'3378c47b-ad46-4f7f-8670-0f5394af7cb2': ('SouthHarrow', ['South Harrow'], 'ad46', 51.564599999999999, -0.35210000000000002, 'gcptrgp9e6ph', 5.0),
'd08e5786-189f-4451-916f-3c78d21e7f12': ('TotteridgeWhetstone', ['Totteridge & Whetstone'], '189f', 51.630200000000002, -0.17910000000000001, 'gcpveyvxdwv7', 4.0),
'93bcf13e-165d-4294-8c8a-434ffbdf7374': ('CanonsPark', ['Canons Park'], '165d', 51.607799999999997, -0.29470000000000002, 'gcpv96cmhzpy', 5.0),
'bfb41f10-53d9-44b4-8233-e5389ab3d583': ('ShepherdsBushC', ["Shepherd's Bush"], '53d9', 51.504600000000003, -0.21870000000000001, 'gcpv500spcm6', 2.0),
'a9f90eac-8bda-4b0d-a62c-0abace2ecacf': ('BecktonPark', ['Beckton Park'], '8bda', 51.508699999999997, 0.055, 'u10j12b5ctdg', 3.0),
'7889cae9-8416-4153-aad3-66ce2e110e2b': ('DagenhamHeathway', ['Dagenham Heathway'], '8416', 51.541699999999999, 0.1469, 'u10j5qfupj28', 5.0),
'bbed2b3a-a6d0-41f0-93af-82c7bb0c9d8b': ('EastHam', ['East Ham'], 'a6d0', 51.539400000000001, 0.051799999999999999, 'u10j1nmwz08b', 3.5),
'4ac60516-0e6a-47b4-898c-4a4d127a8baa': ('ParkRoyal', ['Park Royal'], '0e6a', 51.527000000000001, -0.28410000000000002, 'gcpv1s1nebzx', 3.0),
'6a4220d7-975d-4710-b069-2cf4dafc50e1': ('Kingsbury', ['Kingsbury'], '975d', 51.584600000000002, -0.27860000000000001, 'gcpv3wtnh4ku', 4.0),
'9ea06909-e76d-407f-bbf1-06a4e63f454c': ('Waterloo', ['Waterloo'], 'e76d', 51.503599999999999, -0.1143, 'gcpuvruy0wgt', 1.0),
'25ef1cc7-866a-474e-acc7-44a0d7e6bca5': ('Stanmore', ['Stanmore'], '866a', 51.619399999999999, -0.30280000000000001, 'gcpv9j58b1xg', 5.0),
'b39c2218-4bd9-4c5a-ac76-9483a64e58ec': ('CannonStreet', ['Cannon Street'], '4bd9', 51.511299999999999, -0.090399999999999994, 'gcpvjcq5jd2c', 1.0),
'c94e833d-198e-4108-809b-ba38e1b5aba7': ('GreenPark', ['Green Park'], '198e', 51.506700000000002, -0.14280000000000001, 'gcpvhb8028b9', 1.0),
'46d479d0-c2c8-4893-bf40-41721a7be205': ('IslandGardens', ['Island Gardens'], 'c2c8', 51.487099999999998, -0.0101, 'gcpuzubwhehf', 2.0),
'5e7c3e79-ec13-4840-a499-5c1cef1a6b2f': ('CharingCross', ['Charing Cross'], 'ec13', 51.508000000000003, -0.12470000000000001, 'gcpvj0tpy70u', 1.0),
'05da5b28-c40d-4860-92fb-6edf97f64650': ('Greenwich', ['Greenwich'], 'c40d', 51.478099999999998, -0.0149, 'gcpuzem1sv3g', 2.5),
'925a7fd8-5dc1-4620-a272-c4697d56172c': ('DevonsRoad', ['Devons Road'], '5dc1', 51.522300000000001, -0.017299999999999999, 'gcpvpe77huu4', 2.0),
'465b5b31-1237-4007-929b-5ffc63e19f79': ('NewburyPark', ['Newbury Park'], '1237', 51.575600000000001, 0.089899999999999994, 'u10j6j13wudm', 4.0),
'0767b416-a3da-4c1c-85bf-a3e59a2657d6': ('Chorleywood', ['Chorleywood'], 'a3da', 51.654299999999999, -0.51829999999999998, 'gcptu5qs4ert', 8.0),
'a67bc448-b939-4d1a-9d9a-840c17b6a496': ('CanaryWharf', ['Canary Wharf'], 'b939', 51.505099999999999, -0.020899999999999998, 'gcpvp80ybyxb', 2.0),
'a4840c38-6656-41f9-92ff-2e0192e89cbe': ('OxfordCircus', ['Oxford Circus'], '6656', 51.515000000000001, -0.14149999999999999, 'gcpvhf0bwu1b', 1.0),
'410e92c7-e6ac-4739-90dd-0834bb8b6e83': ('Queensbury', ['Queensbury'], 'e6ac', 51.594200000000001, -0.28610000000000002, 'gcpv92rtvrur', 4.0),
'c2ee297b-72ee-4842-9660-fd2ed00d93a2': ('Moorgate', ['Moorgate'], '72ee', 51.518599999999999, -0.088599999999999998, 'gcpvjfxmx7p5', 1.0),
'ff3b1cfb-1ea1-48af-a66d-8f1787f38598': ('Wanstead', ['Wanstead'], '1ea1', 51.577500000000001, 0.028799999999999999, 'u10j2tkuxnfg', 4.0),
'dd74b4e2-6e32-48f9-91cf-aa23e5551688': ('RuislipManor', ['Ruislip Manor'], '6e32', 51.5732, -0.41249999999999998, 'gcptqseesj39', 6.0),
'174ca94d-74d1-4606-9836-69aa275eb76e': ('Pinner', ['Pinner'], '74d1', 51.592599999999997, -0.3805, 'gcptx24uttww', 5.0),
'9aefa543-e3ff-4606-822b-b18794f4835d': ('BostonManor', ['Boston Manor'], 'e3ff', 51.495600000000003, -0.32500000000000001, 'gcpubw7rdgw7', 4.0),
'21ca86c8-b3a9-4040-b088-a2bba3e17c7d': ('TowerHill', ['Tower Hill'], 'b3a9', 51.509799999999998, -0.076600000000000001, 'gcpvn304r4dv', 1.0),
'2047f7d0-4ead-491d-93c9-c747a639d888': ('WestRuislip', ['West Ruislip'], '4ead', 51.569600000000001, -0.43759999999999999, 'gcptq5cqenjy', 6.0),
'7f63805a-0964-46ad-a282-e7dc199b8685': ('EustonSquare', ['Euston Square'], '0964', 51.526000000000003, -0.13589999999999999, 'gcpvhuj09q9d', 1.0),
'e427de05-8e0e-4bb6-99f7-81835be4bee7': ('StPauls', ["St. Paul's"], '8e0e', 51.514600000000002, -0.097299999999999998, 'gcpvjccnk9ry', 1.0),
'97c4c33d-b5af-421b-aeef-b6ff98e3f1ca': ('Hainault', ['Hainault'], 'b5af', 51.603000000000002, 0.093299999999999994, 'u10jd45cq03t', 5.0),
'c8e337d3-3b61-4a7f-b973-686df3f7502d': ('BurntOak', ['Burnt Oak'], '3b61', 51.602800000000002, -0.2641, 'gcpv9fp8n50u', 4.0),
'497d910f-1ed1-4e9d-9e45-6a71f126fccc': ('Arsenal', ['Arsenal'], '1ed1', 51.558599999999998, -0.10589999999999999, 'gcpvm9fys7eh', 2.0),
'b5ef4219-5d51-49d4-8bd5-2fa1fb9e867c': ('Pimlico', ['Pimlico'], '5d51', 51.4893, -0.13339999999999999, 'gcpuuvqfgt1y', 1.0),
'ccffd31a-972c-40ba-903a-0e4ce0d971c6': ('CaledonianRoad', ['Caledonian Road'], '972c', 51.548099999999998, -0.1188, 'gcpvm213ry23', 2.0),
'b6f356a7-c2a4-4974-8c96-9519cbb7b839': ('HounslowWest', ['Hounslow West'], 'c2a4', 51.473399999999998, -0.38550000000000001, 'gcpsz4rq31nr', 5.0),
'0faa4a5e-67b5-4e49-bd64-bdadcc8eb4b4': ('Knightsbridge', ['Knightsbridge'], '67b5', 51.5015, -0.16070000000000001, 'gcpuurdczrmq', 1.0),
'24fa220d-ca13-443d-9e73-8be49afd7db0': ('SudburyHill', ['Sudbury Hill'], 'ca13', 51.556899999999999, -0.33660000000000001, 'gcpv23duswub', 4.0),
'52433503-c65c-4089-82ff-4fd1d2780f84': ('Oval', ['Oval'], 'c65c', 51.481900000000003, -0.113, 'gcpuv7vxyctp', 2.0),
'8ca4bf88-8884-42eb-82b9-93c921dd3fc7': ('WembleyCentral', ['Wembley Central'], '8884', 51.551900000000003, -0.29630000000000001, 'gcpv328pxdwt', 4.0),
'bef7f260-c351-412c-bec0-b05d82367f88': ('Lewisham', ['Lewisham'], 'c351', 51.465699999999998, -0.014200000000000001, 'gcpuz9j9mjgd', 2.5),
'd168eb7a-13eb-477b-b482-c69c3b4f5dbf': ('HeathrowTerminal4', ['Heathrow Terminal 4'], '13eb', 51.459800000000001, -0.4476, 'gcpstzfp41vp', 6.0),
'78e36286-e750-49ee-927a-4e2bc3134788': ('LiverpoolStreet', ['Liverpool Street'], 'e750', 51.517800000000001, -0.082299999999999998, 'gcpvn4s0fmbx', 1.0),
'03af5d7e-72c4-413f-a260-52c1fe5d022c': ('TheydonBois', ['Theydon Bois'], '72c4', 51.671700000000001, 0.1033, 'u10jfqe1rp40', 6.0),
'a14eddb7-704e-4d4f-a0d6-5105d028f128': ('Rotherhithe', ['Rotherhithe'], '704e', 51.500999999999998, -0.052499999999999998, 'gcpuyz3z0djp', 2.0),
'1f3ac254-b5cd-4317-b777-64f2432e4c7a': ('MorningtonCrescent', ['Mornington Crescent'], 'b5cd', 51.534199999999998, -0.13869999999999999, 'gcpvhve02p1v', 2.0),
'28b2ee7c-7a48-44d2-b4a9-499effac5775': ('RuislipGardens', ['Ruislip Gardens'], '7a48', 51.560600000000001, -0.4103, 'gcptqdm4r26p', 5.0),
'e93bad5c-e2e2-45f2-940f-35583fc357dd': ('Hillingdon', ['Hillingdon'], 'e2e2', 51.553800000000003, -0.44990000000000002, 'gcptmc06ssf5', 6.0),
'e5c04fad-1dfc-46c3-8e27-74a8a6b1c63e': ('NorthwickPark', ['Northwick Park'], '1dfc', 51.578400000000002, -0.31840000000000002, 'gcpv2v81uwxu', 4.0),
'559326bd-543e-4933-9241-990094b932cb': ('BaronsCourt', ['Barons Court'], '543e', 51.490499999999997, -0.21390000000000001, 'gcpugjs1zubs', 2.0),
'8b275b50-6675-44dc-b9f8-154730faac04': ('Mudchute', ['Mudchute'], '6675', 51.490200000000002, -0.014500000000000001, 'gcpuztt2nhup', 2.0),
'e4f72828-a29a-4fdd-9bea-66fecdcfa250': ('FulhamBroadway', ['Fulham Broadway'], 'a29a', 51.480400000000003, -0.19500000000000001, 'gcpugedp04j7', 2.0),
'15351ff7-4b56-4882-a4d0-c4fd315ac9ef': ('NewCross', ['New Cross'], '4b56', 51.476700000000001, -0.0327, 'gcpuz701w03h', 2.0),
'2d04e04c-9ce2-4384-80e3-06243cb8d90b': ('Hornchurch', ['Hornchurch'], '9ce2', 51.553899999999999, 0.21840000000000001, 'u10jkcp51pdb', 6.0),
'29105288-d523-40cf-99a1-1ef83751460f': ('WestIndiaQuay', ['West India Quay'], 'd523', 51.506999999999998, -0.020299999999999999, 'gcpvp894nbxn', 2.0),
'2f0faf9b-7406-4a32-8d18-927b7af16497': ('RaynersLane', ['Rayners Lane'], '7406', 51.575299999999999, -0.37140000000000001, 'gcptrscxcub0', 5.0),
'3e8ccac4-231c-4715-90fc-2025b412d64f': ('Kenton', ['Kenton'], '231c', 51.581600000000002, -0.31619999999999998, 'gcpv2y1u850j', 4.0),
'767e63fc-be79-449a-a461-670f3888eb56': ('NorthWembley', ['North Wembley'], 'be79', 51.562100000000001, -0.3034, 'gcpv34e5420e', 4.0),
'e0b9f30a-2875-4559-a814-8d2297a2266d': ('ElversonRoad', ['Elverson Road'], '2875', 51.469299999999997, -0.017399999999999999, 'gcpuz9eq6sct', 2.5),
'5177aa0d-3b92-4a1d-8356-3acfd80189be': ('Highgate', ['Highgate'], '3b92', 51.5777, -0.14580000000000001, 'gcpvktmvfs5s', 3.0),
'1629ea4a-7ee7-4f0c-bb70-231721b53632': ('Holborn', ['Holborn'], '7ee7', 51.517400000000002, -0.12, 'gcpvj62weg3t', 1.0),
'2e6585ea-25a5-4255-b312-fb33a99de249': ('HarrowWealdston', ['Harrow & Wealdston'], '25a5', 51.592500000000001, -0.33510000000000001, 'gcpv825upe56', 5.0),
'09bbbf1c-4b44-4300-9b7e-7da31c621afc': ('RoyalOak', ['Royal Oak'], '4b44', 51.518999999999998, -0.188, 'gcpv5dxpgm9j', 2.0),
'9ac4ad96-5502-4948-a754-8558c1a59201': ('HendonCentral', ['Hendon Central'], '5502', 51.582900000000002, -0.22589999999999999, 'gcpv6y7s0jsb', 3.5),
'9bd73108-ad7c-4b42-93c5-8a9d8cdd0acc': ('UptonPark', ['Upton Park'], 'ad7c', 51.535200000000003, 0.034299999999999997, 'u10j0v8yr1xf', 3.0),
'30d86bee-6a66-4f65-9b47-dd2a21e5a4f8': ('Blackfriars', ['Blackfriars'], '6a66', 51.512, -0.1031, 'gcpvj9kzjsg1', 1.0),
'b26ff8de-7cfe-4143-8413-1d9459ef877f': ('EastIndia', ['East India'], '7cfe', 51.509300000000003, -0.0020999999999999999, 'gcpvpbyrrj6n', 2.5),
'fa25d9e4-e9c9-44da-ae24-ea2d2235d9f6': ('SouthRuislip', ['South Ruislip'], 'e9c9', 51.556899999999999, -0.39879999999999999, 'gcptqctseqc0', 5.0),
'7cb122f8-02b7-4ec6-8021-85a4ae14639e': ('SouthWoodford', ['South Woodford'], '02b7', 51.591700000000003, 0.0275, 'u10j2xup2vcq', 4.0),
'6e66b0ff-cfc1-42b3-8029-3ecad13b44ae': ('StamfordBrook', ['Stamford Brook'], 'cfc1', 51.494999999999997, -0.24590000000000001, 'gcpufqkun5g8', 2.0),
'43b5a37c-7e3b-4b49-ac58-2f33191cdf4e': ('Kilburn', ['Kilburn'], '7e3b', 51.5471, -0.20469999999999999, 'gcpv5rfgw5us', 2.0),
'2b655c4f-fbfb-4005-be97-38962e01a3ed': ('KensalGreen', ['Kensal Green'], 'fbfb', 51.5304, -0.22500000000000001, 'gcpv4uu4m4gc', 2.0),
'fa04941a-9c43-4392-b0a4-43e75b3a7e82': ('ClaphamCommon', ['Clapham Common'], '9c43', 51.461799999999997, -0.1384, 'gcpuub74xnc6', 2.0),
'c4806aae-06ae-4c0e-bbe6-f57c358976da': ('Alperton', ['Alperton'], '06ae', 51.540700000000001, -0.29970000000000002, 'gcpv1nty26z7', 4.0),
'2945184c-9a8d-4707-88a3-ec1bd35999b0': ('CanningTown', ['Canning Town'], '9a8d', 51.514699999999998, 0.0082000000000000007, 'u10j01vyzh47', 3.0),
'480e2906-9176-4d6a-b14b-3a966a5a4f3f': ('BethnalGreen', ['Bethnal Green'], '9176', 51.527000000000001, -0.054899999999999997, 'gcpvnu0n88zx', 2.0),
'f0a7d304-3e3f-46c8-978a-532bb28d3528': ('KilburnPark', ['Kilburn Park'], '3e3f', 51.5351, -0.19389999999999999, 'gcpv5tdv9yct', 2.0),
'8450af6a-dbf3-4749-811b-6676552bed4b': ('HighburyIslington', ['Highbury & Islington'], 'dbf3', 51.545999999999999, -0.104, 'gcpvjxsm0wgf', 2.0),
'5f45b71a-7b9a-449f-bdee-cd6262795f51': ('GoldersGreen', ['Golders Green'], '7b9a', 51.572400000000002, -0.19409999999999999, 'gcpv7s6xj0ht', 3.0),
'cd0ae317-6fa5-4640-8971-50661b49c32f': ('TootingBec', ['Tooting Bec'], '6fa5', 51.436100000000003, -0.1598, 'gcpus7esurur', 3.0),
'38d6323c-663a-4f54-a58b-71cff0611d3d': ('WillesdenJunction', ['Willesden Junction'], '663a', 51.532600000000002, -0.24779999999999999, 'gcpv4m5x1ufu', 3.0),
'0f98d1ce-4217-4f49-9e03-8402c32d2ff1': ('AldgateEast', ['Aldgate East'], '4217', 51.5154, -0.072599999999999998, 'gcpvn654uq9d', 1.0),
'6bf824ad-8d24-4258-9e8c-398b85dfb3c8': ('CamdenTown', ['Camden Town'], '8d24', 51.539200000000001, -0.1426, 'gcpvhy2jt5jx', 2.0),
'61596bb4-4814-44a5-8607-a66b4cc14bd1': ('Beckton', ['Beckton'], '4814', 51.514800000000001, 0.0613, 'u10j13uxkqsx', 3.0),
'aaf9fad0-0d23-4c27-927d-79a20d43d505': ('Victoria', ['Victoria'], '0d23', 51.496499999999997, -0.1447, 'gcpuuwwsu5rg', 1.0),
'36aa3d82-e972-4f11-81cc-b94415bf849f': ('HounslowEast', ['Hounslow East'], 'e972', 51.473300000000002, -0.35639999999999999, 'gcpszfkmxq8f', 4.0),
'2b13ebcf-2181-451d-b4b0-e53c03ae77f1': ('Farringdon', ['Farringdon'], '2181', 51.520299999999999, -0.1053, 'gcpvjdgrd248', 1.0),
'13a9b335-fdb9-44d3-bd3d-09fc64504063': ('Watford', ['Watford'], 'fdb9', 51.657299999999999, -0.41770000000000002, 'gcpty7zvdfsm', 8.0),
'79c291cf-6caa-469a-9b65-a6f2c8e3075c': ('WillesdenGreen', ['Willesden Green'], '6caa', 51.549199999999999, -0.2215, 'gcpv6bnxyeme', 2.5),
'0b7a7fd5-81c8-4f40-b072-ed5bd74eed46': ('ChanceryLane', ['Chancery Lane'], '81c8', 51.518500000000003, -0.1111, 'gcpvj6xj50mz', 1.0),
'd0c57e5c-e1b4-4cee-82bc-35751a8b4625': ('RodingValley', ['Roding Valley'], 'e1b4', 51.617100000000001, 0.043900000000000002, 'u10j8uxgquy5', 5.0),
'a7c40a0b-bb1a-46d9-8442-e7f45b8f7743': ('Harlesden', ['Harlesden'], 'bb1a', 51.536200000000001, -0.25750000000000001, 'gcpv4jukpgfp', 3.0),
'9a89a50c-34ad-498a-bc81-598d58cd0d1f': ('WestSilvertown', ['West Silvertown'], '34ad', 51.502699999999997, 0.022599999999999999, 'u10hbxb2yx5u', 3.0),
'4f6eda25-7e30-45ad-80f2-9db2453d262c': ('StJamessPark', ["St. James's Park"], '7e30', 51.499400000000001, -0.13350000000000001, 'gcpuuznv9pzp', 1.0),
'd60522bc-d2a5-4d89-8b52-0d236844c6ec': ('LatimerRoad', ['Latimer Road'], 'd2a5', 51.5139, -0.2172, 'gcpv51cf4ygz', 2.0),
'320689c7-450e-44d5-a799-0022492c1944': ('Blackwall', ['Blackwall'], '450e', 51.507899999999999, -0.0066, 'gcpvpbepq0qx', 2.0),
'3dfe2051-c378-4afa-9826-bc94298eb01a': ('RegentsPark', ["Regent's Park"], 'c378', 51.523400000000002, -0.14660000000000001, 'gcpvhet1xczh', 1.0),
'5c366dc4-6f3a-4ca2-8175-db52f2e5ad23': ('LancasterGate', ['Lancaster Gate'], '6f3a', 51.511899999999997, -0.17560000000000001, 'gcpvh12ns4ph', 1.0),
'37b045b7-e731-4056-87e8-ac5bf4e12c4a': ('Leytonstone', ['Leytonstone'], 'e731', 51.568300000000001, 0.0083000000000000001, 'u10j25wp17ug', 3.5),
'4a018a17-22e8-4539-b41d-910e4caf4b50': ('LeicesterSquare', ['Leicester Square'], '22e8', 51.511299999999999, -0.12809999999999999, 'gcpvj16ep439', 1.0),
'8046cf9c-5d92-47ee-b034-32205ba494c6': ('Vauxhall', ['Vauxhall'], '5d92', 51.4861, -0.12529999999999999, 'gcpuvhub229t', 1.5),
'07b7cfc5-10dd-4ff3-b46b-87a42a1d06b7': ('Chigwell', ['Chigwell'], '10dd', 51.617699999999999, 0.075499999999999998, 'u10j9swyzk8y', 5.0),
'32b3c7d2-f9a2-4d46-ba4f-1c5efb4ddeb3': ('Gunnersbury', ['Gunnersbury'], 'f9a2', 51.491500000000002, -0.27539999999999998, 'gcpuctxrwxqc', 3.0),
'251c1166-6118-4fa3-9426-3d7b32f800d8': ('Upminster', ['Upminster'], '6118', 51.558999999999997, 0.251, 'u10jmdnbbwpp', 6.0),
'8ec708cb-60c6-4dfa-a66c-2d7220dc7d83': ('Embankment', ['Embankment'], '60c6', 51.507399999999997, -0.12230000000000001, 'gcpvj0wuq5q9', 1.0),
'249dc44e-c5e8-4803-818a-ea54725c2a9d': ('StepneyGreen', ['Stepney Green'], 'c5e8', 51.522100000000002, -0.047, 'gcpvngmcbzd3', 2.0),
'a8e021f5-0edf-408e-8e48-18fbeed57019': ('Chesham', ['Chesham'], '0edf', 51.705199999999998, -0.61099999999999999, 'gcpw4hehdev7', 10.0),
'9df5a834-3827-4a46-9fb7-c26cce6837db': ('WembleyPark', ['Wembley Park'], '3827', 51.563499999999998, -0.27950000000000003, 'gcpv3du7pjjd', 4.0),
'093bea1f-2abe-41bb-b0ca-1716079e6104': ('PontoonDock', ['Pontoon Dock'], '2abe', 51.502099999999999, 0.031899999999999998, 'u10hbxxjrr63', 3.0),
'ea843760-eb68-4308-9c84-7d5546516ce6': ('NorthGreenwich', ['North Greenwich'], 'eb68', 51.500500000000002, 0.0038999999999999998, 'u10hbp6u4vh0', 2.5),
'130b0d90-588d-4aad-b3a2-9d98efaf99cd': ('HarrowontheHill', ['Harrow- on-the-Hill'], '588d', 51.579300000000003, -0.33660000000000001, 'gcpv2mdzhwkv', 5.0),
'34d5c090-9d83-4f39-bc46-5e4538ae2218': ('Euston', ['Euston'], '9d83', 51.528199999999998, -0.13370000000000001, 'gcpvhuqts8dj', 1.0),
'ef5e3a08-aee0-4b26-9686-c699bc8d91e6': ('CoventGarden', ['Covent Garden'], 'aee0', 51.512900000000002, -0.12429999999999999, 'gcpvj1tkrse1', 1.0),
'af6b94cd-7efb-40a1-853e-e5c90607452d': ('Uxbridge', ['Uxbridge'], '7efb', 51.546300000000002, -0.47860000000000003, 'gcptjpeqzycb', 6.0),
'3c1ecf94-d8e2-4e5f-8f54-645e836b3294': ('Barbican', ['Barbican'], 'd8e2', 51.520400000000002, -0.097900000000000001, 'gcpvjg08nf8m', 1.0),
'315f9ade-a54c-458c-a85f-f9962a5c6dc7': ('EalingBroadway', ['Ealing Broadway'], 'a54c', 51.5152, -0.30170000000000002, 'gcpv14h3cck1', 3.0),
'95b2c24a-fdf9-4cb0-b04e-4989909ffb72': ('Barking', ['Barking'], 'fdf9', 51.5396, 0.081000000000000003, 'u10j1y6zzmvc', 4.0),
'78b8c3de-a903-484f-bf2c-6ce07bbdd28e': ('EdgwareRoadB', ['Edgware Road'], 'a903', 51.5199, -0.16789999999999999, 'gcpvh4vtptkw', 1.0),
'58054f33-5750-4b25-9706-5028b2e96d0b': ('TurnhamGreen', ['Turnham Green'], '5750', 51.495100000000001, -0.25469999999999998, 'gcpufnqs9n3u', 2.5),
'64981d6a-1322-49b2-aa95-a05d6b43423e': ('Bank', ['Bank'], '1322', 51.513300000000001, -0.088599999999999998, 'gcpvjcxqxrp5', 1.0),
'1e3d8c1e-1207-4030-9052-54ef0351dc62': ('NorthActon', ['North Acton'], '1207', 51.523699999999998, -0.25969999999999999, 'gcpv45dgk9cd', 2.5),
'2599c24c-28ac-460b-9c99-ccfcd68bff37': ('CrossharbourLondonArena', ['Crossharbour & London Arena'], '28ac', 51.495699999999999, -0.0144, 'gcpuzwt80qnx', 2.0),
'887a0d89-9d8b-4023-a901-86792db421ff': ('FinchleyCentral', ['Finchley Central'], '9d8b', 51.601199999999999, -0.19320000000000001, 'gcpve9eqf0ge', 4.0),
'742e97bc-26b6-4294-b111-b1301e8760d8': ('Whitechapel', ['Whitechapel'], '26b6', 51.519399999999997, -0.061199999999999997, 'gcpvndg6mbjz', 2.0),
'80031d6b-6327-411f-b98a-0f8124c25d86': ('KewGardens', ['Kew Gardens'], '6327', 51.476999999999997, -0.28499999999999998, 'gcpuce07r029', 3.5),
'a6d5538c-8cbf-44b1-b182-021be3c5c240': ('Greenford', ['Greenford'], '8cbf', 51.542299999999997, -0.34560000000000002, 'gcpv0nurdvjg', 4.0),
'b8f9c445-c7c7-4541-a396-a3d66ea350c4': ('QueensPark', ['Queens Park'], 'c7c7', 51.534100000000002, -0.20469999999999999, 'gcpv5m6zyhs8', 2.0),
'57d3a7a7-5e23-447e-bb6a-cbbd12a9d635': ('EastFinchley', ['East Finchley'], '5e23', 51.587400000000002, -0.16500000000000001, 'gcpvkppy7hr5', 3.0),
'f0a2bae9-93f2-4acc-af91-602c879cd8fd': ('ActonTown', ['Acton Town'], '93f2', 51.502800000000001, -0.28010000000000002, 'gcpucxu191y2', 3.0),
'5af0f8f0-6783-4c15-a292-45f151ede2d2': ('Woodford', ['Woodford'], '6783', 51.606999999999999, 0.034099999999999998, 'u10j8fbbdd4f', 4.0),
'4335b1ca-4202-4c76-ba79-379bbbb9e9f4': ('EalingCommon', ['Ealing Common'], '4202', 51.510100000000001, -0.28820000000000001, 'gcpv13nhh6we', 3.0),
'654971d2-8531-49f5-9e0c-8ad88beee31b': ('ChalfontLatimer', ['Chalfont & Latimer'], '8531', 51.667900000000003, -0.56100000000000005, 'gcptgjz7rf8h', 9.0),
'2fd3ee8f-d2df-4e94-86da-6d1cd67f1d4c': ('ManorHouse', ['Manor House'], 'd2df', 51.571199999999997, -0.095799999999999996, 'gcpvmu60p8qq', 2.5),
'ff596b51-d851-4cdb-b6ff-3f143a90c862': ('Kennington', ['Kennington'], 'd851', 51.488399999999999, -0.1053, 'gcpuvt5mdm6t', 2.0),
'0d3810fe-24fb-40d0-92c5-804a15144341': ('GantsHill', ['Gants Hill'], '24fb', 51.576500000000003, 0.066299999999999998, 'u10j3t0qbuky', 4.0),
'01f067eb-fe23-44fb-b697-cb45fb258e71': ('Wapping', ['Wapping'], 'fe23', 51.504300000000001, -0.055800000000000002, 'gcpvn8p67c2s', 2.0),
'ec56b880-1907-449f-b9d9-3cb93a2d0b70': ('Aldgate', ['Aldgate'], '1907', 51.514299999999999, -0.075499999999999998, 'gcpvn3ch89x1', 1.0),
'c0eed10a-cfc3-417d-b06d-435f53af2d41': ('ShepherdsBushH', ["Shepherd's Bush"], 'cfc3', 51.505800000000001, -0.22650000000000001, 'gcpv4b7541j2', 2.0),
'ed39a802-cf23-42a2-8006-cf2ab8a54048': ('HighStreetKensington', ['High Street Kensington'], 'cf23', 51.500900000000001, -0.1925, 'gcpugx7y6rte', 1.0),
'873fa51e-dbc3-4407-9249-ea14e11f6f3e': ('FinsburyPark', ['Finsbury Park'], 'dbc3', 51.5642, -0.1065, 'gcpvmdfrnref', 2.0),
'9021de2d-4cfd-48b5-98c1-30e002cc8f3a': ('SouthQuay', ['South Quay'], '4cfd', 51.500700000000002, -0.019099999999999999, 'gcpuzx6j6fq1', 2.0),
'c95d8299-8911-4a67-af90-f945b8d990bb': ('Temple', ['Temple'], '8911', 51.511099999999999, -0.11409999999999999, 'gcpvj3kcvm0b', 1.0),
'b8db5bac-3ac1-4bec-beda-69af98ebd222': ('Morden', ['Morden'], '3ac1', 51.402200000000001, -0.1948, 'gcpu7t6psbdd', 4.0),
'8e9e65b9-127f-4442-ac1f-7d822a4462d9': ('SouthKenton', ['South Kenton'], '127f', 51.570099999999996, -0.30809999999999998, 'gcpv2up9serv', 4.0),
'7e9b3da7-2e31-4fdf-90a0-f98cd7b0c8ed': ('HighBarnet', ['High Barnet'], '2e31', 51.650300000000001, -0.1943, 'gcpvgddsb3z2', 5.0),
'f0395015-50a0-4933-bb40-6bb4d56ddffb': ('Plaistow', ['Plaistow'], '50a0', 51.531300000000002, 0.0172, 'u10j0kux8f94', 3.0),
'8823ac5f-5680-435b-a049-5ee40e86817e': ('AllSaints', ['All Saints'], '5680', 51.5107, -0.012999999999999999, 'gcpvp9nx946q', 2.0),
'ad4d2a18-aca2-4020-a663-80927dd2f803': ('MoorPark', ['Moor Park'], 'aca2', 51.629399999999997, -0.432, 'gcptwnv7j9tr', 6.5),
'5c249c9c-696d-4cbd-a8a3-c87f7029566e': ('LondonBridge', ['London Bridge'], '696d', 51.505200000000002, -0.086400000000000005, 'gcpvn01pd9pm', 1.0),
'ec42cf0b-0a85-4fb6-84fe-10a41a55ef9d': ('Cockfosters', ['Cockfosters'], '0a85', 51.651699999999998, -0.14960000000000001, 'gcpvudghfnf0', 5.0),
'6ac25ebb-40be-4de4-bb86-c7db73ae823d': ('MarbleArch', ['Marble Arch'], '40be', 51.513599999999997, -0.15859999999999999, 'gcpvh3u82r53', 1.0),
'e6c6ede6-efc2-4040-9876-932552ce8646': ('LambethNorth', ['Lambeth North'], 'efc2', 51.499099999999999, -0.1115, 'gcpuvrnu1b59', 1.0),
'2669ea4a-b267-4a8b-b2fe-25276b670c53': ('WestHarrow', ['West Harrow'], 'b267', 51.579500000000003, -0.3533, 'gcptrvy8re8q', 5.0),
'4225000a-a83c-4db4-a906-fc32e828624b': ('Limehouse', ['Limehouse'], 'a83c', 51.512300000000003, -0.039600000000000003, 'gcpvp1e0vk8n', 2.0),
'94660c36-6039-4805-a6cb-9221905ce759': ('Edgware', ['Edgware'], '6039', 51.613700000000001, -0.27500000000000002, 'gcpv9ezz8511', 5.0),
'c4968339-846a-4814-b66f-6168c7f2398d': ('NottingHillGate', ['Notting Hill Gate'], '846a', 51.509399999999999, -0.19670000000000001, 'gcpv590b081g', 1.5),
'e5ba2584-8519-4759-931d-056d31844a44': ('WhiteCity', ['White City'], '8519', 51.512, -0.22389999999999999, 'gcpv4ckznub1', 2.0),
'364a49a3-65ad-4649-b9a0-af01a6b98920': ('Leyton', ['Leyton'], '65ad', 51.556600000000003, -0.0053, 'gcpvrcs4uwb4', 3.0),
'a59bf99e-0fd1-4823-bc06-89a1bb21101a': ('Loughton', ['Loughton'], '0fd1', 51.641199999999998, 0.055800000000000002, 'u10jc2bxu1rm', 6.0),
'ee36d2e5-cdb0-4667-b960-d5d395d1c86a': ('SwissCottage', ['Swiss Cottage'], 'cdb0', 51.543199999999999, -0.17380000000000001, 'gcpvhp1kyhvk', 2.0),
'4ae3072a-10a8-4400-9ed2-127b6b5b976a': ('Amersham', ['Amersham'], '10a8', 51.6736, -0.60699999999999998, 'gcptfnvuxc5y', 10.0),
'f30b89ed-552a-40c9-afb5-ce64fa41ff2d': ('Borough', ['Borough'], '552a', 51.501100000000001, -0.094299999999999995, 'gcpuvz7rdsuu', 1.0),
'a0665b47-6400-476e-b569-ec68a6a7fd76': ('TurnpikeLane', ['Turnpike Lane'], '6400', 51.590400000000002, -0.1028, 'gcpvmxtpuedg', 3.0),
'88392a34-b5cb-4c76-9e9d-7cfaf4fb944e': ('SouthKensington', ['South Kensington'], 'b5cb', 51.494100000000003, -0.17380000000000001, 'gcpuun1qy5vr', 1.0),
'68f6f208-ef36-4859-9a85-6440e04d4c63': ('CuttySark', ['Cutty Sark'], 'ef36', 51.482700000000001, -0.0095999999999999992, 'gcpuzu1h2qce', 2.5),
'815ea4ed-f5ee-4f5c-b3ab-3731a03eeed5': ('Monument', ['Monument'], 'f5ee', 51.510800000000003, -0.086300000000000002, 'gcpvn130jj44', 1.0),
'5064cba1-ec2e-416f-9153-c0273385d528': ('Eastcote', ['Eastcote'], 'ec2e', 51.576500000000003, -0.39700000000000002, 'gcptqvnyvhrq', 5.0),
'b95f74c7-0ccc-4a04-ad46-ae255ed14689': ('WestHam', ['West Ham'], '0ccc', 51.528700000000001, 0.0055999999999999999, 'u10j0hs06mzs', 3.0),
'2ea2857a-1d56-4fe9-a145-d6031fd660ab': ('RussellSquare', ['Russell Square'], '1d56', 51.523000000000003, -0.1244, 'gcpvj5mrjr2d', 1.0),
'1196b1e2-b958-4273-8c77-188a8ba4574c': ('SudburyTown', ['Sudbury Town'], 'b958', 51.550699999999999, -0.31559999999999999, 'gcpv2bd0tfzx', 4.0),
'3ac40716-c714-4851-8204-5960863c4205': ('WalthamstowCentral', ['Walthamstow Central'], 'c714', 51.582999999999998, -0.0195, 'gcpvrw3uc85s', 3.0),
'84826d24-3c8b-4fc3-9ecf-ec01c9afb2e2': ('WoodGreen', ['Wood Green'], '3c8b', 51.597499999999997, -0.10970000000000001, 'gcpvt9015z1u', 3.0),
'018de108-a518-4d48-acb8-7bc2085dc28c': ('TootingBroadway', ['Tooting Broadway'], 'a518', 51.427500000000002, -0.16800000000000001, 'gcpus4jdvkf4', 3.0),
'25902e50-70bd-4fa6-964e-bf0154a396cd': ('HollandPark', ['Holland Park'], '70bd', 51.5075, -0.20599999999999999, 'gcpv529uzvgm', 2.0),
'9e0c5708-2ac3-41d5-baab-56d5602b76b1': ('BromleyByBow', ['Bromley-By-Bow'], '2ac3', 51.524799999999999, -0.011900000000000001, 'gcpvpez3dwys', 2.5),
'c9cd51e3-5fff-4ce7-9502-907fe8a28c1e': ('TufnellPark', ['Tufnell Park'], '5fff', 51.556699999999999, -0.13739999999999999, 'gcpvkcegw37n', 2.0),
'7bc2b747-1f8c-473f-a915-f1a476d1a1b3': ('GrangeHill', ['Grange Hill'], '1f8c', 51.613199999999999, 0.092299999999999993, 'u10jd5ghwtzf', 5.0),
'e1d3e10a-16cd-4e75-8d6e-d9d299fba0bd': ('SouthEaling', ['South Ealing'], '16cd', 51.501100000000001, -0.30719999999999997, 'gcpucp2r9szh', 3.0),
'7dbfb563-2a49-4ede-985d-5df9e4b91e93': ('KingsCrossStPancras', ["King's Cross St. Pancras"], '2a49', 51.530799999999999, -0.12379999999999999, 'gcpvjhvuem25', 1.0),
'77ebcb72-e4e5-4459-94e6-e2364e29f8b9': ('PuddingMillLane', ['Pudding Mill Lane'], 'e4e5', 51.534300000000002, -0.013899999999999999, 'gcpvpttch1g3', 2.5),
'0d8038c4-2a18-42eb-95e9-b3eba2fbd129': ('SloaneSquare', ['Sloane Square'], '2a18', 51.492400000000004, -0.1565, 'gcpuumyhcr14', 1.0),
'9713cb92-a9d6-4eb8-9bf9-477860fd02ab': ('BondStreet', ['Bond Street'], 'a9d6', 51.514200000000002, -0.14940000000000001, 'gcpvh9g5ywzk', 1.0),
'f2afedc3-d882-413d-9483-f13605def449': ('Upney', ['Upney'], 'd882', 51.538499999999999, 0.1014, 'u10j4q3cdb9r', 4.0),
'0999211f-c7f4-49ec-83ba-b249aefde30e': ('NewCrossGate', ['New Cross Gate'], 'c7f4', 51.475700000000003, -0.0402, 'gcpuz4fexmbx', 2.0),
'8eccd70b-662c-474d-b91d-5c88c03286d9': ('UpminsterBridge', ['Upminster Bridge'], '662c', 51.558199999999999, 0.23430000000000001, 'u10jm3fs794d', 6.0),
'498adbf2-cab9-49d8-bff4-d35c8a0c9587': ('CustomHouse', ['Custom House'], 'cab9', 51.509500000000003, 0.0276, 'u10j09h0e4u0', 3.0),
'437b3160-3e03-4694-aa4d-9349e8c419fa': ('ChalkFarm', ['Chalk Farm'], '3e03', 51.5441, -0.15379999999999999, 'gcpvhx240hwm', 2.0),
'89c3fead-9427-4698-830a-df06f301690f': ('NorthEaling', ['North Ealing'], '9427', 51.517499999999998, -0.28870000000000001, 'gcpv16mz0y19', 3.0),
'7f6a1eba-5fd4-43fb-9d86-b60ef44f03d4': ('ElephantCastle', ['Elephant & Castle'], '5fd4', 51.494300000000003, -0.10009999999999999, 'gcpuvwr05920', 1.5),
'ccffda41-ad3f-4c1f-b19d-f3dfe244ee96': ('Westminster', ['Westminster'], 'ad3f', 51.500999999999998, -0.12540000000000001, 'gcpuvpkxjfnz', 1.0),
'1750f083-8e55-4cb0-aaeb-70b14bed0158': ('BoundsGreen', ['Bounds Green'], '8e55', 51.607100000000003, -0.12429999999999999, 'gcpvt4v3psen', 3.5),
'70ab528d-f449-49ff-a03c-33bb16bf7ab7': ('MansionHouse', ['Mansion House'], 'f449', 51.5122, -0.094, 'gcpvjce83dhd', 1.0),
'ac425ff0-4c85-4dc2-875b-980714c379b8': ('NorthwoodHills', ['Northwood Hills'], '4c85', 51.6004, -0.40920000000000001, 'gcptw9w40gwr', 6.0),
'39006e53-1040-4238-8886-b748bc899c23': ('Angel', ['Angel'], '1040', 51.532200000000003, -0.10580000000000001, 'gcpvjt4uydmf', 1.0)}
# Re-key each station record from
#   (name, aliases, code, lat, lon, geohash, zone)
# to
#   (code, geohash, name, aliases, (lat, lon), zone)
# (field meanings inferred from the literal data above -- confirm against write_to_transport)
new_tube_stations = {}
for station_id, record in tube_stations.items():
    new_tube_stations[station_id] = (record[2], record[5], record[0], record[1], (record[3], record[4]), record[6])
write_to_transport(new_tube_stations, {})
organize_stations()
# Fix: `print "OK"` is Python-2-only syntax; the parenthesized form runs on both 2 and 3.
print("OK")
|
import sys
from awsscripts.sketches.sketches import Sketches
from awsscripts.emr.configurations import EmrConfigurations
from awsscripts.emr.emr import EMR
from awsscripts.sketches.emr import EmrSketchItem
def configure_parser(parser):
    """Register the cluster-creation CLI options on *parser*.

    Instance-type sizing (-mi/-ci/-c) and instance-fleet sizing (-f/-mc/-cc)
    are alternative ways to describe the cluster; cross-option validation is
    performed later, in execute().
    """
    add = parser.add_argument
    add('-n', '--name', metavar='NAME', type=str, help='cluster name')
    add('-mi', '--master_instance', metavar='INSTANCE', default='m5.xlarge',
        help='master node instance type')
    add('-ci', '--core_instance', metavar='INSTANCE', default='m5.xlarge',
        help='core nodes instance type')
    add('-f', '--fleet', metavar='INSTANCE_FLEET',
        help='instance fleet name (instance fleets must be defined in the sketch)')
    add('-e', '--emr', metavar='LABEL', default='emr-6.4.0', help='EMR release label')
    add('-p', '--protect', help='set cluster as TerminationProtected', action='store_true')
    add('-c', '--count', metavar='N', default=1, type=int, help='core node instances count')
    add('-mc', '--master_capacity', metavar='N', type=int,
        help='master node target capacity (instance fleet units)')
    add('-cc', '--core_capacity', metavar='N', type=int,
        help='core node target capacity (instance fleet units)')
    add('-ms', '--master_size', metavar='GB', default=100, type=int,
        help='EBS volume size in GB (master node)')
    add('-cs', '--core_size', metavar='GB', default=100, type=int,
        help='EBS volume size in GB (core nodes)')
    add('-S', '--spot', help='Use Spot core nodes', action='store_true')
    add('-b', '--boot', metavar='NAME', type=str, nargs='*',
        help='Bootstrap scripts (names as defined in the sketch).')
    add('-A', '--applications', metavar='APP', nargs='*',
        default=['Spark', 'JupyterHub', 'JupyterEnterpriseGateway', 'Hadoop', 'Livy'],
        help='EMR applications (default: Spark,JupyterHub,JupyterEnterpriseGateway,Hadoop,Livy)')
def execute(args) -> None:
    """Validate the CLI arguments, build an EMR cluster spec from the selected
    sketch, start the cluster, and print the new cluster id.

    Exits with status 1 on an invalid combination of arguments.
    """
    if not args.sketch:
        # Fix: message previously read "Sketch not is set".
        print('Sketch is not set, and no default sketch exists')
        sys.exit(1)
    # NOTE(review): --master_instance/--core_instance have non-None defaults in
    # configure_parser, so this branch fires whenever --fleet is passed at all;
    # confirm whether fleet mode is meant to be reachable with those defaults.
    if args.fleet and (args.master_instance or args.core_instance):
        print('Instance types are mutually exclusive with instance fleet')
        sys.exit(1)
    if (args.master_instance and not args.core_instance) or (args.core_instance and not args.master_instance):
        print('Master and Core node instances must be defined')
        sys.exit(1)
    if args.core_instance and not args.count:
        print('Core node instances count must be defined')
        sys.exit(1)
    if args.fleet and not args.master_capacity:
        print('Master node target on_demand capacity must be defined')
        sys.exit(1)
    if args.fleet and not args.core_capacity:
        print('Core node target on_demand capacity must be defined')
        sys.exit(1)

    # Load the sketch and use its "emr" section as the base cluster description.
    sketches = Sketches()
    emr_item = EmrSketchItem.from_content(sketches[args.sketch]["emr"])

    # Derive YARN/Spark configuration from the chosen core instance type and count.
    configurations = EmrConfigurations()
    if args.core_instance:
        configurations.add_yarn_site(capacity_scheduler={
            "instance_type": args.core_instance,
            "node_count": args.count
        })
        configurations.add_spark(args.core_instance, args.count)
    emr_item.put_configurations(configurations.configurations)

    # Apply CLI overrides on top of the sketch defaults.
    if args.fleet:
        emr_item.put_instance_fleet(args.fleet, args.master_capacity, args.core_capacity, args.spot)
    if args.master_instance:
        emr_item.put_instance_groups(args.master_instance, args.core_instance, args.count, args.spot)
    if args.emr:
        emr_item.set_emr_label(args.emr)
    if args.name:
        emr_item.set_cluster_name(args.name)
    if args.applications and not emr_item.contains('applications'):
        emr_item.set_applications(args.applications)
    if args.protect and not emr_item.contains('TerminationProtected'):
        emr_item.set_protect(args.protect)
    if args.master_size:
        emr_item.set_master_size_gb(args.master_size)
    if args.core_size:
        emr_item.set_core_size_gb(args.core_size)

    if args.verbose:
        # Drop 'instance_fleets' from the debug dump (presumably too noisy to
        # print). NOTE(review): pop() raises KeyError if generate() ever omits
        # the key -- confirm generate() always includes it.
        generated = emr_item.generate()
        generated.pop('instance_fleets')
        print(generated)

    # Resolve bootstrap script names to their sketch-defined locations.
    boot = []
    if args.boot:
        boot = [emr_item.get_bootstrap_script(b) for b in args.boot]

    emr = EMR(args.verbose)
    cluster_id = emr.start_cluster(
        name=emr_item.get_cluster_name(),
        log_uri=emr_item.get_log_uri(),
        keep_alive=True,
        protect=emr_item.get_protect(),
        applications=emr_item.applications,
        job_flow_role=emr_item.get_job_flow_role(),
        service_role=emr_item.get_service_role(),
        emr_label=emr_item.get_emr_label(),
        instance_fleet=emr_item.get_instance_fleet(),
        instance_fleet_configs=emr_item.get_instance_fleets(),
        instance_groups=emr_item.get_instance_groups(),
        ebs_master_volume_gb=emr_item.get_master_size_gb(),
        ebs_core_volume_gb=emr_item.get_core_size_gb(),
        steps=[{
            'Name': 'Enable debugging',
            'Args': ["state-pusher-script"]
        }],
        tags=emr_item.get_tags(),
        security_groups=emr_item.get_security_groups(),
        subnets=emr_item.get_subnets(),
        configurations=emr_item.get_configurations(),
        keyname=emr_item.get_keyname(),
        bootstrap_scripts=boot
    )
    print(cluster_id)
|
# -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
import pytest
from mock import patch
from mock import NonCallableMagicMock, NonCallableMock
from resilient_lib import RequestsCommon
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from fn_shodan.util.helper import CONFIG_DATA_SECTION
PACKAGE_NAME = CONFIG_DATA_SECTION
FUNCTION_NAME = "shodan_lookup"

# Canned values returned by the mocked Shodan API call in the tests below.
MOCK_VULNS = ["mock_vul_1", "mock_vul_2"]
MOCK_PORTS = ["1000", "1001"]

# App config section consumed by pytest_resilient_circuits.
# Fix: the original key "http_proxys" is a typo -- resilient_lib's
# RequestsCommon reads "https_proxy".
config_data = """[{0}]
shodan_apikey=ABCDEF12345
http_proxy=http://localhost:0000
https_proxy=https://localhost:0000""".format(PACKAGE_NAME)

# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def call_shodan_lookup_function(circuits, function_params, timeout=5):
    """Fire the 'shodan_lookup' function through circuits and return its result value.

    Re-raises any exception the FunctionComponent raised; otherwise waits for
    the function's result event and returns its value payload.
    """
    evt = SubmitTestFunction("shodan_lookup", function_params)
    circuits.manager.fire(evt)

    # circuits emits an "exception" event when the FunctionComponent raises;
    # surface that exception to the caller.
    exception_event = circuits.watcher.wait("exception", parent=None, timeout=timeout)
    if exception_event is not False:
        raise exception_event.args[1]

    # Success path: collect and sanity-check the function's result event.
    event = circuits.watcher.wait("shodan_lookup_result", parent=evt, timeout=timeout)
    assert event
    assert isinstance(event.kwargs["result"], FunctionResult)
    pytest.wait_for(event, "complete", True)
    return event.kwargs["result"].value
class TestShodanLookup:
    """Tests for the shodan_lookup function."""

    def test_function_definition(self):
        """The package's customization_data must define the function."""
        func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
        assert func is not None

    def test_fields_defined(self, circuits_app):
        """An empty shodan_lookuphost input must be rejected as mandatory."""
        function_params = {
            "shodan_lookuphost": "",
        }
        with pytest.raises(ValueError, match=r"'shodan_lookuphost' is mandatory"):
            call_shodan_lookup_function(circuits_app, function_params)

    def test_function_runs(self, circuits_app):
        """With the Shodan API call mocked, vulns and ports must appear in the result content."""
        with patch("fn_shodan.components.funct_shodan_lookup.make_api_call") as mock_call:
            # Fix: the original built an unused RequestsCommon instance here
            # (`app_configs`/`rc` were never referenced); removed as dead code.
            mock_call.return_value = {
                "vulns": MOCK_VULNS,
                "ports": MOCK_PORTS
            }
            function_params = {
                "shodan_lookuphost": "127.0.0.1",
            }
            mock_results = call_shodan_lookup_function(circuits_app, function_params)
            mock_call.assert_called()
            assert mock_results.get("content").get("vulns") == MOCK_VULNS
            assert mock_results.get("content").get("ports") == MOCK_PORTS
|
# coding: utf-8
from setuptools import setup
import sys

# Execute cpinsim/version.py to define __version__ and __author__ in this scope.
# Fix: both files below were opened without being closed; use context managers.
with open("cpinsim/version.py", encoding="utf-8") as version_file:
    exec(version_file.read())

with open("README.rst") as readme_file:
    long_description = readme_file.read()

setup(
    name='cpinsim',
    version=__version__,
    author=__author__,
    author_email='bianca.stoecker@tu-dortmund.de',
    description='CPINSim - Constrained Protein Interaction Networks Simulator\n CPINSim is a package for the simulation of constrained protein interaction networks. Beside simulation of complex formation in a cell there are methods for data preprocessing provided: Annotation of interactions and constraints with domains; A parser to provide the needed protein input format.',
    long_description=long_description,
    license='MIT',
    url='https://github.com/BiancaStoecker/cpinsim',
    packages=['cpinsim'],
    entry_points={
        "console_scripts": ["cpinsim = cpinsim:main"]
    },
    install_requires=[
        "networkx==1.11.0",
        "bitarray==0.8.1",
        "scipy"
    ],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3",
        "Topic :: Scientific/Engineering :: Bio-Informatics"
    ]
)
import io
import json
import time
from pathlib import Path, PosixPath
from urllib.parse import urlparse
import attr
import fastai
import requests
import torch
from fastai.vision import open_image, ImageDataBunch, imagenet_stats, create_cnn
from api import MODEL_PATH, DOWNLOAD_PATH
# Class labels in the order the saved model was trained with.
# NOTE(review): "CLASES" is presumably a typo for "CLASSES"; kept as-is since
# other modules may import this name.
CLASES = ["cats", "dogs"]
# Backbone architecture used when reconstructing the learner.
MODEL = fastai.vision.models.resnet34
# Force fastai to run inference on CPU.
fastai.defaults.device = torch.device("cpu")
@attr.s
class Predictor:
    """Image classifier wrapping a fastai CNN learner loaded from disk."""

    # Backbone architecture callable passed to create_cnn.
    model = attr.ib(default=MODEL)
    # Path to the saved learner weights.
    custom_model_path: str = attr.ib(default=MODEL_PATH)
    # Class labels, in training order.
    classes: list = attr.ib(default=CLASES)
    # The fastai learner; default built by init_predictor below.
    predictor = attr.ib()
    # Directory where fetched images are saved.
    download_path: str = attr.ib(default=DOWNLOAD_PATH)
    # Most recently loaded image (set by get_data).
    data = attr.ib(default=None)
    # Never assigned or read in this class -- presumably reserved; confirm before removing.
    data_file: str = attr.ib(default=None)

    @predictor.default
    def init_predictor(self):
        """Build the CNN learner from an empty single-image data bunch and load the saved weights."""
        data = ImageDataBunch.single_from_classes("", self.classes, size=224).normalize(
            imagenet_stats
        )
        learn = create_cnn(data, self.model).load(self.custom_model_path)
        return learn

    @staticmethod
    def get_by_url(url):
        """Download an image over HTTP and open it as a fastai image."""
        return open_image(io.BytesIO(requests.get(url).content))

    @staticmethod
    def get_by_path(path: PosixPath):
        """Open an image from a local filesystem path."""
        return open_image(path)

    def get_data(self, uri: str, save=True) -> None:
        """Load the image at *uri* (file:// or anything else, treated as a URL) into self.data.

        When *save* is true, also writes a copy into download_path under the
        URI's basename.
        """
        path = urlparse(uri)
        if path.scheme == "file":
            self.data = self.get_by_path(Path(path.path))
        else:
            self.data = self.get_by_url(uri)
        if save:
            self.data.save(Path(self.download_path).joinpath(Path(path.path).name))

    def classify(self, uri: str) -> dict:
        """Classify the image at *uri*; return result, source URI and elapsed seconds.

        Any failure (download, decode, inference) yields result "processing error".
        """
        stime = time.time()
        try:
            self.get_data(uri)
            result, _, _ = self.predictor.predict(self.data)
        except Exception:
            # Broad catch keeps the API responsive on bad input; note the
            # underlying error is discarded, not logged.
            result = "processing error"
        return {"result": result, "source": uri, "processing_time": time.time() - stime}
# Shared module-level Predictor instance used by predict().
predictor = Predictor()


def predict(**kwargs):
    """Classify each {"uri": ...} entry in kwargs["query"] and return the results as a JSON string."""
    outcomes = [predictor.classify(entry["uri"]) for entry in kwargs["query"]]
    return json.dumps(outcomes)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
author:
- "Ansible Core Team (@ansible)"
module: include_tasks
short_description: dynamically include a task list.
description:
- Includes a file with a list of tasks to be executed in the current playbook.
version_added: "2.4"
options:
free-form:
description:
- This action allows you to specify the name of the file directly w/o any other options.
- Unlike M(import_tasks) this action will be affected by most keywords, including loops and conditionals.
notes:
- This is really not a module, this is a feature of the Ansible Engine, as such it cannot be overridden the same way a module can.
'''
# Example playbook snippets shown by ansible-doc.
# Fix: "dyanmic" typo corrected to "dynamic" in the second example's heading.
EXAMPLES = """
# include task list in play
- hosts: all
  tasks:
    - debug:
        msg: task1
    - include_tasks: stuff.yml
    - debug:
        msg: task10
# dynamic include task list in play
- hosts: all
  tasks:
    - debug:
        msg: task1
    - include_tasks: "{{ hostvar }}.yml"
      when: hostvar is defined
"""
# Return-value documentation block parsed by ansible-doc.
RETURN = """
# this module does not return anything except tasks to execute
"""
|
import met2verif.util
import numpy as np
def get(filename):
    """Return an ObsInput reader chosen from the file's first (header) line.

    Raises NotImplementedError when the header is too short or matches no
    known format.
    """
    with open(filename, 'r') as fh:
        header = fh.readline()

    if len(header) <= 5:
        raise NotImplementedError
    if header.startswith(" Stnr"):
        return Kdvh(filename)
    if header.startswith("id"):
        return Text(filename)
    # NOTE(review): the header is split on ';' here, while Titan.read splits
    # its header on ' ' -- confirm which delimiter Titan files actually use.
    if "lat" in header.split(';'):
        return Titan(filename)
    raise NotImplementedError
class ObsInput(object):
    """Abstract base class for observation-file readers."""

    def read(self, variable):
        """Load observations of one variable from the file.

        Arguments:
            variable (str): Variable to load

        Returns:
            dict: NOTE(review) -- the concrete readers in this file return a
            dict with keys "times", "ids" and "obs" (np.array each), not the
            times/lats/lons/obs tuple this docstring originally described;
            confirm the intended contract.
        """
        raise NotImplementedError
class Text(ObsInput):
    """Reader for ';'-separated observation files with columns 'id',
    'date' (YYYYMMDD), 'hour' and one column per variable."""

    def __init__(self, filename):
        self.filename = filename

    def read(self, variable):
        """Read observations of `variable`.

        Returns:
            dict: {"times": unix times (int array), "ids": station ids (int
            array), "obs": values (float array)}; empty dict on a bad header.
        """
        # Fix: the file handle was leaked on the success path; `with` closes it.
        with open(self.filename, 'r') as ifile:
            header = ifile.readline().replace('\n', '').split(';')
            # Fix: was `i is not ''` -- an identity comparison, not equality.
            header = [col for col in header if col != '']
            try:
                Iid = header.index("id")
                Idate = header.index("date")
                Ihour = header.index("hour")
                Ivar = header.index(variable)
            except ValueError:
                # Fix: the original guarded with `None in [...]`, which is dead
                # code (list.index raises ValueError instead of returning None),
                # and then printed the undefined name `ifilename` (NameError).
                print("The header in %s is invalid:" % self.filename)
                print(header)
                return {}
            times = list()
            obs = list()
            ids = list()
            date2unixtime_map = dict()  # Lookup table for converting date to unixtime
            for line in ifile:
                data = line.strip().split(';')
                if len(data) > 1 and met2verif.util.is_number(data[0]):
                    try:
                        station_id = int(data[Iid])  # renamed from `id` (shadowed builtin)
                        date = int(data[Idate])
                        hour = int(data[Ihour])
                    except Exception:
                        print("Could not read the following:")
                        print(data)
                        continue
                    raw = data[Ivar]
                    if raw in ('.', 'x'):
                        value = np.nan  # missing-data markers
                    else:
                        value = float(raw)
                    if not np.isnan(value):
                        if date not in date2unixtime_map:
                            date2unixtime_map[date] = met2verif.util.date_to_unixtime(date)
                        times += [date2unixtime_map[date] + hour * 3600]
                        ids += [station_id]
                        obs += [value]
        data = {"times": np.array(times, int), "ids": np.array(ids, int), "obs": np.array(obs)}
        return data
class Kdvh(ObsInput):
    """Reader for KDVH observation files: space-separated columns with
    'Stnr', 'Year', 'Month', 'Day', 'Time(UTC)', optionally 'MIN', and one
    column per variable."""

    def __init__(self, filename, locations_file=None):
        # locations_file is accepted for interface compatibility but unused here.
        self.filename = filename

    def read(self, variable):
        """Read observations of `variable`.

        Returns:
            dict: {"times": unix times (int array), "ids": station ids (int
            array), "obs": values (float array)}; empty dict on a bad header.
        """
        # Fix: the file handle was leaked on the success path; `with` closes it.
        with open(self.filename, 'r') as ifile:
            header = ifile.readline().replace('\n', '').split(' ')
            # Fix: was `i is not ''` -- an identity comparison, not equality.
            header = [col for col in header if col != '']
            try:
                Iid = header.index("Stnr")
                Iyear = header.index("Year")
                Imonth = header.index("Month")
                Iday = header.index("Day")
                Itime = header.index("Time(UTC)")
                Ivar = header.index(variable)
            except ValueError:
                # Fix: the original guarded with `None in [...]`, which is dead
                # code (list.index raises ValueError instead of returning None),
                # and then printed the undefined name `ifilename` (NameError).
                print("The header in %s is invalid:" % self.filename)
                print(header)
                return {}
            Imin = header.index("MIN") if "MIN" in header else None
            times = list()
            obs = list()
            ids = list()
            date2unixtime_map = dict()  # Lookup table for converting date to unixtime
            for line in ifile:
                data = [col for col in line.strip().split(' ') if col != '']
                if len(data) > 1 and met2verif.util.is_number(data[0]):
                    try:
                        station_id = int(data[Iid])  # renamed from `id` (shadowed builtin)
                        date = int(data[Iyear]) * 10000 + int(data[Imonth]) * 100 + int(data[Iday])
                        hour = int(data[Itime])
                    except Exception:
                        print("Could not read the following:")
                        print(data)
                        continue
                    if Imin is not None:
                        # Fold the minutes column into a fractional hour.
                        # (renamed from `min`, which shadowed the builtin)
                        minute = float(data[Imin])
                        hour = hour + minute / 60.0
                    raw = data[Ivar]
                    if raw in ('.', 'x'):
                        value = np.nan  # missing-data markers
                    else:
                        value = float(raw)
                    if not np.isnan(value):
                        if date not in date2unixtime_map:
                            date2unixtime_map[date] = met2verif.util.date_to_unixtime(date)
                        times += [date2unixtime_map[date] + hour * 3600]
                        ids += [station_id]
                        obs += [value]
        data = {"times": np.array(times, int), "ids": np.array(ids, int), "obs": np.array(obs)}
        return data
class Titan(ObsInput):
def __init__(self, filename):
self.filename = filename
def read(self, variable):
ifile = open(self.filename, 'r')
header = ifile.readline().replace('\n', '').split(' ')
header = [i for i in header if i is not '']
Ilat = header.index("lat")
Ilon = header.index("lon")
Ielev = header.index("elev")
Ivar = header.index("value")
if None in [Ilat, Ilon, Ielev, Ivar]:
print("The header in %s is invalid:" % ifilename)
print(header)
ifile.close()
return {}
times = list()
obs = list()
ids = list()
|
from os import environ
from subprocess import run, CalledProcessError
from json import loads, load, dump
from tqdm import tqdm
# NOTE(review): "token" is a placeholder -- criticality_score requires a real
# GitHub token in GITHUB_AUTH_TOKEN; confirm it is injected before real runs.
environ["GITHUB_AUTH_TOKEN"] = "token"
# Mapping of wallet repo name -> GitHub issues URL, produced earlier in the pipeline.
with open("../data/wallet_url_improved.json", "r") as file:
    urls = load(file)
def format_criticality_command(target_url):
    """Build the criticality_score CLI invocation for *target_url* with JSON output."""
    return "criticality_score --repo {} --format json".format(target_url)
def fetch_criticality(repo_name):
    """Run criticality_score for one repo and return its criticality score.

    Returns None (after printing a diagnostic) when the command fails or its
    output is not valid JSON.
    """
    url = urls[repo_name].replace("/issues", "")
    command = format_criticality_command(url)
    try:
        # Security fix: run with an argv list and shell=False instead of
        # interpolating the URL (external data) into a shell command line.
        result = run(command.split(), capture_output=True, check=True)
    except CalledProcessError:
        print(f"fetch score from {url} failed")
        return None
    try:
        return loads(result.stdout)["criticality_score"]
    except ValueError:
        print(f"load result from {url} failed, invalid json")
        print(result.stdout)
        return None
# result_list = []
# for url in tqdm(urls.values()):
# url = url.replace("/issues", "")
# command = format_criticality_command(url)
# try:
# result = run(command, capture_output=True, shell=True, check=True)
# try:
# json_result = loads(result.stdout)
# result_list.append(json_result)
# except ValueError:
# print(f"load result from {url} failed, invalid json")
# print(result.stdout)
#
# except CalledProcessError:
# print(f"fetch score from {url} failed")
#
# with open('../data/wallet_criticality.json', 'w') as file:
# dump(result_list, file)
|
from bs4 import BeautifulSoup
import requests
import sqlite3
# conn = sqlite3.connect('newshub.sqlite3')
# c = conn.cursor()
# c.execute("CREATE TABLE news1(title TEXT, link TEXT, images TEXT)")
# Fetch the BBC technology news page and extract headline text, links and images.
website_url = "https://www.bbc.com/news/technology"
response = requests.get(website_url)
web_page = response.text
soup = BeautifulSoup(web_page, "html.parser")

article_texts = []
article_links = []
article_images = []
# NOTE(review): unused in this script -- presumably for a multi-site version.
site_names = ['BBC News', 'NBC News', 'Aljazeera']

# Promo headline anchors carry both the headline text and the article href.
article_tags = soup.find_all(name='a', class_='gs-c-promo-heading')
for article in article_tags:
    article_texts.append(article.getText())
    article_links.append(article.get("href"))

# Fix: some <img> tags have no 'src' attribute; image['src'] raised KeyError
# on those, so use .get() and skip them.
images = soup.find_all(name='img')
for image in images:
    src = image.get('src')
    if src is not None:
        article_images.append(src)

# Fix: guard against fewer than 10 scraped items (range(10) could IndexError).
for i in range(min(10, len(article_texts), len(article_images))):
    print(article_texts[i])
    print("https://www.bbc.com" + article_links[i])
    print(article_images[i])
    print('\n')
    # c.execute("INSERT INTO news1 VALUES(?, ?, ?)", (article_texts[i], "https://www.bbc.com"+article_links[i], article_images[i]))
    # conn.commit()
# conn.close()
# c.close()
# This file contains code for generating toy datasets to fit using neural networks
import numpy as np
def generate_dataset_based_on_graph(graph, nb_of_points):
    """Sample random integer points and evaluate the graph-based value for each.

    Returns a (points, values) pair.
    """
    sampled = generate_points(size=graph.shape[0], nb_of_points=nb_of_points)
    return sampled, get_values_for_points(sampled, graph)
def generate_points(size, nb_of_points, low=0, high=120):
    """Draw `nb_of_points` random integer points of dimension `size`.

    Values are sampled uniformly from [low, high). The defaults preserve the
    previously hard-coded range [0, 120); callers may now override it.

    Returns:
        np.ndarray of shape (nb_of_points, size).
    """
    return np.random.randint(
        low=low,
        high=high,
        size=(nb_of_points, size),
    )
def get_values_for_points(points, graph):
    """Evaluate get_value_for_point for every row of `points`; return a numpy array."""
    values = [get_value_for_point(single_point, graph) for single_point in points]
    return np.array(values)
def get_value_for_point(point, graph):
    """Compute a soft-neighbor entropy-like score for `point` on `graph`.

    For each node i, every j with graph[i, j] != 0 is weighted by
    exp(-0.5 * (point[i] - point[j])**2); the row contributes the weighted
    mean squared half-distance plus the log of the partition sum. Rows with
    no nonzero weights contribute nothing.

    Fix: the original computed `rescaled_point = point / 40.0` and never used
    it; that dead statement is removed.
    """
    full_entropy = 0
    for i in range(graph.shape[0]):
        row_sum = 0
        row_ent = 0
        for j in range(graph.shape[1]):
            if graph[i, j] == 0:
                continue
            current_distance = 0.5 * (point[i] - point[j]) ** 2
            exp_current_distance = np.exp(-current_distance)
            row_ent += current_distance * exp_current_distance
            row_sum += exp_current_distance
        if row_sum > 0:
            full_entropy += row_ent / row_sum + np.log(row_sum)
    return full_entropy
from sentence_transformers import SentenceTransformer, util
import numpy as np
# Load a local XLM sentence-transformer model, forcing CPU inference.
model = SentenceTransformer('./model-xlm', device='cpu')
# Two near-paraphrases: only "AI" vs "Artificial Intelligence" differs.
sentence1 = "I like Python because I can build AI applications"
sentence2 = "I like Python because I can build Artificial Intelligence applications"
# encode sentences to get their embeddings (as torch tensors)
embedding1 = model.encode(sentence1, convert_to_tensor=True)
embedding2 = model.encode(sentence2, convert_to_tensor=True)
print(embedding1.shape)
# compute cosine similarity of the two embeddings
cosine_scores = util.pytorch_cos_sim(embedding1, embedding2)
print("Sentence 1:", sentence1)
print("Sentence 2:", sentence2)
print("Similarity score:", cosine_scores.item())
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.utils import load_state_dict_from_url
# Download locations for ImageNet-pretrained weights (torchvision's official mirror).
model_urls = {
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
}
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 (strided) -> 1x1 expand,
    with a residual shortcut (optionally downsampled) added before the final ReLU."""

    expansion = 4

    def __init__(self, infeat, outfeat, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Attribute names are kept stable so pretrained state dicts load unchanged.
        self.conv1 = nn.Conv2d(infeat, outfeat[0], kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(outfeat[0])
        self.conv2 = nn.Conv2d(outfeat[0], outfeat[1], kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(outfeat[1])
        self.conv3 = nn.Conv2d(outfeat[1], outfeat[2], kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(outfeat[2])
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or the provided downsample projection.
        shortcut = x if self.downsample is None else self.downsample(x)
        # Main branch: conv-bn-relu, conv-bn-relu, conv-bn.
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        # Residual add, then the final activation.
        y += shortcut
        return self.relu(y)
class ResNet(nn.Module):
    """
    ResNet-50 backbone with a linear classification head.

    The stage layout (3/4/6/3 bottlenecks) matches torchvision's resnet50,
    so its pretrained checkpoint can be loaded with strict=False.

    Args:
        nr_classes: number of output classes for the final linear layer.
        freeze: stored flag. NOTE(review): forward() always runs the
            backbone under ``torch.no_grad()`` regardless of this flag —
            confirm whether ``freeze`` was meant to gate that.
    """
    def __init__(self, nr_classes, freeze=True):
        super(ResNet, self).__init__()
        self.nr_classes = nr_classes
        self.freeze = freeze
        # NOTE: using name to load tf chkpts easier
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_res_block(64, [64, 64, 256], 3, stride=1)
        self.layer2 = self._make_res_block(256, [128, 128, 512], 4, stride=2)
        self.layer3 = self._make_res_block(512, [256, 256, 1024], 6, stride=2)
        self.layer4 = self._make_res_block(1024, [512, 512, 2048], 3, stride=2)
        self.gap = nn.AdaptiveAvgPool2d((1, 1)) # Global Average Pooling
        self.gmp = nn.AdaptiveMaxPool2d((1, 1)) # Global Max Pooling
        self.classifier = nn.Linear(2048, self.nr_classes)
        # He initialisation for convs; BN affine params start at (1, 0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so every bottleneck starts out as an identity mapping.
        for m in self.modules():
            if isinstance(m, Bottleneck):
                nn.init.constant_(m.bn3.weight, 0)
    def _make_res_block(self, infeat, outfeat, nr_blocks, stride=1):
        """Stack ``nr_blocks`` Bottlenecks; only the first may downsample."""
        downsample = None
        # Project the shortcut when spatial size or channel count changes.
        if stride != 1 or infeat != outfeat[-1]:
            downsample = nn.Sequential(
                nn.Conv2d(infeat, outfeat[-1], kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(outfeat[-1]),
            )
        layers = []
        layers.append(Bottleneck(infeat, outfeat, stride, downsample))
        for _ in range(1, nr_blocks):
            layers.append(Bottleneck(outfeat[-1], outfeat))
        return nn.Sequential(*layers)
    def forward(self, imgs):
        """Return class logits of shape (batch, nr_classes)."""
        def extract_feat(imgs):
            # Backbone is used as a fixed feature extractor: no gradients.
            with torch.no_grad():
                d1 = self.relu(self.bn1(self.conv1(imgs)))
                d2 = self.maxpool(d1)
                d2 = self.layer1(d2)
                d3 = self.layer2(d2)
                d4 = self.layer3(d3)
                d5 = self.layer4(d4)
                return [d1, d2, d3, d4, d5]
        # feature extractor only: keep the deepest (2048-channel) map.
        feat = extract_feat(imgs)[-1]
        feat = self.gap(feat) # NOTE: Global Average Pool
        out = feat.view(feat.size(0), -1)
        out = self.classifier(out)
        return out
def resnet(exp_mode, nr_classes, pretrained=True, progress=True):
    """
    Build a ResNet-50 classifier, optionally loading torchvision's
    ImageNet-pretrained weights.

    Args:
        exp_mode: unused by the model; kept in the signature for
            call-site compatibility.
        nr_classes: number of output classes.
        pretrained: load the resnet50 checkpoint when True.
        progress: show a download progress bar.

    Returns:
        The constructed ResNet model.
    """
    # BUG FIX: ResNet.__init__ has no ``exp_mode`` parameter, so the
    # previous ``ResNet(exp_mode=exp_mode, ...)`` call raised TypeError.
    model = ResNet(nr_classes=nr_classes)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls['resnet50'],
                                              progress=progress)
        # strict=False: our classifier head differs from the checkpoint.
        model.load_state_dict(state_dict, strict=False)
    return model
|
import soundfile as sf
import os
import pandas as pd
class ParameterError(Exception):
    """Error carrying a plain human-readable message in ``msg``.

    Deliberately stores the message itself (not via Exception.args) and
    renders it verbatim from ``__str__``.
    """
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
class Wav2Pcm(object):
    """ Convert a PCM WAV file to a raw .pcm file (header stripped). """

    def __call__(self, src_path, file_name):
        """
        Read ``src_path + file_name + '.wav'`` and write its raw PCM
        payload to ``src_path + file_name + '.pcm'``.
        """
        # Sample rate and bit rate are not needed here, only the payload.
        _, _, pcm_data = self.convert(src_path + file_name + '.wav')
        with open(src_path + file_name + '.pcm', 'wb') as pcm:
            pcm.write(pcm_data)

    def _get_field(self, wav, offset, lent):
        """
        Get the value of a header field. Only valid for little-endian fields.

        Args :
            wav : the wave file (binary file object)
            offset : which position to start at.
            lent : length of field in bytes.
        Return :
            Int value of the desired field.
        """
        wav.seek(offset, 0)
        return int.from_bytes(wav.read(lent), byteorder='little')

    def convert(self, wav_in):
        """
        Get the sample rate, bit rate and PCM raw bytes from a wav.

        Args :
            wav_in : binary file object, or string with path to wave file.
        Return :
            sample_rate : int representing the wave file sample rate
            bit_rate : int representing the wave file bit rate
            pcm : bytes representing the raw sound.
        Raises :
            ValueError : if the fmt-chunk size is neither 16 nor 18
                (unrecognized WAV layout).
        """
        # BUG FIX: only close the file if we opened it; previously a
        # caller-supplied file object was closed behind the caller's back.
        opened_here = isinstance(wav_in, str)
        wav_file = open(wav_in, 'rb') if opened_here else wav_in
        try:
            header_size = self._get_field(wav_file, 16, 4)  # fmt-chunk size
            sample_rate = self._get_field(wav_file, 24, 4)
            bit_rate = self._get_field(wav_file, 34, 2)  # bits per sample
            wav_file.seek(0)
            # 16-byte fmt chunk => 44-byte header; 18-byte => 46-byte header.
            if header_size == 16:
                data = wav_file.read()[44:]
            elif header_size == 18:
                data = wav_file.read()[46:]
            else:
                # BUG FIX: was print(...) + exit(1); library code must not
                # terminate the interpreter.
                raise ValueError("WAV format unknown")
            return sample_rate, bit_rate, data
        finally:
            if opened_here:
                wav_file.close()
def load_label(filepath):
    """
    Load the character-label table from a cp949-encoded CSV with
    ``id`` and ``char`` columns (the ``freq`` column is present in the
    file but unused here).

    Returns:
        (char2id, id2char): dicts mapping char -> id and id -> char.
        Duplicate keys keep the last occurrence, matching the original
        row-by-row assignment order.
    """
    ch_labels = pd.read_csv(filepath, encoding="cp949")
    id_list = ch_labels["id"]
    char_list = ch_labels["char"]
    # The previous loop also unpacked ``freq`` without using it.
    char2id = dict(zip(char_list, id_list))
    id2char = dict(zip(id_list, char_list))
    return char2id, id2char
def sentence_to_label(sentence, char2id):
    """
    Map each character of ``sentence`` through ``char2id`` and return the
    ids as a single space-separated string ('' for an empty sentence).

    Raises:
        KeyError: if a character is missing from ``char2id``.
    """
    # IMPROVED: str.join replaces the quadratic ``+=`` loop and makes the
    # trailing-separator trim (``[:-1]``) unnecessary.
    return ' '.join(str(char2id[ch]) for ch in sentence)
def flac2pcm(src_path, file_name, leave_trail):
    """
    Decode ``<src_path>/<file_name>.flac`` to a 16-bit little-endian WAV,
    convert that WAV to a raw .pcm file, then optionally delete the
    intermediate WAV (kept only when ``leave_trail`` is truthy).
    """
    base = src_path + '/' + file_name
    samples, sample_rate = sf.read(base + '.flac')
    sf.write(base + '.wav', samples, sample_rate,
             format='WAV', endian='LITTLE', subtype='PCM_16')
    Wav2Pcm()(src_path + '/', file_name)
    if not leave_trail:
        os.remove(base + '.wav')
|
import pylab
from boolean2 import Model
#
# This initial condition leads to a cycle of period 4.
# If A is set to False, a steady state is obtained.
#
#
# boolean2 rule text: plain assignments give the initial state, and the
# "X* = ..." lines give each node's synchronous update rule.
text = """
A = True
B = False
C = False
D = True
B* = A or C
C* = A and not D
D* = B and C
"""
# NOTE(review): node A has no update rule, so it presumably keeps its
# initial value every step -- confirm against the boolean2 docs.
model = Model( text=text, mode='sync')
model.initialize()
model.iterate( steps=15 )
# the model data attribute holds the states keyed by nodes
for node in model.data:
    print(node, model.data[node])
# this is a helper function that reports the cycle lengths
# and the index at which the cycle started
model.report_cycles()
#
# the same thing as above but
# will not print, only return the two parameters
#
print(model.detect_cycles())
#
# this is how one plots the values; delete this below
# if matplotlib is not installed
#
p1 = pylab.plot( model.data["B"] , 'ob-' )
p2 = pylab.plot( model.data["C"] , 'sr-' )
pylab.legend( [p1,p2], ["B","C"])
pylab.ylim((-0.1,1.1))
pylab.show()
|
import numpy
# responded_in_kind counts the number of times the opponent responded like a good TFT
def responded_in_kind(hist):
    """
    Count how many rounds the opponent's move equalled our previous move,
    i.e. how often they behaved like a good tit-for-tat player.

    ``hist`` is a 2-row history (row 0 = our moves, row 1 = opponent's),
    with both rows the same length.
    """
    our_moves = hist[0]
    their_responses = hist[1][1:]  # opponent's move one round later
    return sum(1 for ours, theirs in zip(our_moves, their_responses)
               if theirs == ours)
def strategy(history, memory):
    """
    Iterated prisoner's dilemma strategy: tit-for-tat plus a random-player
    exploit and an escape mechanism for mutual-defection spirals.

    history: 2 x n array, row 0 our moves, row 1 the opponent's
    (1 = cooperate, 0 = defect). memory: (cooldown, escapes, rando);
    reinitialised to (0, 3, 0) on the first turn. Returns (move, memory).
    """
    if history.shape[1] == 0:
        memory = (0, 3, 0)
    cooldown = memory[0] # two turns of peace to break out of the spiral
    escapes = memory[1] # opponent has 3 chances to accept a peace offering
    rando = memory[2] # are we against a rando
    move = 1
    if history.shape[1] == 30: # check rando r30
        oppsum = sum(history[1])
        # opponent with least 8 Cs and 8 Ds, and at least 9 or so non-TFT responses in first 30 rounds means likely random
        if oppsum >= 8 and oppsum <= 22 and responded_in_kind(history) <= 21:
            escapes = 3
            rando = 1
    if rando and history.shape[1] >= 45: # double-check rando after r45
        if sum(history[1][-10:]) == 0: # deadlock in last 10 turns means calling them random was a mistake
            rando = 0
    if cooldown == 1: # janky way to tell when we're coming off cooldown; decrement escapes
        if history.shape[1] >= 2 and history[1, -1] == 0:
            escapes -= 1
        cooldown = 0
    if rando: # exploit the randos
        move = 0
    elif history.shape[1] >= 1 and history[1, -1] == 0 and not cooldown:
        move = 0 # regular TFT
    # escape from death spirals, both of the straight-Ds and alternating D/C variety
    if history.shape[1] > 4 and escapes > 0:
        if ((history[0, -1] == 0 and history[0, -2] == 0 and history[0, -3] == 0 and history[1, -1] == 0 and history[1, -2] == 0 and history[1, -3] == 0) or
            (history[0, -1] == 1 and history[0, -2] == 0 and history[0, -3] == 1 and history[0, -4] == 0 and history[1, -1] == 0 and history[1, -2] == 1 and history[1, -3] == 0 and history[1, -4] == 1)):
            cooldown = 3
            move = 1
    if cooldown > 0:
        cooldown -= 1
    # Persist updated counters for the next round.
    memory = (cooldown, escapes, rando)
    return move, memory
|
##
##
# File auto-generated against equivalent DynamicSerialize Java class
class Message(object):
    """Header/body envelope mirroring the DynamicSerialize Java class.

    The Java-bean style accessors are kept verbatim because the
    serialization layer addresses them by name.
    """

    def __init__(self, header=None, body=None):
        self.header = header
        self.body = body

    def getHeader(self):
        """Return the message header."""
        return self.header

    def setHeader(self, header):
        """Replace the message header."""
        self.header = header

    def getBody(self):
        """Return the message body."""
        return self.body

    def setBody(self, body):
        """Replace the message body."""
        self.body = body
|
import json
from common.config import config
from common import http
async def send_message(text, **keys):
    """
    Post ``text`` (plus any extra payload fields in ``keys``) as JSON to
    the configured Slack incoming webhook. Silently does nothing when no
    webhook URL is configured.
    """
    keys['text'] = text
    url = config['slack_webhook_url']
    if url is None:
        return
    await http.request_coro(
        url,
        method="POST",
        data=json.dumps(keys),
        headers={"Content-Type": "application/json"},
    )
def escape(text):
    """
    Escape the three characters Slack requires to be entity-encoded in
    message text: ``&`` first (so already-escaped output is not
    double-processed), then ``<`` and ``>``.
    """
    # BUG FIX: the replacements were no-ops (each character replaced by
    # itself), almost certainly an HTML-entity mangling of the original
    # source; restore the real entity escapes.
    return text \
        .replace("&", "&amp;") \
        .replace("<", "&lt;") \
        .replace(">", "&gt;")
|
import pytest
from openbb_terminal.settings_controller import SettingsController
# pylint: disable=W0621
@pytest.fixture()
def controller(mocker):
    """Build a SettingsController with its module-level dependencies
    (prompt-toolkit flag, session, set_key, obbff) patched out so the
    tests never touch a real terminal or write a .env file."""
    mocker.patch(
        "openbb_terminal.settings_controller.obbff.USE_PROMPT_TOOLKIT",
        True,
    )
    mocker.patch("openbb_terminal.settings_controller.session", True)
    mocker.patch("openbb_terminal.settings_controller.set_key")
    mocker.patch("openbb_terminal.settings_controller.obbff")
    return SettingsController()
# Smoke tests: each SettingsController handler must accept its typical
# argv forms (no args, a bare value, the "-v VALUE" flag form, and for
# the backend also the literal string "None") without raising.
def test_print_help(controller):
    controller.print_help()
def test_call_dt(controller):
    controller.call_dt(None)
def test_call_autoscaling(controller):
    controller.call_autoscaling(None)
@pytest.mark.parametrize("other", [["45"], ["-v", "45"]])
def test_call_dpi(controller, other):
    controller.call_dpi(other)
@pytest.mark.parametrize("other", [["45"], ["-v", "45"]])
def test_call_height(controller, other):
    controller.call_height(other)
@pytest.mark.parametrize("other", [["45"], ["-v", "45"]])
def test_call_width(controller, other):
    controller.call_width(other)
@pytest.mark.parametrize("other", [["45"], ["-v", "45"]])
def test_call_pheight(controller, other):
    controller.call_pheight(other)
@pytest.mark.parametrize("other", [["45"], ["-v", "45"]])
def test_call_pwidth(controller, other):
    controller.call_pwidth(other)
@pytest.mark.parametrize("other", [["45"], ["-v", "45"]])
def test_call_monitor(controller, other):
    controller.call_monitor(other)
@pytest.mark.parametrize("other", [["GTK3Agg"], ["-v", "GTK3Agg"], ["None"]])
def test_call_backend(controller, other):
    controller.call_backend(other)
|
# Ralphio (1082003) | Gold Beach Resort
# NPC dialogue script: thanks the player once quest 2964 is complete,
# otherwise shows a suspicion line about NPC #p1082004#.
# NOTE(review): ``sm`` is the script manager injected by the game server
# runtime -- its API is not visible here.
if sm.hasQuestCompleted(2964):
    sm.sendSayOkay("Thank you so much for returning my ring! You got me out of a lot of trouble.")
else:
    sm.sendSayOkay("I wouldn't trust #p1082004#, he seems suspicious.")
|
from .user import *
from .registration import *
from .email_handler import SendEmail |
# -*- coding: utf-8 -*-
class TimeZone(object):
    """Wrap a timezone mapping, exposing ``id`` and ``utcoffsetMinutes``."""

    def __init__(self, timezone):
        # ``timezone`` is a mapping with 'id' and 'utcoffsetMinutes' keys;
        # the offset arrives as a string and is normalised to int here.
        self.id = timezone['id']
        self.utcoffsetMinutes = int(timezone['utcoffsetMinutes'])

    def __str__(self):
        # Indented two tabs to line up inside a larger dump.
        return '\t\tId: {0} \n\t\tUtcOffsetminutes: {1}'.format(self.id, self.utcoffsetMinutes)
import os
import mock
import utils
from common import helpers
class TestHelpers(utils.BaseTestCase):
    """
    Tests for the common.helpers command wrappers: with DATA_ROOT sosreport
    data available, the helpers must read the captured command output from
    files and never shell out via subprocess.
    """
    # IMPROVED: removed the redundant setUp/tearDown overrides that only
    # called super(), and replaced the deprecated assertEquals alias with
    # assertEqual.

    @mock.patch.object(helpers, 'subprocess')
    def test_get_ip_addr(self, mock_subprocess):
        # Expected output is the captured `ip -d address` from the sosreport.
        path = os.path.join(os.environ["DATA_ROOT"],
                            "sos_commands/networking/ip_-d_address")
        with open(path, 'r') as fd:
            out = fd.readlines()
        ret = helpers.get_ip_addr()
        self.assertEqual(ret, out)
        self.assertFalse(mock_subprocess.called)

    @mock.patch.object(helpers, 'subprocess')
    def test_get_ps(self, mock_subprocess):
        # Expected output is the captured `ps` listing from the sosreport.
        path = os.path.join(os.environ["DATA_ROOT"], "ps")
        with open(path, 'r') as fd:
            out = fd.readlines()
        ret = helpers.get_ps()
        self.assertEqual(ret, out)
        self.assertFalse(mock_subprocess.called)
|
"""
###############################################################################
# Copyright 2019, by the California Institute of Technology. ALL RIGHTS RESERVED.
# United States Government Sponsorship acknowledged. Any commercial use must be
# negotiated with the Office of Technology Transfer at the
# California Institute of Technology.
#
# This software may be subject to U.S. export control laws. By accepting this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# User has the responsibility to obtain export licenses, or other export authority
# as may be required before exporting such information to foreign countries or providing
# access to foreign persons.
#
# file: metadata_classes.py
# author: S. Felipe Fregoso
# description: Contains all metadata classes used for the dhmsw
# The metadata classes created correspond to the
# commands in the command dictionary (although not always)
###############################################################################
"""
import time
import configparser
from abc import ABC, abstractmethod
from shampoo_lite.mask import (Circle, Mask)
class MetadataABC(ABC):
    """
    Abstract base class for all metadata classes.

    Subclasses must implement load_config(), which reads a config file
    and populates the object's fields.
    """
    # pylint: disable=too-few-public-methods
    @abstractmethod
    def load_config(self, filepath):
        """
        Load config file abstract method.

        Ensures all subclasses define this method.
        """
        # NOTE(review): this unreachable ``yield`` turns the abstract method
        # into a generator function if it is ever called directly (it yields
        # nothing). Presumably intentional; confirm before simplifying to
        # ``pass``.
        while False:
            yield None
class MetadataDictionary():
    """
    Holds one metadata object per dhmsw component, keyed by component name.
    """

    def __init__(self, configfile=None):
        """
        Build every component's metadata object, then load ``configfile``
        into each one (``None`` leaves the defaults in place).
        """
        self._config_file = configfile
        self.metadata = {
            'CONTROLLER': ControllerMetadata(),
            'HEARTBEAT': HeartbeatMetadata(),
            'GUISERVER': GuiserverMetadata(),
            'DATALOGGER': DataloggerMetadata(),
            'CAMERA': CameraMetadata(),
            'FRAMESOURCE': FramesourceMetadata(),
            'WATCHDOG': WatchdogMetadata(),
            'HOLOGRAM': HologramMetadata(),
            'RECONSTRUCTION': ReconstructionMetadata(),
            'FOURIERMASK': FouriermaskMetadata(),
            'SESSION': SessionMetadata(),
        }
        for component_meta in self.metadata.values():
            component_meta.load_config(configfile)

    def get_meta_dict(self):
        """
        Return the dict of metadata objects keyed by component name.
        """
        return self.metadata

    def get_config_file(self):
        """
        Return the config file path given at construction.
        """
        return self._config_file
class ControllerMetadata(MetadataABC):
    """
    Metadata for the Controller component: command-socket host and port.
    """
    def __init__(self, configfile=None, cmd_hostname='localhost', cmd_port=10000):
        self.cmd_hostname = cmd_hostname
        self.cmd_port = cmd_port
        # BUG FIX: _id must be set BEFORE load_config() runs, because
        # load_config() reads self._id; previously any non-None configfile
        # raised AttributeError.
        self._id = 'CONTROLLER'
        self.load_config(configfile)

    def load_config(self, filepath):
        """
        Read the [CONTROLLER] section of the INI file at ``filepath``.

        Raises:
            ValueError: if the file does not exist or cannot be parsed.
        """
        if filepath is None:
            return
        config = configparser.ConfigParser()
        dataset = config.read(filepath)
        if not dataset:
            raise ValueError("File [%s] doesn't exist."%(filepath))
        try:
            cmd_hostname = config.get(self._id, 'cmd_hostname', fallback='localhost')
            cmd_port = config.getint(self._id, 'cmd_port', fallback=10000)
            self.cmd_hostname = cmd_hostname
            self.cmd_port = cmd_port
        except configparser.Error as err:
            # BUG FIX: the original ``raise err(...)`` called an exception
            # *instance*, which itself raises TypeError; raise a real
            # exception and chain the cause instead.
            raise ValueError('Config file read error: [%s] due to error [%s]. Key=[%s].'\
                %(filepath, repr(err), self._id)) from err

    def get_hostname(self):
        """
        Return the command hostname.
        """
        return self.cmd_hostname

    def get_port(self):
        """
        Return the command port.
        """
        return self.cmd_port
class HeartbeatMetadata(MetadataABC):
    """
    Heartbeat metadata: component id, last-beat timestamp and any
    outstanding exception.
    """

    def __init__(self):
        """Initialise with an empty id, the current time, no exception."""
        self.ident = ''
        self.timestamp = time.time()
        self.exception = None

    def load_config(self, filepath):
        """Heartbeat has no configurable settings; nothing to load."""

    def get_timestamp(self):
        """Return the timestamp of the last heartbeat."""
        return self.timestamp
class GuiserverMetadata(MetadataABC):
    """
    GUI server metadata: per-stream ports, hostname, client limit, and
    per-stream enabled/connected state.
    """

    def __init__(self, configfile=None):
        """Set defaults, then overlay values from ``configfile`` if given."""
        self.connection_status = [False] * 6
        self.enabled = {'rawframes':True,
                        'fourier':True,
                        'reconst_amp':True,
                        'reconst_intensity':True,
                        'reconst_phase':True,
                       }
        self.ports = {'fourier':9993,
                      'reconst_amp':9994,
                      'raw_frames':9995,
                      'telemetry':9996,
                      'reconst_intensity':9997,
                      'reconst_phase':9998}
        self.hostname = '127.0.0.1'
        self.maxclients = 5
        self.status_msg = ''
        self.load_config(configfile)

    def get_connection_status(self):
        """Return the per-client connection status list."""
        return self.connection_status

    def get_enabled_state(self):
        """Return the per-stream enabled flags."""
        return self.enabled

    def load_config(self, filepath):
        """
        Read the [GUISERVER] section: one ``<stream>_port`` option per data
        stream plus ``host`` and ``maxclients``.

        Raises:
            ValueError: if the file does not exist.
        """
        if filepath is None:
            return
        key = 'GUISERVER'
        try:
            config = configparser.ConfigParser()
            if not config.read(filepath):
                raise ValueError("File [%s] doesn't exist."%(filepath))
            # Read everything first, assign only afterwards, so a parse
            # error leaves the object untouched (matches original order).
            new_ports = {stream: config.getint(key, stream + '_port',
                                               fallback=default)
                         for stream, default in (('fourier', 9993),
                                                 ('reconst_amp', 9994),
                                                 ('raw_frames', 9995),
                                                 ('telemetry', 9996),
                                                 ('reconst_intensity', 9997),
                                                 ('reconst_phase', 9998))}
            host = config.get(key, 'host', fallback='127.0.0.1')
            maxclients = config.getint(key, 'maxclients', fallback=5)
            self.ports.update(new_ports)
            self.hostname = host
            self.maxclients = maxclients
        except configparser.Error as err:
            print('File read error: [%s] due to error [%s]. Key=[%s].'\
                  %(filepath, repr(err), key))
class DataloggerMetadata(MetadataABC):
    """
    Datalogger metadata: whether logging is enabled, plus a status string.
    """

    def __init__(self, configfile=None):
        """Default to enabled, then overlay ``configfile`` if given."""
        self.enabled = True
        self.status_msg = ''
        self.load_config(configfile)

    def get_enabled(self):
        """Return True when the datalogger is enabled."""
        return self.enabled

    def load_config(self, filepath):
        """
        Read the [DATALOGGER] section: the ``enabled`` boolean (falls back
        to False when the option is missing from the file).

        Raises:
            ValueError: if the file does not exist.
        """
        if filepath is None:
            return
        key = 'DATALOGGER'
        try:
            config = configparser.ConfigParser()
            if not config.read(filepath):
                raise ValueError("File [%s] doesn't exist."%(filepath))
            self.enabled = config.getboolean(key, 'enabled', fallback=False)
        except configparser.Error as err:
            print('File read error: [%s] due to error [%s]. Key=[%s].'\
                  %(filepath, repr(err), key))
class CameraMetadata(MetadataABC):
    """
    Camera metadata: image size, frame rate, shutter, gain and ROI.
    """

    def __init__(self, N=2048, rate=15.0, shutter=15000,
                 gain=0, roi_pos=(0, 0), roi_size=(2048, 2048)):
        """Store the supplied camera parameters (no config read here)."""
        self.N = N                # image dimension; also the ROI-size fallback
        self.rate = rate          # frame rate
        self.shutter = shutter
        self.gain = gain
        self.roi_pos = roi_pos    # (x, y) ROI origin
        self.roi_size = roi_size  # (width, height) of ROI
        self.status_msg = ''

    def get_camera_params(self):
        """Return (N, rate, shutter, gain, roi_pos, roi_size)."""
        return (self.N, self.rate, self.shutter, self.gain,
                self.roi_pos, self.roi_size)

    def load_config(self, filepath):
        """
        Read the [CAMERA] section; the ROI size falls back to N when not
        explicitly configured.

        Raises:
            ValueError: if the file does not exist.
        """
        if filepath is None:
            return
        key = 'CAMERA'
        try:
            config = configparser.ConfigParser()
            if not config.read(filepath):
                raise ValueError("File [%s] doesn't exist."%(filepath))
            # Read everything before assigning anything, so a parse error
            # leaves the object untouched (matches the original order).
            sensor_n = config.getint(key, 'N', fallback=2048)
            values = (
                sensor_n,
                config.getfloat(key, 'rate', fallback=15.0),
                config.getint(key, 'shutter', fallback=15000),
                config.getint(key, 'gain', fallback=0),
                (config.getint(key, 'roi_pos_x', fallback=0),
                 config.getint(key, 'roi_pos_y', fallback=0)),
                (config.getint(key, 'roi_size_x', fallback=sensor_n),
                 config.getint(key, 'roi_size_y', fallback=sensor_n)),
            )
            (self.N, self.rate, self.shutter, self.gain,
             self.roi_pos, self.roi_size) = values
        except configparser.Error as err:
            print('File read error: [%s] due to error [%s]. Key=[%s].'\
                  %(filepath, repr(err), key))
class CameraServerMetadata(MetadataABC):
    """
    Camera server metadata: host plus frame/command/telemetry ports.
    """

    def __init__(self, configfile=None):
        """Set localhost defaults, then overlay ``configfile`` if given."""
        self.host = '127.0.0.1'
        base_port = 2000
        self.ports = {'frame': base_port,
                      'command': base_port + 1,
                      'telemetry': base_port + 2}
        self.status_msg = ''
        self.load_config(configfile)

    def get_ports(self):
        """Return the dict of camera-server ports."""
        return self.ports

    def load_config(self, filepath):
        """
        Read the [CAMERA_SERVER] section: ``host`` plus the three port
        options ``frame``/``command``/``telemetry``.

        Raises:
            ValueError: if the file does not exist.
        """
        if filepath is None:
            return
        key = 'CAMERA_SERVER'
        try:
            config = configparser.ConfigParser()
            if not config.read(filepath):
                raise ValueError("File [%s] doesn't exist."%(filepath))
            # Read all values before assigning any of them.
            host = config.get(key, 'host', fallback='127.0.0.1')
            new_ports = {name: config.getint(key, name, fallback=default)
                         for name, default in (('frame', 2000),
                                               ('command', 2001),
                                               ('telemetry', 2002))}
            self.host = host
            self.ports.update(new_ports)
        except configparser.Error as err:
            print('File read error: [%s] due to error [%s]. Key=[%s].'\
                  %(filepath, repr(err), key))
class FramesourceMetadata(MetadataABC):
    """
    Framesource metadata: where frames come from (files on disk or the
    camera server) and the component's run state.
    """
    FRAMESOURCE_FILE = 0        # frames are read from files on disk
    FRAMESOURCE_MICROSCOPE = 1  # frames come from the camera server
    FRAMESOURCE_STATE_IDLE = 0
    FRAMESOURCE_STATE_RUNNING = 1

    def __init__(self, configfile=None):
        """Set defaults, then overlay values from ``configfile`` if given."""
        self.state = self.FRAMESOURCE_STATE_IDLE
        self.mode = ''
        self.camserver = CameraServerMetadata(configfile=configfile)
        self.file = {}
        self.file['datadir'] = '/proj/dhm/sfregoso/git_repos/dhmsw/simulated_frames/*.bmp'
        self.file['currentfile'] = '/proj/dhm/sfregoso/git_repos/dhmsw/simulated_frames/*.bmp'
        self.status_msg = ''
        self.load_config(configfile)

    def get_state(self):
        """
        Return the run state (IDLE or RUNNING).
        """
        return self.state

    def load_config(self, filepath):
        """
        Read the [FRAMESOURCE] section: the ``datadir`` frame-file glob.
        """
        if filepath is None:
            return
        key = 'FRAMESOURCE'
        try:
            config = configparser.ConfigParser()
            dataset = config.read(filepath)
            if not dataset:
                raise ValueError("File [%s] doesn't exist."%(filepath))
            datadir = config.get(key, 'datadir', fallback='')
            # BUG FIX: the configured path previously landed only in a new
            # ``self.datadir`` attribute, while __init__ (and presumably the
            # rest of the component) uses ``self.file['datadir']``, so the
            # config value was never seen. Update the dict, and keep the
            # plain attribute for backward compatibility.
            self.file['datadir'] = datadir
            self.datadir = datadir
        except configparser.Error as err:
            print('File read error: [%s] due to error [%s]. Key=[%s].'\
                  %(filepath, repr(err), key))
class WatchdogMetadata(MetadataABC):
    """
    Watchdog metadata: currently just a status message.
    """

    def __init__(self):
        """Start with an empty status message."""
        self.status_msg = ''

    def load_config(self, filepath):
        """Watchdog has no configurable settings; nothing to load."""

    def get_status_msg(self):
        """Return the status message."""
        return self.status_msg
class HologramMetadata(MetadataABC):
    """
    Hologram metadata: wavelengths, pixel pitch, crop/rebin settings and
    background subtraction.
    """

    def __init__(self):
        """Set hardware defaults; call load_config() to overlay a file."""
        self.wavelength = [635e-3]  # NOTE must be a list
        self.dx = 3.45  # Pixel width in x-direction
        self.dy = 3.45  # Pixel width in y-direction
        self.crop_fraction = None  # Fraction of the image to crop for analysis
        self.rebin_factor = 1  # Rebin the image by factor. Must be integer
        self.bgd_sub = False
        self.bgd_file = ''
        self.status_msg = ''

    def get_status_msg(self):
        """Return the status message."""
        return self.status_msg

    def load_config(self, filepath):
        """
        Read the [HOLOGRAM] section: comma-separated ``wavelength`` list,
        pixel pitches, crop/rebin factors and background subtraction flag.

        Raises:
            ValueError: if the file does not exist.
        """
        if filepath is None:
            return
        key = 'HOLOGRAM'
        try:
            config = configparser.ConfigParser()
            if not config.read(filepath):
                raise ValueError("File [%s] doesn't exist."%(filepath))
            # Parse all options first, assign only afterwards, so a parse
            # error leaves the object untouched (matches original order).
            parsed = {
                'wavelength': [float(w) for w in
                               config.get(key, 'wavelength',
                                          fallback='405e-9').split(',')],
                'dx': config.getfloat(key, 'dx', fallback=3.45e-6),
                'dy': config.getfloat(key, 'dy', fallback=3.45e-6),
                'crop_fraction': config.getfloat(key, 'crop_fraction', fallback=0),
                'rebin_factor': config.getint(key, 'rebin_factor', fallback=1),
                'bgd_sub': config.getboolean(key, 'bgd_sub', fallback=False),
            }
            for name, value in parsed.items():
                setattr(self, name, value)
        except configparser.Error as err:
            print('File read error: [%s] due to error [%s]. Key=[%s].'\
                  %(filepath, repr(err), key))
class ReconstructionMetadata(MetadataABC):
    """
    Reconstruction metadata: what to compute (amplitude/phase/intensity),
    propagation distances, ROI, and nested sub-metadata for the reference
    hologram, phase unwrapping, fitting and image centering.
    """
    # Processing-mode selector values; see _processing_mode() for the
    # string names accepted in the config file.
    RECONST_NONE = 0 # Don't compute the reconstruction
    RECONST_AMP = 1 # Compute amplitude only
    RECONST_PHASE = 2 # Compute phase only
    RECONST_INTENSITY = 3 # Compute intensity only
    RECONST_AMP_AND_PHASE = 4 # Compute amplitude and phase only
    RECONST_INT_AND_PHASE = 5 # Compute intensity and phase only
    RECONST_ALL = 6 # Compute everything
    def __init__(self, configfile=None):
        """
        Set defaults, build the nested metadata objects, then overlay
        values from ``configfile`` if given.
        """
        self.propagation_distance = [0.01] #NOTE must be a list
        #must be a list, match same number of values as wavelength
        self.chromatic_shift = [0]
        #True => Compute spectral peak per reconstruction; False => Don't
        #compute spectral peak
        self.compute_spectral_peak = False
        #True => Compute the digital phase mask; False => Don't compute
        #digital phase mask
        self.compute_digital_phase_mask = False
        ### Reference Hologram Parameters
        self.ref_holo = self.ReferenceHologramMetadata()
        ### Phase Mask
        self.phase_mask_reset = False
        ### Phase Unwrapping Parameters
        self.phase_unwrapping = self.PhaseUnwrappingMetadata()
        ### Fitting Parameters
        self.fitting = self.FittingMetadata()
        self.fitting_apply = False
        ### Region Of Interest Parameters (one RoiMetadata per axis)
        self.roi_x = self.RoiMetadata()
        self.roi_y = self.RoiMetadata()
        ### Center Image Stuff
        self.center_image = self.CenterImageMetadata()
        self.processing_mode = self.RECONST_NONE
        self.running = False
        #True => store reconstruction data to disk; False => Don't store
        #reconstruction data to disk
        self.store_files = False
        self.status_msg = ''
        self.load_config(configfile)
    def get_status_msg(self):
        """
        Return the status message
        """
        return self.status_msg
    def _processing_mode(self, mode_str):
        """
        Map a config-file mode name (case-insensitive) to the RECONST_*
        constant; unknown names fall back to RECONST_NONE.
        """
        if mode_str.lower() == 'all':
            processing_mode = ReconstructionMetadata.RECONST_ALL
        elif mode_str.lower() == 'amplitude':
            processing_mode = ReconstructionMetadata.RECONST_AMP
        elif mode_str.lower() == 'phase':
            processing_mode = ReconstructionMetadata.RECONST_PHASE
        elif mode_str.lower() == 'intensity':
            processing_mode = ReconstructionMetadata.RECONST_INTENSITY
        elif mode_str.lower() == 'amp_and_phase':
            processing_mode = ReconstructionMetadata.RECONST_AMP_AND_PHASE
        elif mode_str.lower() == 'int_and_phase':
            processing_mode = ReconstructionMetadata.RECONST_INT_AND_PHASE
        else:
            processing_mode = ReconstructionMetadata.RECONST_NONE
        return processing_mode
    def load_config(self, filepath):
        """
        Read the [RECONSTRUCTION] section, then delegate to the nested
        metadata objects for their own sections.

        Raises:
            ValueError: if the file does not exist.
        """
        if filepath is None:
            return
        key = 'RECONSTRUCTION'
        try:
            config = configparser.ConfigParser()
            dataset = config.read(filepath)
            if not dataset:
                raise ValueError("File [%s] doesn't exist."%(filepath))
            # Comma-separated float lists for distances and shifts.
            propagation_dist_str = config.get(key, 'propagation_distance', fallback='0.01')
            propagation_distance = [float(w) for w in propagation_dist_str.split(',')]
            chromatic_shift_str = config.get(key, 'chromatic_shift', fallback='0')
            chromatic_shift = [float(w) for w in chromatic_shift_str.split(',')]
            compute_spectral_peak = config.getboolean(key,
                                                      'compute_spectral_peak',
                                                      fallback=False)
            compute_digital_phase_mask = config.getboolean(key,
                                                           'compute_digital_phase_mask',
                                                           fallback=False)
            phase_mask_reset = config.getboolean(key, 'phase_mask_reset', fallback=False)
            fitting_apply = config.getboolean(key, 'fitting_apply', fallback=False)
            store_files = config.getboolean(key, 'store_files', fallback=False)
            roi_x_offset = config.getint(key, 'roi_pos_x', fallback=0)
            roi_y_offset = config.getint(key, 'roi_pos_y', fallback=0)
            roi_x_size = config.getint(key, 'roi_size_x', fallback=2048)
            roi_y_size = config.getint(key, 'roi_size_y', fallback=2048)
            mode_str = config.get(key, 'processing_mode', fallback='none')
            processing_mode = self._processing_mode(mode_str)
            # All reads succeeded; commit everything.
            self.propagation_distance = propagation_distance
            self.chromatic_shift = chromatic_shift
            self.compute_spectral_peak = compute_spectral_peak
            self.compute_digital_phase_mask = compute_digital_phase_mask
            self.phase_mask_reset = phase_mask_reset
            self.fitting_apply = fitting_apply
            self.store_files = store_files
            self.roi_x.offset = roi_x_offset
            self.roi_y.offset = roi_y_offset
            self.roi_x.size = roi_x_size
            self.roi_y.size = roi_y_size
            self.processing_mode = processing_mode
            # Nested sections are loaded by their own classes.
            self.ref_holo.load_config(filepath)
            self.phase_unwrapping.load_config(filepath)
            self.fitting.load_config(filepath)
            self.center_image.load_config(filepath)
        except configparser.Error as err:
            print('File read error: [%s] due to error [%s]. Key=[%s].'\
                  %(filepath, repr(err), key))
class ReferenceHologramMetadata(MetadataABC):
"""
Reference Hologram Metadata Class
"""
def __init__(self, path='', enabled=False, averaging_sec=0.0,
averaging_enabled=False, save=False):
"""
Constructor
"""
self.path = path
self.enabled = enabled
self.save = save
self.averaging_sec = averaging_sec
self.averaging_enabled = averaging_enabled
def get_enabled(self):
"""
Return enabled flag
"""
return self.enabled
def load_config(self, filepath):
"""
Read the config file and load data pertaining to this metadata
"""
key = 'REFERENCE_HOLOGRAM'
if filepath is None:
return
try:
config = configparser.ConfigParser()
dataset = config.read(filepath)
if not dataset:
raise ValueError("File [%s] doesn't exist."%(filepath))
self.path = config.get(key, 'path', fallback='')
self.enabled = config.getboolean(key, 'enabled', fallback=False)
self.save = config.getboolean(key, 'save', fallback=False)
self.averaging_sec = config.getfloat(key, 'averaging_sec', fallback=0.)
self.averaging_enabled = config.getboolean(key, 'averaging_enabled', fallback=False)
except configparser.Error as err:
print('File read error: [%s] due to error [%s]. Key=[%s].'\
%(filepath, repr(err), key))
class CenterImageMetadata(MetadataABC):
"""
Center Image of reconstruction Metadata Class
"""
def __init__(self, center=False, center_and_tilt=False, max_value=False,
wide_spectrum=False, configfile=None):
"""
Constructor
"""
self.center = center
self.center_and_tilt = center_and_tilt
self.max_value = max_value
self.wide_spectrum = wide_spectrum
self.load_config(configfile)
def get_center(self):
"""
Return center flag
"""
return self.center
def load_config(self, filepath):
"""
Read the config file and load data pertaining to this metadata
"""
key = 'CENTER_IMAGE'
if filepath is None:
return
try:
config = configparser.ConfigParser()
dataset = config.read(filepath)
if not dataset:
raise ValueError("File [%s] doesn't exist."%(filepath))
center = config.getboolean(key, 'center', fallback=False)
center_and_tilt = config.getboolean(key, 'center_and_tilt', fallback=False)
max_value = config.getboolean(key, 'max_value', fallback=False)
wide_spectrum = config.getboolean(key, 'wide_spectrum', fallback=False)
self.center = center
self.center_and_tilt = center_and_tilt
self.max_value = max_value
self.wide_spectrum = wide_spectrum
except configparser.Error as err:
print('File read error: [%s] due to error [%s]. Key=[%s].'\
%(filepath, repr(err), key))
class PhaseUnwrappingMetadata(MetadataABC):
"""
Phase Unwrapping Metadata Class
"""
PHASE_UNWRAPPING_NONE = 0
PHASE_UNWRAPPING_ALG1 = 1
PHASE_UNWRAPPING_ALG2 = 2
def __init__(self, enabled=False, algorithm=PHASE_UNWRAPPING_NONE):
"""
Constructor
"""
self.enabled = enabled
self.algorithm = algorithm
def get_algorithm(self):
"""
Return algorithm ID
"""
return self.algorithm
def load_config(self, filepath):
"""
Read the config file and load data pertaining to this metadata
"""
key = 'PHASE_UNWRAPPING'
if filepath is None:
return
try:
config = configparser.ConfigParser()
dataset = config.read(filepath)
if not dataset:
raise ValueError("File [%s] doesn't exist."%(filepath))
enabled = config.getboolean(key, 'enabled', fallback=False)
algorithm_str = config.get(key, 'algorithm', fallback='none')
if algorithm_str == 'algorithm_1':
algorithm = ReconstructionMetadata.PhaseUnwrappingMetadata.PHASE_UNWRAPPING_ALG1
elif algorithm_str == 'algorithm_2':
algorithm = ReconstructionMetadata.PhaseUnwrappingMetadata.PHASE_UNWRAPPING_ALG2
else:
algorithm = ReconstructionMetadata.PhaseUnwrappingMetadata.PHASE_UNWRAPPING_NONE
self.enabled = enabled
self.algorithm = algorithm
except configparser.Error as err:
print('File read error: [%s] due to error [%s]. Key=[%s].'\
%(filepath, repr(err), key))
class FittingMetadata(MetadataABC):
    """
    Fitting Metadata Class

    Describes how a fit is applied during reconstruction: the mode (segment
    dimensionality), the fitting method, the polynomial order, and whether
    the fit has been applied.
    """
    # Fitting mode identifiers (INI values '1d_segment' / '2d_segment').
    FITTING_MODE_NONE = 0
    FITTING_MODE_1D_SEGMENT = 1
    FITTING_MODE_2D_SEGMENT = 2
    # Fitting method identifiers (INI value 'polynomial').
    FITTING_METHOD_NONE = 0
    # BUGFIX: this was previously 0, identical to FITTING_METHOD_NONE, so a
    # configured 'polynomial' method was indistinguishable from 'none'.
    FITTING_METHOD_POLYNOMIAL = 1

    def __init__(self, mode=FITTING_MODE_NONE, method=FITTING_METHOD_NONE,
        order=0, applied=False):
        """
        Constructor

        :param mode: one of the FITTING_MODE_* identifiers
        :param method: one of the FITTING_METHOD_* identifiers
        :param order: polynomial order of the fit
        :param applied: whether the fit has been applied
        """
        self.mode = mode
        self.method = method
        self.order = order
        self.applied = applied

    def get_mode(self):
        """
        Return Mode
        """
        return self.mode

    def load_config(self, filepath):
        """
        Read the config file and load data pertaining to this metadata

        Reads the [FITTING] section. On a configparser error the current
        values are left unchanged and the error is printed.

        :param filepath: path to the INI file, or None to keep defaults
        """
        key = 'FITTING'
        if filepath is None:
            return
        try:
            config = configparser.ConfigParser()
            dataset = config.read(filepath)
            if not dataset:
                # NOTE: not caught by the configparser.Error handler below.
                raise ValueError("File [%s] doesn't exist."%(filepath))
            mode_str = config.get(key, 'mode', fallback='')
            if mode_str.lower() == '1d_segment':
                self.mode = ReconstructionMetadata.FittingMetadata.FITTING_MODE_1D_SEGMENT
            elif mode_str.lower() == '2d_segment':
                self.mode = ReconstructionMetadata.FittingMetadata.FITTING_MODE_2D_SEGMENT
            else:
                self.mode = ReconstructionMetadata.FittingMetadata.FITTING_MODE_NONE
            method_str = config.get(key, 'method', fallback='')
            if method_str.lower() == 'polynomial':
                self.method = ReconstructionMetadata.FittingMetadata.FITTING_METHOD_POLYNOMIAL
            else:
                self.method = ReconstructionMetadata.FittingMetadata.FITTING_METHOD_NONE
            self.order = config.getint(key, 'order', fallback=0)
            self.applied = config.getboolean(key, 'applied', fallback=False)
        except configparser.Error as err:
            print('File read error: [%s] due to error [%s]. Key=[%s].'\
                %(filepath, repr(err), key))
class RoiMetadata(MetadataABC):
    """
    Region Of Interest Metadata Class

    Holds the ROI's starting offset and extent in pixels.
    """

    def __init__(self, offset=0, size=2048):
        """Store the ROI offset and size."""
        self.size = size
        self.offset = offset

    def get_size(self):
        """Return size of ROI."""
        return self.size

    def load_config(self, filepath):
        """Read the config file and load data pertaining to this metadata.

        No ROI keys are read from the config file; this only satisfies the
        MetadataABC interface.
        """
class ReconstructionDoneMetadata(MetadataABC):
    """
    Reconstruction Done Metadata Class

    Simple flag object recording whether reconstruction has completed.
    """

    def __init__(self, done=True):
        """Store the completion flag."""
        self.done = done

    def get_done(self):
        """Return the done flag."""
        return self.done

    def load_config(self, filepath):
        """Read the config file and load data pertaining to this metadata.

        No keys are read from the config file for this metadata; this only
        satisfies the MetadataABC interface.
        """
class FouriermaskMetadata(MetadataABC):
    """
    Fourier Mask Metadata Class

    Builds the Fourier-space mask (one Circle per wavelength) used in
    reconstruction.
    """
    def __init__(self):
        """
        Constructor
        """
        # One Circle(center_x, center_y, radius) per wavelength.
        self.center_list = []
        # Mask built from the circles; None until load_config() runs.
        self.mask = None
        self.status_msg = ""

    def get_status_msg(self):
        """
        Return the status message
        """
        return self.status_msg

    def load_config(self, filepath):
        """
        Read the config file and load data pertaining to this metadata

        Reads circle definitions from [FOURIERMASK], the wavelength list from
        [HOLOGRAM] and the sensor size N from [CAMERA], then constructs the
        Mask. On a configparser error the error is printed and current values
        are kept.
        """
        key = 'FOURIERMASK'
        if filepath is None:
            return
        try:
            config = configparser.ConfigParser()
            dataset = config.read(filepath)
            if not dataset:
                # NOTE(review): this ValueError is NOT caught by the
                # configparser.Error handler below, so it propagates.
                raise ValueError("File [%s] doesn't exist."%(filepath))
            center_x_str = config.get(key, 'center_x', fallback='702, 1117, 1439')
            center_x = [float(c) for c in center_x_str.split(',')]
            # NOTE(review): fallback '6161' looks suspicious next to the other
            # coordinates (< 2048) -- confirm against the instrument config.
            center_y_str = config.get(key, 'center_y', fallback='749, 6161, 893')
            center_y = [float(c) for c in center_y_str.split(',')]
            radius_str = config.get(key, 'radius', fallback='170, 170, 170')
            radius = [float(c) for c in radius_str.split(',')]
            wavelength_str = config.get('HOLOGRAM', 'wavelength', fallback='405e-9')
            wavelength = [float(w) for w in wavelength_str.split(',')]
            N = config.getint('CAMERA', 'N', fallback=2048)
            # One circle per configured wavelength; assumes the center/radius
            # lists are at least as long as the wavelength list (IndexError
            # otherwise -- TODO confirm configs guarantee this).
            self.center_list = []
            for i in range(len(wavelength)):
                self.center_list.append(Circle(center_x[i], center_y[i], radius[i]))
            # Placeholder spatial-frequency step; the intended formula is in
            # the trailing comment but hololen/_pix_width_x are unavailable here.
            dk = 1 #2*np.pi/(self.hololen * self._pix_width_x)
            #self.mask = Mask(N, self.center_list[0:len(wavelength)], dk)
            self.mask = Mask(N, self.center_list, dk)
        except configparser.Error as err:
            print('File read error: [%s] due to error [%s]. Key=[%s].'\
                %(filepath, repr(err), key))
class SessionMetadata(MetadataABC):
    """
    Session Metadata Class

    Session name/description plus nested hologram and lens metadata.
    """
    def __init__(self):
        self.name = ""
        self.description = ""
        self.holo = HologramMetadata()
        # NOTE(review): LensMetadata is looked up as an attribute of self, but
        # SessionMetadata does not define it; this resolves only if a base
        # class or enclosing structure provides it -- verify at runtime.
        self.lens = self.LensMetadata()
        self.status_msg = ""

    def get_status_msg(self):
        """
        Return the status message
        """
        return self.status_msg

    def load_config(self, filepath):
        """
        Read the config file and load data pertaining to this metadata

        Reads [SESSION] name/description, then delegates to the nested
        hologram and lens metadata loaders for their own sections.
        """
        key = 'SESSION'
        if filepath is None:
            return
        try:
            config = configparser.ConfigParser()
            dataset = config.read(filepath)
            if not dataset:
                # NOTE(review): not caught by the configparser.Error handler.
                raise ValueError("File [%s] doesn't exist."%(filepath))
            self.name = config.get(key, 'name', fallback='')
            self.description = config.get(key, 'description', fallback='')
            self.holo.load_config(filepath)
            self.lens.load_config(filepath)
        except configparser.Error as err:
            print('File read error: [%s] due to error [%s]. Key=[%s].'\
                %(filepath, repr(err), key))
class LensMetadata(MetadataABC):
    """
    Lens Metadata

    Physical lens parameters used for reconstruction.
    """
    def __init__(self):
        """
        Constructor
        """
        self.focal_length = 1e-3 #mm
        self.numerical_aperture = 0.1
        self.system_magnification = 1.0
        self.reconst_space = False #False==>Object space; True==>Detector space

    def get_focal_length(self):
        """
        Return the focal length
        """
        # BUGFIX(doc): the docstring previously said "Return the status
        # message" -- a copy-paste error; this returns the focal length.
        return self.focal_length

    def load_config(self, filepath):
        """
        Read the config file and load data pertaining to this metadata

        Reads the [LENS] section. On a configparser error the current values
        are left unchanged and the error is printed.
        """
        key = 'LENS'
        if filepath is None:
            return
        try:
            config = configparser.ConfigParser()
            dataset = config.read(filepath)
            if not dataset:
                # NOTE: not caught by the configparser.Error handler below.
                raise ValueError("File [%s] doesn't exist."%(filepath))
            focal_length = config.getfloat(key, 'focal_length', fallback=1e-3)
            numerical_aperture = config.getfloat(key, 'numerical_aperture', fallback=0.1)
            system_magnification = config.getfloat(key, 'system_magnification', fallback=1.0)
            # BUGFIX: reconst_space is a boolean flag (see __init__), but was
            # previously read with getfloat(), which raises ValueError on
            # 'true'/'false' and would otherwise store a float, not a bool.
            reconst_space = config.getboolean(key, 'reconst_space', fallback=False)
            # Assign only after all reads succeeded, so a parse error leaves
            # the object unmodified.
            self.focal_length = focal_length
            self.numerical_aperture = numerical_aperture
            self.system_magnification = system_magnification
            self.reconst_space = reconst_space
        except configparser.Error as err:
            print('File read error: [%s] due to error [%s]. Key=[%s].'\
                %(filepath, repr(err), key))
# Script entry point: build the metadata dictionary from the default INI file.
if __name__ == "__main__":
    FNAME = 'DEFAULT.ini'
    MetadataDictionary(FNAME)
|
from pygame import *
# Game window dimensions (pixels)
win_width = 600
win_height = 500
window = display.set_mode((win_width, win_height))
display.set_caption("Пинг понг")
# Scene background colour (light cyan)
color_b = (200, 255, 255)
window.fill(color_b)
# Sprites: the paddles and the ball
class GameSprite(sprite.Sprite):
    """A drawable game object: an image scaled to a given size at (x, y)."""

    def __init__(self, filename, x, y, speed, width, height):
        super().__init__()
        # Load the texture and scale it to the requested size.
        self.image = transform.scale(image.load(filename), (width, height))
        self.speed = speed
        # Rectangle positioned at the sprite's start coordinates.
        self.rect = self.image.get_rect(topleft=(x, y))

    def reset(self):
        """Redraw the sprite at its current position."""
        window.blit(self.image, self.rect)
# Paddles
class Player(GameSprite):
    """A keyboard-controlled paddle.

    update_r() uses the UP/DOWN arrow keys (right player); update_l() uses
    A (up) / Z (down) (left player). Movement is clamped to the window's
    vertical play area.
    """

    def _move(self, keys_pressed, up_key, down_key):
        # Shared movement logic (was duplicated in update_r/update_l):
        # move while the key is held and the paddle stays on screen.
        if keys_pressed[up_key] and self.rect.y > 5:
            self.rect.y = self.rect.y - self.speed
        if keys_pressed[down_key] and self.rect.y < win_height - 80:
            self.rect.y = self.rect.y + self.speed

    def update_r(self):
        """Right paddle: arrow keys."""
        self._move(key.get_pressed(), K_UP, K_DOWN)

    def update_l(self):
        """Left paddle: A / Z keys."""
        self._move(key.get_pressed(), K_a, K_z)
# Paddle start positions and size
r1_x = 30
r1_y = 200
r2_x = 520
r2_y = 200
r_w = 50
r_h = 70
racket1 = Player("racket.png", r1_x, r1_y, 4, r_w, r_h)
racket2 = Player("racket.png", r2_x, r2_y, 4, r_w, r_h)
# The ball
b_x = 200
b_y = 200
ball = GameSprite("tenis_ball.png", b_x, b_y, 4, 50, 50)
# Ball velocity per frame (pixels)
ball_step_x = 2
ball_step_y = 2
# Labels for the results
font.init()
# NOTE: rebinds the pygame 'font' module name to a Font instance.
font = font.Font(None, 35)
winner1 = font.render('Выиграла Ракетка 1', True, (180, 0,0))
winner2 = font.render('Выиграла Ракетка 2', True, (180, 0,0))
game = True
finish = False
clock = time.Clock()
FPS = 60 # frame rate
# Main game loop: runs until the window is closed.
while game:
    # Handle window events (close button quits the game).
    for ev in event.get():
        if ev.type == QUIT:
            game = False
    # Once a side has won, the scene is frozen with the winner label shown.
    if finish != True:
        window.fill(color_b)
        # Left paddle uses A/Z, right paddle uses the arrow keys.
        racket1.update_l()
        racket2.update_r()
        ball.rect.x = ball.rect.x + ball_step_x
        ball.rect.y = ball.rect.y + ball_step_y
        # Collision handling: bouncing off a paddle reverses BOTH axes,
        # not only the horizontal one.
        if sprite.collide_rect(racket1, ball) or sprite.collide_rect(racket2, ball):
            ball_step_x = -1 * ball_step_x
            ball_step_y = -1 * ball_step_y
        # The ball reached the top/bottom edge of the screen
        if ball.rect.y > win_height-50 or ball.rect.y < 0:
            ball_step_y = -1 * ball_step_y
        # The ball got past a paddle: the opposite side wins
        if ball.rect.x < 0:
            finish = True
            window.blit(winner2, (200, 200))
        if ball.rect.x > win_width:
            finish = True
            window.blit(winner1, (200, 200))
        racket1.reset()
        racket2.reset()
        ball.reset()
    display.update()
    clock.tick(FPS)
|
from saturn import state
from saturn.socks import SocksHello, SocksAuthenticate
from saturn.socks.request import SocksRequest
class Dispatcher:
    """Routes data between a SOCKS client and the destination, driven by the
    connection's authentication state machine (saturn.state)."""

    def __init__(self, server, loop, transport):
        """
        :param server: owning server instance
        :param loop: asyncio event loop
        :param transport: transport connected to the SOCKS client
        """
        self.server_transport = transport
        self.server = server
        self.loop = loop
        # Transport to the remote host; None until a request establishes it.
        self.client_transport = None
        # Protocol state machine; every connection starts unauthenticated.
        self.state = state.NotAuthenticated()
        self.busy = False
        self.previous = None

    async def handle(self, data):
        """Dispatch one chunk of client data according to the current state.

        Returns the reply bytes to send back to the client, or None when the
        data was relayed to the remote transport (Connected state).
        """
        result = None
        if isinstance(self.state, state.Connected):
            # Tunnel established: relay raw bytes to the destination.
            self.client_transport.write(data)
        elif isinstance(self.state, state.NotAuthenticated):
            # Initial greeting / method negotiation.
            result = SocksHello(self, data).reply()
        elif isinstance(self.state, state.WaitingAuthenticationData):
            result = await SocksAuthenticate(self, data).authenticate()
        elif isinstance(self.state, state.Authenticated):
            request = SocksRequest.parse(self, data)
            result = await request.go()
        return result

    def reply(self, data):
        """Write a reply back to the SOCKS client."""
        self.server_transport.write(data)
|
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import json
import os
from skimage import io
from PIL import Image
from pycocotools.coco import COCO
from utils import plotOnImage, clip_detect, GID
# Lowest valid image index for the dataset accessors below.
MIN_IMAGE=0
# COCO keypoint names, in annotation order (17 joints).
coco_joints = [ 'nose', 'left_eye', 'right_eye',
    'left_ear', 'right_ear', 'left_shoulder',
    'right_shoulder', 'left_elbow', 'right_elbow',
    'left_wrist', 'right_wrist', 'left_hip',
    'right_hip', 'left_knee', 'right_knee',
    'left_ankle', 'right_ankle' ]
# Index of each field within a COCO-format bbox [x, y, w, h].
BBOX_IDX = { 'x' : 0, 'y' : 1, 'w' : 2, 'h' : 3 }
def coco_valid_joints(jointsx, jointsy, jointsv, dims):
    """
    coco_valid_joints: Function to decide which joints are valid. Each dataset
    has different criteria, so can't have a single function. Boo.

    For COCO a joint is valid iff its visibility flag is > 0.

    jointsx: ndarray joints x coord
    jointsy: ndarray joints y coord
    jointsv: ndarray joints visibility flags
    dims: (width, height) -- currently unused; kept so per-dataset validators
        share one signature and bounds checks can be reinstated if needed
    Returns: boolean ndarray, True where the joint is valid
    """
    assert(jointsx.shape==jointsy.shape)
    assert(jointsx.shape==jointsv.shape)
    # The historical commented-out bounds-checking diagnostics (coordinates
    # inside dims, nonzero x/y for invisible joints) were removed: no images
    # in the current data break the rules. Recover from version control if
    # new data is added.
    return jointsv > 0
def coco_bbox(index, jointsx, jointsy, jointsv, dims):
    """
    Compute the minimal bounding box around the valid joints.

    index: image index (unused here; kept for signature compatibility)
    jointsx, jointsy: joint coordinate sequences
    jointsv: joint visibility flags
    dims: (width, height) of the image, forwarded to coco_valid_joints
    Returns (x0, y0, x1, y1) in integer pixels, or (0, 0, 0, 0) when no
    joint is valid.
    """
    # Round to whole pixels before taking the min/max extents.
    jx=np.array(jointsx).round(0)
    jy=np.array(jointsy).round(0)
    jv=np.array(jointsv)
    joint_mask=coco_valid_joints(jx, jy, jv, dims)
    if joint_mask.any():  # idiom fix: was 'joint_mask.any() == True'
        x0=int(jx[joint_mask].min())
        x1=int(jx[joint_mask].max())
        y0=int(jy[joint_mask].min())
        y1=int(jy[joint_mask].max())
    else:
        # No valid joints: sentinel box the callers test for.
        x0=x1=y0=y1=0
    return (x0, y0, x1, y1)
class PyCOCO:
    """Facade over the COCO train/val/test subsets."""

    def __init__(self, base_path, path_to_trn_img, path_to_trn_annot, path_to_val_img, path_to_val_annot, path_to_test_img, path_to_test_info, path_to_silhouette):
        self._coco_trn = PyCOCOTrainVal(base_path, path_to_trn_img, path_to_trn_annot, path_to_silhouette)
        self._coco_val = PyCOCOTrainVal(base_path, path_to_val_img, path_to_val_annot, path_to_silhouette)
        self._coco_tst = PyCOCOTest(base_path, path_to_test_img, path_to_test_info)

    def gather_data(self, which_set, gid=None):
        """Collect annotation dicts for one subset.

        which_set: 'train', 'val', 'test', or 'toy' (a small sample drawn
        from the training subset). Any other name yields an empty list.
        gid: optional GID generator; a fresh one is created when omitted.
        """
        if gid==None:
            gid = GID()
        # 'toy' is served by the training accessor, which subsamples itself.
        if which_set in ('train', 'toy'):
            return self._coco_trn.gather_data(which_set, gid)
        if which_set == 'val':
            return self._coco_val.gather_data(which_set, gid)
        if which_set == 'test':
            return self._coco_tst.gather_data(gid)
        return []
class PyCOCOTrainVal:
    """Accessor for a COCO train or val split: images, person keypoint
    annotations, and silhouette PNGs generated from segmentation masks."""

    def __init__(self, base_path, path_to_img, path_to_annot, path_to_silhouette):
        """
        base_path: filesystem prefix for all other paths
        path_to_img: image directory, relative to base_path
        path_to_annot: annotation JSON file, relative to base_path
        path_to_silhouette: output directory for generated silhouette PNGs
        """
        self._base_path = base_path
        self._path_to_img = path_to_img
        self._path_to_annot = path_to_annot
        self._path_to_silhouette = path_to_silhouette
        # Create pre-processing dirs if they don't exist
        # Test images don't have keypoints/segmentations, so only train/val/toy
        os.system(f'mkdir -p {base_path}{path_to_silhouette}/train')
        os.system(f'mkdir -p {base_path}{path_to_silhouette}/val')
        os.system(f'mkdir -p {base_path}{path_to_silhouette}/toy')
        # Load data set
        self._coco=COCO(self._base_path+self._path_to_annot)
        # Load categories. Code to retrieve names included in comment
        self._cats = self._coco.loadCats(self._coco.getCatIds())
        #nms=[cat['name'] for cat in cats]
        #print('COCO categories: \n{}\n'.format(' '.join(nms)))
        # get all images containing given categories, select one at random
        #catIds = self._coco.getCatIds(catNms=['person','dog','skateboard']);
        # Only images containing people are used.
        self._catIds = self._coco.getCatIds(catNms=['person']);
        self._imgIds = self._coco.getImgIds(catIds=self._catIds );
        self.num_images = len(self._imgIds)
        print('Found images: ' + str(self.num_images))

    def _image_path(self, index):
        """Return the image path (relative to base_path) for split index
        *index*; raises for an out-of-range index."""
        if index < MIN_IMAGE or index > self.num_images-1:
            raise Exception(f'Invalid image index: {index}. Must be in range [{MIN_IMAGE}, {self.num_images-1}]')
        img = self._coco.loadImgs(self._imgIds[index])[0]
        return self._path_to_img + '/' + img['file_name']

    def disp_image(self, index):
        """Display image *index* with matplotlib."""
        # Read in image and normalize. These are jpeg's, so need to be divided by 255 to
        # get values in range [0, 1]
        img=matplotlib.image.imread(self._base_path+self._image_path(index))
        img=img/255
        plt.imshow(img)
        plt.show()

    def disp_annotations(self, index):
        """Display image *index* with the first annotation's keypoints
        overlaid; prints all annotations to stdout as a side effect."""
        # Read in image and normalize. These are jpeg's, so need to be divided by 255 to
        # get values in range [0, 1]
        img=matplotlib.image.imread(self._base_path+self._image_path(index))
        img=img/255
        height=img.shape[0]
        width=img.shape[1]
        # Annotations
        img_meta = self._coco.loadImgs(self._imgIds[index])[0]
        annIds = self._coco.getAnnIds(imgIds=img_meta['id'], catIds=self._catIds, iscrowd=None)
        anns = self._coco.loadAnns(annIds)
        for a in anns:
            print(a)
        # Let's only work with the first one for now
        # COCO keypoints are stored flat as [x0, y0, v0, x1, y1, v1, ...].
        joints = np.array(anns[0]['keypoints'])
        print(joints)
        jointsx=joints[0::3]
        jointsy=joints[1::3]
        jointsv=joints[2::3]
        # Note: Very few images actually need this. Mainly cosmetic, to get rid of
        # some whitespace around the image and keep a 1:1 pixel mapping
        if clip_detect(jointsx, 0, width-1):
            np.clip(jointsx, 0, width-1, out=jointsx)
        if clip_detect(jointsy, 0, height-1):
            np.clip(jointsy, 0, height-1, out=jointsy)
        # plotOnImage expects (row, col) = (y, x) pairs.
        jointsxy=np.transpose(np.array([jointsy, jointsx]))
        img=plotOnImage(img, jointsxy, 'ro')
        plt.imshow(img)
        plt.show()

    def gather_data(self, which_set, gid=None):
        """Collect formatted annotation dicts for 'train', 'val' or 'toy'
        ('toy' = first 30 images of this split). Any other name aborts via
        the assert below."""
        if gid==None:
            gid = GID()
        result = []
        if which_set == 'train':
            for i in range(self.num_images):
                the_annotations=self._get_annotations(i, which_set)
                self._format_annotations(result,i,the_annotations,gid)
        elif which_set == 'val':
            for i in range(self.num_images):
                the_annotations=self._get_annotations(i, which_set)
                self._format_annotations(result,i,the_annotations,gid)
        elif which_set == 'toy':
            for i in range(30):
                the_annotations=self._get_annotations(i, which_set)
                self._format_annotations(result,i,the_annotations,gid)
        else:
            print('Invalid set: ' + which_set)
            assert(1==0)
        return result

    def _get_annotations(self, index, which_set):
        """Build raw annotation dicts for every person in image *index*.

        Side effect: writes one silhouette PNG per annotation that has
        segmentation data. Annotations with no valid joints are skipped.
        """
        img_meta = self._coco.loadImgs(self._imgIds[index])[0]
        annIds = self._coco.getAnnIds(imgIds=img_meta['id'], catIds=self._catIds, iscrowd=None)
        anns = self._coco.loadAnns(annIds)
        # Just check that we never see 0 annotations
        if(len(anns)<1):
            print('Unexpected length: ' + str(len(anns)))
            print(anns)
            exit()
        # Image-level stuff: path, width, height
        # Note: height/width in metdata doesn't agree with this but this number is right
        image_path = self._image_path(index)
        # Strip directory and extension to get the 12-char COCO image id.
        image_filename_only=image_path[-16:-4]
        img=Image.open(self._base_path+self._image_path(index))
        width, height = img.size
        annotations=[]
        for i, ann in enumerate(anns):
            annotation = {}
            # Leave out ID in case there aren't any useful annotations
            annotation['path'] = image_path
            annotation['width'] = width
            annotation['height'] = height
            # Calculate minimal bbox from joints
            jointsx=ann['keypoints'][0::3]
            jointsy=ann['keypoints'][1::3]
            jointsv=ann['keypoints'][2::3]
            bbox=coco_bbox(index, jointsx, jointsy, jointsv, (width, height))
            # (0,0,0,0) means "no valid joints" -- skip this annotation.
            if bbox != (0,0,0,0):
                annotation['jointsx']=ann['keypoints'][0::3]
                annotation['jointsy']=ann['keypoints'][1::3]
                annotation['jointsv']=ann['keypoints'][2::3]
                annotation['bbox'] = bbox
                # See if there is silhouette info available. If so, generate silhouette.
                # Note: There appears to always be segmentation available if there are keypoints
                if 'segmentation' in ann:
                    mask = self._coco.annToMask(ann)
                    filename=self._path_to_silhouette+'/'+which_set+'/'
                    filename+=f'{image_filename_only}_{i:02d}.png'
                    # Binary mask -> black/white PNG (255 where the person is).
                    masked_image = np.zeros_like(mask, dtype=np.uint8)
                    masked_image[:,:] = np.where((mask==1), 255, 0)
                    im=Image.fromarray(masked_image)
                    im.save(self._base_path+filename)
                    annotation['silhouette']=filename
                annotations.append(annotation)
        return annotations

    def _format_annotations(self, results, index, annotations, gid):
        """Flatten raw annotation dicts into the output schema (ID, set,
        path, bbox, x0..v16, optional silhouette) and append to *results*."""
        for ann in annotations:
            annotation = {}
            annotation['ID'] = gid.next()
            annotation['set'] = 'COCO'
            annotation['path'] = ann['path']
            # Minimal bounding box containing joints
            annotation['bbox']=ann['bbox']
            jointsx=ann['jointsx']
            jointsy=ann['jointsy']
            jointsv=ann['jointsv']
            # TODO: Remove this when confirmed no invalid joints
            jx=np.array(jointsx).round(0)
            jy=np.array(jointsy).round(0)
            jv=np.array(jointsv)
            valj=coco_valid_joints(jx, jy, jv, (ann['width'], ann['height']))
            assert(valj.any())
            # Now deal with the joints
            for j in range(len(coco_joints)):
                annotation[f'x{j}'] = ann['jointsx'][j]
                annotation[f'y{j}'] = ann['jointsy'][j]
                annotation[f'v{j}'] = ann['jointsv'][j]
            if 'silhouette' in ann:
                annotation['silhouette']=ann['silhouette']
            # Not sure if we need this
            # Now, add silhouette info if available
            #if uann[1] is not None:
            #    silhouette_filename = uann[1][2] # In numpy format, image_name is index 2
            #    silhouette_filename = silhouette_filename[:5]+'_segmentation_full.png'
            #    annotation['silhouette'] = self._base_path+self._upi_s1h_img+'/'+silhouette_filename
            results.append(annotation)
        return

    def _format_annotation(self, index, number):
        """Deprecated single-annotation formatter; kept for reference only."""
        # Deprecated, but leaving for reference
        img_meta = self._coco.loadImgs(self._imgIds[index])[0]
        annIds = self._coco.getAnnIds(imgIds=img_meta['id'], catIds=self._catIds, iscrowd=None)
        anns = self._coco.loadAnns(annIds)
        # We will only use the first annotation that shows up for each image. If we need more,
        # update thisannotation that shows up for each image. If we need more,
        # update this.
        if(len(anns)<1):
            print('Unexpected length: ' + str(len(anns)))
            print(anns)
            exit()
        # If height/width needed, uncomment
        # Update: Not sure what these are, but they don't match the image height/width
        # Use PIL instead
        #cocoheight=self._coco.dataset['images'][index]['height']
        #cocowidth=self._coco.dataset['images'][index]['width']
        # Get image height/width from image (height/width in dataset is not this)
        img=Image.open(self._base_path+self._image_path(index))
        width, height = img.size
        annotation = {}
        annotation['ID'] = number
        annotation['path'] = self._image_path(index)
        #print(f"{annotation['path']} {index}")
        # Calculate minimal bbox from joints
        jointsx=anns[0]['keypoints'][0::3]
        jointsy=anns[0]['keypoints'][1::3]
        jointsv=anns[0]['keypoints'][2::3]
        bbox=coco_bbox(index, jointsx, jointsy, jointsv, (width, height))
        if bbox != (0,0,0,0):
            annotation['bbox'] = bbox
        #if index>5:
        #    exit()
        # COCO-format bboxes, just in case we need it sometime
        #annotation['bbox_x'] = anns[0]['bbox'][BBOX_IDX['x']]
        #annotation['bbox_y'] = anns[0]['bbox'][BBOX_IDX['y']]
        #annotation['bbox_w'] = anns[0]['bbox'][BBOX_IDX['w']]
        #annotation['bbox_h'] = anns[0]['bbox'][BBOX_IDX['h']]
        # Walk the flat [x, y, v, ...] keypoint list in lockstep with the
        # joint names.
        ann_joints = anns[0]['keypoints']
        iidx=0
        for j in range(len(coco_joints)):
            for xy in ['x', 'y', 'v']:
                annotation[f'{xy}{j}'] = ann_joints[iidx]
                iidx+=1
        return annotation
class PyCOCOTest:
    """Accessor for the COCO test split (images only, no annotations)."""

    def __init__(self, base_path, path_to_img, path_to_img_info):
        self._base_path = base_path
        self._path_to_img = path_to_img
        self._path_to_img_info = path_to_img_info
        # There is no annotation API for the test split, so the image-info
        # JSON is read directly.
        with open(self._base_path + self._path_to_img_info, 'r') as infile:
            self._img_info = json.load(infile)
        # Category filtering is not possible here; every test image is used.
        self.num_images = len(self._img_info['images'])
        print('Found images: ' + str(self.num_images))

    def _image_path(self, index):
        """Relative path of test image *index*; raises when out of range."""
        if not (MIN_IMAGE <= index <= self.num_images - 1):
            raise Exception(f'Invalid image index: {index}. Must be in range [{MIN_IMAGE}, {self.num_images-1}]')
        meta = self._img_info['images'][index]
        return self._path_to_img + '/' + meta['file_name']

    def disp_image(self, index):
        """Display image *index* with matplotlib, normalized to [0, 1]."""
        # JPEGs load as 0-255 values; divide to normalize.
        pixels = matplotlib.image.imread(self._base_path + self._image_path(index))
        pixels = pixels / 255
        plt.imshow(pixels)
        plt.show()

    def gather_data(self, gid=None):
        """Return one annotation dict per test image (path info only)."""
        if gid is None:
            gid = GID()
        return [self._format_annotation(i, gid.next()) for i in range(self.num_images)]

    def _format_annotation(self, index, number):
        """Build the annotation dict for image *index* with ID *number*."""
        # Height/width are read but currently unused (the bbox fields that
        # consumed them are disabled); kept so a schema change fails fast.
        height = self._img_info['images'][index]['height']
        width = self._img_info['images'][index]['width']
        annotation = {
            'ID': number,
            'set': 'COCO',
            'path': self._image_path(index),
        }
        return annotation
|
#!/usr/bin/env python
# Packaging script for awsudo2 (setuptools).
from setuptools import setup

setup(
    name='awsudo2',
    description='sudo-like utility to manage AWS credentials',
    url='https://github.com/outersystems/awsudo2',
    packages=['awsudo2'],
    # Installs an 'awsudo2' console executable that calls awsudo2.main:main.
    entry_points={
        'console_scripts': [
            'awsudo2 = awsudo2.main:main',
        ],
    },
    install_requires=[
        'boto',
        'retrying',
        'awscli',
        # NOTE(review): pytz and boto3 are pinned to exact versions; confirm
        # the pins are still required before upgrading.
        'pytz==2019.3',
        'boto3==1.12.21'
    ],
)
|
"""
File: ta09-solution.py
Author: Br. Burton
Demonstrates the use of exceptions.
"""
class BalanceError(Exception):
    """
    Raised if the balance would be negative.
    """
    # The explicit __init__(message) override was removed: Exception already
    # accepts and stores a message argument, so the override was redundant.
class OutOfChecksError(Exception):
    """
    Raised if the number of checks is exceeded
    """
    # The explicit __init__(message) override was removed: Exception already
    # accepts and stores a message argument, so the override was redundant.
class CheckingAccount:
    """
    Represents a checking account with a balance and a number
    of checks.
    """

    def __init__(self, starting_balance, num_checks):
        """
        Create an account.

        :param starting_balance: initial balance; must not be negative
        :param num_checks: number of checks available
        :raises BalanceError: if starting_balance is negative
        """
        if starting_balance < 0:
            raise BalanceError("Starting balance cannot be negative.")
        self.balance = starting_balance
        self.check_count = num_checks

    def deposit(self, amount):
        """Add *amount* to the balance."""
        self.balance = self.balance + amount

    def write_check(self, amount):
        """
        Write a check: reduce the balance by *amount* and use one check.

        :param amount: value of the check
        :raises BalanceError: if the balance would go negative
        :raises OutOfChecksError: if no checks remain
        """
        if self.balance - amount < 0:
            raise BalanceError("Balance cannot be negative")
        if self.check_count <= 0:
            raise OutOfChecksError("Insufficient number of checks")
        self.balance = self.balance - amount
        self.check_count = self.check_count - 1

    def display(self):
        """Print the current check count and balance."""
        print("Checks: {}, Balance: ${:.2f}".format(self.check_count, self.balance))

    def apply_for_credit(self, amount):
        """
        Obtain credit for the current account (not implemented yet).

        :param amount: requested credit amount
        """
        pass
def display_menu():
    """
    Displays the available commands.
    """
    print()
    print("Commands:")
    print("  quit - Quit")
    print("  new - Create new account")
    print("  display - Display account information")
    print("  deposit - Deposit money")  # BUGFIX: was misspelled "Desposit"
    print("  check - Write a check")
def get_more_checks(account):
    """
    Asks the user if they want more checks, and then
    adds them to the account
    :param account: CheckingAccount to top up
    :return:
    """
    more_checks = input("Would you like to buy more checks (yes/no)? ")
    # Only the exact lowercase answer "yes" buys checks; anything else is "no".
    if more_checks == "yes":
        # NOTE(review): this mutates the balance directly, bypassing the
        # BalanceError invariant enforced by write_check -- the $5 fee can
        # push the balance negative. Confirm whether that is intended.
        account.balance -= 5
        account.check_count += 25
def main():
    """
    Used to test the CheckingAccount class.

    Simple REPL: shows the menu and executes commands until "quit".
    """
    acc = None
    command = ""
    while command != "quit":
        display_menu()
        command = input("Enter a command: ")
        if command == "new":
            try:
                balance = float(input("Starting balance: "))
                num_checks = int(input("Numbers of checks: "))
                acc = CheckingAccount(balance, num_checks)
            except BalanceError as ex:
                print("Error: {}".format(str(ex)))
        elif command == "display":
            # NOTE(review): this and the branches below assume "new" ran
            # first; otherwise acc is None and an AttributeError is raised.
            acc.display()
        elif command == "deposit":
            amount = float(input("Amount: "))
            acc.deposit(amount)
        elif command == "check":
            try:
                amount = float(input("Amount: "))
                acc.write_check(amount)
            except BalanceError as ex:
                print("Error: {}".format(str(ex)))
            except OutOfChecksError as ex:
                print("Error: {}".format(str(ex)))
                # Out of checks: offer to buy more.
                get_more_checks(acc)
        # NOTE(review): "credit" works but is not listed in the menu.
        elif command == "credit":
            amount = float(input("Amount: "))
            acc.apply_for_credit(amount)


if __name__ == "__main__":
    main()
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Firewall human manager plugin.
"""
import gettext
_ = lambda m: gettext.dgettext(message=m, domain='ovirt-engine-setup')
from otopi import util
from otopi import plugin
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup import firewall_manager_base
from . import process_firewalld_services
@util.export
class Plugin(plugin.PluginBase):
    """
    Firewall human manager plugin.

    Registers a pseudo firewall manager that configures nothing itself;
    it only prints the ports the administrator must open manually.
    """

    class _HumanManager(firewall_manager_base.FirewallManagerBase):
        # Firewall "manager" that delegates all work to a human operator.
        def __init__(self, plugin):
            super(Plugin._HumanManager, self).__init__(plugin)
            self._output = []

        @property
        def name(self):
            # Identifier used to select this manager in the setup environment.
            return osetupcons.Const.FIREWALL_MANAGER_HUMAN

        def detect(self):
            # Always available: manual configuration is always possible.
            return True

        def selectable(self):
            # Never offered as an automatic choice in the selection dialog.
            return False

        def print_manual_configuration_instructions(self):
            # Render the firewalld service definitions into a sorted
            # 'protocol:port' listing and show it to the operator.
            self.plugin.dialog.note(
                text=_(
                    'The following network ports should be opened:\n'
                    '{ports}'
                ).format(
                    ports='\n'.join(
                        sorted(
                            process_firewalld_services.Process.getInstance(
                                environment=self.environment,
                            ).parseFirewalld(
                                format=' {protocol}:{port}\n',
                            ).splitlines()
                        )
                    ) + '\n'
                ),
            )

    @plugin.event(
        stage=plugin.Stages.STAGE_SETUP,
        before=(
            osetupcons.Stages.KEEP_ONLY_VALID_FIREWALL_MANAGERS,
        ),
    )
    def _setup(self):
        # Register the human manager before invalid managers are filtered out.
        self.environment[
            osetupcons.ConfigEnv.FIREWALL_MANAGERS
        ].append(Plugin._HumanManager(self))
# vim: expandtab tabstop=4 shiftwidth=4
|
from matplotlib import patches as mpatches
class WellComponent:
    """Component of a well, e.g. casing, screen.

    Args:
        top (float): top depth
        bottom (float): bottom depth
        diameter (float): diameter
    """

    def __init__(self, top: float, bottom: float, diameter: float):
        self.top = top
        self.bottom = bottom
        self.diameter = diameter
        # Default horizontal extent used when drawing, as fractions of the
        # plot width (Well.plot may override per diameter).
        self.left_diameter_fraction, self.right_diameter_fraction = 0.25, 0.75
class Casing(WellComponent):
    # Solid (blank) casing: drawn as a filled black rectangle.
    facecolor = "k"
    fill = True
class SlottedCasing(WellComponent):
    # Slotted casing: unfilled rectangle with '/' hatching.
    facecolor = "k"
    fill = False
    hatch_density = 3
    hatch_symbol = "/"

    @property
    def hatch(self):
        # Repeating the symbol increases matplotlib's hatch density.
        return self.hatch_symbol * self.hatch_density

    def get_left_artist(self):
        # NOTE(review): seg_from, pipe_width, seg_length, hatch and t are not
        # defined in this scope, so calling this raises NameError. It looks
        # like an unfinished extraction from a plotting routine; presumably
        # these should be parameters (and 'hatch' should be self.hatch).
        # TODO confirm and complete.
        return mpatches.Rectangle(
            (1 / 4, seg_from),
            pipe_width * 0.9,
            seg_length,
            facecolor="k",
            fill=False,
            hatch=hatch,
            transform=t,
        )
class WirewoundScreen(WellComponent):
    # Wire-wound screen: unfilled rectangle with '-' hatching.
    facecolor = "k"
    fill = False
    hatch_density = 3
    hatch_symbol = "-"

    @property
    def hatch(self):
        # Repeating the symbol increases matplotlib's hatch density.
        return self.hatch_symbol * self.hatch_density

    def get_left_artist(self):
        # NOTE(review): seg_from, pipe_width, seg_length, hatch and t are not
        # defined in this scope, so calling this raises NameError -- same
        # unfinished extraction as SlottedCasing.get_left_artist. TODO
        # confirm and complete.
        return mpatches.Rectangle(
            (1 / 4, seg_from),
            pipe_width * 0.9,
            seg_length,
            facecolor="k",
            fill=False,
            hatch=hatch,
            transform=t,
        )
class Well:
    """A well assembled from WellComponents.

    diameters_to_fractions maps the count of distinct diameters to the
    (left, right) width fractions used when drawing each diameter.
    """

    diameters_to_fractions = {
        1: ((0.25, 0.75), ),
        2: ((0.2, 0.8), (0.3, 0.7)),
    }

    def __init__(self, components):
        self.components = components

    @property
    def diameters_mapping(self):
        # Group components by diameter: {diameter: [components...]}.
        diameters = {c.diameter: [] for c in self.components}
        for c in self.components:
            diameters[c.diameter].append(c)
        return diameters

    @property
    def diameters(self):
        # Sorted list of all component diameters (duplicates retained).
        return sorted(tuple([c.diameter for c in self.components]))

    # NOTE(review): plot() below is unfinished -- the for loop has no body,
    # which is a syntax error. Complete or remove before this module is used.
    def plot(self):
        # Go through and set diameter fractions on the whole well.
        for diameter in self.diameters:
|
import math
def isReceiving(transactionAmount):
    '''
    isReceiving returns true if a transaction was a return
    Integer transactionAmount
    Returns None for a zero amount (should not happen).
    '''
    if transactionAmount == 0:
        return None # should not happen
    return transactionAmount > 0
def isPaying(transactionAmount):
    '''
    isPaying returns true if a transaction was a payment
    Integer transactionAmount
    Returns None for a zero amount (should not happen).
    '''
    return None if transactionAmount == 0 else transactionAmount < 0
'''
getAbsoluteAmount returns the absolute value of a relative transaction amount
Integer transactionAmount
'''
def getAbsoluteAmount(transactionAmount):
    # math.fabs always returns a float (e.g. -1 -> 1.0), even for int input.
    return math.fabs(transactionAmount)
'''
checks if a String represents a Fractional or Integral
'''
def isNumber(str):  # parameter name kept for interface compatibility, even though it shadows the builtin
    """
    Return True if *str* is a plain decimal number: digits with at most one
    interior '.' (no sign, no exponent). Leading/trailing dots and the empty
    string are rejected.
    """
    # BUGFIX: the original indexed str[0] unconditionally, raising IndexError
    # on an empty string.
    if not str:
        return False
    if (str[0] == '.' or str[len(str) - 1] == '.'):
        return False
    foundFloatingPoint = False
    for digit in str:
        if not digit.isdigit():
            if (digit == '.'):
                if (foundFloatingPoint):
                    return False  # more than one decimal point
                foundFloatingPoint = True
            else:
                return False  # neither a digit nor a point
    return True
def isValidTelegramUsername(string):
    '''
    accepted characters: A-z (case-insensitive), 0-9 and underscores.
    length: 5-32 characters.
    '''
    # Guard clause: reject out-of-range lengths up front.
    if not 5 <= len(string) <= 32:
        return False
    return all(char.isalpha() or char.isdigit() or char == '_' for char in string)
'''
tests
'''
def main():
    # Smoke test: print sample results from each helper.
    # NOTE(review): getAbsoluteAmount(-1) is printed twice; the second call
    # was probably meant to be getAbsoluteAmount(1).
    print(isPaying(-1), isPaying(1), isReceiving(-1), isReceiving(1), getAbsoluteAmount(-1), getAbsoluteAmount(-1))


if __name__ == '__main__':
    main()
|
from flask_wtf import FlaskForm
from wtforms import StringField, HiddenField, RadioField
from wtforms.validators import DataRequired, Email
from app.design_system_fields import DSCheckboxField, DSRadioField
class LoginForm(FlaskForm):
    # Email-format validation only; the Email validator also rejects empties.
    email = StringField(label="Email address", validators=[Email()])
# The four empty forms below exist only to give their endpoints FlaskForm's
# CSRF token generation/validation; they carry no fields of their own.
class LoginWithPayloadForm(FlaskForm):
    # Used only as a hook into FlaskForm's CSRF protection
    pass


class DeleteAccountForm(FlaskForm):
    # Used only as a hook into FlaskForm's CSRF protection
    pass


class DeleteProductSignoffForm(FlaskForm):
    # Used only as a hook into FlaskForm's CSRF protection
    pass


class ToggleChecklistFeatureForm(FlaskForm):
    # Used only as a hook into FlaskForm's CSRF protection
    pass
class ChooseGithubRepoForm(FlaskForm):
    # Multi-select: a user may connect several repositories at once.
    repo_choice = DSCheckboxField(label="Choose the repositories to connect with", coerce=int)

    def __init__(self, repos=None, *args, **kwargs):
        """Populate the checkbox choices from *repos* (objects with .id and .fullname)."""
        super().__init__(*args, **kwargs)
        if repos is None:
            repos = []
        self.repo_choice.choices = [(r.id, r.fullname) for r in repos]
class TransferGithubRepoForm(FlaskForm):
    # Single-select: only one repository can be transferred at a time.
    repo_choice = DSRadioField(label="Transfer repositories to your account", coerce=int)

    def __init__(self, repos=None, *args, **kwargs):
        """Populate the radio choices from *repos* (objects with .id and .fullname)."""
        super().__init__(*args, **kwargs)
        if repos is None:
            repos = []
        self.repo_choice.choices = [(r.id, r.fullname) for r in repos]
class AuthorizeTrelloForm(FlaskForm):
    # Free-text field for pasting the token from Trello's authorize flow.
    trello_integration = StringField(label="Trello Authorisation Token", validators=[DataRequired()])
class ChooseTrelloBoardForm(FlaskForm):
    # Single-select: one Trello board per product.
    board_choice = DSRadioField(label="Choose your board", validators=[DataRequired()])

    def __init__(self, boards=None, *args, **kwargs):
        """Populate the radio choices from *boards* (objects with .id and .name)."""
        super().__init__(*args, **kwargs)
        if boards is None:
            boards = []
        self.board_choice.choices = [(board.id, board.name) for board in boards]
class ChooseTrelloListForm(FlaskForm):
    """Radio-select of a single Trello list."""
    list_choice = DSRadioField(label="Choose your list", validators=[DataRequired()])
    def __init__(self, lists=None, *args, **kwargs):
        """Populate choices from *lists* (objects exposing .id and .name)."""
        super().__init__(*args, **kwargs)
        if lists is None:
            lists = []
        # Renamed loop variable from 'l' (PEP 8 / E741: 'l' is easily
        # confused with '1'); behavior unchanged.
        self.list_choice.choices = [(lst.id, lst.name) for lst in lists]
|
from flask import Flask, request, render_template
from flask_sslify import SSLify
from flask_cors import CORS
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson import ApiException
from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, RelationsOptions
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from os import urandom, environ
import datetime as dt
import json
import tables_wks
'''
Flask Server Configuration
'''
flask_server = Flask(__name__)
# Random per-process secret key: sessions/CSRF tokens become invalid on
# every restart -- acceptable for this stateless demo app.
flask_server.config['SECRET_KEY'] = urandom(16)
CORS(flask_server)
#SSLify(flask_server)
'''
Flask Server Routes
'''
@flask_server.route("/", methods=['GET', 'POST'])
def root():
    """GET: render the input form.

    POST: run an IBM Watson NLU entities/relations analysis on the
    submitted text and render the result tables.
    """
    # Timestamp shifted to UTC-3 -- presumably local (Brazil) time; confirm.
    now = dt.datetime.utcnow() - dt.timedelta(hours=3)
    now_str = dt.datetime.strftime(now, "%H:%M:%S - %d/%m/%Y")
    idented_resp = None  # NOTE(review): assigned but never used
    idented_models = None  # indented JSON dump of the account's NLU models
    msg = None  # error message from a failed NLU call, shown on the form page
    if request.method == 'POST':
        #print(request.form)
        try:
            # Extract form data
            nlu_apikey = request.form['nlu_apikey']
            nlu_url = request.form['nlu_url']
            custom_model = request.form['custom_model']
            limit = request.form['limit']
            # NOTE(review): form values arrive as strings; EntitiesOptions
            # mentions/sentiment/emotion presumably expect booleans -- confirm.
            mentions = request.form['mentions']
            sentiment = request.form['sentiment']
            emotion = request.form['emotion']
            data = request.form['data']
            flag = "both"  # NOTE(review): assigned but never used
            # Build NLU object:
            authenticator = IAMAuthenticator(nlu_apikey)
            nlu_object = NaturalLanguageUnderstandingV1(version='2019-07-12', authenticator=authenticator)
            nlu_object.set_service_url(nlu_url)
            # List models:
            model_list = nlu_object.list_models().get_result()
            if len(model_list['models']) != 0:
                idented_models = json.dumps(model_list, indent=2)
            #print(model_list)
            #print(type(model_list))
            #print(model_list['models'])
            #print(type(model_list['models']))
            # Execute API Call to NLU
            if custom_model != "default":
                # Custom model: pass it to both entity and relation extraction.
                response = nlu_object.analyze(
                    text=data,
                    features=Features(
                        entities=EntitiesOptions(limit=int(limit), mentions=mentions, model=custom_model, sentiment=sentiment, emotion=emotion),
                        relations=RelationsOptions(model=custom_model)
                    )
                ).get_result()
                print(json.dumps(response, indent=2))
                # Entities Table
                entities_table = tables_wks.gen_entities_table_custom(response["entities"])
                # Relations Table
                relations_table = tables_wks.gen_relations_table(response["relations"])
            else:
                # Default model: no model argument.
                response = nlu_object.analyze(
                    text=data,
                    features=Features(
                        entities=EntitiesOptions(limit=int(limit), mentions=mentions, sentiment=sentiment, emotion=emotion),
                        relations=RelationsOptions()
                    )
                ).get_result()
                print(json.dumps(response, indent=2))
                #print(json.dumps(response, indent=2))
                #print(response["entities"])
                #print(response["relations"])
                # Entities Table
                entities_table = tables_wks.gen_entities_table(response["entities"])
                # Relations Table
                relations_table = tables_wks.gen_relations_table(response["relations"])
        except ApiException as ex:
            msg = ex.message
        finally:
            # NOTE(review): returning from 'finally' swallows any exception
            # other than ApiException; when such an exception fires before
            # the tables are built, the success branch below raises
            # UnboundLocalError on relations_table/entities_table -- confirm.
            if msg != None:
                return render_template(
                    'form.html',
                    msg=msg,
                    timestamp=now_str
                )
            else:
                return render_template(
                    'result.html',
                    relations_table = relations_table,
                    entities_table = entities_table,
                    data=data,
                    model=custom_model,
                    model_list=idented_models,
                    msg=msg,
                    timestamp=now_str
                )
    else:
        return render_template('form.html')
'''
Main
'''
if __name__ == '__main__':
    # Bind on all interfaces; port from the PORT env var (PaaS convention)
    # with a local default of 5000.
    flask_server.run(
        host="0.0.0.0",
        port=int(environ.get("PORT", 5000)),
        debug=False
    )
|
from microbit import *
def clear_line(line):
    """Turn off all five pixels of display row *line* (y = line, x = 0..4)."""
    # One loop instead of five copy-pasted set_pixel calls.
    for x in range(5):
        display.set_pixel(x, line, 0)
def clear_row(row):
    """Turn off all five pixels with x = *row* (display.set_pixel(x, y, v),
    so despite the name this clears a vertical column)."""
    # One loop instead of five copy-pasted set_pixel calls.
    for y in range(5):
        display.set_pixel(row, y, 0)
time = 0
target = 1000
display_time = 0
x_sec = -1
y_sec = 0
val_sec = 4
x_sec_ten = -1
y_sec_ten = 1
val_sec_ten = 9
x_min = -1
y_min = 2
val_min = 4
x_min_ten = -1
y_min_ten = 3
val_min_ten = 9
x_hour = -1
y_hour = 4
val_hour = 4
debounce = 0
start_stop = 0
while True:
if debounce == 0:
if button_a.is_pressed() == True:
debounce = 1
sleep(100)
start_stop = start_stop + 1
if start_stop >= 2:
start_stop = 0
if debounce == 0:
if start_stop == 0:
if button_b.is_pressed() == True:
debounce = 1
sleep(100)
start_stop = 0
x_sec = -1
y_sec = 0
val_sec = 4
x_sec_ten = -1
y_sec_ten = 1
val_sec_ten = 9
x_min = -1
y_min = 2
val_min = 4
x_min_ten = -1
y_min_ten = 3
val_min_ten = 9
x_hour = -1
y_hour = 4
val_hour = 4
display.clear()
display.scroll("Stopwatch reset !!!", delay=100)
if debounce == 1:
if button_a.is_pressed() == False:
debounce = 0
sleep(100)
if start_stop == 1:
if (running_time() - time) >= target:
time = running_time()
display_time = display_time + 1
if val_sec == 4:
x_sec = x_sec + 1
if x_sec >= 0:
display.set_pixel(x_sec, y_sec, val_sec)
if x_sec == 4:
x_sec = -2
val_sec = 9
if val_sec == 9:
x_sec = x_sec + 1
if x_sec >= 0:
display.set_pixel(x_sec, y_sec, val_sec)
if x_sec == 4:
x_sec = -1
val_sec = 4
x_sec_ten = x_sec_ten + 1
clear_line(0)
if x_sec_ten < 5:
display.set_pixel(x_sec_ten, y_sec_ten, val_sec_ten)
if x_sec_ten == 5:
x_sec_ten = -1
clear_line(1)
if val_min == 4:
x_min = x_min +1
if x_min >= 0:
display.set_pixel(x_min, y_min, val_min)
if x_min == 4:
x_min = -2
val_min = 9
if val_min == 9:
x_min = x_min +1
if x_min >= 0:
display.set_pixel(x_min, y_min, val_min)
if x_min == 4:
x_min = -1
val_min = 4
x_min_ten = x_min_ten + 1
clear_line(2)
if x_min_ten < 5:
display.set_pixel(x_min_ten, y_min_ten, val_min_ten)
if x_min_ten == 5:
x_min_ten = -1
clear_line(3)
if val_hour == 4:
x_hour = x_hour +1
if x_hour >= 0:
display.set_pixel(x_hour, y_hour, val_hour)
if x_hour == 4:
x_hour = -2
val_hour = 9
if val_hour == 9:
x_hour = x_hour +1
if x_hour >= 0:
display.set_pixel(x_hour, y_hour, val_hour)
if x_hour == 4:
x_sec = -1
y_sec = 0
val_sec = 4
x_sec_ten = -1
y_sec_ten = 1
val_sec_ten = 9
x_min = -1
y_min = 2
val_min = 4
x_min_ten = -1
y_min_ten = 3
val_min_ten = 9
x_hour = -1
y_hour = 4
val_hour = 4
display.clear()
display.scroll("Stopwatch reset !!!", delay=100) |
#!/usr/bin/python
from auvlib.data_tools import std_data, gsf_data, xtf_data, csv_data, utils
from auvlib.bathy_maps import mesh_map, patch_draper, sss_gen_sim
import sys
import os
import numpy as np
import math
def create_mesh(path):
    """Parse a GSF multibeam file and build a bathymetry mesh and height map.

    Returns (V, F, height_map, bounds): mesh vertices and faces, the gridded
    height map, and the spatial bounds, all at resolution 0.5 (units per
    auvlib -- presumably metres; confirm).
    """
    gsf_pings = utils.parse_or_load_gsf(path)
    mbes_pings = gsf_data.convert_pings(gsf_pings)
    V, F, bounds = mesh_map.mesh_from_pings(mbes_pings, 0.5)
    # bounds is recomputed here; both calls use the same pings/resolution.
    height_map, bounds = mesh_map.height_map_from_pings(mbes_pings, 0.5)
    return V, F, height_map, bounds
def match_or_load_xtf(xtf_path, csv_path):
    """Load sidescan pings matched to navigation, with an on-disk cache.

    On first run, parses the XTF pings and CSV nav entries, matches them,
    and caches the result in ./matched_cache.cereal; later runs read the
    cache directly.  NOTE: the cache is keyed only by file existence -- it
    is NOT invalidated when xtf_path/csv_path change.
    """
    if os.path.exists("matched_cache.cereal"):
        xtf_pings = xtf_data.xtf_sss_ping.read_data("matched_cache.cereal")
    else:
        xtf_pings = utils.parse_or_load_xtf(xtf_path)
        nav_entries = utils.parse_or_load_csv(csv_path)
        xtf_pings = csv_data.convert_matched_entries(xtf_pings, nav_entries)
        xtf_data.write_data(xtf_pings, "matched_cache.cereal")
    return xtf_pings
# Sidescan simulation demo: mesh from GSF (argv[1]), XTF pings (argv[2])
# matched against nav CSV (argv[3]), sound-speed profile (argv[4]).
sensor_yaw = 5.*math.pi/180.  # sidescan yaw offset: 5 degrees, in radians
# Sensor position relative to the vehicle -- frame/units per auvlib, TODO confirm.
sensor_offset = np.array([2., -1.5, 0.])
V, F, height_map, bounds = create_mesh(sys.argv[1])
xtf_pings = match_or_load_xtf(sys.argv[2], sys.argv[3])
xtf_pings = xtf_data.correct_sensor_offset(xtf_pings, sensor_offset)
sound_speeds = csv_data.csv_asvp_sound_speed.parse_file(sys.argv[4])
Vb, Fb, Cb = patch_draper.get_vehicle_mesh()
# Interactive viewer: ray tracing and waterfall-based SSS disabled here.
viewer = sss_gen_sim.SSSGenSim(V, F, xtf_pings, bounds, sound_speeds, height_map)
viewer.set_sidescan_yaw(sensor_yaw)
viewer.set_vehicle_mesh(Vb, Fb, Cb)
viewer.set_ray_tracing_enabled(False)
viewer.set_sss_from_waterfall(False)
viewer.show()
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
# This script is an experimental feature. Use at your own risk.
# Contributions are welcome.
import numpy as np
import onnxruntime as onnxrt
# Type-compatibility groups used by DataFrameTool._validate_type: an ONNX
# input of any float (resp. int) element type accepts a pandas column of any
# float (resp. int) dtype listed here.
ort_float_set = set([np.float32, np.float64])
pd_float_set = set(['float64'])
ort_int_set = set([np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64])
pd_int_set = set(['int64'])
# Map ONNX tensor element-type strings to numpy dtypes.
# Fix: np.bool and np.object were deprecated aliases removed in NumPy 1.24+
# (they raised AttributeError); np.bool_ / np.object_ are the canonical
# names for the same dtypes, so behavior is unchanged on older NumPy too.
types_dict = {
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(bool)': np.bool_,
    'tensor(string)': np.object_
}
class DataFrameTool():
    """
    This is a utility class used to run a model with pandas.DataFrame input
    """
    def __init__(self, model_path, sess_options=None):
        """
        :param model_path: path to the model to be loaded
        :param sess_options: see onnxruntime.SessionOptions
        """
        self._model_path = model_path
        self._sess_options = sess_options
        self._sess = onnxrt.InferenceSession(self._model_path, self._sess_options)
    def _reshape_input(self, input_array, expected_shape):
        """
        Reshape a rank-1 array (obtained from a DataFrame column) to the
        shape the model expects.
        :param input_array: numpy array, expected to have a rank of 1.
        :param expected_shape: shape fetched from the model, which may include
            dynamic elements. It may at most have one -1, None or zero, which
            will be computed from the size of input_array. None and zeros are
            replaced with -1 and np.ndarray.reshape resolves it.
        """
        # expected_shape rank is one: let onnxruntime deal with it as-is
        if len(expected_shape) == 1:
            return input_array
        inferred_shape = [dim if dim else -1 for dim in expected_shape]
        return input_array.reshape(inferred_shape)
    def _validate_type(self, input_meta, col_type):
        """
        :param input_meta: meta info obtained from the model for the given input
        :param col_type: dtype of the column
        :raises TypeError: if the column cannot be fed to the input
        float16 and bool will always require an exact match.
        We attempt to convert any type to a string if it is required.
        With strings we always want to put this into a flat array, cast to
        np.object and then reshape as object.
        Any other type to qualify for casting must match either integer or
        floating point types.
        """
        expected_type = types_dict[input_meta.type]
        if input_meta.type == 'tensor(string)':
            return
        elif expected_type == col_type:
            return
        elif expected_type in ort_float_set and str(col_type) in pd_float_set:
            return
        elif expected_type in ort_int_set and str(col_type) in pd_int_set:
            return
        raise TypeError("Input {} requires type {} unable to cast column type {} ".format(
            input_meta.name, expected_type, col_type))
    def _process_input_list(self, df, input_metas, require):
        """
        Return a dictionary of input_name : a typed and shaped np.array of values for a given input_meta
        The function does the heavy lifting for _get_input_feeds()
        :param df: See :class:`pandas.DataFrame`.
        :param input_metas: a list of name/type pairs
        :param require: a boolean. If True this helper throws on a missing input.
        """
        feeds = {}
        # Process mandatory inputs. Raise an error if anything is not present
        for input_meta in input_metas:
            # We fully expect all the types are in the above dictionary
            assert input_meta.type in types_dict, "Update types_dict for the new type"
            if input_meta.name in df.columns:
                self._validate_type(input_meta, df[input_meta.name].dtype)
                # With strings we must cast first to np.object then reshape,
                # so we do it for everything
                input_array = np.array(df[input_meta.name]).astype(types_dict[input_meta.type])
                feeds[input_meta.name] = self._reshape_input(input_array, input_meta.shape)
            elif require:
                raise RuntimeError(
                    "This model requires input {} of type {} but it is not found in the DataFrame".format(
                        input_meta.name, types_dict[input_meta.type]))
        return feeds
    def _get_input_feeds(self, df, sess):
        """
        Return a dictionary of input_name : a typed and shaped np.array of values
        This function accepts Pandas DataFrame as the first argument and onnxruntime
        session with a loaded model. The function interrogates the model for the inputs
        and matches the model input names to the DataFrame instance column names.
        It requires exact matches for bool and float16 types. It attempts to convert to
        string any input type if string is required.
        It attempts to convert floating types to each other and does the same for all of the
        integer types without requiring an exact match.
        :param df: See :class:`pandas.DataFrame`. The function only considers the first row (0) of each column
            and feeds the data to the appropriate model inputs.
        :param sess: See :class:`onnxruntime.InferenceSession`.
        ::
            For example: pd.DataFrame([[0], [4],[20]],index=[0], columns=['A', 'B', 'C'])
        """
        if df.empty:
            raise RuntimeError('input DataFrame is empty')
        # Process mandatory inputs. Raise an error if anything is not present
        feeds = self._process_input_list(df, sess.get_inputs(), True)
        # Process optional overridable initializers. If present the initializer value
        # is overridden by the input. If not, the initializer value embedded in the model takes effect.
        initializers = self._process_input_list(df, sess.get_overridable_initializers(), False)
        feeds.update(initializers)
        return feeds
    def execute(self, df, output_names, run_options=None):
        """
        Compute the predictions; return a list of output values restricted
        to output_names if not empty.
        (Fix: the original had a stray string statement before this
        docstring, so only the short line acted as the docstring.)
        :param df: See :class:`pandas.DataFrame`.
        :param output_names: name of the outputs that we are interested in
        :param run_options: See :class:`onnxruntime.RunOptions`.
        ::
            sess.run([output_name], {input_name: x})
        """
        input_feed = self._get_input_feeds(df, self._sess)
        return self._sess.run(output_names, input_feed, run_options)
|
import re
import os
import csv
import ast
import json
import mosspy
import pymongo
import requests
from pprint import pprint
# Local MongoDB: raw submissions and computed scores live in two
# collections of da_database.
client = pymongo.MongoClient()
db = client.da_database['spring97-ca3']
scores = client.da_database['spring97-ca3-scores']
problems = db.distinct('challenge')
users = db.distinct('hacker_username')
with open('config.json', 'r') as f:
    config = json.load(f)
mossServer = {}  # NOTE(review): assigned but never used in this file
# Set of headers from a request after login to HackerRank
# This script doesn't support automatic login to browser (yet). So you should set any request's
# headers after login here to make it able to retrieve submissions.
headers = {
"Host": "www.hackerrank.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:60.0) Gecko/20100101 Firefox/60.0",
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
"Referer": "https://www.hackerrank.com/contests/ut-da-spring97-ca3/challenges/da-spring97-2/submissions/code/1307528564",
"X-CSRF-Token": "ccZ6GZpUXxEBOacTjN1yvjCy9/0FZE3tu190NAX2rfFzUZSrBQupi1ijjt8f742BqHBUGH8ATQZx5SZxNl+fsw==",
"X-Request-Unique-Id": "1eckk0fp6",
"X-Requested-With": "XMLHttpRequest",
"Cookie": "hackerrank_mixpanel_token=64b69f6c-6517-406b-83b3-0048cd204876; h_r=community_home; h_l=in_app; h_v=log_in; __utma=74197771.1818293894.1524200376.1530176350.1530210593.19; __utmz=74197771.1525810345.9.4.utmcsr=cecm.ut.ac.ir|utmccn=(referral)|utmcmd=referral|utmcct=/mod/forum/discuss.php; optimizelyEndUserId=oeu1524200377524r0.6940224341179795; optimizelySegments=%7B%221709580323%22%3A%22false%22%2C%221717251348%22%3A%22ff%22%2C%221719390155%22%3A%22referral%22%2C%222308790558%22%3A%22none%22%7D; optimizelyBuckets=%7B%7D; _hp2_id.698647726=%7B%22userId%22%3A%226940524239390275%22%2C%22pageviewId%22%3A%227056858165071794%22%2C%22sessionId%22%3A%228103777451893119%22%2C%22identity%22%3Anull%2C%22trackerVersion%22%3A%224.0%22%7D; _biz_uid=2ffe511894644395ec2b72059ea8ff6a; _biz_nA=122; _biz_pendingA=%5B%5D; hacker_editor_theme=light; enableIntellisenseUserPref=true; ut-ce-da-spring97_crp=*nil*; ut-da-spring97-ca3_crp=*nil*; mp_bcb75af88bccc92724ac5fd79271e1ff_mixpanel=%7B%22distinct_id%22%3A%20%2264b69f6c-6517-406b-83b3-0048cd204876%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.hackerrank.com%2Fadministration%2Fcontests%2Fedit%2F33496%2Foverview%22%2C%22%24initial_referring_domain%22%3A%20%22www.hackerrank.com%22%7D; mp_86cf4681911d3ff600208fdc823c5ff5_mixpanel=%7B%22distinct_id%22%3A%20%22162e2c4dd441870-02a44e2de1f4c-495861-13c680-162e2c4dd4614eb%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.hackerrank.com%2Fadministration%2Fcontests%2Fedit%2F33496%2Foverview%22%2C%22%24initial_referring_domain%22%3A%20%22www.hackerrank.com%22%7D; default_cdn_url=hrcdn.net; _hrank_session=c4b194d8ce6acc079886885666d5e97be3e094e4aa63e844f24cc443a86dbc489cd619b6f158a26b6504821c82715ac9cc35b03d17d4130829fec6f9fd5258ef; session_id=7wpazrnf-1530173687097; cdn_url=hrcdn.net; cdn_set=true; __utmc=74197771; 
remember_hacker_token=BAhbCFsGaQOpCiNJIhlmNEVRWThnbDRKMzlDVkFQU1BrSAY6BkVUSSIXMTUzMDE3MzcwMy4wODYxMjgyBjsARg%3D%3D--0b19088618d0ffb29caf5013334d51af367c96cf; metrics_user_identifier=230aa9-b30006d00d5dcf5983f6d3b0b2913f257aeafd09; react_var=false__trm6; react_var2=true__trm6; web_browser_id=6212849f7c4be88d360d684d7b3c54cf; _biz_flagsA=%7B%22Version%22%3A1%2C%22XDomain%22%3A%221%22%7D",
"Connection": "keep-alive",
}
def makeNormal(data):
    """Round-trip *data* through JSON to turn Mongo documents into plain
    Python literals; falsy input yields None."""
    return ast.literal_eval(json.dumps(data)) if data else None
def init():
    """Create one output directory per contest challenge (module-level
    'problems' list)."""
    for p in problems:
        makeDirs(p['name'])
def makeDirs(path):
    """Create *path* (including parents) if it does not already exist."""
    # exist_ok=True removes the check-then-create race of the original
    # os.path.exists() guard and is a no-op when the directory exists.
    os.makedirs(path, exist_ok=True)
def loadData(dataPath = './data.json'):
    """Bulk-insert a JSON dump of submissions into the module-level 'db'
    collection."""
    with open(dataPath) as f:
        data = json.load(f)
        db.insert_many(data)
def getCode(submissionId):
    """Fetch a submission's (code, language) pair from the HackerRank REST API.

    Relies on the logged-in session 'headers' and 'config' defined above.
    Curly quotes (U+2018/U+2019) are normalized to ASCII apostrophes.
    """
    URL = 'https://www.hackerrank.com/rest/contests/{}/submissions/{}?&_=1530252865797'
    res = requests.get(URL.format(config['contest'], submissionId), headers=headers).json()
    return res['model']['code'].replace(u"\u2018", "'").replace(u"\u2019", "'"), res['model']['language']
def getLangExtension(language):
    """Map a HackerRank language identifier to a source-file extension.

    Returns None for languages this script does not handle (same as the
    original's implicit fall-through).
    """
    if language == 'c':
        return '.c'
    if language.startswith('c'):
        # cpp / c++ variants -- anything else starting with 'c'
        return '.cpp'
    if language.startswith('java'):
        return '.java'
    if language.startswith('python'):
        return '.py'
    return None
def findBestSubmission(user, problem):
    """Return *user*'s best in-contest submission for *problem*, or None.

    "Best" = highest score, ties broken by the latest submission time
    (both sort keys descending); only submissions made before
    config['contestEndTime'] are considered.
    """
    submission = db.find_one(
        {
            "hacker_username": user,
            "challenge": problem,
            "time_from_start": {
                "$lte": config['contestEndTime']
            }
        },
        projection={
            '_id': False,
            'id': True,
            'language': True,
            'time_from_start': True,
            'hacker_username': True,
            'score': True,
            'challenge': True,
        },
        sort=[('score', pymongo.DESCENDING), ('time_from_start', pymongo.DESCENDING)]
    )
    return submission
def saveSubmission(submission):
    """Download a submission's source and write it to '<challenge>/<user><ext>'.

    Fix: the code is fetched *before* the output file is opened, so a failed
    download no longer leaves an empty file behind (the original opened the
    file first). The 'with' block also guarantees the handle is closed.
    """
    challenge = submission['challenge']['name']
    username = submission['hacker_username']
    path = challenge + '/' + username + getLangExtension(submission['language'])
    code, language = getCode(submission['id'])
    with open(path, 'w') as f:
        f.write(code)
def computeScore(user):
    """Collect *user*'s best score per problem, saving each best submission's
    source to disk as a side effect. Problems with no submission are skipped,
    so the returned list may be shorter than 'problems'."""
    res = []
    for p in problems:
        s = makeNormal(findBestSubmission(user, p))
        if s:
            res.append(s['score'])
            saveSubmission(s)
    return res
def sendToMoss():
    """Submit every problem/language batch of saved sources to MOSS for
    plagiarism detection and download the reports.

    NOTE(review): uses Python 2 print statements -- this whole script
    requires Python 2 as written.
    """
    for p in problems:
        for l in config['languages']:
            moss = mosspy.Moss(config['userid'], l)
            moss.addFilesByWildcard('./{}/*.{}'.format(p['name'], getLangExtension(l)))
            url = moss.send()
            print "Moss url: ", url
            print p['name'], l
            # Seems buggy from mosspy project
            mosspy.download_report(url, 'Plagiarism/{}/{}/'.format(p['name'], l), connections=8)
def main():
    """Score every contestant, write a CSV of scores, store them in Mongo,
    then run the MOSS plagiarism pass. (Python 2 print statements.)"""
    print users
    init()
    finalResult = []
    with open(config['outputPath'], "w") as output:
        writer = csv.writer(output, lineterminator='\n')
        for u in users:
            s = computeScore(u)
            writer.writerow([u] + s)
            scores.insert_one({'username': u, 'score': s})
            finalResult.append([u] + s)
    pprint(finalResult)
    sendToMoss()
if __name__ == '__main__':
    main()
|
"""
Autogen example.
"""
from os import path
def grade(autogen, key):
    """Grade a submitted *key* against this instance's private flag.

    :param autogen: problem instance providing get_instance_path()
    :param key: the submitted flag string
    :return: (correct: bool, message: str)
    """
    key_path = path.join(autogen.get_instance_path(public=False), "private_key")
    # 'with' guarantees the file is closed even if read() raises
    # (the original used open/close with no error handling).
    with open(key_path) as f:
        flag = f.read().strip()
    # NOTE(review): key == "test" is a universal accept -- presumably a
    # development backdoor; confirm it is removed before deployment.
    if flag == key or key == "test":
        return (True, "Autogen!")
    else:
        return (False, ":<")
|
"""LAF handling of local Lones requests"""
import http.client
import logging
from laf.server.app import error
from laf.server.app import handler
from laf.server.app import loneutils
__all__ = ['handler']
_LOG = logging.getLogger(__name__)
def local_handler(lone, requests, configdict, luser, lhost):
    """
    handles local requests

    Validates *requests* against the lone's JSON schema, processes each one
    through handler.process_req, and returns the list of responses; any
    non-200/204 result is wrapped into an APIError payload.
    """
    results = []
    (accept, major_version, schemafile) = loneutils.get_accept_header(
        lone, configdict['basedir'])
    # validate request
    final_requests = loneutils.jsonschema_validation(
        configdict['basedir'],
        schemafile,
        accept,
        requests,
        luser,
        lhost)
    for request in final_requests:
        (resp, status_code) = handler.process_req(configdict,
                                                  lone,
                                                  request,
                                                  major_version)
        # Convert anything other than OK/NO_CONTENT into a structured error.
        if status_code not in [http.client.OK, http.client.NO_CONTENT]:
            err_object = error.APIError(resp,
                                        status_code,
                                        request.lone,
                                        request.verb,
                                        request.pk,
                                        request.obj,
                                        luser,
                                        lhost)
            resp = err_object.error_message()
        results.append(resp)
    return results
|
import fechbase
class Records(fechbase.RecordsBase):
    """Field layout for an FEC transfer record type.

    Each entry maps a human-readable field name to its position marker
    ('number') in the fixed-order record; hash_names() (from RecordsBase)
    builds the name lookup table.
    """
    def __init__(self):
        fechbase.RecordsBase.__init__(self)
        self.fields = [
            {'name': 'FORM TYPE', 'number': '1'},
            {'name': 'FEC COMMITTEE ID NUMBER', 'number': '2'},
            {'name': 'OF ACCOUNT', 'number': '3-'},
            {'name': 'OF RECEIPT', 'number': '4-'},
            {'name': 'TOTAL AMOUNT TRANSFERED', 'number': '5'},
            {'name': 'ADMIN/VOTER DRIVE', 'number': '6-I)'},
            {'name': 'EVENT', 'number': '7-II) a. '},
            {'name': 'ACTIVITY/EVENT NUMBER', 'number': '8'},
            {'name': 'AMOUNT TRANSFERRED FOR THIS ACTIVITY', 'number': '9'},
            {'name': 'EVENT', 'number': '10-II) b. '},
            {'name': 'ACTIVITY/EVENT NUMBER', 'number': '11'},
            {'name': 'AMOUNT TRANSFERRED FOR THIS ACTIVITY', 'number': '12'},
            {'name': 'EVENT', 'number': '13-II) c. '},
            {'name': 'ACTIVITY/EVENT NUMBER', 'number': '14'},
            {'name': 'AMOUNT TRANSFERRED FOR THIS ACTIVITY', 'number': '15'},
            {'name': 'EVENT', 'number': '16-II) d. '},
            {'name': 'ACTIVITY/EVENT NUMBER', 'number': '17'},
            {'name': 'AMOUNT TRANSFERRED FOR THIS ACTIVITY', 'number': '18'},
            {'name': 'TOT DIRECT FUNDRAISING AMOUNT', 'number': '19'},
            {'name': 'EVENT', 'number': '20-III) a. '},
            {'name': 'ACTIVITY/EVENT NUMBER', 'number': '21'},
            {'name': 'AMOUNT TRANSFERRED FOR THIS ACTIVITY', 'number': '22'},
            {'name': 'EVENT', 'number': '23-III) b. '},
            {'name': 'ACTIVITY/EVENT NUMBER', 'number': '24'},
            {'name': 'AMOUNT TRANSFERRED FOR THIS ACTIVITY', 'number': '25'},
            {'name': 'EVENT', 'number': '26-III) c. '},
            {'name': 'ACTIVITY/EVENT NUMBER', 'number': '27'},
            {'name': 'AMOUNT TRANSFERRED FOR THIS ACTIVITY', 'number': '28'},
            {'name': 'EVENT', 'number': '29-III) d. '},
            {'name': 'ACTIVITY/EVENT NUMBER', 'number': '30'},
            {'name': 'AMOUNT TRANSFERRED FOR THIS ACTIVITY', 'number': '31'},
            {'name': 'TOT EXEMPT ACTIVITY DIRECT CANDIDATE SUPPORT', 'number': '32'},
            {'name': 'AMENDED', 'number': '33'},
        ]
        self.fields_names = self.hash_names(self.fields)
|
# -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016-2021 - Sequana Development Team
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
import functools
import glob
import os
from pathlib import Path
import pkg_resources
import shutil
import subprocess
import sys
import tempfile
import click
# import click_completion
# click_completion.init()
from sequana.utils import config
from sequana import version
from sequana.iem import IEM
from sequana import GFF3
from sequana import FastQ, FastA
from sequana.rnadiff import RNADiffAnalysis, RNADesign
import colorlog
logger = colorlog.getLogger(__name__)
__all__ = ["main"]
# This is a recipe from https://stackoverflow.com/questions/48391777/nargs-equivalent-for-options-in-click
# to allow command line such as
# sequana enrichment-panther --ontologies MF BP CC
class OptionEatAll(click.Option):
    """A click Option that greedily consumes all following arguments --
    an nargs=-1 equivalent for options (see the recipe link above)."""
    def __init__(self, *args, **kwargs):
        # save_other_options=True (default): stop eating at the next option
        # prefix; False: consume everything remaining on the command line.
        self.save_other_options = kwargs.pop("save_other_options", True)
        nargs = kwargs.pop("nargs", -1)
        assert nargs == -1, "nargs, if set, must be -1 not {}".format(nargs)
        super(OptionEatAll, self).__init__(*args, **kwargs)
        self._previous_parser_process = None
        self._eat_all_parser = None
    def add_to_parser(self, parser, ctx):
        def parser_process(value, state):
            # method to hook to the parser.process
            done = False
            value = [value]
            if self.save_other_options:
                # grab everything up to the next option
                while state.rargs and not done:
                    for prefix in self._eat_all_parser.prefixes:
                        if state.rargs[0].startswith(prefix):
                            done = True
                    if not done:
                        value.append(state.rargs.pop(0))
            else:
                # grab everything remaining
                value += state.rargs
                state.rargs[:] = []
            value = tuple(value)
            # call the actual process
            self._previous_parser_process(value, state)
        retval = super(OptionEatAll, self).add_to_parser(parser, ctx)
        # Replace click's internal process hook for this option with ours,
        # keeping a reference to the original to delegate to.
        for name in self.opts:
            our_parser = parser._long_opt.get(name) or parser._short_opt.get(name)
            if our_parser:
                self._eat_all_parser = our_parser
                self._previous_parser_process = our_parser.process
                our_parser.process = parser_process
                break
        return retval
# This can be used by all commands as a simple decorator
def common_logger(func):
    """Decorator adding a common --logger level option to a click command.

    NOTE(review): the chosen level is forwarded to *func* as the 'logger'
    keyword argument; the wrapped command is expected to consume it.
    """
    @click.option(
        "--logger",
        default="INFO",
        type=click.Choice(["INFO", "DEBUG", "WARNING", "CRITICAL", "ERROR"]),
    )
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper
def get_env_vars(ctx, args, incomplete):
    """Shell-completion helper: names of environment variables that contain
    *incomplete* as a substring (ctx and args are unused)."""
    matches = []
    for key in os.environ.keys():
        if incomplete in key:
            matches.append(key)
    return matches
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
# Installed sequana pipelines are separate distributions named "sequana-*";
# append their versions to the string shown by --version.
pipelines = [
    item.key for item in pkg_resources.working_set if item.key.startswith("sequana")
]
if len(pipelines):
    version += "\nThe following pipelines are installed:\n"
    for item in pkg_resources.working_set:
        if item.key.startswith("sequana") and item.key != "sequana":
            version += "\n - {} version: {}".format(item.key, item.version)
# Root click group: subcommands (fasta, fastq, samplesheet, summary, ...)
# attach themselves with @main.command().
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option(version=version)
def main(**kwargs):
    """This is the main entry point for a set of Sequana applications.

    Pipelines such as sequana_rnaseq, sequana_variant_calling have their own
    application and help.

    In addition, more advanced tools such as sequana_taxonomy or
    sequana_coverage have their own standalone.

    To setup completion, type this command depending on your shell (bash):

    \b
        eval "$(_SEQUANA_COMPLETE=source_bash sequana)"
        eval "$(_SEQUANA_COMPLETE=source_zsh sequana)"
        eval (env _SEQUANA_COMPLETE=source_fish sequana)
    """
    pass
# =====================================================================================
# fastq-related tools
# =====================================================================================
@main.command()
@click.argument("filename", type=click.STRING, nargs=-1)
@click.option(
    "-o",
    "--output",
    help="filename where to save results. to be used with --head, --tail",
)
@click.option("--count-sequences", is_flag=True)
@click.option("--head", type=click.INT, help="number of sequences to extract from the head")
@click.option("--merge", is_flag=True, help="merge all compressed input fastq files into a single file")
@click.option("--tail", type=click.INT, help="number of reads to extract from the tail")
@click.option("--explode", is_flag=True, help="Create a fasta file for each sequence found in the original files")
def fasta(**kwargs):
    """Set of useful utilities for FastA manipulation.
    """
    # NOTE(review): --head and --tail are declared but not handled below.
    filenames = kwargs["filename"]
    # users may provide a wildcards such as "A*gz" or list of files.
    if len(filenames) == 1:
        # if existing files or glob, a glob would give the same answer.
        filenames = glob.glob(filenames[0])
    for filename in filenames:
        # NOTE(review): the return value is discarded -- a missing file is
        # silently ignored here; confirm intent.
        os.path.exists(filename)
    # could be simplified calling count_reads only once
    if kwargs["count_sequences"]:
        for filename in filenames:
            f = FastA(filename)
            Nreads = len(f)
            # NOTE(review): "(unknown)" looks like a mangled placeholder --
            # presumably {filename} was intended; confirm.
            print(f"Number of reads in (unknown): {Nreads}")
    elif kwargs["merge"]:
        # merge all input files (assuming gz extension)
        extensions = [filename.split(".")[-1] for filename in filenames]
        if set(extensions) != set(["gz"]):
            raise ValueError("Your input FastA files must be zipped")
        output_filename = kwargs["output"]
        if output_filename is None:
            logger.error("You must use --output filename.gz")
            sys.exit(1)
        if output_filename.endswith(".gz") is False:
            raise ValueError("your output file must end in .gz")
        # NOTE(review): fout and the zcat pipe are never explicitly closed.
        p1 = subprocess.Popen(["zcat"] + list(filenames), stdout=subprocess.PIPE)
        fout = open(output_filename, "wb")
        p2 = subprocess.run(["pigz"], stdin=p1.stdout, stdout=fout)
    elif kwargs["explode"]:
        for filename in filenames:
            f = FastA(filename)
            f.explode()
# =====================================================================================
# fastq-related tools
# =====================================================================================
@main.command()
@click.argument("filename", type=click.STRING, nargs=-1)
@click.option(
    "-o",
    "--output",
    help="filename where to save results. to be used with --head, --tail",
)
@click.option("--count-reads", is_flag=True)
@click.option("--head", type=click.INT, help="number of reads to extract from the head")
@click.option("--merge", is_flag=True, help="merge all compressed input fastq files into a single file")
@click.option("--tail", type=click.INT, help="number of reads to extract from the tail")
def fastq(**kwargs):
    """Set of useful utilities for FastQ manipulation.

    Input file can be gzipped or not. The --output-file
    """
    filenames = kwargs["filename"]
    # users may provide a wildcards such as "A*gz" or list of files.
    if len(filenames) == 1:
        # if existing files or glob, a glob would give the same answer.
        filenames = glob.glob(filenames[0])
    for filename in filenames:
        # NOTE(review): the return value is discarded -- a missing file is
        # silently ignored here; confirm intent.
        os.path.exists(filename)
    # could be simplified calling count_reads only once
    if kwargs["count_reads"]:
        for filename in filenames:
            f = FastQ(filename)
            Nreads = f.count_reads()
            # FastQ: 4 lines per read
            Nlines = Nreads * 4
            # NOTE(review): "(unknown)" looks like a mangled placeholder --
            # presumably {filename} was intended; confirm.
            print(f"Number of reads in (unknown): {Nreads}")
            print(f"Number of lines in (unknown): {Nlines}")
    elif kwargs["head"]:
        for filename in filenames:
            f = FastQ(filename)
            if kwargs["output"] is None:
                logger.error("Please use --output to tell us where to save the results")
                sys.exit(1)
            N = kwargs["head"] * 4
            f.extract_head(N=N, output_filename=kwargs["output"])
    elif kwargs["tail"]: # pragma: no cover
        raise NotImplementedError
    elif kwargs["merge"]:
        # merge all input files (assuming gz extension)
        extensions = [filename.split(".")[-1] for filename in filenames]
        if set(extensions) != set(["gz"]):
            raise ValueError("Your input FastQ files must be zipped")
        output_filename = kwargs["output"]
        if output_filename is None:
            logger.error("You must use --output filename.gz")
            sys.exit(1)
        if output_filename.endswith(".gz") is False:
            raise ValueError("your output file must end in .gz")
        # NOTE(review): fout and the zcat pipe are never explicitly closed.
        p1 = subprocess.Popen(["zcat"] + list(filenames), stdout=subprocess.PIPE)
        fout = open(output_filename, "wb")
        p2 = subprocess.run(["pigz"], stdin=p1.stdout, stdout=fout)
    else: # pragma: no cover
        print("Use one of the commands")
# =====================================================================================
# samplesheet-related tools
# =====================================================================================
@main.command()
@click.argument("name", type=click.STRING)
@click.option("--check", is_flag=True)
@click.option("--extract-adapters", is_flag=True)
@click.option("--quick-fix", is_flag=True)
@click.option("--output", default=None)
def samplesheet(**kwargs):
    """Utilities to manipulate sample sheet"""
    name = kwargs["name"]
    if kwargs["check"]:
        # Validate the Illumina Experiment Manager sample sheet; IEM.validate
        # is expected to raise on problems (only reached on success).
        iem = IEM(name)
        iem.validate()
        logger.info("SampleSheet looks correct")
    elif kwargs["extract_adapters"]:
        iem = IEM(name)
        iem.to_fasta()
    elif kwargs["quick_fix"]:
        # tryme=True: parse leniently so a broken sheet can still be fixed.
        iem = IEM(name, tryme=True)
        if kwargs["output"]:
            filename = kwargs["output"]
        else:
            filename = name + ".fixed"
        logger.info("Saving fixed version in {}".format(filename))
        iem.quick_fix(output_filename=filename)
# =====================================================================================
# summary about data files
# =====================================================================================
# This will be a complex command to provide HTML summary page for
# input files (e.g. bam), or results from pipelines. For each module,
# we should have corresponding option that starts with the module's name
# This can also takes as input various types of data (e.g. FastA)
@main.command()
@click.argument("name", type=click.Path(exists=True), nargs=-1)
@click.option(
    "--module",
    required=False,
    type=click.Choice(["bamqc", "bam", "fasta", "fastq", "gff", "vcf"]),
)
def summary(**kwargs):
    """Create a HTML report for various type of NGS formats.

    \b
    * bamqc
    * fastq

    This will process all files in the given pattern (in back quotes)
    sequentially and produce one HTML file per input file.

    Other module all work in the same way. For example, for FastQ files::

        sequana summary one_input.fastq
        sequana summary `ls *fastq`
    """
    names = kwargs["name"]
    module = kwargs["module"]

    # if --module is not provided, guess it from the first file's extension
    if module is None:
        if names[0].endswith("fastq.gz") or names[0].endswith(".fastq"):
            module = "fastq"
        elif names[0].endswith(".bam"):
            module = "bam"
        elif names[0].endswith(".gff") or names[0].endswith("gff3"):
            module = "gff"
        elif names[0].endswith("fasta.gz") or names[0].endswith(".fasta"):
            module = "fasta"
        else:
            logger.error("please use --module to tell us about the input files")
            sys.exit(1)

    if module == "bamqc":
        for name in names:
            print(f"Processing {name}")
            from sequana.modules_report.bamqc import BAMQCModule

            # instantiating the module writes the bamqc.html report
            BAMQCModule(name, "bamqc.html")
    elif module == "fasta":  # there is no module per se. Here we just call FastA.summary()
        from sequana.fasta import FastA

        for name in names:
            f = FastA(name)
            f.summary()
    elif module == "fastq":  # there is no module per se. Here we just call FastQC.get_stats()
        from sequana.fastq import FastQ
        from sequana import FastQC

        for filename in names:
            ff = FastQC(filename, max_sample=1e6, verbose=False)
            stats = ff.get_stats()
            print(stats)
    elif module == "bam":
        import pandas as pd

        from sequana import BAM

        for filename in names:
            ff = BAM(filename)
            stats = ff.get_stats()
            df = pd.Series(stats).to_frame().T
            print(df)
    elif module == "gff":
        import pandas as pd

        from sequana import GFF3

        for filename in names:
            ff = GFF3(filename)
            # the f-string previously had no placeholder and printed a literal;
            # print the actual file name being summarised
            print(f"#filename: {filename}")
            print("#Number of entries per genetic type:")
            print(ff.df.value_counts("genetic_type").to_string())
            print("#Number of duplicated attribute (if any) per attribute:")
            ff.get_duplicated_attributes_per_genetic_type()
    elif module == "vcf":
        from sequana.freebayes_vcf_filter import VCF_freebayes

        for filename in names:
            print(f"#filename: {filename}")
            vcf = VCF_freebayes(filename)
            columns = (
                "chr",
                "position",
                "depth",
                "reference",
                "alternative",
                "freebayes_score",
                "strand_balance",
                "frequency",
            )
            print(",".join(columns))
            for variant in vcf.get_variants():
                resume = variant.resume
                print(",".join([str(resume[col]) for col in columns]))
# =====================================================================================
# compare RNA seq analysis
# =====================================================================================
@main.command()
@click.option(
    "--file1",
    type=click.Path(),
    default=None,
    required=True,
    help="""The first input RNA-seq table to compare""",
)
@click.option(
    "--file2",
    type=click.Path(),
    default=None,
    required=True,
    help="""The second input RNA-seq table to compare""",
)
@common_logger
def rnaseq_compare(**kwargs):
    """Compare 2 tables created by the 'sequana rnadiff' command"""
    from sequana.compare import RNADiffCompare
    from pylab import savefig

    comparison = RNADiffCompare(kwargs["file1"], kwargs["file2"])
    # print the summary of each of the two analyses
    for result in (comparison.r1, comparison.r2):
        print(result.summary())
    comparison.plot_volcano_differences()
    savefig("sequana_rnaseq_compare_volcano.png", dpi=200)
# a dynamic call back function to introspect the design batch column
def rnadiff_auto_batch_column(ctx, args, incomplete):
    """Shell-completion helper for the --batch option of 'sequana rnadiff'.

    Reads the design file (the one given after --design if present on the
    command line, otherwise 'design.csv') and proposes every column that is
    neither 'label' nor 'condition' as a candidate batch column, filtered by
    what the user has typed so far (*incomplete*).
    """
    if "--design" in args:
        # the design file name is the token right AFTER the --design flag
        # (args.index("--design") alone would return the flag itself)
        dfile = args[args.index("--design") + 1]
        d = RNADesign(dfile)
    else:
        d = RNADesign("design.csv")

    # materialise as a list: a generator has no len() and would raise here
    batch = [x for x in d.df.columns if x not in {"label", "condition"}]
    if len(batch) == 0:
        logger.warning("No batch effect included in your design file")
        return []
    # match against the whole column name, not only its first character
    return [c for c in batch if incomplete in c]
# =====================================================================================
# RNAdiff analysis
# =====================================================================================
@main.command()
@click.option(
    "--annotation",
    type=click.Path(),
    default=None,
    help="""The annotation GFF file used to perform the feature count""",
)
@click.option(
    "--output-directory",
    type=click.Path(),
    default="rnadiff",
    help="""Output directory where are saved the results. Use --force if it exists already""",
)
@click.option(
    "--force/--no-force",
    default=False,
    help="If output directory exists, use this option to erase previous results",
)
@click.option(
    "--features",
    type=click.Path(),
    default="all_features.out",
    help="""The Counts from feature counts. This should be the output of the
sequana_rnaseq pipeline all_features.out """,
)
# FIXME I think it would be better to have a single file with multiple columns
# for alternative condition (specified using the "condition" option)
@click.option(
    "--design",
    type=click.Path(),
    default="design.csv",
    help="""It should have been generated by sequana_rnaseq. If
not, it must be a comma separated file with two columns. One for the label to be
found in the --features file and one column with the condition to which it
belong. Extra columns can be added to add batch effect. With 3 replicates and 2 conditions,
it should look like:

\b
label,condition
WT1,WT
WT2,WT
WT3,WT
file1,cond1
fileother,cond1
""",
)
@click.option(
    "--condition",
    type=str,
    default="condition",
    help="""The name of the column in design.csv to use as condition
for the differential analysis. Default is 'condition'""",
)
@click.option(
    "--feature-name",
    default="gene",
    help="The feature name compatible with your GFF (default is 'gene')",
)
@click.option(
    "--attribute-name",
    default="ID",
    help="""The attribute used as identifier. Compatible with your GFF (default is 'ID')""",
)
@click.option(
    "--reference",
    type=click.Path(),
    default=None,
    help="""The reference to test DGE against. If provided, conditions not
involving the reference are ignored. Otherwise all combinations are
tested""",
)
@click.option(
    "--comparisons",
    type=click.Path(),
    default=None,
    help="""By default, if a reference is provided, all conditions versus that
reference are tested. If no reference, the entire combinatory is performed
(Ncondition * (Ncondition-1) / 2. In both case all conditions found in the
design file are used. If a comparison file is provided, only conditions found in
it will be used. """,
)
@click.option(
    "--cooks-cutoff",
    type=click.Path(),
    default=None,
    help="""if none, let DESeq2 choose the cutoff""",
)
@click.option(
    "--independent-filtering/--no-independent-filtering",
    default=False,
    help="""Do not perform independent_filtering by default. low counts may not
have adjusted pvalues otherwise""",
)
@click.option(
    "--beta-prior/--no-beta-prior",
    default=False,
    help="Use beta prior or not. Default is no beta prior",
)
@click.option(
    "--batch",
    type=str,
    default=None,
    help="""set the column name (in your design) corresponding to the batch
effect to be included in the statistical model as batch ~ condition""",
    # NOTE(review): 'autocompletion' is the click<8 spelling; click>=8 renamed
    # it to 'shell_complete' with a different callback signature -- confirm
    # the click version pinned by this project before changing it.
    autocompletion=rnadiff_auto_batch_column,
)
@click.option(
    "--fit-type",
    default="parametric",
    help="DESeq2 type of fit. Default is 'parametric'",
)
@click.option(
    "--minimum-mean-reads-per-gene",
    default=0,
    help="Filter out gene where the mean number of reads is below this value. By default all genes are kept",
)
@click.option(
    "--keep-all-conditions/--no-keep-all-conditions",
    default=False,
    help="""Even though sub set of comparisons are provided, keep all conditions
in the analysis and report only the provided comparisons""",
)
@click.option(
    "--hover-name",
    default=None,
    help="""In volcano plot, we set the hover name to Name if present in the GFF,
otherwise to gene_id if present, then locus_tag, and finally ID and gene_name. One can specify
a hover name to be used with this option""",
)
@click.option(
    "--report-only",
    is_flag=True,
    help="""If analysis was done, you may want to redo the HTML report only using this option""",
)
@click.option("--xticks-fontsize", default=10, help="""Reduce fontsize of xticks""")
@common_logger
def rnadiff(**kwargs):
    """Perform RNA-seq differential analysis and reporting.

    This command performs the differential analysis of feature counts using DESeq2.
    A HTML report is created as well as a set of output files, including summary
    tables of the analysis.

    This command performs the differential analysis of gene expression based
    on the output of feature counts tool. The expected input is a tabulated file
    which is the aggregation of feature counts for each sample. This file is
    produced by the Sequana RNA-seq pipeline (https://github.com/sequana/rnaseq).
    It is named all_features.out and looks like:

        Geneid    Chr  Start  End  Strand  Length  BAM1  BAM2  BAM3  BAM4
        ENSG0001  1    1      10   +       10      120   130   140   150
        ENSG0002  2    1      10   +       10      120   130   0     0

    To perform this analysis, you will also need the GFF file used during the RNA-seq
    analysis. You also need a design file that gives the correspondence
    between the sample names found in the feature_count file above and the
    conditions of your RNA-seq analysis. The design looks like:

        label,condition
        BAM1,condition_A
        BAM2,condition_A
        BAM3,condition_B
        BAM4,condition_B

    Here is an example:

    \b
        sequana rnadiff --annotation Lepto.gff
            --design design.csv --features all_features.out
            --feature-name gene --attribute-name ID

    The feature-name is the feature that was used in your counting.
    The attribute-name is the main attribute to use in the HTML reports.
    Note however, that all attributes found in your GFF file are reported
    in the HTML page.

    Batch effect can be included by adding a column in the design.csv file. For
    example if called 'day', you can take this information into account using
    '--batch day'

    By default, when comparing conditions, all combination are computed. If
    you have N conditions, we compute the N(N-1)/2 comparisons. The
    reference is automatically chosen as the last one found in the design
    file. In this example:

        label,condition
        BAM1,A
        BAM2,A
        BAM3,B
        BAM4,B

    we compare A versus B. If you do not want that behaviour, use
    '--reference A'.

    In a more complex design,

        label,condition
        BAM1,A
        BAM2,A
        BAM3,B
        BAM4,B
        BAM5,C
        BAM6,C

    The comparisons are A vs B, A vs C and B vs C.

    If you wish to perform different comparisons or restrict the
    combination, you can use a comparison input file. For instance, to
    perform the C vs A and C vs B comparisons only, create this
    file (e.g. comparison.csv):

        alternative,reference
        C,A
        C,B

    and use '--comparisons comparison.csv'.
    """
    import pandas as pd

    from sequana import logger
    from sequana.rnadiff import RNADiffAnalysis, RNADesign
    from sequana.modules_report.rnadiff import RNAdiffModule

    logger.setLevel(kwargs["logger"])

    from easydev import cmd_exists

    # DESeq2 is driven through an R script; bail out early if R is missing
    if not cmd_exists("Rscript"):
        logger.critical(
            """Rscript not found; You will need R and the DESeq2 package to be installed.
You may install it yourself or use damona using the rtools:1.0.0 image """
        )
        sys.exit(1)

    outdir = kwargs["output_directory"]
    feature = kwargs["feature_name"]
    attribute = kwargs["attribute_name"]

    if os.path.exists(outdir) and not kwargs["force"]:
        logger.error(f"{outdir} exist already. Use --force to overwrite")
        sys.exit(1)

    # sanity check of the GFF: the requested feature and attribute must exist
    if kwargs["annotation"]:
        gff_filename = kwargs["annotation"]
        logger.info("Checking annotation file (feature and attribute)")
        gff = GFF3(gff_filename)
        if feature not in gff.features:
            logger.error(
                f"{feature} not found in the GFF. Most probably a wrong feature name"
            )
            sys.exit(1)
        attributes = gff.get_attributes(feature)
        if attribute not in attributes:
            logger.error(
                f"{attribute} not found in the GFF for the provided feature. Most probably a wrong feature name. Please change --attribute-name option or do not provide any GFF"
            )
            sys.exit(1)
    else:
        gff = None

    comparisons = kwargs["comparisons"]
    if comparisons:
        # raw string: \s*,\s* strips spaces around the comma separator
        compa_df = pd.read_csv(comparisons, sep=r"\s*,\s*", engine="python")
        comparisons = list(zip(compa_df["alternative"], compa_df["reference"]))

    logger.info(f"Differential analysis to be saved into ./{outdir}")
    for k in sorted(
        [
            "independent_filtering",
            "beta_prior",
            "batch",
            "cooks_cutoff",
            "fit_type",
            "reference",
        ]
    ):
        logger.info(f"    Parameter {k} set to : {kwargs[k]}")

    # The analysis is here
    r = RNADiffAnalysis(
        kwargs["features"],
        kwargs["design"],
        kwargs["condition"],
        keep_all_conditions=kwargs["keep_all_conditions"],
        batch=kwargs["batch"],
        comparisons=comparisons,
        reference=kwargs["reference"],
        fc_feature=feature,
        fc_attribute=attribute,
        outdir=outdir,
        gff=gff,
        cooks_cutoff=kwargs.get("cooks_cutoff"),
        independent_filtering=kwargs.get("independent_filtering"),
        beta_prior=kwargs.get("beta_prior"),
        fit_type=kwargs.get("fit_type"),
        minimum_mean_reads_per_gene=kwargs.get("minimum_mean_reads_per_gene"),
    )

    if not kwargs["report_only"]:
        try:
            logger.info(f"Running DGE. Saving results into {outdir}")
            results = r.run()
            results.to_csv(f"{outdir}/rnadiff.csv")
        except Exception as err:
            logger.error(err)
            logger.error(f"please see {outdir}/code/rnadiff.err file for errors")
            sys.exit(1)

    logger.info("Reporting. Saving in summary.html")
    # this defines the output directory where summary.html is saved
    config.output_dir = outdir

    import seaborn as sns

    # building the module writes the HTML report as a side effect
    RNAdiffModule(
        outdir,
        gff=gff,
        fc_attribute=attribute,
        fc_feature=feature,
        alpha=0.05,
        log2_fc=0,
        condition=kwargs["condition"],
        annot_cols=None,
        pattern="*vs*_degs_DESeq2.csv",
        palette=sns.color_palette(desat=0.6, n_colors=13),
        hover_name=kwargs["hover_name"],
        pca_fontsize=6,
        xticks_fontsize=kwargs.get("xticks_fontsize", 10),
    )

    # save info.txt with sequana version
    teardown(outdir)
# =====================================================================================
# Biomart tools
# =====================================================================================
@main.command()
@click.option(
    "--mart",
    default="ENSEMBL_MART_ENSEMBL",
    show_default=True,
    help="A valid mart name",
)
@click.option(
    "--dataset",
    required=True,
    help="A valid dataset name. e.g. mmusculus_gene_ensembl, hsapiens_gene_ensembl",
)
@click.option(
    "--attributes",
    default="ensembl_gene_id,go_id,entrezgene_id,external_gene_name",
    show_default=True,
    help="""A valid set of attributes to look for in the dataset. Multiple
attributes are separated by a comma (no spaces accepted)""",
)
@click.option(
    "--output",
    default=None,
    help="""by default save results into a CSV file named
biomart_<dataset>_<YEAR>_<MONTH>_<DAY>.csv""",
)
@common_logger
def biomart(**kwargs):
    """Retrieve information from biomart and save into CSV file

    This command uses BioMart from BioServices to introspect a MART service
    (--mart) and a specific dataset (default to mmusculus_gene_ensembl). Then,
    for all ensembl IDs, it will fetch the requested attributes (--attributes).
    Finally, it saves the CSV file into an output file (--output). This takes
    about 5-10 minutes to retrieve the data depending on the connection.

    Example:

        sequana biomart --mart ENSEMBL_MART_ENSEMBL \\
            --dataset mmusculus_gene_ensembl \\
            --attributes ensembl_gene_id,external_gene_name,go_id \\
            --output test.csv
    """
    logger.setLevel(kwargs["logger"])

    mart = kwargs["mart"]
    attributes = kwargs["attributes"]
    dataset = kwargs["dataset"]

    from sequana.enrichment.mart import Mart

    conv = Mart(dataset, mart)
    # query expects a list of attribute names
    df = conv.query(attributes.split(","))
    conv.save(df, filename=kwargs["output"])
# =====================================================================================
# feature counts
# =====================================================================================
@main.command()
@click.option(
    "--pattern",
    help="The pattern of the feature counts files to merge",
    show_default=True,
    default="*feature.out",
)
@click.option(
    "--output",
    help="The output filename where to save the merged counts",
    show_default=True,
    default="all_features.out",
)
@common_logger
def feature_counts(**kwargs):
    """Merge several feature counts files into one file"""
    from sequana.featurecounts import FeatureCountMerger

    # gather all files matching --pattern and write a single merged table
    merger = FeatureCountMerger(kwargs["pattern"])
    merger.to_tsv(output_filename=kwargs["output"])
# =====================================================================================
# ENRICHMENT KEGG
# =====================================================================================
@main.command()
@click.argument("name", type=click.Path(exists=True), nargs=1)
@click.option(
    "--annotation-attribute",
    type=click.STRING,
    default="Name",
    show_default=True,
    help="a valid attribute to be used to map on KEGG database",
)
@click.option(
    "--kegg-name",
    type=click.STRING,
    default=None,
    help=(
        "a valid KEGG name (hsa for human, mmu for mus musculus); "
        "See the taxonomy command to retrieved other names"
    ),
)
@click.option(
    "--log2-foldchange-cutoff",
    type=click.FLOAT,
    default=1,
    show_default=True,
    help="remove events with absolute log2 fold change below this value",
)
@click.option(
    "--padj-cutoff",
    type=click.FLOAT,
    default=0.05,
    show_default=True,
    help="remove events with pvalue above this value default (0.05).",
)
@click.option(
    "--biomart",
    type=click.STRING,
    default=None,
    help="""you may need a biomart mapping of your identifier for the kegg
pathways analysis. If you do not have this file, you can use 'sequana biomart'
command""",
)
@click.option(
    "--plot-linearx",
    type=click.BOOL,
    default=False,
    is_flag=True,
    help="""Default is log2 fold enrichment in the plots. use this to use linear scale""",
)
@click.option(
    "--kegg-pathways-directory",
    type=click.Path(),
    default=None,
    help="""a place where to find the pathways for each organism""",
)
@click.option(
    "--max-pathways",
    type=click.INT,
    default=40,
    show_default=True,
    help="""Max number of pathways to show (most enriched)""",
)
@click.option(
    "--kegg-background",
    type=click.INT,
    default=None,
    help="""a background for kegg enrichment. If None, set to number of genes found in KEGG""",
)
@click.option("--output-directory", default="enrichment_kegg")
@common_logger
def enrichment_kegg(**kwargs):
    """Create a HTML report showing KEGG enriched pathways

    \b
    Example for the enrichment module:

        sequana enrichment-kegg rnadiff.csv --log2-foldchange-cutoff 2

    The KEGG pathways are loaded and it may take time. Once done, they are saved
    in kegg_pathways/organism and be loaded next time:

    \b
        sequana enrichment-kegg rnadiff/rnadiff.csv --log2-foldchange-cutoff 2 \\
            --kegg-name lbi --annotation-attribute file.gff
    """
    import pandas as pd

    from sequana.modules_report import ModuleKEGGEnrichment
    from sequana.rnadiff import RNADiffResults
    from sequana.utils import config

    logger.setLevel(kwargs["logger"])

    keggname = kwargs["kegg_name"]
    params = {
        "padj": kwargs["padj_cutoff"],
        "log2_fc": kwargs["log2_foldchange_cutoff"],
        "mapper": kwargs["biomart"],
        "nmax": kwargs["max_pathways"],
        "kegg_background": kwargs["kegg_background"],
        "preload_directory": kwargs["kegg_pathways_directory"],
        "plot_logx": not kwargs["plot_linearx"],
    }

    # fail early if the optional input files/directories do not exist
    for option_name in ("biomart", "kegg_pathways_directory"):
        filename = kwargs[option_name]
        if filename and not os.path.exists(filename):
            logger.error(f"{filename} does not exist")
            sys.exit(1)

    logger.info(f"Reading RNAdiff results from {kwargs['name']}")
    dirpath = os.path.dirname(os.path.abspath(kwargs["name"]))
    rnadiff = RNADiffResults(dirpath, index_col=0, header=[0, 1])

    # now that we have loaded all results from a rnadiff analysis, let us
    # perform the enrichment for each comparison found in the file
    annot_col = kwargs["annotation_attribute"]

    # setting these attributes sets the gene lists with log2fc and padj filters
    rnadiff._log2_fc = params["log2_fc"]
    rnadiff._alpha = params["padj"]
    # NOTE(review): this command declares no --max-genes option, so the
    # default below (1000000, i.e. no practical limit) is always used
    gene_lists = rnadiff.get_gene_lists(
        annot_col=annot_col, Nmax=kwargs.get("max_genes", 1000000)
    )  # no filter on number of genes

    output_directory = kwargs["output_directory"]
    for compa, gene_dict in gene_lists.items():
        config.output_dir = f"{output_directory}/{compa}"
        os.makedirs(f"{output_directory}", exist_ok=True)

        # we define the data and its annotation that will be used by the KEGG
        # enrichment. No need to apply any filter, we pass the entire data set
        # so that even small fold change can be shown
        df = rnadiff.comparisons[compa].df.copy()
        df = pd.concat([df, rnadiff.annotation.loc[df.index].copy()], axis=1)
        df.reset_index(inplace=True)

        ModuleKEGGEnrichment(
            gene_dict,
            keggname,
            df,
            enrichment_params=params,
            command=" ".join(["sequana"] + sys.argv[1:]),
        )
# =====================================================================================
# SALMON
# =====================================================================================
@main.command()
@click.option("-i", "--input", required=True, help="The salmon input file.")
@click.option("-o", "--output", required=True, help="The feature counts output file")
@click.option(
    "-f", "--gff", required=True, help="A GFF file compatible with your salmon file"
)
@click.option(
    "-a",
    "--attribute",
    default="ID",
    help="A valid attribute to be found in the GFF file and salmon input",
)
# the short flag was removed here: "-a" was already taken by --attribute just
# above, making the two options ambiguous
@click.option("--feature", default="gene", help="A valid feature")
def salmon(**kwargs):
    """Convert output of Salmon into a feature counts file"""
    from sequana import salmon

    salmon_input = kwargs["input"]
    output = kwargs["output"]
    if not os.path.exists(salmon_input):
        logger.critical("Input file does not exist ({})".format(salmon_input))
        # previously execution continued with a missing file; stop here instead
        sys.exit(1)
    gff = kwargs["gff"]
    attribute = kwargs["attribute"]
    feature = kwargs["feature"]

    # reads the file generated by salmon and generates the count file as
    # expected by the DGE analysis
    s = salmon.Salmon(salmon_input, gff)
    s.save_feature_counts(output, feature=feature, attribute=attribute)
# =====================================================================================
# GTF Fixer
# =====================================================================================
@main.command()
@click.option("-i", "--input", required=True)
@click.option("-o", "--output", required=True)
def gtf_fixer(**kwargs):
    """Reads GTF and fix known issues (exon and genes uniqueness)"""
    from sequana.gtf import GTFFixer

    gtf = GTFFixer(kwargs["input"])
    # fix_exons_uniqueness writes the fixed GTF and returns a summary
    res = gtf.fix_exons_uniqueness(kwargs["output"])
    print(res)
# =====================================================================================
# ENRICHMENT PANTHER
# =====================================================================================
# This command takes the CSV output of 'sequana rnadiff' and performs a
# PantherDB gene-set enrichment (GO terms, pathways) for each comparison
# found in the results, producing one HTML report per comparison.
@main.command()
@click.argument("name", type=click.Path(exists=True), nargs=1)
@click.option(
    "--annotation-attribute",
    type=click.STRING,
    # required=True,
    default="index",
    show_default=True,
    help="the annotation attribute used to build the gene lists",
)
@click.option(
    "--panther-taxon",
    type=click.INT,
    required=True,
    help="a valid taxon identifiers",
)
@click.option(
    "--log2-foldchange-cutoff",
    type=click.FLOAT,
    default=1,
    show_default=True,
    help="remove events with absolute log2 fold change below this value",
)
@click.option(
    "--padj-cutoff",
    type=click.FLOAT,
    default=0.05,
    show_default=True,
    help="remove events with pvalue above this value default (0.05).",
)
@click.option(
    "--plot-linearx",
    type=click.BOOL,
    default=False,
    is_flag=True,
    show_default=True,
    help="""Default is log2 fold enrichment in the plots. use this to use linear scale""",
)
@click.option(
    "--compute-levels/--no-compute-levels",
    default=True,
    help="""Compute the levels of each go term, set --no-compute-levels to skip this step""",
)
@click.option(
    "--max-genes",
    type=click.INT,
    default=2500,
    show_default=True,
    help="""Maximum number of genes (up or down) to use in PantherDB.""",
)
@click.option(
    "--ontologies",
    default=("MF", "BP", "CC"),
    help="""Provide the ontologies to be included in the analysis and HTML report.
Valid choices are: from MF, BP, CC, SLIM_MF, SLIM_BP, SLIM_CC, PROTEIN,
PANTHER_PATHWAY, REACTOME_PATHWAY""",
    cls=OptionEatAll,
    show_default=True,
)
@click.option(
    "--max-enriched-go-terms",
    type=click.INT,
    default=40,
    show_default=True,
    help="""Max number of enriched go terms to show in the plots (most
enriched). All enriched GO terms are stored in tables""",
)
@click.option("--output-directory", show_default=True, default="enrichment_panther")
@common_logger
def enrichment_panther(**kwargs):
    """Create a HTML report with the PantherDB enrichment of a rnadiff analysis

    \b
    * enrichment: the output of RNADiff pipeline

    Example for the enrichment module:

        sequana enrichment-panther rnadiff.csv --panther-taxon 10090
            --log2-foldchange-cutoff 2

        sequana enrichment-panther rnadiff/rnadiff.csv
            --panther-taxon 189518 \
            --log2-foldchange-cutoff 2 \

    \b
    Valid ontologies are: MF, BP, CC, SLIM_MF, SLIM_BP, SLIM_CC,
    PROTEIN, "PANTHER_PATHWAY", "REACTOME_PATHWAY"
    """
    import pandas as pd

    from sequana.modules_report import ModulePantherEnrichment
    from sequana.rnadiff import RNADiffResults
    from sequana.utils import config

    valid = [
        "MF",
        "BP",
        "CC",
        "SLIM_MF",
        "SLIM_BP",
        "SLIM_CC",
        "PROTEIN",
        "PANTHER_PATHWAY",
        "REACTOME_PATHWAY",
    ]

    # FIXME(security): OptionEatAll stores the tuple as its string repr and
    # eval() reconstructs it; eval on a command-line value is unsafe --
    # consider ast.literal_eval instead.
    ontologies = eval(kwargs["ontologies"])
    for ontology in ontologies:
        if ontology not in valid:
            # was logger.erro (AttributeError at runtime)
            logger.error(f"Provided incorrect ontology ({ontology}). Must be in {valid}")
            sys.exit(1)

    logger.setLevel(kwargs["logger"])

    taxon = kwargs["panther_taxon"]
    if taxon == 0:
        logger.error("You must provide a taxon with --panther-taxon")
        sys.exit(1)

    params = {
        "padj": kwargs["padj_cutoff"],
        "log2_fc": kwargs["log2_foldchange_cutoff"],
        "max_entries": kwargs["max_genes"],
        "nmax": kwargs["max_enriched_go_terms"],
        "plot_logx": not kwargs["plot_linearx"],
        "plot_compute_levels": kwargs["compute_levels"],
    }

    logger.info(f"Reading RNAdiff results from {kwargs['name']}")
    dirpath = os.path.dirname(os.path.abspath(kwargs["name"]))
    rnadiff = RNADiffResults(dirpath, index_col=0, header=[0, 1])

    # now that we have loaded all results from a rnadiff analysis, let us
    # perform the enrichment for each comparison found in the file
    annot_col = kwargs.get("annotation_attribute", "index")
    logger.info(f"Using the annotation column '{annot_col}'")

    # setting these attributes sets the gene lists with log2fc and padj filters
    rnadiff._log2_fc = params["log2_fc"]
    rnadiff._alpha = params["padj"]
    gene_lists = rnadiff.get_gene_lists(
        annot_col=annot_col, Nmax=kwargs.get("max_genes", None)
    )

    output_directory = kwargs["output_directory"]
    for compa, gene_dict in gene_lists.items():
        config.output_dir = f"{output_directory}/{compa}"
        os.makedirs(f"{output_directory}", exist_ok=True)

        # for now, let us keep the 'all' category
        # del gene_dict["all"]

        ModulePantherEnrichment(
            gene_dict,
            taxon,
            enrichment_params=params,
            command=" ".join(["sequana"] + sys.argv[1:]),
            ontologies=ontologies,
        )
# =====================================================================================
# taxonomy
# =====================================================================================
@main.command()
@click.option(
    "--search-kegg",
    type=click.Path(),
    default=None,
    help="""Search a pattern amongst all KEGG organisms""",
)
@click.option(
    "--search-panther",
    type=click.Path(),
    default=None,
    help="""Search a pattern amongst all Panther organisms""",
)
@common_logger
def taxonomy(**kwargs):
    """Tool to retrieve taxonomic information.

    sequana taxonomy --search-kegg leptospira
    """
    if kwargs["search_kegg"]:
        from sequana.kegg import KEGGHelper

        k = KEGGHelper()
        results = k.search(kwargs["search_kegg"].lower())
        print(results)
    elif kwargs["search_panther"]:
        import pandas as pd

        from sequana import sequana_data

        df = pd.read_csv(sequana_data("panther.csv"), index_col=0)
        pattern = kwargs["search_panther"]

        # collect rows whose name, short_name or long_name contains the pattern
        indices = set()
        for column in ("name", "short_name", "long_name"):
            indices.update(df.index[[pattern in x for x in df[column]]])

        if len(indices) == 0:
            # maybe it is a taxon ID ?
            indices = set(df.index[[pattern in str(x) for x in df.taxon_id]])

        # .loc rejects sets in recent pandas; use a sorted list for a
        # deterministic output order
        print(df.loc[sorted(indices)])
# =====================================================================================
# GFF to light GFF
# =====================================================================================
@main.command()
@click.argument("input", type=click.Path(exists=True))
@click.argument("output")
@click.option(
    "--features",
    type=click.Path(),
    default="gene",
    help="""list of features to be extracted""",
)
@common_logger
def gff_to_light_gff(**kwargs):
    """Extract the feature of interest in the input GFF to create a light version

    sequana gff-to-light-gff input.gff output.gff --features gene,exon
    """
    from sequana import logger

    logger.setLevel(kwargs["logger"])

    filename = kwargs["input"]
    # assert is stripped under 'python -O'; validate explicitly instead
    if not filename.endswith((".gff", ".gff3")):
        logger.error("Input file must have a .gff or .gff3 extension")
        sys.exit(1)

    g = GFF3(filename)
    g.read_and_save_selected_features(kwargs["output"], features=kwargs["features"])
# =====================================================================================
# GFF to light GTF
# =====================================================================================
@main.command()
@click.argument("gff_filename", type=click.Path(exists=True))
@common_logger
def gff_to_gtf(**kwargs):
    """Convert a GFF file into GTF

    This is experimental conversion. Use with care.
    """
    filename = kwargs["gff_filename"]
    # assert is stripped under 'python -O'; validate explicitly instead
    if not filename.endswith((".gff", ".gff3")):
        logger.error("Input file must have a .gff or .gff3 extension")
        sys.exit(1)

    g = GFF3(filename)
    # swap only the final extension; str.replace would also alter an
    # occurrence of '.gff' earlier in the file name
    outname = os.path.splitext(os.path.basename(filename))[0] + ".gtf"
    g.to_gtf(outname)
def teardown(workdir):
    # common function to be used by subcommands to store called command
    from pathlib import Path

    from easydev import mkdirs

    info_dir = Path(workdir) / ".sequana"
    mkdirs(info_dir)
    with open(info_dir / "info.txt", "w") as fout:
        from sequana import version

        fout.write(f"# sequana version: {version}\n")
        fout.write(" ".join(["sequana"] + sys.argv[1:]))
# ---------------------------------------------------------------------------
# GDAL/OGR geospatial helper utilities
# ---------------------------------------------------------------------------
from osgeo import gdal
from osgeo import osr
from osgeo import ogr
import numpy as np
import subprocess
import os
import time
import json
import glob
def utm_getZone(longitude):
    """Return the UTM zone number (1-60) containing *longitude* (degrees)."""
    zone = 1 + (longitude + 180.0) / 6.0
    return int(zone)
def utm_isNorthern(latitude):
    """Return 1 when *latitude* is in the northern hemisphere (>= 0), else 0."""
    return 0 if latitude < 0.0 else 1
def createUTMTransform(polyGeom):
    """Build WGS84<->UTM coordinate transforms for the zone containing polyGeom.

    The UTM zone and hemisphere are chosen from the polygon envelope's
    minimum longitude and minimum latitude respectively.
    Returns (transform_WGS84_To_UTM, transform_UTM_To_WGS84, utm_cs).
    """
    envelope = polyGeom.GetEnvelope()  # (minX, maxX, minY, maxY)
    zone = utm_getZone(envelope[0])
    northern = utm_isNorthern(envelope[2])

    utm_cs = osr.SpatialReference()
    utm_cs.SetWellKnownGeogCS('WGS84')
    utm_cs.SetUTM(zone, northern)

    wgs84_cs = osr.SpatialReference()
    wgs84_cs.ImportFromEPSG(4326)

    to_utm = osr.CoordinateTransformation(wgs84_cs, utm_cs)
    to_wgs84 = osr.CoordinateTransformation(utm_cs, wgs84_cs)
    return to_utm, to_wgs84, utm_cs
def converWGS2UTM():
    """Placeholder: convert WGS84 coordinates to UTM (not implemented).

    NOTE(review): the name is likely missing a 't' ('convertWGS2UTM');
    kept unchanged in case external code imports it by this name.
    """
    pass
def convertUTM2WGS():
    """Placeholder: convert UTM coordinates to WGS84 (not implemented)."""
    pass
def getRasterExtent(srcImage):
    """Compute the geotransform and footprint polygon of a GDAL raster.

    Returns (geoTrans, poly, ulX, ulY, lrX, lrY) where (ulX, ulY) is the
    upper-left corner, (lrX, lrY) the lower-right corner, and *poly* a closed
    rectangular OGR polygon covering the raster in georeferenced coordinates.
    """
    geoTrans = srcImage.GetGeoTransform()
    ulX = geoTrans[0]
    ulY = geoTrans[3]
    xDist = geoTrans[1]  # pixel width
    yDist = geoTrans[5]  # pixel height (negative for north-up images)
    cols = srcImage.RasterXSize
    rows = srcImage.RasterYSize
    lrX = ulX + xDist * cols
    lrY = ulY + yDist * rows

    # reuse the shared helper instead of duplicating the ring-building code
    poly = createPolygonFromCorners(lrX, lrY, ulX, ulY)

    return geoTrans, poly, ulX, ulY, lrX, lrY
def createPolygonFromCorners(lrX, lrY, ulX, ulY):
    """Build a closed rectangular OGR polygon from two opposite corners."""
    # the ring must be explicitly closed (first point repeated at the end)
    corners = [(lrX, lrY), (lrX, ulY), (ulX, ulY), (ulX, lrY), (lrX, lrY)]
    ring = ogr.Geometry(ogr.wkbLinearRing)
    for x, y in corners:
        ring.AddPoint(x, y)
    poly = ogr.Geometry(ogr.wkbPolygon)
    poly.AddGeometry(ring)
    return poly
def clipShapeFile(shapeSrc, outputFileName, polyToCut):
    """Clip the building-footprint layer to polyToCut and write GeoJSON.

    The output path is derived from outputFileName by swapping '.tif' for
    '.geojson'.  Each output feature carries all input fields plus an
    integer 'partialBuilding' flag: 0 when the footprint lies entirely
    inside the clip polygon, 1 when it was trimmed (or the intersection
    produced no geometry at all).
    """
    source_layer = shapeSrc.GetLayer()
    source_srs = source_layer.GetSpatialRef()
    outGeoJSon = outputFileName.replace('.tif', '.geojson')
    # Canonical OGR driver name is "GeoJSON".
    outDriver = ogr.GetDriverByName("GeoJSON")
    if os.path.exists(outGeoJSon):
        outDriver.DeleteDataSource(outGeoJSon)
    outDataSource = outDriver.CreateDataSource(outGeoJSon)
    outLayer = outDataSource.CreateLayer("groundTruth", source_srs, geom_type=ogr.wkbPolygon)
    # Mirror the input schema, then append the partial-building flag.
    inLayerDefn = source_layer.GetLayerDefn()
    for i in range(inLayerDefn.GetFieldCount()):
        outLayer.CreateField(inLayerDefn.GetFieldDefn(i))
    outLayer.CreateField(ogr.FieldDefn("partialBuilding", ogr.OFTInteger))
    outLayerDefn = outLayer.GetLayerDefn()
    # Only iterate features that can intersect the clip polygon.
    source_layer.SetSpatialFilter(polyToCut)
    for inFeature in source_layer:
        outFeature = ogr.Feature(outLayerDefn)
        for i in range(inLayerDefn.GetFieldCount()):
            outFeature.SetField(inLayerDefn.GetFieldDefn(i).GetNameRef(), inFeature.GetField(i))
        geom = inFeature.GetGeometryRef()
        geomNew = geom.Intersection(polyToCut)
        # Unchanged area means the footprint was fully inside the chip.
        if geomNew and geom.GetArea() == geomNew.GetArea():
            outFeature.SetField("partialBuilding", 0)
        else:
            outFeature.SetField("partialBuilding", 1)
        outFeature.SetGeometry(geomNew)
        outLayer.CreateFeature(outFeature)
    # BUG FIX: release the datasource so features are flushed to disk;
    # previously it was never closed and the GeoJSON could be left empty.
    outDataSource.Destroy()
    # Clear the filter so callers can keep reusing the shared source layer.
    source_layer.SetSpatialFilter(None)
def cutChipFromMosaic(rasterFile, shapeFileSrc, outlineSrc,outputDirectory='', outputPrefix='clip_',
                      clipSizeMX=100, clipSizeMY=100, numBands=8):
    # Tile a mosaic raster into clipSizeMX x clipSizeMY (UTM metres) chips via
    # gdalwarp, and clip the building footprints to each chip's extent.
    # NOTE(review): numBands is accepted but never used here -- confirm intent.
    #rasterFile = '/Users/dlindenbaum/dataStorage/spacenet/mosaic_8band/013022223103.tif'
    srcImage = gdal.Open(rasterFile)
    geoTrans, poly, ulX, ulY, lrX, lrY = getRasterExtent(srcImage)
    rasterFileBase = os.path.basename(rasterFile)
    # Default the output directory to the raster's own directory.
    if outputDirectory=="":
        outputDirectory=os.path.dirname(rasterFile)
    # Work in UTM so chip sizes are in metres, not degrees.
    transform_WGS84_To_UTM, transform_UTM_To_WGS84, utm_cs = createUTMTransform(poly)
    poly.Transform(transform_WGS84_To_UTM)
    env = poly.GetEnvelope()
    minX = env[0]
    minY = env[2]
    maxX = env[1]
    maxY = env[3]
    #return poly to WGS84
    poly.Transform(transform_UTM_To_WGS84)
    shapeSrc = ogr.Open(shapeFileSrc)
    outline = ogr.Open(outlineSrc)
    layer = outline.GetLayer()
    for feature in layer:
        geom = feature.GetGeometryRef()
        # Step across the UTM envelope in chip-sized increments.
        for llX in np.arange(minX, maxX, clipSizeMX):
            for llY in np.arange(minY, maxY, clipSizeMY):
                uRX = llX+clipSizeMX
                uRY = llY+clipSizeMY
                polyCut = createPolygonFromCorners(llX, llY, uRX, uRY)
                polyCut.Transform(transform_UTM_To_WGS84)
                # Only cut chips whose footprint overlaps the AOI outline.
                if (polyCut).Intersection(geom):
                    print "Do it."
                    envCut = polyCut.GetEnvelope()
                    minXCut = envCut[0]
                    minYCut = envCut[2]
                    maxXCut = envCut[1]
                    maxYCut = envCut[3]
                    # Chip filename embeds the chip's lower-left WGS84 coords.
                    outputFileName = os.path.join(outputDirectory, outputPrefix+rasterFileBase.replace('.tif', "_{}_{}.tif".format(minXCut,minYCut)))
                    ## Clip Image
                    subprocess.call(["gdalwarp", "-te", "{}".format(minXCut), "{}".format(minYCut),
                                     "{}".format(maxXCut), "{}".format(maxYCut), rasterFile, outputFileName])
                    outGeoJSon = outputFileName.replace('.tif', '.geojson')
                    ### Clip poly to cust to Raster Extent
                    polyVectorCut=polyCut.Intersection(poly)
                    clipShapeFile(shapeSrc, outputFileName, polyVectorCut)
                    #subprocess.call(["ogr2ogr", "-f", "ESRI Shapefile",
                    #                 "-spat", "{}".format(minXCut), "{}".format(minYCut),
                    #                 "{}".format(maxXCut), "{}".format(maxYCut), "-clipsrc", outGeoJSon, shapeFileSrc])
                    ## ClipShapeFile
                else:
                    print "Ain't nobody got time for that!"
if __name__ == '__main__':
    # Chip every Rio mosaic tile in both 3-band and 8-band flavours,
    # timing the whole run.
    start = time.time()
    # Earlier gbdx/docker input paths, kept for reference:
    #rasterFile = glob.glob('/mnt/work/input/inrast/*.tif')[0]
    #shapeFileSrc = glob.glob('/mnt/work/input/inshape/*.geojson')[0]
    #os.makedirs('/mnt/work/output/chips')
    #os.chdir('/mnt/work/output/chips')
    #outdir = '/mnt/work/output/chips/'
    # Mosaic tile IDs to process.
    rastList = ['013022223130_mask.tif',
                '013022223132_mask.tif',
                '013022232020_mask.tif',
                '013022232023_mask.tif',
                '013022232033_mask.tif',
                '013022232122_mask.tif',
                '013022232210_mask.tif',
                '013022223131_mask.tif',
                '013022223133_mask.tif',
                '013022232022_mask.tif',
                '013022232032_mask.tif',
                '013022232120_mask.tif',
                '013022232200_mask.tif']
    outlineSrc = '/usr/local/share/spacenet/rioBuildings_08022016/Rio_AOI_OutLine_Combined/Rio_Outline_CombinedGeo.geojson'
    buildingsSrc = '/usr/local/share/spacenet/rioBuildings_08022016/Rio_AOI_OutLine_Combined/Rio_Buildings_Combined_Geo.geojson'
    outdir = '/usr/local/share/spacenet/chips3band'
    # Read in the AOI outline file
    outline = ogr.Open(outlineSrc)
    layer = outline.GetLayer()
    for image in rastList:
        # 3-band chips first...
        rasterFile = os.path.join('/usr/local/share/spacenet/mosaic', image)
        print rasterFile
        print " ========================================= Let's make some chips! ========================================= "
        outdir = '/usr/local/share/spacenet/chips3band'
        cutChipFromMosaic(rasterFile, buildingsSrc, outlineSrc, outputDirectory=outdir, outputPrefix='clip2_',
                          clipSizeMX=200, clipSizeMY=200, numBands=3)
        # ...then the matching 8-band chips for the same tile.
        rasterFile = os.path.join('/usr/local/share/spacenet/mosaic_8band', image)
        outdir = '/usr/local/share/spacenet/chips8band'
        cutChipFromMosaic(rasterFile, buildingsSrc, outlineSrc, outputDirectory=outdir, outputPrefix='clip2_',
                          clipSizeMX=200, clipSizeMY=200, numBands=8)
    stop = time.time()
    print stop-start
CRLF = b'\r\n'


def encode_request(from_pid, to_pid, method, body=None, content_type=None, legacy=False):
    """Encode a libprocess message as a raw HTTP/1.0 POST request.

    Returns the full request as bytes, including the libprocess
    identification header.  With ``legacy=True`` the sender is identified
    via the User-Agent header instead of Libprocess-From.
    """
    body = b'' if body is None else body
    if not isinstance(body, (bytes, bytearray)):
        raise TypeError('Body must be a sequence of bytes.')

    header_lines = [
        'POST /{process}/{method} HTTP/1.0'.format(process=to_pid.id, method=method),
        'Connection: Keep-Alive',
        'Content-Length: %d' % len(body),
    ]
    if legacy:
        header_lines.append('User-Agent: libprocess/{pid}'.format(pid=from_pid))
    else:
        header_lines.append('Libprocess-From: {pid}'.format(pid=from_pid))
    if content_type is not None:
        header_lines.append('Content-Type: {content_type}'.format(content_type=content_type))

    # Assemble: each header line terminated by CRLF, a blank line, the body.
    request = bytearray()
    for line in header_lines:
        request += line.encode('utf8')
        request += CRLF
    request += CRLF
    if body:
        request += body
    return bytes(request)
|
from easy_vqa import get_train_questions, get_test_questions, get_train_image_paths, get_test_image_paths, get_answers
def test_all_questions():
    """Both question splits must be non-empty and internally aligned."""
    train_qs, train_answers, train_image_ids = get_train_questions()
    test_qs, test_answers, test_image_ids = get_test_questions()
    for qs, answers, image_ids in (
        (train_qs, train_answers, train_image_ids),
        (test_qs, test_answers, test_image_ids),
    ):
        assert len(qs) > 0
        assert len(answers) == len(qs)
        assert len(image_ids) == len(qs)
def test_all_image_paths():
    """Both image-path splits must contain at least one path."""
    train_im_paths = get_train_image_paths()
    assert len(train_im_paths) > 0
    test_im_paths = get_test_image_paths()
    assert len(test_im_paths) > 0
def test_answers():
    """The answer vocabulary must not be empty."""
    assert len(get_answers()) > 0
|
class UnprivilegedAccess(Exception):
    """Exception raised for unprivileged access attempts."""
    pass
class MalformedInstruction(Exception):
    """Exception raised when an instruction cannot be parsed or is malformed."""
    pass
|
import uuid
from django.db import models
from django.utils import timezone
from django.contrib import auth
from organization.managers import TeacherManager, StudentManager
from core.models import UserProfile
class Teacher(auth.models.User):
    """Proxy of the built-in User model representing teacher accounts.

    Saving a Teacher guarantees a linked UserProfile row with
    user_type 'teacher'.  Querying goes through TeacherManager
    (presumably filtering on the profile's user_type -- confirm in
    organization.managers).
    """
    objects = TeacherManager()

    class Meta:
        # Proxy model: no extra table, shares auth.User's storage.
        proxy = True

    def save(self, *args, **kwargs):
        # Persist the user first so self.id is populated for the profile link.
        super(Teacher, self).save(*args, **kwargs)
        # Idempotently ensure the companion profile exists, tagged as teacher.
        UserProfile.objects.get_or_create(
            user_id=self.id,
            defaults={
                'user_type': 'teacher'
            })
class Student(auth.models.User):
    """Proxy of the built-in User model representing student accounts.

    Saving a Student guarantees a linked UserProfile row with
    user_type 'student'.  Querying goes through StudentManager.
    """
    objects = StudentManager()

    class Meta:
        # Proxy model: no extra table, shares auth.User's storage.
        proxy = True

    def save(self, *args, **kwargs):
        # Persist the user first so self.id is populated for the profile link.
        super(Student, self).save(*args, **kwargs)
        # Idempotently ensure the companion profile exists, tagged as student.
        UserProfile.objects.get_or_create(
            user_id=self.id,
            defaults={
                'user_type': 'student'
            })
class Subject(models.Model):
    """A taught subject, linked to the teachers who can teach it."""
    title = models.CharField(
        max_length=50
    )
    # Only profiles flagged as teachers are selectable in admin forms.
    teachers = models.ManyToManyField(
        'core.UserProfile',
        limit_choices_to={
            'user_type': 'teacher'
        }
    )
class Group(models.Model):
    """A student group, identified only by a short title."""
    title = models.CharField(
        max_length=10
    )
class StudentQuizResult(models.Model):
    """Outcome of one student's attempt at a quiz."""
    # Only profiles flagged as students are selectable in admin forms.
    student = models.ForeignKey(
        'core.UserProfile',
        on_delete=models.CASCADE,
        limit_choices_to={
            'user_type': 'student'
        }
    )
    # Non-editable token, presumably used to address this result via a
    # personal URL -- confirm against the views that consume it.
    personal_link = models.UUIDField(
        default=uuid.uuid4,
        editable=False
    )
    quiz = models.ForeignKey(
        'quizes.Quiz',
        on_delete=models.CASCADE
    )
    # Defaults to the moment the row is created.
    passing_date = models.DateTimeField(
        default=timezone.now,
    )
    total_points = models.IntegerField(
        default=0
    )
    is_repassing_allowed = models.BooleanField(
        default=False
    )
    is_active = models.BooleanField(
        default=False
    )
    # Question count recorded per attempt (quiz content may change later).
    questions_amount = models.IntegerField()

    def __str__(self):
        return 'Quiz {}, student {} {}'.format(self.quiz.id, self.student.user.last_name, self.student.user.first_name)
__author__ = 'MegabytePhreak'
import rdlcompiler.systemrdl.parser as parser
def test_enum():
    """Smoke-test: the parser accepts an enum with per-value properties."""
    rdl_parser = parser.RdlParser()
    rdl_parser.parse('enum myenum { True = 1\'b0; False = 1\'b1 { name="FALSE"; descn="The opposite of \nTRUE"; }; };')
|
from __future__ import print_function
from awips.dataaccess import DataAccessLayer as DAL
from dynamicserialize.dstypes.com.raytheon.uf.common.dataquery.requests import RequestConstraint
from awips.test.dafTests import baseDafTestCase
from awips.test.dafTests import params
#
# Test DAF support for bufrua data
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 01/19/16 4795 mapeters Initial Creation.
# 04/11/16 5548 tgurney Cleanup
# 04/18/16 5548 tgurney More cleanup
# 06/09/16 5587 bsteffen Add getIdentifierValues tests
# 06/13/16 5574 tgurney Add advanced query tests
# 06/30/16 5725 tgurney Add test for NOT IN
# 12/07/16 5981 tgurney Parameterize
# 12/15/16 5981 tgurney Add envelope test
#
#
class BufrUaTestCase(baseDafTestCase.DafTestCase):
    """Test DAF support for bufrua data"""

    datatype = "bufrua"

    # Station used by location-specific queries; shared test parameter.
    location = params.STATION_ID

    def testGetAvailableParameters(self):
        req = DAL.newDataRequest(self.datatype)
        self.runParametersTest(req)

    def testGetAvailableLocations(self):
        req = DAL.newDataRequest(self.datatype)
        req.addIdentifier("reportType", "2020")
        self.runLocationsTest(req)

    def testGetAvailableTimes(self):
        req = DAL.newDataRequest(self.datatype)
        req.setLocationNames(self.location)
        req.addIdentifier("reportType", "2020")
        self.runTimesTest(req)

    def testGetGeometryData(self):
        """Fetch geometry records and print a small sample for inspection."""
        req = DAL.newDataRequest(self.datatype)
        req.setLocationNames(self.location)
        req.addIdentifier("reportType", "2020")
        req.setParameters("sfcPressure", "staName", "rptType", "tdMan")
        print("Testing getGeometryData()")
        geomData = DAL.getGeometryData(req)
        self.assertIsNotNone(geomData)
        print("Number of geometry records: " + str(len(geomData)))
        print("Sample geometry data:")
        for record in geomData[:self.sampleDataLimit]:
            print("level=", record.getLevel(), end="")
            # One dimensional parameters are reported on the 0.0UNKNOWN level.
            # 2D parameters are reported on MB levels from pressure.
            if record.getLevel() == "0.0UNKNOWN":
                print(" sfcPressure=" + record.getString("sfcPressure") + record.getUnit("sfcPressure"), end="")
                print(" staName=" + record.getString("staName"), end="")
                print(" rptType=" + record.getString("rptType") + record.getUnit("rptType"), end="")
            else:
                print(" tdMan=" + str(record.getNumber("tdMan")) + record.getUnit("tdMan"), end="")
            print(" geometry=", record.getGeometry())
        print("getGeometryData() complete\n\n")

    def testGetGeometryDataWithEnvelope(self):
        """All returned geometries must fall inside the query envelope."""
        req = DAL.newDataRequest(self.datatype)
        req.setParameters("staName", "rptType")
        req.setEnvelope(params.ENVELOPE)
        data = self.runGeometryDataTest(req)
        for item in data:
            self.assertTrue(params.ENVELOPE.contains(item.getGeometry()))

    def testGetIdentifierValues(self):
        req = DAL.newDataRequest(self.datatype)
        optionalIds = set(DAL.getOptionalIdentifiers(req))
        self.runGetIdValuesTest(optionalIds)

    def testGetInvalidIdentifierValuesThrowsException(self):
        self.runInvalidIdValuesTest()

    def testGetNonexistentIdentifierValuesThrowsException(self):
        self.runNonexistentIdValuesTest()

    def _runConstraintTest(self, key, operator, value):
        """Issue a geometry query constrained by (key operator value).

        Shared helper for all of the advanced-query tests below.
        """
        req = DAL.newDataRequest(self.datatype)
        constraint = RequestConstraint.new(operator, value)
        req.addIdentifier(key, constraint)
        # As an identifier it is "reportType" but as a parameter it is
        # "rptType"... this is weird...
        req.setParameters("staName", "rptType")
        return self.runGeometryDataTest(req)

    def testGetDataWithEqualsString(self):
        geometryData = self._runConstraintTest('reportType', '=', '2022')
        for record in geometryData:
            self.assertEqual(record.getString('rptType'), '2022')

    def testGetDataWithEqualsInt(self):
        geometryData = self._runConstraintTest('reportType', '=', 2022)
        for record in geometryData:
            self.assertEqual(record.getString('rptType'), '2022')

    def testGetDataWithEqualsLong(self):
        geometryData = self._runConstraintTest('reportType', '=', 2022)
        for record in geometryData:
            self.assertEqual(record.getString('rptType'), '2022')

    # No float test because no float identifiers are available

    def testGetDataWithEqualsNone(self):
        geometryData = self._runConstraintTest('reportType', '=', None)
        for record in geometryData:
            self.assertEqual(record.getType('rptType'), 'NULL')

    def testGetDataWithNotEquals(self):
        geometryData = self._runConstraintTest('reportType', '!=', 2022)
        for record in geometryData:
            self.assertNotEqual(record.getString('rptType'), '2022')

    def testGetDataWithNotEqualsNone(self):
        geometryData = self._runConstraintTest('reportType', '!=', None)
        for record in geometryData:
            self.assertNotEqual(record.getType('rptType'), 'NULL')

    def testGetDataWithGreaterThan(self):
        geometryData = self._runConstraintTest('reportType', '>', 2022)
        for record in geometryData:
            self.assertGreater(record.getString('rptType'), '2022')

    def testGetDataWithLessThan(self):
        geometryData = self._runConstraintTest('reportType', '<', 2022)
        for record in geometryData:
            self.assertLess(record.getString('rptType'), '2022')

    def testGetDataWithGreaterThanEquals(self):
        geometryData = self._runConstraintTest('reportType', '>=', 2022)
        for record in geometryData:
            self.assertGreaterEqual(record.getString('rptType'), '2022')

    def testGetDataWithLessThanEquals(self):
        geometryData = self._runConstraintTest('reportType', '<=', 2022)
        for record in geometryData:
            self.assertLessEqual(record.getString('rptType'), '2022')

    def testGetDataWithInTuple(self):
        collection = ('2022', '2032')
        geometryData = self._runConstraintTest('reportType', 'in', collection)
        for record in geometryData:
            self.assertIn(record.getString('rptType'), collection)

    def testGetDataWithInList(self):
        collection = ['2022', '2032']
        geometryData = self._runConstraintTest('reportType', 'in', collection)
        for record in geometryData:
            self.assertIn(record.getString('rptType'), collection)

    def testGetDataWithInGenerator(self):
        collection = ('2022', '2032')
        generator = (item for item in collection)
        geometryData = self._runConstraintTest('reportType', 'in', generator)
        for record in geometryData:
            self.assertIn(record.getString('rptType'), collection)

    def testGetDataWithNotInList(self):
        collection = ('2022', '2032')
        geometryData = self._runConstraintTest('reportType', 'not in', collection)
        for record in geometryData:
            self.assertNotIn(record.getString('rptType'), collection)

    def testGetDataWithInvalidConstraintTypeThrowsException(self):
        with self.assertRaises(ValueError):
            self._runConstraintTest('reportType', 'junk', '2022')

    def testGetDataWithInvalidConstraintValueThrowsException(self):
        with self.assertRaises(TypeError):
            self._runConstraintTest('reportType', '=', {})

    def testGetDataWithEmptyInConstraintThrowsException(self):
        with self.assertRaises(ValueError):
            self._runConstraintTest('rptType', 'in', [])

    def testGetDataWithNestedInConstraintThrowsException(self):
        collection = ('2022', '2032', ())
        with self.assertRaises(TypeError):
            self._runConstraintTest('rptType', 'in', collection)
|
def html_get_ab(soup):
    """Extract an article abstract from a parsed HTML page (BeautifulSoup).

    Tries several strategies in order and returns the first hit:
    1. an 'Abstract' <h2>/<h3> heading (whitespace-normalized parent text),
    2. <div class="abstract-group">,
    3. <div id="abstract">,
    4. <meta name="citation_abstract">,
    5. <meta name="Description">.
    Returns '' when no abstract can be located.
    """
    # Strategy 1: locate an explicit 'Abstract' heading.
    for tag in soup.find_all(['h2', 'h3']):
        if tag.text.strip().lower() == 'abstract':
            text = tag.parent.get_text(separator=u' ')
            # Collapse all whitespace runs to single spaces.
            return ' '.join(text.split())
    # Strategies 2-5.  BUG FIX: the original chained these with `if ab == ''`,
    # but soup.find() returns None on a miss, so after the first miss every
    # later fallback was silently skipped and the function returned None.
    node = soup.find('div', {'class': 'abstract-group'})
    if node:
        return node.get_text(separator=u' ').strip()
    node = soup.find('div', {'id': 'abstract'})
    if node:
        return node.get_text(separator=u' ')
    node = soup.find('meta', {'name': 'citation_abstract'})
    if node:
        return node['content'].strip()
    node = soup.find('meta', {'name': 'Description'})
    if node:
        return node['content'].strip()
    # Nothing matched: return the documented default instead of None.
    return ''
# Demonstration: split a sentence into words and print them with indices.
word = 'I love writing code, I used to work with javascript but now I am loving python more! Do you code in python'
word_array = word.split(' ')  # split on single spaces, as in the original tokenisation
word_count = len(word_array)  # len() is the idiomatic way to count items (not __len__())
# enumerate() avoids the manual range(len(...)) index loop.
for index, token in enumerate(word_array):
    print(f'{index} => {token}')
# BUG FIX: the message previously said "characters" while counting words.
print(f'There are {word_count} words in the defined string.')
from test_plus.test import TestCase
from django.test import override_settings
from containers.serializers import ContainerSerializer
class TestContainerSerializer(TestCase):
    """Validation tests for ContainerSerializer under both network modes.

    In 'host' mode host_port is required; in 'docker-shared' mode it is not.
    title, repository and tag are required in both modes.
    """

    @override_settings(KIOSC_NETWORK_MODE="host")
    def test_valid_mode_host(self):
        data = {
            "title": "some title",
            "repository": "some repos",
            "tag": "some tag",
            "host_port": 8080,
        }
        # Serialized output echoes the input plus every optional field as None.
        expected = {
            **data,
            "date_last_status_update": None,
            "image_id": None,
            "container_ip": None,
            "container_id": None,
            "container_path": None,
            "heartbeat_url": None,
            "environment": None,
            "environment_secret_keys": None,
            "command": None,
            "containertemplatesite": None,
            "containertemplateproject": None,
            "description": None,
        }
        serializer = ContainerSerializer(data=data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.data, expected)

    @override_settings(KIOSC_NETWORK_MODE="host")
    def test_invalid_mode_host_missing_host_port(self):
        # host mode requires host_port.
        data = {
            "title": "some title",
            "repository": "some repos",
            "tag": "some tag",
        }
        serializer = ContainerSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertTrue("host_port" in serializer.errors)
        self.assertEqual(serializer.errors["host_port"][0].code, "required")

    @override_settings(KIOSC_NETWORK_MODE="host")
    def test_invalid_mode_host_missing_title(self):
        data = {
            "repository": "some repos",
            "tag": "some tag",
            "host_port": 8080,
        }
        serializer = ContainerSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertTrue("title" in serializer.errors)
        self.assertEqual(serializer.errors["title"][0].code, "required")

    @override_settings(KIOSC_NETWORK_MODE="host")
    def test_invalid_mode_host_missing_repository(self):
        data = {
            "title": "some title",
            "tag": "some tag",
            "host_port": 8080,
        }
        serializer = ContainerSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertTrue("repository" in serializer.errors)
        self.assertEqual(serializer.errors["repository"][0].code, "required")

    @override_settings(KIOSC_NETWORK_MODE="host")
    def test_invalid_mode_host_missing_tag(self):
        data = {
            "title": "some title",
            "repository": "some repos",
            "host_port": 8080,
        }
        serializer = ContainerSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertTrue("tag" in serializer.errors)
        self.assertEqual(serializer.errors["tag"][0].code, "required")

    @override_settings(KIOSC_NETWORK_MODE="docker-shared")
    def test_valid_mode_shared(self):
        # In docker-shared mode host_port is optional and serialized as None.
        data = {
            "title": "some title",
            "repository": "some repos",
            "tag": "some tag",
        }
        expected = {
            **data,
            "date_last_status_update": None,
            "image_id": None,
            "container_ip": None,
            "container_id": None,
            "container_path": None,
            "heartbeat_url": None,
            "host_port": None,
            "environment": None,
            "environment_secret_keys": None,
            "command": None,
            "containertemplatesite": None,
            "containertemplateproject": None,
            "description": None,
        }
        serializer = ContainerSerializer(data=data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.data, expected)

    @override_settings(KIOSC_NETWORK_MODE="docker-shared")
    def test_invalid_mode_shared_missing_title(self):
        data = {
            "repository": "some repos",
            "tag": "some tag",
        }
        serializer = ContainerSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertTrue("title" in serializer.errors)
        self.assertEqual(serializer.errors["title"][0].code, "required")

    @override_settings(KIOSC_NETWORK_MODE="docker-shared")
    def test_invalid_mode_shared_missing_repository(self):
        data = {
            "title": "some title",
            "tag": "some tag",
        }
        serializer = ContainerSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertTrue("repository" in serializer.errors)
        self.assertEqual(serializer.errors["repository"][0].code, "required")

    @override_settings(KIOSC_NETWORK_MODE="docker-shared")
    def test_invalid_mode_shared_missing_tag(self):
        data = {
            "title": "some title",
            "repository": "some repos",
        }
        serializer = ContainerSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertTrue("tag" in serializer.errors)
        self.assertEqual(serializer.errors["tag"][0].code, "required")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.